"
- echo "e.g: builder golang:1.11.2 github.com/helm/chartmuseum v0.8.1 cmd/chartmuseum chartm"
+ echo "e.g: builder golang:1.11.2 github.com/helm/chartmuseum v0.9.0 cmd/chartmuseum chartm"
exit 1
}
@@ -13,7 +13,7 @@ if [ $# != 5 ]; then
fi
GOLANG_IMAGE="$1"
-CODE_PATH="$2"
+GIT_PATH="$2"
CODE_VERSION="$3"
MAIN_GO_PATH="$4"
BIN_NAME="$5"
@@ -27,7 +27,7 @@ mkdir -p binary
rm -rf binary/$BIN_NAME || true
cp compile.sh binary/
-docker run -it -v $cur/binary:/go/bin --name golang_code_builder $GOLANG_IMAGE /bin/bash /go/bin/compile.sh $CODE_PATH $CODE_VERSION $MAIN_GO_PATH $BIN_NAME
+docker run -it --rm -v $cur/binary:/go/bin --name golang_code_builder $GOLANG_IMAGE /bin/bash /go/bin/compile.sh $GIT_PATH $CODE_VERSION $MAIN_GO_PATH $BIN_NAME
#Clear
docker rm -f golang_code_builder
diff --git a/make/photon/chartserver/compile.sh b/make/photon/chartserver/compile.sh
index dca0d6c1d..4634c6d15 100644
--- a/make/photon/chartserver/compile.sh
+++ b/make/photon/chartserver/compile.sh
@@ -11,24 +11,21 @@ if [ $# != 4 ]; then
usage
fi
-CODE_PATH="$1"
+GIT_PATH="$1"
VERSION="$2"
MAIN_GO_PATH="$3"
BIN_NAME="$4"
-#Get the source code of chartmusem
-go get $CODE_PATH
-
+#Get the source code
+git clone $GIT_PATH src_code
+ls
+SRC_PATH=$(pwd)/src_code
set -e
#Checkout the released tag branch
-cd /go/src/$CODE_PATH
-git checkout tags/$VERSION -b $VERSION
-
-#Install the go dep tool to restore the package dependencies
-curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
-dep ensure
+cd $SRC_PATH
+git checkout tags/$VERSION -b $VERSION
#Compile
-cd /go/src/$CODE_PATH/$MAIN_GO_PATH && go build -a -o $BIN_NAME
+cd $SRC_PATH/$MAIN_GO_PATH && go build -a -o $BIN_NAME
mv $BIN_NAME /go/bin/
diff --git a/make/photon/core/Dockerfile b/make/photon/core/Dockerfile
index 39b7cf574..7eaa4191c 100644
--- a/make/photon/core/Dockerfile
+++ b/make/photon/core/Dockerfile
@@ -6,11 +6,11 @@ RUN tdnf install sudo -y >> /dev/null\
&& mkdir /harbor/
HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080/api/ping || exit 1
-COPY ./make/photon/core/harbor_core ./make/photon/core/start.sh ./UIVERSION /harbor/
+COPY ./make/photon/core/harbor_core ./UIVERSION /harbor/
COPY ./src/core/views /harbor/views
COPY ./make/migrations /harbor/migrations
-RUN chmod u+x /harbor/start.sh /harbor/harbor_core
+RUN chmod u+x /harbor/harbor_core
WORKDIR /harbor/
-
-ENTRYPOINT ["/harbor/start.sh"]
+USER harbor
+ENTRYPOINT ["/harbor/harbor_core"]
diff --git a/make/photon/core/start.sh b/make/photon/core/start.sh
deleted file mode 100644
index 20267e671..000000000
--- a/make/photon/core/start.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-sudo -E -u \#10000 "/harbor/harbor_core"
-
diff --git a/make/photon/db/Dockerfile b/make/photon/db/Dockerfile
index 5672b2f25..e9d765393 100644
--- a/make/photon/db/Dockerfile
+++ b/make/photon/db/Dockerfile
@@ -18,15 +18,16 @@ RUN tdnf erase -y toybox && tdnf install -y util-linux net-tools
VOLUME /var/lib/postgresql/data
-ADD ./make/photon/db/docker-entrypoint.sh /entrypoint.sh
-ADD ./make/photon/db/docker-healthcheck.sh /docker-healthcheck.sh
-RUN chmod u+x /entrypoint.sh /docker-healthcheck.sh
-ENTRYPOINT ["/entrypoint.sh"]
-HEALTHCHECK CMD ["/docker-healthcheck.sh"]
-
+COPY ./make/photon/db/docker-entrypoint.sh /docker-entrypoint.sh
+COPY ./make/photon/db/docker-healthcheck.sh /docker-healthcheck.sh
COPY ./make/photon/db/initial-notaryserver.sql /docker-entrypoint-initdb.d/
COPY ./make/photon/db/initial-notarysigner.sql /docker-entrypoint-initdb.d/
COPY ./make/photon/db/initial-registry.sql /docker-entrypoint-initdb.d/
+RUN chown -R postgres:postgres /docker-entrypoint.sh /docker-healthcheck.sh /docker-entrypoint-initdb.d \
+ && chmod u+x /docker-entrypoint.sh /docker-healthcheck.sh
+
+ENTRYPOINT ["/docker-entrypoint.sh"]
+HEALTHCHECK CMD ["/docker-healthcheck.sh"]
EXPOSE 5432
-CMD ["postgres"]
+USER postgres
diff --git a/make/photon/db/docker-entrypoint.sh b/make/photon/db/docker-entrypoint.sh
index c8f667282..abfabe4ec 100644
--- a/make/photon/db/docker-entrypoint.sh
+++ b/make/photon/db/docker-entrypoint.sh
@@ -23,95 +23,88 @@ file_env() {
unset "$fileVar"
}
-if [ "${1:0:1}" = '-' ]; then
- set -- postgres "$@"
-fi
-
-if [ "$1" = 'postgres' ]; then
- chown -R postgres:postgres $PGDATA
- # look specifically for PG_VERSION, as it is expected in the DB dir
- if [ ! -s "$PGDATA/PG_VERSION" ]; then
- file_env 'POSTGRES_INITDB_ARGS'
- if [ "$POSTGRES_INITDB_XLOGDIR" ]; then
- export POSTGRES_INITDB_ARGS="$POSTGRES_INITDB_ARGS --xlogdir $POSTGRES_INITDB_XLOGDIR"
- fi
- su - $1 -c "initdb -D $PGDATA -U postgres -E UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8 $POSTGRES_INITDB_ARGS"
- # check password first so we can output the warning before postgres
- # messes it up
- file_env 'POSTGRES_PASSWORD'
- if [ "$POSTGRES_PASSWORD" ]; then
- pass="PASSWORD '$POSTGRES_PASSWORD'"
- authMethod=md5
- else
- # The - option suppresses leading tabs but *not* spaces. :)
- cat >&2 <<-EOF
- ****************************************************
- WARNING: No password has been set for the database.
- This will allow anyone with access to the
- Postgres port to access your database. In
- Docker's default configuration, this is
- effectively any other container on the same
- system.
- Use "-e POSTGRES_PASSWORD=password" to set
- it in "docker run".
- ****************************************************
+# look specifically for PG_VERSION, as it is expected in the DB dir
+if [ ! -s "$PGDATA/PG_VERSION" ]; then
+ file_env 'POSTGRES_INITDB_ARGS'
+ if [ "$POSTGRES_INITDB_XLOGDIR" ]; then
+ export POSTGRES_INITDB_ARGS="$POSTGRES_INITDB_ARGS --xlogdir $POSTGRES_INITDB_XLOGDIR"
+ fi
+ initdb -D $PGDATA -U postgres -E UTF-8 --lc-collate=en_US.UTF-8 --lc-ctype=en_US.UTF-8 $POSTGRES_INITDB_ARGS
+ # check password first so we can output the warning before postgres
+ # messes it up
+ file_env 'POSTGRES_PASSWORD'
+ if [ "$POSTGRES_PASSWORD" ]; then
+ pass="PASSWORD '$POSTGRES_PASSWORD'"
+ authMethod=md5
+ else
+ # The - option suppresses leading tabs but *not* spaces. :)
+ cat >&2 <<-EOF
+ ****************************************************
+ WARNING: No password has been set for the database.
+ This will allow anyone with access to the
+ Postgres port to access your database. In
+ Docker's default configuration, this is
+ effectively any other container on the same
+ system.
+ Use "-e POSTGRES_PASSWORD=password" to set
+ it in "docker run".
+ ****************************************************
EOF
- pass=
- authMethod=trust
- fi
+ pass=
+ authMethod=trust
+ fi
- {
- echo
- echo "host all all all $authMethod"
- } >> "$PGDATA/pg_hba.conf"
- su postgres
- echo `whoami`
- # internal start of server in order to allow set-up using psql-client
- # does not listen on external TCP/IP and waits until start finishes
- su - $1 -c "pg_ctl -D \"$PGDATA\" -o \"-c listen_addresses='localhost'\" -w start"
+ {
+ echo
+ echo "host all all all $authMethod"
+ } >> "$PGDATA/pg_hba.conf"
+ echo `whoami`
+ # internal start of server in order to allow set-up using psql-client
+ # does not listen on external TCP/IP and waits until start finishes
+ pg_ctl -D "$PGDATA" -o "-c listen_addresses=''" -w start
- file_env 'POSTGRES_USER' 'postgres'
- file_env 'POSTGRES_DB' "$POSTGRES_USER"
+ file_env 'POSTGRES_USER' 'postgres'
+ file_env 'POSTGRES_DB' "$POSTGRES_USER"
- psql=( psql -v ON_ERROR_STOP=1 )
+ psql=( psql -v ON_ERROR_STOP=1 )
- if [ "$POSTGRES_DB" != 'postgres' ]; then
- "${psql[@]}" --username postgres <<-EOSQL
- CREATE DATABASE "$POSTGRES_DB" ;
-EOSQL
- echo
- fi
-
- if [ "$POSTGRES_USER" = 'postgres' ]; then
- op='ALTER'
- else
- op='CREATE'
- fi
+ if [ "$POSTGRES_DB" != 'postgres' ]; then
"${psql[@]}" --username postgres <<-EOSQL
- $op USER "$POSTGRES_USER" WITH SUPERUSER $pass ;
+ CREATE DATABASE "$POSTGRES_DB" ;
EOSQL
echo
-
- psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" )
-
- echo
- for f in /docker-entrypoint-initdb.d/*; do
- case "$f" in
- *.sh) echo "$0: running $f"; . "$f" ;;
- *.sql) echo "$0: running $f"; "${psql[@]}" -f "$f"; echo ;;
- *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;;
- *) echo "$0: ignoring $f" ;;
- esac
- echo
- done
-
- PGUSER="${PGUSER:-postgres}" \
- su - $1 -c "pg_ctl -D \"$PGDATA\" -m fast -w stop"
-
- echo
- echo 'PostgreSQL init process complete; ready for start up.'
- echo
fi
+
+ if [ "$POSTGRES_USER" = 'postgres' ]; then
+ op='ALTER'
+ else
+ op='CREATE'
+ fi
+ "${psql[@]}" --username postgres <<-EOSQL
+ $op USER "$POSTGRES_USER" WITH SUPERUSER $pass ;
+EOSQL
+ echo
+
+ psql+=( --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" )
+
+ echo
+ for f in /docker-entrypoint-initdb.d/*; do
+ case "$f" in
+ *.sh) echo "$0: running $f"; . "$f" ;;
+ *.sql) echo "$0: running $f"; "${psql[@]}" -f "$f"; echo ;;
+ *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${psql[@]}"; echo ;;
+ *) echo "$0: ignoring $f" ;;
+ esac
+ echo
+ done
+
+ PGUSER="${PGUSER:-postgres}" \
+ pg_ctl -D "$PGDATA" -m fast -w stop
+
+ echo
+ echo 'PostgreSQL init process complete; ready for start up.'
+ echo
fi
-exec su - $1 -c "$@ -D $PGDATA"
+
+postgres -D $PGDATA
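The entrypoint above drops the su/sudo dance entirely: initdb, pg_ctl, and the final postgres all run directly as the postgres user set in the Dockerfile. The file_env helper it calls follows the stock postgres-image convention of accepting either VAR or VAR_FILE (a path to a file holding the value, typically a mounted secret), but not both. A minimal Python sketch of that convention, for illustration only (the real helper is the shell function whose tail appears at the top of this hunk):

    import os

    def file_env(var, default=''):
        # Accept VAR directly, or VAR_FILE naming a file that holds the
        # value (e.g. a mounted secret). Setting both is rejected,
        # mirroring the shell helper in docker-entrypoint.sh.
        direct = os.environ.get(var)
        file_path = os.environ.get(var + '_FILE')
        if direct and file_path:
            raise RuntimeError('%s and %s_FILE are mutually exclusive' % (var, var))
        if file_path:
            with open(file_path) as f:
                return f.read().strip()
        return direct if direct is not None else default

    # file_env('POSTGRES_PASSWORD') -> password from the environment, or
    # from the file named by POSTGRES_PASSWORD_FILE, else ''.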
diff --git a/make/photon/jobservice/Dockerfile b/make/photon/jobservice/Dockerfile
index 3131550d2..eddb8e65b 100644
--- a/make/photon/jobservice/Dockerfile
+++ b/make/photon/jobservice/Dockerfile
@@ -1,12 +1,19 @@
FROM photon:2.0
-RUN mkdir /harbor/ \
- && tdnf install sudo -y >> /dev/null\
+RUN tdnf install sudo -y >> /dev/null\
&& tdnf clean all \
- && groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor
+ && groupadd -r -g 10000 harbor && useradd --no-log-init -r -g 10000 -u 10000 harbor
-COPY ./make/photon/jobservice/start.sh ./make/photon/jobservice/harbor_jobservice /harbor/
+COPY ./make/photon/jobservice/harbor_jobservice /harbor/
+
+RUN chmod u+x /harbor/harbor_jobservice
-RUN chmod u+x /harbor/harbor_jobservice /harbor/start.sh
WORKDIR /harbor/
-ENTRYPOINT ["/harbor/start.sh"]
+
+USER harbor
+
+VOLUME ["/var/log/jobs/"]
+
+HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080/api/v1/stats || exit 1
+
+ENTRYPOINT ["/harbor/harbor_jobservice", "-c", "/etc/jobservice/config.yml"]
diff --git a/make/photon/jobservice/start.sh b/make/photon/jobservice/start.sh
deleted file mode 100644
index 517971b16..000000000
--- a/make/photon/jobservice/start.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/sh
-if [ -d /var/log/jobs ]; then
- chown -R 10000:10000 /var/log/jobs/
-fi
-sudo -E -u \#10000 "/harbor/harbor_jobservice" "-c" "/etc/jobservice/config.yml"
-
diff --git a/make/photon/log/rsyslog_docker.conf b/make/photon/log/rsyslog_docker.conf
index a21cc5078..5264d85db 100644
--- a/make/photon/log/rsyslog_docker.conf
+++ b/make/photon/log/rsyslog_docker.conf
@@ -1,8 +1,5 @@
# Rsyslog configuration file for docker.
-
-template(name="DynaFile" type="string"
- string="/var/log/docker/%syslogtag:R,ERE,0,DFLT:[^[]*--end:secpath-replace%.log"
-)
-#if $programname == "docker" then ?DynaFile
-if $programname != "rsyslogd" then -?DynaFile
-
+template(name="DynaFile" type="string" string="/var/log/docker/%programname%.log")
+if $programname != "rsyslogd" then {
+ action(type="omfile" dynaFile="DynaFile")
+}
diff --git a/make/photon/nginx/Dockerfile b/make/photon/nginx/Dockerfile
index 3d244ee58..902107205 100644
--- a/make/photon/nginx/Dockerfile
+++ b/make/photon/nginx/Dockerfile
@@ -1,14 +1,19 @@
FROM photon:2.0
-RUN tdnf install -y nginx >> /dev/null\
+RUN tdnf install sudo nginx -y >> /dev/null\
+ && tdnf clean all \
+ && groupadd -r -g 10000 nginx && useradd --no-log-init -r -g 10000 -u 10000 nginx \
&& ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log \
- && tdnf clean all
+ && ln -sf /dev/stderr /var/log/nginx/error.log
-EXPOSE 80
VOLUME /var/cache/nginx /var/log/nginx /run
+
+EXPOSE 8080
+
STOPSIGNAL SIGQUIT
-HEALTHCHECK CMD curl --fail -s http://127.0.0.1 || exit 1
+HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080 || exit 1
+
+USER nginx
CMD ["nginx", "-g", "daemon off;"]
diff --git a/make/photon/notary/server-start.sh b/make/photon/notary/server-start.sh
deleted file mode 100644
index 0e38be19e..000000000
--- a/make/photon/notary/server-start.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-sudo -E -u \#10000 sh -c "migrate-patch -database='${DB_URL}' && /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt"
diff --git a/make/photon/notary/server.Dockerfile b/make/photon/notary/server.Dockerfile
index 5d60d17f4..4b0172439 100644
--- a/make/photon/notary/server.Dockerfile
+++ b/make/photon/notary/server.Dockerfile
@@ -4,12 +4,12 @@ RUN tdnf install -y shadow sudo \
&& tdnf clean all \
&& groupadd -r -g 10000 notary \
&& useradd --no-log-init -r -g 10000 -u 10000 notary
-
COPY ./make/photon/notary/migrate-patch /bin/migrate-patch
COPY ./make/photon/notary/binary/notary-server /bin/notary-server
COPY ./make/photon/notary/binary/migrate /bin/migrate
COPY ./make/photon/notary/binary/migrations/ /migrations/
-COPY ./make/photon/notary/server-start.sh /bin/server-start.sh
-RUN chmod +x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/migrate-patch /bin/server-start.sh
+
+RUN chmod +x /bin/notary-server /migrations/migrate.sh /bin/migrate /bin/migrate-patch
ENV SERVICE_NAME=notary_server
-ENTRYPOINT [ "/bin/server-start.sh" ]
+USER notary
+CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-server -config=/etc/notary/server-config.postgres.json -logf=logfmt
\ No newline at end of file
diff --git a/make/photon/notary/signer-start.sh b/make/photon/notary/signer-start.sh
deleted file mode 100644
index 05fc15118..000000000
--- a/make/photon/notary/signer-start.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-sudo -E -u \#10000 sh -c "migrate-patch -database='${DB_URL}' && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt"
diff --git a/make/photon/notary/signer.Dockerfile b/make/photon/notary/signer.Dockerfile
index b27bd3cd5..95e98bfd8 100644
--- a/make/photon/notary/signer.Dockerfile
+++ b/make/photon/notary/signer.Dockerfile
@@ -8,8 +8,8 @@ COPY ./make/photon/notary/migrate-patch /bin/migrate-patch
COPY ./make/photon/notary/binary/notary-signer /bin/notary-signer
COPY ./make/photon/notary/binary/migrate /bin/migrate
COPY ./make/photon/notary/binary/migrations/ /migrations/
-COPY ./make/photon/notary/signer-start.sh /bin/signer-start.sh
-RUN chmod +x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/migrate-patch /bin/signer-start.sh
+RUN chmod +x /bin/notary-signer /migrations/migrate.sh /bin/migrate /bin/migrate-patch
ENV SERVICE_NAME=notary_signer
-ENTRYPOINT [ "/bin/signer-start.sh" ]
+USER notary
+CMD migrate-patch -database=${DB_URL} && /migrations/migrate.sh && /bin/notary-signer -config=/etc/notary/signer-config.postgres.json -logf=logfmt
\ No newline at end of file
diff --git a/make/photon/portal/Dockerfile b/make/photon/portal/Dockerfile
index 6201519da..9f71410f7 100644
--- a/make/photon/portal/Dockerfile
+++ b/make/photon/portal/Dockerfile
@@ -1,39 +1,44 @@
FROM node:10.15.0 as nodeportal
-RUN mkdir -p /portal_src
-RUN mkdir -p /build_dir
-
-COPY make/photon/portal/entrypoint.sh /
COPY src/portal /portal_src
COPY ./docs/swagger.yaml /portal_src
+COPY ./LICENSE /portal_src
-WORKDIR /portal_src
+WORKDIR /build_dir
-RUN npm install && \
- chmod u+x /entrypoint.sh
-RUN /entrypoint.sh
-VOLUME ["/portal_src"]
+RUN cp -r /portal_src/* /build_dir \
+ && ls -la \
+ && apt-get update \
+ && apt-get install -y --no-install-recommends python-yaml=3.12-1 \
+ && python -c 'import sys, yaml, json; y=yaml.load(sys.stdin.read()); print json.dumps(y)' < swagger.yaml > swagger.json \
+ && npm install \
+ && npm run build_lib \
+ && npm run link_lib \
+ && npm run release
FROM photon:2.0
-RUN tdnf install -y nginx >> /dev/null \
- && ln -sf /dev/stdout /var/log/nginx/access.log \
- && ln -sf /dev/stderr /var/log/nginx/error.log \
- && tdnf clean all
-
-EXPOSE 80
-VOLUME /var/cache/nginx /var/log/nginx /run
-
-
COPY --from=nodeportal /build_dir/dist /usr/share/nginx/html
COPY --from=nodeportal /build_dir/swagger.yaml /usr/share/nginx/html
COPY --from=nodeportal /build_dir/swagger.json /usr/share/nginx/html
+COPY --from=nodeportal /build_dir/LICENSE /usr/share/nginx/html
COPY make/photon/portal/nginx.conf /etc/nginx/nginx.conf
+RUN tdnf install -y nginx sudo >> /dev/null \
+ && ln -sf /dev/stdout /var/log/nginx/access.log \
+ && ln -sf /dev/stderr /var/log/nginx/error.log \
+ && groupadd -r -g 10000 nginx && useradd --no-log-init -r -g 10000 -u 10000 nginx \
+ && chown -R nginx:nginx /etc/nginx \
+ && tdnf clean all
+
+EXPOSE 8080
+VOLUME /var/cache/nginx /var/log/nginx /run
+
STOPSIGNAL SIGQUIT
-HEALTHCHECK CMD curl --fail -s http://127.0.0.1 || exit 1
-
+HEALTHCHECK CMD curl --fail -s http://127.0.0.1:8080 || exit 1
+USER nginx
CMD ["nginx", "-g", "daemon off;"]
+
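The swagger conversion now happens inline in the build stage, replacing the Ruby step from the deleted entrypoint.sh. The one-liner targets the Python 2 interpreter shipped in the node:10.15.0 base image (print as a statement). A rough Python 3 equivalent of the same swagger.yaml-to-swagger.json step, shown only as a sketch (the filename yaml2json.py is hypothetical):

    import json
    import sys

    import yaml  # PyYAML

    # Usage: python3 yaml2json.py < swagger.yaml > swagger.json
    # safe_load avoids constructing arbitrary Python objects from YAML tags.
    print(json.dumps(yaml.safe_load(sys.stdin.read())))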
diff --git a/make/photon/portal/entrypoint.sh b/make/photon/portal/entrypoint.sh
deleted file mode 100644
index c00b5e0dc..000000000
--- a/make/photon/portal/entrypoint.sh
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/bin/bash
-set -e
-
-cd /build_dir
-cp -r /portal_src/* .
-ls -la
-
-# Update
-apt-get update
-apt-get install -y ruby
-ruby -ryaml -rjson -e 'puts JSON.pretty_generate(YAML.load(ARGF))' swagger.yaml>swagger.json
-
-cat ./package.json
-npm install
-
-## Build harbor-portal and link it
-npm run build_lib
-npm run link_lib
-
-## Build production
-npm run release
diff --git a/make/photon/portal/nginx.conf b/make/photon/portal/nginx.conf
index b9b631df7..96da5243f 100644
--- a/make/photon/portal/nginx.conf
+++ b/make/photon/portal/nginx.conf
@@ -1,13 +1,21 @@
-worker_processes 1;
+worker_processes auto;
+pid /tmp/nginx.pid;
events {
worker_connections 1024;
}
http {
+
+ client_body_temp_path /tmp/client_body_temp;
+ proxy_temp_path /tmp/proxy_temp;
+ fastcgi_temp_path /tmp/fastcgi_temp;
+ uwsgi_temp_path /tmp/uwsgi_temp;
+ scgi_temp_path /tmp/scgi_temp;
+
server {
- listen 80;
+ listen 8080;
server_name localhost;
root /usr/share/nginx/html;
diff --git a/make/photon/prepare/g.py b/make/photon/prepare/g.py
index bb766f07b..229f61a54 100644
--- a/make/photon/prepare/g.py
+++ b/make/photon/prepare/g.py
@@ -5,11 +5,19 @@ from pathlib import Path
DEFAULT_UID = 10000
DEFAULT_GID = 10000
+PG_UID = 999
+PG_GID = 999
+
+REDIS_UID = 999
+REDIS_GID = 999
+
## Global variable
+host_root_dir = '/hostfs'
+
base_dir = '/harbor_make'
templates_dir = "/usr/src/app/templates"
config_dir = '/config'
-
+data_dir = '/data'
secret_dir = '/secret'
secret_key_dir='/secret/keys'
diff --git a/make/photon/prepare/main.py b/make/photon/prepare/main.py
index 604d2735c..e617baebc 100644
--- a/make/photon/prepare/main.py
+++ b/make/photon/prepare/main.py
@@ -16,6 +16,7 @@ from utils.clair import prepare_clair
from utils.chart import prepare_chartmuseum
from utils.docker_compose import prepare_docker_compose
from utils.nginx import prepare_nginx, nginx_confd_dir
+from utils.redis import prepare_redis
from g import (config_dir, input_config_path, private_key_pem_path, root_crt_path, secret_key_dir,
old_private_key_pem_path, old_crt_path)
@@ -38,6 +39,7 @@ def main(conf, with_notary, with_clair, with_chartmuseum):
prepare_registry_ctl(config_dict)
prepare_db(config_dict)
prepare_job_service(config_dict)
+ prepare_redis(config_dict)
get_secret_key(secret_key_dir)
diff --git a/make/photon/prepare/templates/clair/clair_env.jinja b/make/photon/prepare/templates/clair/clair_env.jinja
index 038f1a130..3825ca8fb 100644
--- a/make/photon/prepare/templates/clair/clair_env.jinja
+++ b/make/photon/prepare/templates/clair/clair_env.jinja
@@ -1,3 +1,3 @@
-http_proxy={{clair_http_proxy}}
-https_proxy={{clair_https_proxy}}
-no_proxy={{clair_no_proxy}}
+HTTP_PROXY={{clair_http_proxy}}
+HTTPS_PROXY={{clair_https_proxy}}
+NO_PROXY={{clair_no_proxy}}
diff --git a/make/photon/prepare/templates/clair/config.yaml.jinja b/make/photon/prepare/templates/clair/config.yaml.jinja
index 00062b917..210df726c 100644
--- a/make/photon/prepare/templates/clair/config.yaml.jinja
+++ b/make/photon/prepare/templates/clair/config.yaml.jinja
@@ -17,9 +17,3 @@ clair:
timeout: 300s
updater:
interval: {{clair_updaters_interval}}h
-
- notifier:
- attempts: 3
- renotifyinterval: 2h
- http:
- endpoint: http://core:8080/service/notifications/clair
diff --git a/make/photon/prepare/templates/core/env.jinja b/make/photon/prepare/templates/core/env.jinja
index 5e2ae21bb..d6413678e 100644
--- a/make/photon/prepare/templates/core/env.jinja
+++ b/make/photon/prepare/templates/core/env.jinja
@@ -15,6 +15,8 @@ POSTGRESQL_USERNAME={{harbor_db_username}}
POSTGRESQL_PASSWORD={{harbor_db_password}}
POSTGRESQL_DATABASE={{harbor_db_name}}
POSTGRESQL_SSLMODE={{harbor_db_sslmode}}
+POSTGRESQL_MAX_IDLE_CONNS={{harbor_db_max_idle_conns}}
+POSTGRESQL_MAX_OPEN_CONNS={{harbor_db_max_open_conns}}
REGISTRY_URL={{registry_url}}
TOKEN_SERVICE_URL={{token_service_url}}
HARBOR_ADMIN_PASSWORD={{harbor_admin_password}}
@@ -31,6 +33,7 @@ CLAIR_DB_USERNAME={{clair_db_username}}
CLAIR_DB={{clair_db_name}}
CLAIR_DB_SSLMODE={{clair_db_sslmode}}
CORE_URL={{core_url}}
+CORE_LOCAL_URL={{core_local_url}}
JOBSERVICE_URL={{jobservice_url}}
CLAIR_URL={{clair_url}}
NOTARY_URL={{notary_url}}
@@ -40,3 +43,7 @@ RELOAD_KEY={{reload_key}}
CHART_REPOSITORY_URL={{chart_repository_url}}
REGISTRY_CONTROLLER_URL={{registry_controller_url}}
WITH_CHARTMUSEUM={{with_chartmuseum}}
+
+HTTP_PROXY={{core_http_proxy}}
+HTTPS_PROXY={{core_https_proxy}}
+NO_PROXY={{core_no_proxy}}
diff --git a/make/photon/prepare/templates/docker_compose/docker-compose.yml.jinja b/make/photon/prepare/templates/docker_compose/docker-compose.yml.jinja
index 95b63099e..cb6785766 100644
--- a/make/photon/prepare/templates/docker_compose/docker-compose.yml.jinja
+++ b/make/photon/prepare/templates/docker_compose/docker-compose.yml.jinja
@@ -14,7 +14,8 @@ services:
- SETUID
volumes:
- {{log_location}}/:/var/log/docker/:z
- - ./common/config/log/:/etc/logrotate.d/:z
+ - ./common/config/log/logrotate.conf:/etc/logrotate.d/logrotate.conf:z
+ - ./common/config/log/rsyslog_docker.conf:/etc/rsyslog.d/rsyslog_docker.conf:z
ports:
- 127.0.0.1:1514:10514
networks:
@@ -275,12 +276,7 @@ services:
volumes:
- ./common/config/nginx:/etc/nginx:z
{% if protocol == 'https' %}
- - type: bind
- source: {{cert_key_path}}
- target: /etc/cert/server.key
- - type: bind
- source: {{cert_path}}
- target: /etc/cert/server.crt
+ - {{data_volume}}/secret/cert:/etc/cert:z
{% endif %}
networks:
- harbor
@@ -289,9 +285,9 @@ services:
{% endif %}
dns_search: .
ports:
- - {{http_port}}:80
+ - {{http_port}}:8080
{% if protocol == 'https' %}
- - {{https_port}}:443
+ - {{https_port}}:8443
{% endif %}
{% if with_notary %}
- 4443:4443
@@ -419,7 +415,7 @@ services:
{% if gcs_keyfile %}
- type: bind
source: {{gcs_keyfile}}
- target: /etc/registry/gcs.key
+ target: /etc/chartserver/gcs.key
{% endif %}
{%if registry_custom_ca_bundle_path %}
- type: bind
diff --git a/make/photon/prepare/templates/jobservice/env.jinja b/make/photon/prepare/templates/jobservice/env.jinja
index 2f4923248..c38534f02 100644
--- a/make/photon/prepare/templates/jobservice/env.jinja
+++ b/make/photon/prepare/templates/jobservice/env.jinja
@@ -1,3 +1,8 @@
CORE_SECRET={{core_secret}}
JOBSERVICE_SECRET={{jobservice_secret}}
CORE_URL={{core_url}}
+JOBSERVICE_WEBHOOK_JOB_MAX_RETRY={{notification_webhook_job_max_retry}}
+
+HTTP_PROXY={{jobservice_http_proxy}}
+HTTPS_PROXY={{jobservice_https_proxy}}
+NO_PROXY={{jobservice_no_proxy}}
diff --git a/make/photon/prepare/templates/log/rsyslog_docker.conf.jinja b/make/photon/prepare/templates/log/rsyslog_docker.conf.jinja
new file mode 100644
index 000000000..9071237fd
--- /dev/null
+++ b/make/photon/prepare/templates/log/rsyslog_docker.conf.jinja
@@ -0,0 +1,11 @@
+# Rsyslog configuration file for docker.
+
+template(name="DynaFile" type="string" string="/var/log/docker/%programname%.log")
+
+if $programname != "rsyslogd" then {
+{%if log_external %}
+ action(type="omfwd" Target="{{log_ep_host}}" Port="{{log_ep_port}}" Protocol="{{log_ep_protocol}}" Template="RSYSLOG_SyslogProtocol23Format")
+{% else %}
+ action(type="omfile" dynaFile="DynaFile")
+{% endif %}
+}
\ No newline at end of file
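This template replaces the static rsyslog_docker.conf in the log image: with log_external set, container logs are forwarded to the configured syslog endpoint; otherwise they are written per program name under /var/log/docker. A quick sketch of both renderings using plain jinja2 (the project itself goes through its render_jinja wrapper; the endpoint values below are made up):

    from jinja2 import Template

    source = open('rsyslog_docker.conf.jinja').read()

    # Local mode: rsyslog writes /var/log/docker/<programname>.log files.
    print(Template(source).render(log_external=False))

    # External mode: rsyslog forwards via omfwd to a remote collector.
    print(Template(source).render(
        log_external=True,
        log_ep_host='10.0.0.2',      # hypothetical endpoint
        log_ep_port=514,
        log_ep_protocol='udp',
    ))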
diff --git a/make/photon/prepare/templates/nginx/nginx.http.conf.jinja b/make/photon/prepare/templates/nginx/nginx.http.conf.jinja
index 0f7f5107e..09e1f4346 100644
--- a/make/photon/prepare/templates/nginx/nginx.http.conf.jinja
+++ b/make/photon/prepare/templates/nginx/nginx.http.conf.jinja
@@ -1,4 +1,5 @@
worker_processes auto;
+pid /tmp/nginx.pid;
events {
worker_connections 1024;
@@ -7,6 +8,11 @@ events {
}
http {
+ client_body_temp_path /tmp/client_body_temp;
+ proxy_temp_path /tmp/proxy_temp;
+ fastcgi_temp_path /tmp/fastcgi_temp;
+ uwsgi_temp_path /tmp/uwsgi_temp;
+ scgi_temp_path /tmp/scgi_temp;
tcp_nodelay on;
# this is necessary for us to be able to disable request buffering in all cases
@@ -17,7 +23,7 @@ http {
}
upstream portal {
- server portal:80;
+ server portal:8080;
}
log_format timed_combined '$remote_addr - '
@@ -28,7 +34,7 @@ http {
access_log /dev/stdout timed_combined;
server {
- listen 80;
+ listen 8080;
server_tokens off;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
@@ -117,7 +123,7 @@ http {
proxy_request_buffering off;
}
- location /service/notifications {
+ location /service/notifications {
return 404;
}
}
diff --git a/make/photon/prepare/templates/nginx/nginx.https.conf.jinja b/make/photon/prepare/templates/nginx/nginx.https.conf.jinja
index 1ae2a9754..e4ac93078 100644
--- a/make/photon/prepare/templates/nginx/nginx.https.conf.jinja
+++ b/make/photon/prepare/templates/nginx/nginx.https.conf.jinja
@@ -1,4 +1,5 @@
worker_processes auto;
+pid /tmp/nginx.pid;
events {
worker_connections 1024;
@@ -7,6 +8,11 @@ events {
}
http {
+ client_body_temp_path /tmp/client_body_temp;
+ proxy_temp_path /tmp/proxy_temp;
+ fastcgi_temp_path /tmp/fastcgi_temp;
+ uwsgi_temp_path /tmp/uwsgi_temp;
+ scgi_temp_path /tmp/scgi_temp;
tcp_nodelay on;
include /etc/nginx/conf.d/*.upstream.conf;
@@ -18,7 +24,7 @@ http {
}
upstream portal {
- server portal:80;
+ server portal:8080;
}
log_format timed_combined '$remote_addr - '
@@ -31,7 +37,7 @@ http {
include /etc/nginx/conf.d/*.server.conf;
server {
- listen 443 ssl;
+ listen 8443 ssl;
# server_name harbordomain.com;
server_tokens off;
# SSL
@@ -136,13 +142,13 @@ http {
proxy_buffering off;
proxy_request_buffering off;
}
-
- location /service/notifications {
+
+ location /service/notifications {
return 404;
}
}
- server {
- listen 80;
+ server {
+ listen 8080;
#server_name harbordomain.com;
return 308 https://$host$request_uri;
}
diff --git a/make/photon/prepare/utils/clair.py b/make/photon/prepare/utils/clair.py
index 72db85038..8d8680249 100644
--- a/make/photon/prepare/utils/clair.py
+++ b/make/photon/prepare/utils/clair.py
@@ -2,12 +2,12 @@ import os, shutil
from g import templates_dir, config_dir, DEFAULT_UID, DEFAULT_GID
from .jinja import render_jinja
-from .misc import prepare_config_dir
+from .misc import prepare_dir
clair_template_dir = os.path.join(templates_dir, "clair")
def prepare_clair(config_dict):
- clair_config_dir = prepare_config_dir(config_dir, "clair")
+ clair_config_dir = prepare_dir(config_dir, "clair")
if os.path.exists(os.path.join(clair_config_dir, "postgresql-init.d")):
print("Copying offline data file for clair DB")
diff --git a/make/photon/prepare/utils/configs.py b/make/photon/prepare/utils/configs.py
index aaf2747db..df14a53de 100644
--- a/make/photon/prepare/utils/configs.py
+++ b/make/photon/prepare/utils/configs.py
@@ -13,6 +13,14 @@ def validate(conf, **kwargs):
if not conf.get("cert_key_path"):
raise Exception("Error: The protocol is https but attribute ssl_cert_key is not set")
+ # log endpoint validate
+ if ('log_ep_host' in conf) and not conf['log_ep_host']:
+ raise Exception('Error: must set log endpoint host to enable external log endpoint')
+ if ('log_ep_port' in conf) and not conf['log_ep_port']:
+ raise Exception('Error: must set log endpoint port to enable external log endpoint')
+ if ('log_ep_protocol' in conf) and (conf['log_ep_protocol'] not in ['udp', 'tcp']):
+ raise Exception("Protocol in external log endpoint must be one of 'udp' or 'tcp'")
+
# Storage validate
valid_storage_drivers = ["filesystem", "azure", "gcs", "s3", "swift", "oss"]
storage_provider_name = conf.get("storage_provider_name")
@@ -59,6 +67,7 @@ def parse_yaml_config(config_file_path):
'registry_url': "http://registry:5000",
'registry_controller_url': "http://registryctl:8080",
'core_url': "http://core:8080",
+ 'core_local_url': "http://127.0.0.1:8080",
'token_service_url': "http://core:8080/service/token",
'jobservice_url': 'http://jobservice:8080',
'clair_url': 'http://clair:6060',
@@ -103,6 +112,11 @@ def parse_yaml_config(config_file_path):
config_dict['harbor_db_username'] = 'postgres'
config_dict['harbor_db_password'] = db_configs.get("password") or ''
config_dict['harbor_db_sslmode'] = 'disable'
+
+ default_max_idle_conns = 2 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxIdleConns
+ default_max_open_conns = 0 # NOTE: https://golang.org/pkg/database/sql/#DB.SetMaxOpenConns
+ config_dict['harbor_db_max_idle_conns'] = db_configs.get("max_idle_conns") or default_max_idle_conns
+ config_dict['harbor_db_max_open_conns'] = db_configs.get("max_open_conns") or default_max_open_conns
# clari db
config_dict['clair_db_host'] = 'postgresql'
config_dict['clair_db_port'] = 5432
@@ -162,13 +176,18 @@ def parse_yaml_config(config_file_path):
if storage_config.get('redirect'):
config_dict['storage_redirect_disabled'] = storage_config['redirect']['disabled']
+ # Global proxy configs
+ proxy_config = configs.get('proxy') or {}
+ proxy_components = proxy_config.get('components') or []
+ for proxy_component in proxy_components:
+ config_dict[proxy_component + '_http_proxy'] = proxy_config.get('http_proxy') or ''
+ config_dict[proxy_component + '_https_proxy'] = proxy_config.get('https_proxy') or ''
+ config_dict[proxy_component + '_no_proxy'] = proxy_config.get('no_proxy') or '127.0.0.1,localhost,core,registry'
+
# Clair configs, optional
clair_configs = configs.get("clair") or {}
config_dict['clair_db'] = 'postgres'
config_dict['clair_updaters_interval'] = clair_configs.get("updaters_interval") or 12
- config_dict['clair_http_proxy'] = clair_configs.get('http_proxy') or ''
- config_dict['clair_https_proxy'] = clair_configs.get('https_proxy') or ''
- config_dict['clair_no_proxy'] = clair_configs.get('no_proxy') or '127.0.0.1,localhost,core,registry'
# Chart configs
chart_configs = configs.get("chart") or {}
@@ -179,18 +198,34 @@ def parse_yaml_config(config_file_path):
config_dict['max_job_workers'] = js_config["max_job_workers"]
config_dict['jobservice_secret'] = generate_random_string(16)
+ # notification config
+ notification_config = configs.get('notification') or {}
+ config_dict['notification_webhook_job_max_retry'] = notification_config["webhook_job_max_retry"]
# Log configs
allowed_levels = ['debug', 'info', 'warning', 'error', 'fatal']
log_configs = configs.get('log') or {}
- config_dict['log_location'] = log_configs["location"]
- config_dict['log_rotate_count'] = log_configs["rotate_count"]
- config_dict['log_rotate_size'] = log_configs["rotate_size"]
+
log_level = log_configs['level']
if log_level not in allowed_levels:
raise Exception('log level must be one of debug, info, warning, error, fatal')
config_dict['log_level'] = log_level.lower()
+ # parse local log related configs
+ local_logs = log_configs.get('local') or {}
+ if local_logs:
+ config_dict['log_location'] = local_logs.get('location') or '/var/log/harbor'
+ config_dict['log_rotate_count'] = local_logs.get('rotate_count') or 50
+ config_dict['log_rotate_size'] = local_logs.get('rotate_size') or '200M'
+
+ # parse external log endpoint related configs
+ if log_configs.get('external_endpoint'):
+ config_dict['log_external'] = True
+ config_dict['log_ep_protocol'] = log_configs['external_endpoint']['protocol']
+ config_dict['log_ep_host'] = log_configs['external_endpoint']['host']
+ config_dict['log_ep_port'] = log_configs['external_endpoint']['port']
+ else:
+ config_dict['log_external'] = False
# external DB, optional, if external_db enabled, it will cover the database config
external_db_configs = configs.get('external_database') or {}
@@ -202,7 +237,7 @@ def parse_yaml_config(config_file_path):
config_dict['harbor_db_username'] = external_db_configs['harbor']['username']
config_dict['harbor_db_password'] = external_db_configs['harbor']['password']
config_dict['harbor_db_sslmode'] = external_db_configs['harbor']['ssl_mode']
- # clari db
+ # clair db
config_dict['clair_db_host'] = external_db_configs['clair']['host']
config_dict['clair_db_port'] = external_db_configs['clair']['port']
config_dict['clair_db_name'] = external_db_configs['clair']['db_name']
@@ -261,4 +296,4 @@ def parse_yaml_config(config_file_path):
# UAA configs
config_dict['uaa'] = configs.get('uaa') or {}
- return config_dict
\ No newline at end of file
+ return config_dict
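The new proxy block fans a single harbor.yml proxy setting out to one *_http_proxy/*_https_proxy/*_no_proxy triple per listed component, replacing the clair-only keys removed above; those keys then feed the HTTP_PROXY/HTTPS_PROXY/NO_PROXY lines in the env.jinja templates. A sketch of the expansion against an assumed harbor.yml fragment:

    # Assumed result of parsing a harbor.yml 'proxy' section:
    proxy_config = {
        'http_proxy': 'http://proxy.internal:3128',   # hypothetical values
        'https_proxy': 'http://proxy.internal:3128',
        'no_proxy': '',
        'components': ['core', 'jobservice', 'clair'],
    }

    config_dict = {}
    for component in proxy_config.get('components') or []:
        config_dict[component + '_http_proxy'] = proxy_config.get('http_proxy') or ''
        config_dict[component + '_https_proxy'] = proxy_config.get('https_proxy') or ''
        config_dict[component + '_no_proxy'] = \
            proxy_config.get('no_proxy') or '127.0.0.1,localhost,core,registry'

    # config_dict now holds core_http_proxy, jobservice_http_proxy,
    # clair_http_proxy, ... plus the matching https/no_proxy keys.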
diff --git a/make/photon/prepare/utils/core.py b/make/photon/prepare/utils/core.py
index 9f062efb8..5da6fc6fa 100644
--- a/make/photon/prepare/utils/core.py
+++ b/make/photon/prepare/utils/core.py
@@ -1,7 +1,7 @@
import shutil, os
from g import config_dir, templates_dir
-from utils.misc import prepare_config_dir, generate_random_string
+from utils.misc import prepare_dir, generate_random_string
from utils.jinja import render_jinja
core_config_dir = os.path.join(config_dir, "core", "certificates")
@@ -33,7 +33,7 @@ def prepare_core(config_dict, with_notary, with_clair, with_chartmuseum):
copy_core_config(core_conf_template_path, core_conf)
def prepare_core_config_dir():
- prepare_config_dir(core_config_dir)
+ prepare_dir(core_config_dir)
def copy_core_config(core_templates_path, core_config_path):
shutil.copyfile(core_templates_path, core_config_path)
diff --git a/make/photon/prepare/utils/db.py b/make/photon/prepare/utils/db.py
index 53ef3d93e..30b7e050d 100644
--- a/make/photon/prepare/utils/db.py
+++ b/make/photon/prepare/utils/db.py
@@ -1,20 +1,18 @@
import os
-from g import config_dir, templates_dir
-from utils.misc import prepare_config_dir
+from g import config_dir, templates_dir, data_dir, PG_UID, PG_GID
+from utils.misc import prepare_dir
from utils.jinja import render_jinja
db_config_dir = os.path.join(config_dir, "db")
db_env_template_path = os.path.join(templates_dir, "db", "env.jinja")
db_conf_env = os.path.join(config_dir, "db", "env")
+database_data_path = os.path.join(data_dir, 'database')
def prepare_db(config_dict):
- prepare_db_config_dir()
-
+ prepare_dir(database_data_path, uid=PG_UID, gid=PG_GID)
+ prepare_dir(db_config_dir)
render_jinja(
db_env_template_path,
db_conf_env,
harbor_db_password=config_dict['harbor_db_password'])
-
-def prepare_db_config_dir():
- prepare_config_dir(db_config_dir)
\ No newline at end of file
diff --git a/make/photon/prepare/utils/docker_compose.py b/make/photon/prepare/utils/docker_compose.py
index cf129c2a2..648d6b979 100644
--- a/make/photon/prepare/utils/docker_compose.py
+++ b/make/photon/prepare/utils/docker_compose.py
@@ -13,8 +13,8 @@ def prepare_docker_compose(configs, with_clair, with_notary, with_chartmuseum):
VERSION_TAG = versions.get('VERSION_TAG') or 'dev'
REGISTRY_VERSION = versions.get('REGISTRY_VERSION') or 'v2.7.1'
NOTARY_VERSION = versions.get('NOTARY_VERSION') or 'v0.6.1'
- CLAIR_VERSION = versions.get('CLAIR_VERSION') or 'v2.0.7'
- CHARTMUSEUM_VERSION = versions.get('CHARTMUSEUM_VERSION') or 'v0.8.1'
+ CLAIR_VERSION = versions.get('CLAIR_VERSION') or 'v2.0.9'
+ CHARTMUSEUM_VERSION = versions.get('CHARTMUSEUM_VERSION') or 'v0.9.0'
rendering_variables = {
'version': VERSION_TAG,
@@ -33,17 +33,25 @@ def prepare_docker_compose(configs, with_clair, with_notary, with_chartmuseum):
'with_chartmuseum': with_chartmuseum
}
+ # for gcs
storage_config = configs.get('storage_provider_config') or {}
if storage_config.get('keyfile') and configs['storage_provider_name'] == 'gcs':
rendering_variables['gcs_keyfile'] = storage_config['keyfile']
+ # for http
if configs['protocol'] == 'https':
rendering_variables['cert_key_path'] = configs['cert_key_path']
rendering_variables['cert_path'] = configs['cert_path']
rendering_variables['https_port'] = configs['https_port']
+ # for uaa
uaa_config = configs.get('uaa') or {}
if uaa_config.get('ca_file'):
rendering_variables['uaa_ca_file'] = uaa_config['ca_file']
+ # for log
+ log_ep_host = configs.get('log_ep_host')
+ if log_ep_host:
+ rendering_variables['external_log_endpoint'] = True
+
render_jinja(docker_compose_template_path, docker_compose_yml_path, **rendering_variables)
\ No newline at end of file
diff --git a/make/photon/prepare/utils/jobservice.py b/make/photon/prepare/utils/jobservice.py
index 08aac9441..1b10900ee 100644
--- a/make/photon/prepare/utils/jobservice.py
+++ b/make/photon/prepare/utils/jobservice.py
@@ -1,7 +1,7 @@
import os
from g import config_dir, DEFAULT_GID, DEFAULT_UID, templates_dir
-from utils.misc import prepare_config_dir
+from utils.misc import prepare_dir
from utils.jinja import render_jinja
job_config_dir = os.path.join(config_dir, "jobservice")
@@ -10,15 +10,14 @@ job_service_conf_env = os.path.join(config_dir, "jobservice", "env")
job_service_conf_template_path = os.path.join(templates_dir, "jobservice", "config.yml.jinja")
jobservice_conf = os.path.join(config_dir, "jobservice", "config.yml")
-
def prepare_job_service(config_dict):
- prepare_config_dir(job_config_dir)
+ prepare_dir(job_config_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
log_level = config_dict['log_level'].upper()
# Job log is stored in data dir
job_log_dir = os.path.join('/data', "job_logs")
- prepare_config_dir(job_log_dir)
+ prepare_dir(job_log_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
# Render Jobservice env
render_jinja(
job_service_env_template_path,
@@ -33,4 +32,4 @@ def prepare_job_service(config_dict):
gid=DEFAULT_GID,
max_job_workers=config_dict['max_job_workers'],
redis_url=config_dict['redis_url_js'],
- level=log_level)
\ No newline at end of file
+ level=log_level)
diff --git a/make/photon/prepare/utils/log.py b/make/photon/prepare/utils/log.py
index d5fd52e20..a8a2a1d20 100644
--- a/make/photon/prepare/utils/log.py
+++ b/make/photon/prepare/utils/log.py
@@ -1,15 +1,21 @@
import os
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
-from utils.misc import prepare_config_dir
+from utils.misc import prepare_dir
from utils.jinja import render_jinja
log_config_dir = os.path.join(config_dir, "log")
+
+# logrotate config file
logrotate_template_path = os.path.join(templates_dir, "log", "logrotate.conf.jinja")
log_rotate_config = os.path.join(config_dir, "log", "logrotate.conf")
+# syslog docker config file
+log_syslog_docker_template_path = os.path.join(templates_dir, 'log', 'rsyslog_docker.conf.jinja')
+log_syslog_docker_config = os.path.join(config_dir, 'log', 'rsyslog_docker.conf')
+
def prepare_log_configs(config_dict):
- prepare_config_dir(log_config_dir)
+ prepare_dir(log_config_dir)
# Render Log config
render_jinja(
@@ -17,4 +23,13 @@ def prepare_log_configs(config_dict):
log_rotate_config,
uid=DEFAULT_UID,
gid=DEFAULT_GID,
- **config_dict)
\ No newline at end of file
+ **config_dict)
+
+ # Render syslog docker config
+ render_jinja(
+ log_syslog_docker_template_path,
+ log_syslog_docker_config,
+ uid=DEFAULT_UID,
+ gid=DEFAULT_GID,
+ **config_dict
+ )
\ No newline at end of file
diff --git a/make/photon/prepare/utils/misc.py b/make/photon/prepare/utils/misc.py
index fe6bcc7f8..e7b62faff 100644
--- a/make/photon/prepare/utils/misc.py
+++ b/make/photon/prepare/utils/misc.py
@@ -3,7 +3,7 @@ import string
import random
from g import DEFAULT_UID, DEFAULT_GID
-
+from pathlib import Path
# To meet security requirement
# By default it will change file mode to 0600, and make the owner of the file to 10000:10000
@@ -84,6 +84,26 @@ def prepare_config_dir(root, *name):
os.makedirs(absolute_path)
return absolute_path
+def prepare_dir(root: str, *args, **kwargs) -> str:
+ gid, uid = kwargs.get('gid'), kwargs.get('uid')
+ absolute_path = Path(os.path.join(root, *args))
+ if absolute_path.is_file():
+ raise Exception('Path exists and the type is regular file')
+ mode = kwargs.get('mode') or 0o755
+ absolute_path.mkdir(mode, parents=True, exist_ok=True)
+
+ # if uid or gid not None, then change the ownership of this dir
+ if not(gid is None and uid is None):
+ dir_uid, dir_gid = absolute_path.stat().st_uid, absolute_path.stat().st_gid
+ if uid is None:
+ uid = dir_uid
+ if gid is None:
+ gid = dir_gid
+ os.chown(absolute_path, uid, gid)
+
+ return str(absolute_path)
+
+
def delfile(src):
if os.path.isfile(src):
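prepare_dir is the successor to prepare_config_dir used throughout this change: it builds the directory tree (default mode 0o755), refuses to clobber a regular file, and chowns only when a uid or gid is passed, keeping the other id as-is. A usage sketch matching the callers added elsewhere in this diff:

    from g import DEFAULT_UID, DEFAULT_GID, PG_UID, PG_GID
    from utils.misc import prepare_dir

    # Config dir owned by the harbor user (10000:10000), default 0o755.
    prepare_dir('/config', 'jobservice', uid=DEFAULT_UID, gid=DEFAULT_GID)

    # Postgres data dir owned by the in-container postgres user (999:999).
    prepare_dir('/data', 'database', uid=PG_UID, gid=PG_GID)

    # No uid/gid given: the dir is created but ownership is left untouched.
    prepare_dir('/config', 'registry')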
diff --git a/make/photon/prepare/utils/nginx.py b/make/photon/prepare/utils/nginx.py
index a8706349a..0d1117448 100644
--- a/make/photon/prepare/utils/nginx.py
+++ b/make/photon/prepare/utils/nginx.py
@@ -2,11 +2,13 @@ import os, shutil
from fnmatch import fnmatch
from pathlib import Path
-from g import config_dir, templates_dir
-from utils.misc import prepare_config_dir, mark_file
+from g import config_dir, templates_dir, host_root_dir, DEFAULT_GID, DEFAULT_UID, data_dir
+from utils.misc import prepare_dir, mark_file
from utils.jinja import render_jinja
from utils.cert import SSL_CERT_KEY_PATH, SSL_CERT_PATH
+host_ngx_real_cert_dir = Path(os.path.join(data_dir, 'secret', 'cert'))
+
nginx_conf = os.path.join(config_dir, "nginx", "nginx.conf")
nginx_confd_dir = os.path.join(config_dir, "nginx", "conf.d")
nginx_https_conf_template = os.path.join(templates_dir, "nginx", "nginx.https.conf.jinja")
@@ -17,44 +19,76 @@ CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS = 'harbor.https.*.conf'
CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP = 'harbor.http.*.conf'
def prepare_nginx(config_dict):
- prepare_config_dir(nginx_confd_dir)
+ prepare_dir(nginx_confd_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
render_nginx_template(config_dict)
+
+def prepare_nginx_certs(cert_key_path, cert_path):
+ """
+ Prepare the certs file with proper ownership
+ 1. Remove nginx cert files in secret dir
+ 2. Copy cert files on host filesystem to secret dir
+ 3. Change the permission to 644 and ownership to 10000:10000
+ """
+ host_ngx_cert_key_path = Path(os.path.join(host_root_dir, cert_key_path.lstrip('/')))
+ host_ngx_cert_path = Path(os.path.join(host_root_dir, cert_path.lstrip('/')))
+
+ if host_ngx_real_cert_dir.exists() and host_ngx_real_cert_dir.is_dir():
+ shutil.rmtree(host_ngx_real_cert_dir)
+
+ os.makedirs(host_ngx_real_cert_dir, mode=0o755)
+ real_key_path = os.path.join(host_ngx_real_cert_dir, 'server.key')
+ real_crt_path = os.path.join(host_ngx_real_cert_dir, 'server.crt')
+ shutil.copy2(host_ngx_cert_key_path, real_key_path)
+ shutil.copy2(host_ngx_cert_path, real_crt_path)
+
+ os.chown(host_ngx_real_cert_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
+ mark_file(real_key_path, uid=DEFAULT_UID, gid=DEFAULT_GID)
+ mark_file(real_crt_path, uid=DEFAULT_UID, gid=DEFAULT_GID)
+
+
def render_nginx_template(config_dict):
- if config_dict['protocol'] == "https":
- render_jinja(nginx_https_conf_template, nginx_conf,
+ """
+ 1. render nginx config file through protocol
+ 2. copy additional configs to cert.d dir
+ """
+ if config_dict['protocol'] == 'https':
+ prepare_nginx_certs(config_dict['cert_key_path'], config_dict['cert_path'])
+ render_jinja(
+ nginx_https_conf_template,
+ nginx_conf,
+ uid=DEFAULT_UID,
+ gid=DEFAULT_GID,
ssl_cert=SSL_CERT_PATH,
ssl_cert_key=SSL_CERT_KEY_PATH)
location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTPS
- cert_dir = Path(os.path.join(config_dir, 'cert'))
- ssl_key_path = Path(os.path.join(cert_dir, 'server.key'))
- ssl_crt_path = Path(os.path.join(cert_dir, 'server.crt'))
- cert_dir.mkdir(parents=True, exist_ok=True)
- ssl_key_path.touch()
- ssl_crt_path.touch()
+
else:
render_jinja(
nginx_http_conf_template,
- nginx_conf)
+ nginx_conf,
+ uid=DEFAULT_UID,
+ gid=DEFAULT_GID)
location_file_pattern = CUSTOM_NGINX_LOCATION_FILE_PATTERN_HTTP
copy_nginx_location_configs_if_exist(nginx_template_ext_dir, nginx_confd_dir, location_file_pattern)
-def add_additional_location_config(src, dst):
- """
- These conf files is used for user that wanna add additional customized locations to harbor proxy
- :params src: source of the file
- :params dst: destination file path
- """
- if not os.path.isfile(src):
- return
- print("Copying nginx configuration file {src} to {dst}".format(
- src=src, dst=dst))
- shutil.copy2(src, dst)
- mark_file(dst, mode=0o644)
def copy_nginx_location_configs_if_exist(src_config_dir, dst_config_dir, filename_pattern):
if not os.path.exists(src_config_dir):
return
+
+ def add_additional_location_config(src, dst):
+ """
+ These conf files are used by users who want to add additional customized locations to the harbor proxy
+ :params src: source of the file
+ :params dst: destination file path
+ """
+ if not os.path.isfile(src):
+ return
+ print("Copying nginx configuration file {src} to {dst}".format(src=src, dst=dst))
+ shutil.copy2(src, dst)
+ mark_file(dst, mode=0o644)
+
map(lambda filename: add_additional_location_config(
os.path.join(src_config_dir, filename),
os.path.join(dst_config_dir, filename)),
diff --git a/make/photon/prepare/utils/notary.py b/make/photon/prepare/utils/notary.py
index 8d1d1175e..2e571a462 100644
--- a/make/photon/prepare/utils/notary.py
+++ b/make/photon/prepare/utils/notary.py
@@ -2,7 +2,7 @@ import os, shutil, pathlib
from g import templates_dir, config_dir, root_crt_path, secret_key_dir,DEFAULT_UID, DEFAULT_GID
from .cert import openssl_installed, create_cert, create_root_cert, get_alias
from .jinja import render_jinja
-from .misc import mark_file, prepare_config_dir
+from .misc import mark_file, prepare_dir
notary_template_dir = os.path.join(templates_dir, "notary")
notary_signer_pg_template = os.path.join(notary_template_dir, "signer-config.postgres.json.jinja")
@@ -20,12 +20,12 @@ notary_server_env_path = os.path.join(notary_config_dir, "server_env")
def prepare_env_notary(nginx_config_dir):
- notary_config_dir = prepare_config_dir(config_dir, "notary")
+ notary_config_dir = prepare_dir(config_dir, "notary")
old_signer_cert_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer.crt'))
old_signer_key_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer.key'))
old_signer_ca_cert_secret_path = pathlib.Path(os.path.join(config_dir, 'notary-signer-ca.crt'))
- notary_secret_dir = prepare_config_dir('/secret/notary')
+ notary_secret_dir = prepare_dir('/secret/notary')
signer_cert_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer.crt'))
signer_key_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer.key'))
signer_ca_cert_secret_path = pathlib.Path(os.path.join(notary_secret_dir, 'notary-signer-ca.crt'))
@@ -72,9 +72,12 @@ def prepare_env_notary(nginx_config_dir):
print("Copying nginx configuration file for notary")
- shutil.copy2(
+
+ render_jinja(
os.path.join(templates_dir, "nginx", "notary.upstream.conf.jinja"),
- os.path.join(nginx_config_dir, "notary.upstream.conf"))
+ os.path.join(nginx_config_dir, "notary.upstream.conf"),
+ gid=DEFAULT_GID,
+ uid=DEFAULT_UID)
mark_file(os.path.join(notary_secret_dir, "notary-signer.crt"))
mark_file(os.path.join(notary_secret_dir, "notary-signer.key"))
@@ -88,6 +91,8 @@ def prepare_notary(config_dict, nginx_config_dir, ssl_cert_path, ssl_cert_key_pa
render_jinja(
notary_server_nginx_config_template,
os.path.join(nginx_config_dir, "notary.server.conf"),
+ gid=DEFAULT_GID,
+ uid=DEFAULT_UID,
ssl_cert=ssl_cert_path,
ssl_cert_key=ssl_cert_key_path)
diff --git a/make/photon/prepare/utils/redis.py b/make/photon/prepare/utils/redis.py
new file mode 100644
index 000000000..751a2475a
--- /dev/null
+++ b/make/photon/prepare/utils/redis.py
@@ -0,0 +1,9 @@
+import os
+
+from g import data_dir, REDIS_UID, REDIS_GID
+from utils.misc import prepare_dir
+
+redis_data_path = os.path.join(data_dir, 'redis')
+
+def prepare_redis(config_dict):
+ prepare_dir(redis_data_path, uid=REDIS_UID, gid=REDIS_GID)
diff --git a/make/photon/prepare/utils/registry.py b/make/photon/prepare/utils/registry.py
index e07a91bdb..2a3512d9b 100644
--- a/make/photon/prepare/utils/registry.py
+++ b/make/photon/prepare/utils/registry.py
@@ -1,7 +1,7 @@
import os, copy
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID
-from utils.misc import prepare_config_dir
+from utils.misc import prepare_dir
from utils.jinja import render_jinja
@@ -9,9 +9,16 @@ registry_config_dir = os.path.join(config_dir, "registry")
registry_config_template_path = os.path.join(templates_dir, "registry", "config.yml.jinja")
registry_conf = os.path.join(config_dir, "registry", "config.yml")
+levels_map = {
+ 'debug': 'debug',
+ 'info': 'info',
+ 'warning': 'warn',
+ 'error': 'error',
+ 'fatal': 'fatal'
+}
def prepare_registry(config_dict):
- prepare_config_dir(registry_config_dir)
+ prepare_dir(registry_config_dir)
storage_provider_info = get_storage_provider_info(
config_dict['storage_provider_name'],
@@ -22,6 +29,7 @@ def prepare_registry(config_dict):
registry_conf,
uid=DEFAULT_UID,
gid=DEFAULT_GID,
+ level=levels_map[config_dict['log_level']],
storage_provider_info=storage_provider_info,
**config_dict)
diff --git a/make/photon/prepare/utils/registry_ctl.py b/make/photon/prepare/utils/registry_ctl.py
index b3fc936f6..8ffe68104 100644
--- a/make/photon/prepare/utils/registry_ctl.py
+++ b/make/photon/prepare/utils/registry_ctl.py
@@ -1,7 +1,7 @@
import os, shutil
from g import config_dir, templates_dir
-from utils.misc import prepare_config_dir
+from utils.misc import prepare_dir
from utils.jinja import render_jinja
registryctl_config_dir = os.path.join(config_dir, "registryctl")
@@ -24,7 +24,7 @@ def prepare_registry_ctl(config_dict):
copy_registry_ctl_conf(registryctl_config_template_path, registryctl_conf)
def prepare_registry_ctl_config_dir():
- prepare_config_dir(registryctl_config_dir)
+ prepare_dir(registryctl_config_dir)
def copy_registry_ctl_conf(src, dst):
shutil.copyfile(src, dst)
\ No newline at end of file
diff --git a/make/photon/redis/Dockerfile b/make/photon/redis/Dockerfile
index efac15505..a90873b4b 100644
--- a/make/photon/redis/Dockerfile
+++ b/make/photon/redis/Dockerfile
@@ -4,11 +4,12 @@ RUN tdnf install -y redis sudo
VOLUME /var/lib/redis
WORKDIR /var/lib/redis
-COPY ./make/photon/redis/docker-entrypoint.sh /usr/bin/
+COPY ./make/photon/redis/docker-healthcheck /usr/bin/
COPY ./make/photon/redis/redis.conf /etc/redis.conf
-RUN chmod +x /usr/bin/docker-entrypoint.sh \
+RUN chmod +x /usr/bin/docker-healthcheck \
&& chown redis:redis /etc/redis.conf
-ENTRYPOINT ["docker-entrypoint.sh"]
+HEALTHCHECK CMD ["docker-healthcheck"]
+USER redis
EXPOSE 6379
CMD ["redis-server", "/etc/redis.conf"]
diff --git a/make/photon/redis/docker-entrypoint.sh b/make/photon/redis/docker-entrypoint.sh
deleted file mode 100644
index 5f19ac33d..000000000
--- a/make/photon/redis/docker-entrypoint.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-set -e
-
-if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then
- set -- redis-server "$@"
-fi
-
-if [ "$1" = 'redis-server' -a "$(id -u)" = '0' ]; then
- chown -R redis .
- exec sudo -u redis "$@"
-fi
-
-exec "$@"
diff --git a/make/photon/redis/docker-healthcheck b/make/photon/redis/docker-healthcheck
new file mode 100644
index 000000000..80f5cc480
--- /dev/null
+++ b/make/photon/redis/docker-healthcheck
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -eo pipefail
+
+if ping="$(redis-cli -h "127.0.0.1" ping)" && [ "$ping" = 'PONG' ]; then
+ exit 0
+fi
+
+exit 1
\ No newline at end of file
diff --git a/make/prepare b/make/prepare
index 8e6371f96..c628f46a3 100755
--- a/make/prepare
+++ b/make/prepare
@@ -1,8 +1,8 @@
#!/bin/bash
set +e
-# If compling source code this dir is harbor's make dir
-# If install harbor via pacakge, this dir is harbor's root dir
+# If compiling source code, this dir is harbor's make dir.
+# If installing harbor via package, this dir is harbor's root dir.
if [[ -n "$HARBOR_BUNDLE_DIR" ]]; then
harbor_prepare_path=$HARBOR_BUNDLE_DIR
else
@@ -35,7 +35,7 @@ set -e
# Copy harbor.yml to input dir
if [[ ! "$1" =~ ^\-\- ]] && [ -f "$1" ]
then
- cp $1 $input_dir/harbor.yml
+ cp $1 $input_dir/harbor.yml
else
cp ${harbor_prepare_path}/harbor.yml $input_dir/harbor.yml
fi
@@ -45,10 +45,12 @@ secret_dir=${data_path}/secret
config_dir=$harbor_prepare_path/common/config
# Run prepare script
-docker run --rm -v $input_dir:/input \
- -v $harbor_prepare_path:/compose_location \
- -v $config_dir:/config \
- -v $secret_dir:/secret \
+docker run --rm -v $input_dir:/input:z \
+ -v $data_path:/data:z \
+ -v $harbor_prepare_path:/compose_location:z \
+ -v $config_dir:/config:z \
+ -v $secret_dir:/secret:z \
+ -v /:/hostfs:z \
goharbor/prepare:dev $@
echo "Clean up the input dir"
diff --git a/src/Gopkg.lock b/src/Gopkg.lock
deleted file mode 100644
index 815277ae6..000000000
--- a/src/Gopkg.lock
+++ /dev/null
@@ -1,802 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- digest = "1:b16fbfbcc20645cb419f78325bb2e85ec729b338e996a228124d68931a6f2a37"
- name = "github.com/BurntSushi/toml"
- packages = ["."]
- pruneopts = "UT"
- revision = "b26d9c308763d68093482582cea63d69be07a0f0"
- version = "v0.3.0"
-
-[[projects]]
- digest = "1:5d3e23515e7916c152cc665eda0f7eaf6fdf8fdfe7c3dbac97049bcbd649b33f"
- name = "github.com/Knetic/govaluate"
- packages = ["."]
- pruneopts = "UT"
- revision = "d216395917cc49052c7c7094cf57f09657ca08a8"
- version = "v3.0.0"
-
-[[projects]]
- digest = "1:55388fd080150b9a072912f97b1f5891eb0b50df43401f8b75fb4273d3fec9fc"
- name = "github.com/Masterminds/semver"
- packages = ["."]
- pruneopts = "UT"
- revision = "c7af12943936e8c39859482e61f0574c2fd7fc75"
- version = "v1.4.2"
-
-[[projects]]
- digest = "1:e8078e5f9d84e87745efb3c0961e78045500cda10d7102fdf839fbac4b49a423"
- name = "github.com/Unknwon/goconfig"
- packages = ["."]
- pruneopts = "UT"
- revision = "5f601ca6ef4d5cea8d52be2f8b3a420ee4b574a5"
-
-[[projects]]
- branch = "master"
- digest = "1:47ea4fbe2ab4aeb9808502c51e657041c2e49b36b83fc1c1a349135cdf16342f"
- name = "github.com/agl/ed25519"
- packages = [
- ".",
- "edwards25519",
- ]
- pruneopts = "UT"
- revision = "5312a61534124124185d41f09206b9fef1d88403"
-
-[[projects]]
- digest = "1:d2dbd0b0ec5373e89b27d0dd9f59793aa47020a05805b4b75c63aa1b2406781b"
- name = "github.com/astaxie/beego"
- packages = [
- ".",
- "cache",
- "cache/redis",
- "config",
- "context",
- "context/param",
- "grace",
- "logs",
- "orm",
- "session",
- "session/redis",
- "toolbox",
- "utils",
- "validation",
- ]
- pruneopts = "UT"
- revision = "d96289a81bf67728cff7a19b067aaecc65a62ec6"
- version = "v1.9.0"
-
-[[projects]]
- digest = "1:4522bd966f53adb3da34201b39df1153534e441c8067d5e674964f05ecca3a71"
- name = "github.com/beego/i18n"
- packages = ["."]
- pruneopts = "UT"
- revision = "e87155e8f0c05bf323d0b13470e1b97af0cb5652"
-
-[[projects]]
- digest = "1:2aaf2cc045d0219bba79655e4df795b973168c310574669cb75786684f7287d3"
- name = "github.com/bmatcuk/doublestar"
- packages = ["."]
- pruneopts = "UT"
- revision = "85a78806aa1b4707d1dbace9be592cf1ece91ab3"
- version = "v1.1.1"
-
-[[projects]]
- digest = "1:76ca0dfcbf951d1868c7449453981dba9e1f79034706d1500a5a785000f5f222"
- name = "github.com/casbin/casbin"
- packages = [
- ".",
- "config",
- "effect",
- "log",
- "model",
- "persist",
- "persist/file-adapter",
- "rbac",
- "rbac/default-role-manager",
- "util",
- ]
- pruneopts = "UT"
- revision = "542e16cac74562eefac970a7d0d1467640d1f1cb"
- version = "v1.7.0"
-
-[[projects]]
- digest = "1:f6e5e1bc64c2908167e6aa9a1fe0c084d515132a1c63ad5b6c84036aa06dc0c1"
- name = "github.com/coreos/go-oidc"
- packages = ["."]
- pruneopts = "UT"
- revision = "1180514eaf4d9f38d0d19eef639a1d695e066e72"
- version = "v2.0.0"
-
-[[projects]]
- digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39"
- name = "github.com/davecgh/go-spew"
- packages = ["spew"]
- pruneopts = "UT"
- revision = "346938d642f2ec3594ed81d874461961cd0faa76"
- version = "v1.1.0"
-
-[[projects]]
- digest = "1:ace1aef6acdf2c4647365dc87c14fb8b71ed8bb0b3ae114ffb216614a24da219"
- name = "github.com/dghubble/sling"
- packages = ["."]
- pruneopts = "UT"
- revision = "eb56e89ac5088bebb12eef3cb4b293300f43608b"
- version = "v1.1.0"
-
-[[projects]]
- digest = "1:d912bf9afc98bbb6539ea99c9ac3e83119853310dd1a3aec1583d76f340ece27"
- name = "github.com/dgrijalva/jwt-go"
- packages = ["."]
- pruneopts = "UT"
- revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
- version = "v3.0.0"
-
-[[projects]]
- digest = "1:d06c54bbda3a04ec18a2fa0577896b3c40f13409639b442379ee0a5a53be8259"
- name = "github.com/docker/distribution"
- packages = [
- ".",
- "context",
- "digestset",
- "health",
- "manifest",
- "manifest/manifestlist",
- "manifest/schema1",
- "manifest/schema2",
- "reference",
- "registry/api/errcode",
- "registry/auth",
- "registry/auth/token",
- "registry/client/auth/challenge",
- "uuid",
- ]
- pruneopts = "UT"
- revision = "2461543d988979529609e8cb6fca9ca190dc48da"
- version = "v2.7.1"
-
-[[projects]]
- branch = "master"
- digest = "1:72ba344e60095ac4fe0eac56f56fe95644421670b808238a1c849ea92721037e"
- name = "github.com/docker/go"
- packages = ["canonical/json"]
- pruneopts = "UT"
- revision = "d30aec9fd63c35133f8f79c3412ad91a3b08be06"
-
-[[projects]]
- branch = "master"
- digest = "1:4841e14252a2cecf11840bd05230412ad469709bbacfc12467e2ce5ad07f339b"
- name = "github.com/docker/libtrust"
- packages = ["."]
- pruneopts = "UT"
- revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20"
-
-[[projects]]
- digest = "1:0594af97b2f4cec6554086eeace6597e20a4b69466eb4ada25adf9f4300dddd2"
- name = "github.com/garyburd/redigo"
- packages = [
- "internal",
- "redis",
- ]
- pruneopts = "UT"
- revision = "a69d19351219b6dd56f274f96d85a7014a2ec34e"
- version = "v1.6.0"
-
-[[projects]]
- digest = "1:2cd7915ab26ede7d95b8749e6b1f933f1c6d5398030684e6505940a10f31cfda"
- name = "github.com/ghodss/yaml"
- packages = ["."]
- pruneopts = "UT"
- revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
- version = "v1.0.0"
-
-[[projects]]
- digest = "1:850c49ca338a10fec2cb9e78f793043ed23965489d09e30bcc19fe29719da313"
- name = "github.com/go-sql-driver/mysql"
- packages = ["."]
- pruneopts = "UT"
- revision = "a0583e0143b1624142adab07e0e97fe106d99561"
- version = "v1.3"
-
-[[projects]]
- digest = "1:9ae31ce33b4bab257668963e844d98765b44160be4ee98cafc44637a213e530d"
- name = "github.com/gobwas/glob"
- packages = [
- ".",
- "compiler",
- "match",
- "syntax",
- "syntax/ast",
- "syntax/lexer",
- "util/runes",
- "util/strings",
- ]
- pruneopts = "UT"
- revision = "5ccd90ef52e1e632236f7326478d4faa74f99438"
- version = "v0.2.3"
-
-[[projects]]
- digest = "1:615643b442214e7a9bade98fa7d50ec072fd17bdc5c955daa194b32e73a532a8"
- name = "github.com/gocraft/work"
- packages = ["."]
- pruneopts = "UT"
- revision = "1d4117a214abff263b472043871c8666aedb716b"
- version = "v0.5.1"
-
-[[projects]]
- digest = "1:4d02824a56d268f74a6b6fdd944b20b58a77c3d70e81008b3ee0c4f1a6777340"
- name = "github.com/gogo/protobuf"
- packages = [
- "proto",
- "sortkeys",
- ]
- pruneopts = "UT"
- revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
- version = "v1.2.1"
-
-[[projects]]
- digest = "1:39d9284259004077d3b89109d592fce5f311788745ce94a7ccd4545e536ad3ac"
- name = "github.com/golang-migrate/migrate"
- packages = [
- ".",
- "database",
- "database/postgres",
- "source",
- "source/file",
- ]
- pruneopts = "UT"
- revision = "bcd996f3df28363f43e2d0935484c4559537a3eb"
- version = "v3.3.0"
-
-[[projects]]
- branch = "master"
- digest = "1:1ba1d79f2810270045c328ae5d674321db34e3aae468eb4233883b473c5c0467"
- name = "github.com/golang/glog"
- packages = ["."]
- pruneopts = "UT"
- revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
-
-[[projects]]
- digest = "1:41e5cefde26c58f1560df2d1c32c2fa85e332d7cb4460d2077ae8fd8e0f3d789"
- name = "github.com/golang/protobuf"
- packages = [
- "proto",
- "ptypes/any",
- "ptypes/timestamp",
- ]
- pruneopts = "UT"
- revision = "130e6b02ab059e7b717a096f397c5b60111cae74"
-
-[[projects]]
- digest = "1:38ec74012390146c45af1f92d46e5382b50531247929ff3a685d2b2be65155ac"
- name = "github.com/gomodule/redigo"
- packages = [
- "internal",
- "redis",
- ]
- pruneopts = "UT"
- revision = "9c11da706d9b7902c6da69c592f75637793fe121"
- version = "v2.0.0"
-
-[[projects]]
- branch = "master"
- digest = "1:a63cff6b5d8b95638bfe300385d93b2a6d9d687734b863da8e09dc834510a690"
- name = "github.com/google/go-querystring"
- packages = ["query"]
- pruneopts = "UT"
- revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
-
-[[projects]]
- branch = "master"
- digest = "1:3ee90c0d94da31b442dde97c99635aaafec68d0b8a3c12ee2075c6bdabeec6bb"
- name = "github.com/google/gofuzz"
- packages = ["."]
- pruneopts = "UT"
- revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
-
-[[projects]]
- digest = "1:160eabf7a69910fd74f29c692718bc2437c1c1c7d4c9dea9712357752a70e5df"
- name = "github.com/gorilla/context"
- packages = ["."]
- pruneopts = "UT"
- revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a"
- version = "v1.1"
-
-[[projects]]
- digest = "1:185a43b59a1f4e7ad4e7ccafb8a1538193d897a2a75be16dda093ec42ad231cf"
- name = "github.com/gorilla/handlers"
- packages = ["."]
- pruneopts = "UT"
- revision = "90663712d74cb411cbef281bc1e08c19d1a76145"
- version = "v1.3.0"
-
-[[projects]]
- digest = "1:3c44722a6360b8d8abf6f70f122c69007189be992a150e39571224c54a9bc380"
- name = "github.com/gorilla/mux"
- packages = ["."]
- pruneopts = "UT"
- revision = "7f08801859139f86dfafd1c296e2cba9a80d292e"
- version = "v1.6.0"
-
-[[projects]]
- digest = "1:f5a2051c55d05548d2d4fd23d244027b59fbd943217df8aa3b5e170ac2fd6e1b"
- name = "github.com/json-iterator/go"
- packages = ["."]
- pruneopts = "UT"
- revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29"
- version = "v1.1.6"
-
-[[projects]]
- digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de"
- name = "github.com/konsorten/go-windows-terminal-sequences"
- packages = ["."]
- pruneopts = "UT"
- revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e"
- version = "v1.0.2"
-
-[[projects]]
- branch = "master"
- digest = "1:bd26bbaf1e9f9dfe829a88f87a0849b56f717c31785443a67668f2c752fa8412"
- name = "github.com/lib/pq"
- packages = [
- ".",
- "oid",
- ]
- pruneopts = "UT"
- revision = "b2004221932bd6b13167ef654c81cffac36f7537"
-
-[[projects]]
- digest = "1:5113b1edf6e2f370f9ce6101e7b5a86c3e8decd108067e34b762ae91e42964ee"
- name = "github.com/miekg/pkcs11"
- packages = ["."]
- pruneopts = "UT"
- revision = "7283ca79f35edb89bc1b4ecae7f86a3680ce737f"
-
-[[projects]]
- digest = "1:33422d238f147d247752996a26574ac48dcf472976eda7f5134015f06bf16563"
- name = "github.com/modern-go/concurrent"
- packages = ["."]
- pruneopts = "UT"
- revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
- version = "1.0.3"
-
-[[projects]]
- digest = "1:e32bdbdb7c377a07a9a46378290059822efdce5c8d96fe71940d87cb4f918855"
- name = "github.com/modern-go/reflect2"
- packages = ["."]
- pruneopts = "UT"
- revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
- version = "1.0.1"
-
-[[projects]]
- digest = "1:159d8a990f45d4891f1f04cb6ad7eb18b307cd02d783f7d37fa7a3b93912b172"
- name = "github.com/opencontainers/go-digest"
- packages = ["."]
- pruneopts = "UT"
- revision = "aa2ec055abd10d26d539eb630a92241b781ce4bc"
- version = "v1.0.0-rc0"
-
-[[projects]]
- digest = "1:11db38d694c130c800d0aefb502fb02519e514dc53d9804ce51d1ad25ec27db6"
- name = "github.com/opencontainers/image-spec"
- packages = [
- "specs-go",
- "specs-go/v1",
- ]
- pruneopts = "UT"
- revision = "d60099175f88c47cd379c4738d158884749ed235"
- version = "v1.0.1"
-
-[[projects]]
- digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b"
- name = "github.com/pkg/errors"
- packages = ["."]
- pruneopts = "UT"
- revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
- version = "v0.8.1"
-
-[[projects]]
- digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
- name = "github.com/pmezard/go-difflib"
- packages = ["difflib"]
- pruneopts = "UT"
- revision = "792786c7400a136282c1664665ae0a8db921c6c2"
- version = "v1.0.0"
-
-[[projects]]
- branch = "master"
- digest = "1:bd9efe4e0b0f768302a1e2f0c22458149278de533e521206e5ddc71848c269a0"
- name = "github.com/pquerna/cachecontrol"
- packages = [
- ".",
- "cacheobject",
- ]
- pruneopts = "UT"
- revision = "1555304b9b35fdd2b425bccf1a5613677705e7d0"
-
-[[projects]]
- digest = "1:3f68283c56d93b885f33c679708079e834815138649e9f59ffbc572c2993e0f8"
- name = "github.com/robfig/cron"
- packages = ["."]
- pruneopts = "UT"
- revision = "b024fc5ea0e34bc3f83d9941c8d60b0622bfaca4"
- version = "v1"
-
-[[projects]]
- digest = "1:fd61cf4ae1953d55df708acb6b91492d538f49c305b364a014049914495db426"
- name = "github.com/sirupsen/logrus"
- packages = ["."]
- pruneopts = "UT"
- revision = "8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f"
- version = "v1.4.1"
-
-[[projects]]
- digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7"
- name = "github.com/spf13/pflag"
- packages = ["."]
- pruneopts = "UT"
- revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
- version = "v1.0.1"
-
-[[projects]]
- digest = "1:ac83cf90d08b63ad5f7e020ef480d319ae890c208f8524622a2f3136e2686b02"
- name = "github.com/stretchr/objx"
- packages = ["."]
- pruneopts = "UT"
- revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
- version = "v0.1.1"
-
-[[projects]]
- digest = "1:288e2ba4192b77ec619875ab54d82e2179ca8978e8baa690dcb4343a4a1f4da7"
- name = "github.com/stretchr/testify"
- packages = [
- "assert",
- "mock",
- "require",
- "suite",
- ]
- pruneopts = "UT"
- revision = "ffdc059bfe9ce6a4e144ba849dbedead332c6053"
- version = "v1.3.0"
-
-[[projects]]
- digest = "1:a5702d6fd0891671faf050c05451d3ee4cfd70cb958e11556fefaca628ce832e"
- name = "github.com/theupdateframework/notary"
- packages = [
- ".",
- "client",
- "client/changelist",
- "cryptoservice",
- "storage",
- "trustmanager",
- "trustmanager/yubikey",
- "trustpinning",
- "tuf",
- "tuf/data",
- "tuf/signed",
- "tuf/utils",
- "tuf/validation",
- ]
- pruneopts = "UT"
- revision = "d6e1431feb32348e0650bf7551ac5cffd01d857b"
- version = "v0.6.1"
-
-[[projects]]
- digest = "1:ab3259b9f5008a18ff8c1cc34623eccce354f3a9faf5b409983cd6717d64b40b"
- name = "golang.org/x/crypto"
- packages = [
- "cast5",
- "ed25519",
- "ed25519/internal/edwards25519",
- "openpgp",
- "openpgp/armor",
- "openpgp/clearsign",
- "openpgp/elgamal",
- "openpgp/errors",
- "openpgp/packet",
- "openpgp/s2k",
- "pbkdf2",
- "ssh/terminal",
- ]
- pruneopts = "UT"
- revision = "5f961cd492ac9d43fc33a8ef646bae79d113fd97"
-
-[[projects]]
- digest = "1:2a465dcd21dc1094bd90bc28adc168d5c12d4d754b49d67b34362d26bd5c21b2"
- name = "golang.org/x/net"
- packages = [
- "context",
- "context/ctxhttp",
- "http2",
- "http2/hpack",
- "lex/httplex",
- ]
- pruneopts = "UT"
- revision = "075e191f18186a8ff2becaf64478e30f4545cdad"
-
-[[projects]]
- digest = "1:3d57c230f6800023b6fec274f38a139337b5fc0d00169a100a538eb3ef5e3da8"
- name = "golang.org/x/oauth2"
- packages = [
- ".",
- "clientcredentials",
- "internal",
- ]
- pruneopts = "UT"
- revision = "bb50c06baba3d0c76f9d125c0719093e315b5b44"
-
-[[projects]]
- branch = "master"
- digest = "1:f21f21efdd315b95a015ffd7ddca70ca60ff021848618b5a4efd88bb1603335f"
- name = "golang.org/x/sys"
- packages = ["unix"]
- pruneopts = "UT"
- revision = "571f7bbbe08da2a8955aed9d4db316e78630e9a3"
-
-[[projects]]
- branch = "master"
- digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90"
- name = "golang.org/x/time"
- packages = ["rate"]
- pruneopts = "UT"
- revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef"
-
-[[projects]]
- digest = "1:52133d6859535332391e6193c8878d06347f28881111efa900392802485e9a18"
- name = "google.golang.org/appengine"
- packages = [
- "internal",
- "internal/base",
- "internal/datastore",
- "internal/log",
- "internal/remote_api",
- "internal/urlfetch",
- "urlfetch",
- ]
- pruneopts = "UT"
- revision = "24e4144ec923c2374f6b06610c0df16a9222c3d9"
-
-[[projects]]
- digest = "1:79decf236a2000df456fe7478fd23da8af950563c922747b299e1fab7fa7d78f"
- name = "gopkg.in/asn1-ber.v1"
- packages = ["."]
- pruneopts = "UT"
- revision = "4e86f4367175e39f69d9358a5f17b4dda270378d"
- version = "v1.1"
-
-[[projects]]
- digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
- name = "gopkg.in/inf.v0"
- packages = ["."]
- pruneopts = "UT"
- revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
- version = "v0.9.1"
-
-[[projects]]
- digest = "1:79691acfc86fc3204928daf67e44955e8021ec5e10091599d344b0e16de32236"
- name = "gopkg.in/ldap.v2"
- packages = ["."]
- pruneopts = "UT"
- revision = "8168ee085ee43257585e50c6441aadf54ecb2c9f"
- version = "v2.5.0"
-
-[[projects]]
- digest = "1:c0c30f47f9c16f227ba82f0bdfd14fa968453c30b7677a07903b3b4f34b98d49"
- name = "gopkg.in/square/go-jose.v2"
- packages = [
- ".",
- "cipher",
- "json",
- ]
- pruneopts = "UT"
- revision = "628223f44a71f715d2881ea69afc795a1e9c01be"
- version = "v2.3.0"
-
-[[projects]]
- digest = "1:2a81c6e126d36ad027328cffaa4888fc3be40f09dc48028d1f93705b718130b9"
- name = "gopkg.in/yaml.v2"
- packages = ["."]
- pruneopts = "UT"
- revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5"
- version = "v2.1.1"
-
-[[projects]]
- digest = "1:7727a365529cdf6af394821dd990b046c56b8afac31e15e78fed58cf7bc179ad"
- name = "k8s.io/api"
- packages = [
- "admissionregistration/v1alpha1",
- "admissionregistration/v1beta1",
- "apps/v1",
- "apps/v1beta1",
- "apps/v1beta2",
- "authentication/v1",
- "authentication/v1beta1",
- "authorization/v1",
- "authorization/v1beta1",
- "autoscaling/v1",
- "autoscaling/v2beta1",
- "batch/v1",
- "batch/v1beta1",
- "batch/v2alpha1",
- "certificates/v1beta1",
- "core/v1",
- "events/v1beta1",
- "extensions/v1beta1",
- "networking/v1",
- "policy/v1beta1",
- "rbac/v1",
- "rbac/v1alpha1",
- "rbac/v1beta1",
- "scheduling/v1alpha1",
- "scheduling/v1beta1",
- "settings/v1alpha1",
- "storage/v1",
- "storage/v1alpha1",
- "storage/v1beta1",
- ]
- pruneopts = "UT"
- revision = "5cb15d34447165a97c76ed5a60e4e99c8a01ecfe"
- version = "kubernetes-1.13.4"
-
-[[projects]]
- branch = "master"
- digest = "1:d0d43cf61b49d2750351759e1d220134ab7731db608b6716dc4ed792a493027d"
- name = "k8s.io/apimachinery"
- packages = [
- "pkg/api/errors",
- "pkg/api/resource",
- "pkg/apis/meta/v1",
- "pkg/apis/meta/v1/unstructured",
- "pkg/conversion",
- "pkg/conversion/queryparams",
- "pkg/fields",
- "pkg/labels",
- "pkg/runtime",
- "pkg/runtime/schema",
- "pkg/runtime/serializer",
- "pkg/runtime/serializer/json",
- "pkg/runtime/serializer/protobuf",
- "pkg/runtime/serializer/recognizer",
- "pkg/runtime/serializer/streaming",
- "pkg/runtime/serializer/versioning",
- "pkg/selection",
- "pkg/types",
- "pkg/util/clock",
- "pkg/util/errors",
- "pkg/util/framer",
- "pkg/util/intstr",
- "pkg/util/json",
- "pkg/util/net",
- "pkg/util/runtime",
- "pkg/util/sets",
- "pkg/util/validation",
- "pkg/util/validation/field",
- "pkg/util/wait",
- "pkg/util/yaml",
- "pkg/version",
- "pkg/watch",
- "third_party/forked/golang/reflect",
- ]
- pruneopts = "UT"
- revision = "f534d624797b270e5e46104dc7e2c2d61edbb85d"
-
-[[projects]]
- digest = "1:131682c26796b64f0abb77ac3d85525712706fde0b085aaa7b6d10b4398167cc"
- name = "k8s.io/client-go"
- packages = [
- "kubernetes/scheme",
- "pkg/apis/clientauthentication",
- "pkg/apis/clientauthentication/v1alpha1",
- "pkg/apis/clientauthentication/v1beta1",
- "pkg/version",
- "plugin/pkg/client/auth/exec",
- "rest",
- "rest/watch",
- "tools/clientcmd/api",
- "tools/metrics",
- "transport",
- "util/cert",
- "util/connrotation",
- "util/flowcontrol",
- "util/homedir",
- "util/integer",
- ]
- pruneopts = "UT"
- revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
- version = "v8.0.0"
-
-[[projects]]
- digest = "1:1076dbb6a69b965ccfda2a06a04e5038db78eff586f74b5daf4a41444e6f6077"
- name = "k8s.io/helm"
- packages = [
- "cmd/helm/search",
- "pkg/chartutil",
- "pkg/getter",
- "pkg/helm/environment",
- "pkg/helm/helmpath",
- "pkg/ignore",
- "pkg/plugin",
- "pkg/proto/hapi/chart",
- "pkg/proto/hapi/version",
- "pkg/provenance",
- "pkg/repo",
- "pkg/sympath",
- "pkg/tlsutil",
- "pkg/urlutil",
- "pkg/version",
- ]
- pruneopts = "UT"
- revision = "20adb27c7c5868466912eebdf6664e7390ebe710"
- version = "v2.9.1"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- input-imports = [
- "github.com/Masterminds/semver",
- "github.com/astaxie/beego",
- "github.com/astaxie/beego/cache",
- "github.com/astaxie/beego/cache/redis",
- "github.com/astaxie/beego/context",
- "github.com/astaxie/beego/orm",
- "github.com/astaxie/beego/session",
- "github.com/astaxie/beego/session/redis",
- "github.com/astaxie/beego/validation",
- "github.com/beego/i18n",
- "github.com/bmatcuk/doublestar",
- "github.com/casbin/casbin",
- "github.com/casbin/casbin/model",
- "github.com/casbin/casbin/persist",
- "github.com/casbin/casbin/util",
- "github.com/coreos/go-oidc",
- "github.com/dghubble/sling",
- "github.com/dgrijalva/jwt-go",
- "github.com/docker/distribution",
- "github.com/docker/distribution/health",
- "github.com/docker/distribution/manifest/manifestlist",
- "github.com/docker/distribution/manifest/schema1",
- "github.com/docker/distribution/manifest/schema2",
- "github.com/docker/distribution/reference",
- "github.com/docker/distribution/registry/auth/token",
- "github.com/docker/distribution/registry/client/auth/challenge",
- "github.com/docker/libtrust",
- "github.com/garyburd/redigo/redis",
- "github.com/ghodss/yaml",
- "github.com/go-sql-driver/mysql",
- "github.com/gocraft/work",
- "github.com/golang-migrate/migrate",
- "github.com/golang-migrate/migrate/database/postgres",
- "github.com/golang-migrate/migrate/source/file",
- "github.com/gomodule/redigo/redis",
- "github.com/gorilla/handlers",
- "github.com/gorilla/mux",
- "github.com/lib/pq",
- "github.com/opencontainers/go-digest",
- "github.com/pkg/errors",
- "github.com/robfig/cron",
- "github.com/stretchr/testify/assert",
- "github.com/stretchr/testify/mock",
- "github.com/stretchr/testify/require",
- "github.com/stretchr/testify/suite",
- "github.com/theupdateframework/notary",
- "github.com/theupdateframework/notary/client",
- "github.com/theupdateframework/notary/trustpinning",
- "github.com/theupdateframework/notary/tuf/data",
- "golang.org/x/crypto/pbkdf2",
- "golang.org/x/oauth2",
- "golang.org/x/oauth2/clientcredentials",
- "gopkg.in/ldap.v2",
- "gopkg.in/yaml.v2",
- "k8s.io/api/authentication/v1beta1",
- "k8s.io/apimachinery/pkg/apis/meta/v1",
- "k8s.io/apimachinery/pkg/runtime/schema",
- "k8s.io/apimachinery/pkg/runtime/serializer",
- "k8s.io/client-go/kubernetes/scheme",
- "k8s.io/client-go/rest",
- "k8s.io/helm/cmd/helm/search",
- "k8s.io/helm/pkg/chartutil",
- "k8s.io/helm/pkg/proto/hapi/chart",
- "k8s.io/helm/pkg/repo",
- ]
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/src/Gopkg.toml b/src/Gopkg.toml
deleted file mode 100644
index 67a00c2c2..000000000
--- a/src/Gopkg.toml
+++ /dev/null
@@ -1,137 +0,0 @@
-# Gopkg.toml example
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-# name = "github.com/user/project"
-# version = "1.0.0"
-#
-# [[constraint]]
-# name = "github.com/user/project2"
-# branch = "dev"
-# source = "github.com/myfork/project2"
-#
-# [[override]]
-# name = "github.com/x/y"
-# version = "2.4.0"
-ignored = ["github.com/goharbor/harbor/tests*"]
-
-[prune]
- go-tests = true
- unused-packages = true
-
-[[constraint]]
- name = "github.com/astaxie/beego"
- version = "=1.9.0"
-
-[[constraint]]
- name = "github.com/casbin/casbin"
- version = "=1.7.0"
-
-[[constraint]]
- name = "github.com/dghubble/sling"
- version = "=1.1.0"
-
-[[constraint]]
- name = "github.com/dgrijalva/jwt-go"
- version = "=3.0.0"
-
-[[constraint]]
- name = "github.com/docker/distribution"
- version = "=2.7.1"
-
-[[constraint]]
- branch = "master"
- name = "github.com/docker/libtrust"
-
-[[constraint]]
- name = "github.com/go-sql-driver/mysql"
- version = "=1.3.0"
-
-[[override]]
- name = "github.com/mattn/go-sqlite3"
- version = "=1.6.0"
-
-[[constraint]]
- name = "github.com/opencontainers/go-digest"
- version = "=1.0.0-rc0"
-
-[[constraint]]
- name = "gopkg.in/ldap.v2"
- version = "=2.5.0"
-
-[[constraint]]
- name = "github.com/stretchr/testify"
- version = "=1.3.0"
-
-[[constraint]]
- name = "github.com/gorilla/handlers"
- version = "=1.3.0"
-
-[[constraint]]
- name = "github.com/gorilla/mux"
- version = "=1.6.0"
-
-[[override]]
- name = "github.com/Sirupsen/logrus"
- version = "=1.0.5"
-
-[[override]]
- name = "github.com/gorilla/context"
- version = "=1.1"
-
-[[override]]
- name = "github.com/garyburd/redigo"
- version = "=1.6.0"
-
-[[constraint]]
- name = "github.com/golang-migrate/migrate"
- version = "=3.3.0"
-
-[[constraint]]
- name = "k8s.io/helm"
- version = "2.9.1"
-
-[[constraint]]
- name = "github.com/ghodss/yaml"
- version = "=1.0.0"
-
-[[constraint]]
- name = "github.com/Masterminds/semver"
- version = "=1.4.2"
-
-[[constraint]]
- name = "github.com/gocraft/work"
- version = "=0.5.1"
-
-[[constraint]]
- name = "github.com/robfig/cron"
- version = "=1.0"
-
-[[constraint]]
- name = "github.com/coreos/go-oidc"
- version = "=2.0.0"
-
-[[constraint]]
- name = "gopkg.in/yaml.v2"
- version = "=2.1.1"
-
-[[constraint]]
- name = "k8s.io/api"
- version = "kubernetes-1.13.4"
-
-[[constraint]]
- name = "github.com/bmatcuk/doublestar"
- version = "=1.1.1"
-
-[[constraint]]
- name = "github.com/pkg/errors"
- version = "=0.8.1"
-
-[[constraint]]
- name = "github.com/docker/notary"
- version = "=0.6.1"
diff --git a/src/chartserver/client.go b/src/chartserver/client.go
index e7e1fb646..1ab153570 100644
--- a/src/chartserver/client.go
+++ b/src/chartserver/client.go
@@ -1,16 +1,16 @@
package chartserver
import (
- "errors"
"fmt"
- commonhttp "github.com/goharbor/harbor/src/common/http"
- hlog "github.com/goharbor/harbor/src/common/utils/log"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
+
+ commonhttp "github.com/goharbor/harbor/src/common/http"
+ "github.com/pkg/errors"
)
const (
@@ -49,11 +49,13 @@ func NewChartClient(credential *Credential) *ChartClient { // Create http client
func (cc *ChartClient) GetContent(addr string) ([]byte, error) {
response, err := cc.sendRequest(addr, http.MethodGet, nil)
if err != nil {
+ err = errors.Wrap(err, "get content failed")
return nil, err
}
content, err := ioutil.ReadAll(response.Body)
if err != nil {
+ err = errors.Wrap(err, "Read response body error")
return nil, err
}
defer response.Body.Close()
@@ -61,6 +63,7 @@ func (cc *ChartClient) GetContent(addr string) ([]byte, error) {
if response.StatusCode != http.StatusOK {
text, err := extractError(content)
if err != nil {
+ err = errors.Wrap(err, "Extract content error failed")
return nil, err
}
return nil, &commonhttp.Error{
@@ -106,7 +109,8 @@ func (cc *ChartClient) sendRequest(addr string, method string, body io.Reader) (
fullURI, err := url.Parse(addr)
if err != nil {
- return nil, fmt.Errorf("invalid url: %s", err.Error())
+ err = errors.Wrap(err, "Invalid url")
+ return nil, err
}
request, err := http.NewRequest(method, addr, body)
@@ -121,7 +125,7 @@ func (cc *ChartClient) sendRequest(addr string, method string, body io.Reader) (
response, err := cc.httpClient.Do(request)
if err != nil {
- hlog.Errorf("%s '%s' failed with error: %s", method, fullURI.Path, err)
+ err = errors.Wrap(err, fmt.Sprintf("send request %s %s failed", method, fullURI.Path))
return nil, err
}
diff --git a/src/chartserver/controller.go b/src/chartserver/controller.go
index 6815a974f..499b14391 100644
--- a/src/chartserver/controller.go
+++ b/src/chartserver/controller.go
@@ -7,6 +7,7 @@ import (
"os"
hlog "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/justinas/alice"
)
const (
@@ -42,7 +43,7 @@ type Controller struct {
}
// NewController is constructor of the chartserver.Controller
-func NewController(backendServer *url.URL) (*Controller, error) {
+func NewController(backendServer *url.URL, chains ...*alice.Chain) (*Controller, error) {
if backendServer == nil {
return nil, errors.New("failed to create chartserver.Controller: backend sever address is required")
}
@@ -68,7 +69,7 @@ func NewController(backendServer *url.URL) (*Controller, error) {
return &Controller{
backendServerAddress: backendServer,
// Use customized reverse proxy
- trafficProxy: NewProxyEngine(backendServer, cred),
+ trafficProxy: NewProxyEngine(backendServer, cred, chains...),
// Initialize chart operator for use
chartOperator: &ChartOperator{},
// Create http client with customized timeouts
diff --git a/src/chartserver/handler_manipulation.go b/src/chartserver/handler_manipulation.go
index 9250f2476..42e714916 100644
--- a/src/chartserver/handler_manipulation.go
+++ b/src/chartserver/handler_manipulation.go
@@ -2,19 +2,20 @@ package chartserver
import (
"encoding/json"
- "errors"
"fmt"
+ "net/http"
+ "net/http/httptest"
+ "os"
"strings"
"github.com/ghodss/yaml"
+ commonhttp "github.com/goharbor/harbor/src/common/http"
+ "github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/replication"
rep_event "github.com/goharbor/harbor/src/replication/event"
"github.com/goharbor/harbor/src/replication/model"
+ "github.com/pkg/errors"
helm_repo "k8s.io/helm/pkg/repo"
-
- "os"
-
- "github.com/goharbor/harbor/src/common/utils/log"
)
// ListCharts gets the chart list under the namespace
@@ -68,11 +69,21 @@ func (c *Controller) DeleteChartVersion(namespace, chartName, version string) er
return errors.New("invalid chart for deleting")
}
- url := fmt.Sprintf("%s/%s/%s", c.APIPrefix(namespace), chartName, version)
+ url := fmt.Sprintf("/api/chartrepo/%s/charts/%s/%s", namespace, chartName, version)
+ req, _ := http.NewRequest(http.MethodDelete, url, nil)
+ w := httptest.NewRecorder()
- err := c.apiClient.DeleteContent(url)
- if err != nil {
- return err
+ c.trafficProxy.ServeHTTP(w, req)
+
+ if w.Code != http.StatusOK {
+ text, err := extractError(w.Body.Bytes())
+ if err != nil {
+ return err
+ }
+ return &commonhttp.Error{
+ Code: w.Code,
+ Message: text,
+ }
}
// send notification to replication handler
diff --git a/src/chartserver/reverse_proxy.go b/src/chartserver/reverse_proxy.go
index 74716ea6d..c11025c77 100644
--- a/src/chartserver/reverse_proxy.go
+++ b/src/chartserver/reverse_proxy.go
@@ -17,6 +17,7 @@ import (
hlog "github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/replication"
rep_event "github.com/goharbor/harbor/src/replication/event"
+ "github.com/justinas/alice"
)
const (
@@ -36,20 +37,29 @@ type ProxyEngine struct {
backend *url.URL
// Use go reverse proxy as engine
- engine *httputil.ReverseProxy
+ engine http.Handler
}
// NewProxyEngine is the constructor of ProxyEngine
-func NewProxyEngine(target *url.URL, cred *Credential) *ProxyEngine {
+func NewProxyEngine(target *url.URL, cred *Credential, chains ...*alice.Chain) *ProxyEngine {
+ var engine http.Handler
+
+ engine = &httputil.ReverseProxy{
+ ErrorLog: log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile),
+ Director: func(req *http.Request) {
+ director(target, cred, req)
+ },
+ ModifyResponse: modifyResponse,
+ }
+
+ if len(chains) > 0 {
+ hlog.Info("New chart server traffic proxy with middlewares")
+ engine = chains[0].Then(engine)
+ }
+
return &ProxyEngine{
backend: target,
- engine: &httputil.ReverseProxy{
- ErrorLog: log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile),
- Director: func(req *http.Request) {
- director(target, cred, req)
- },
- ModifyResponse: modifyResponse,
- },
+ engine: engine,
}
}
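+
+// NOTE (assumed alice semantics): chains[0].Then(engine) wraps the reverse
+// proxy so the chained middlewares run first, in the order they were added
+// to the chain, before the request is proxied to the backend.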
diff --git a/src/common/api/base.go b/src/common/api/base.go
index fba8c3621..928c37e08 100644
--- a/src/common/api/base.go
+++ b/src/common/api/base.go
@@ -20,12 +20,11 @@ import (
"net/http"
"strconv"
+ "github.com/astaxie/beego"
"github.com/astaxie/beego/validation"
commonhttp "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/utils/log"
-
- "errors"
- "github.com/astaxie/beego"
+ "github.com/pkg/errors"
)
const (
diff --git a/src/common/config/manager.go b/src/common/config/manager.go
index 0df6eaa47..3886f160f 100644
--- a/src/common/config/manager.go
+++ b/src/common/config/manager.go
@@ -210,12 +210,14 @@ func (c *CfgManager) GetDatabaseCfg() *models.Database {
return &models.Database{
Type: c.Get(common.DatabaseType).GetString(),
PostGreSQL: &models.PostGreSQL{
- Host: c.Get(common.PostGreSQLHOST).GetString(),
- Port: c.Get(common.PostGreSQLPort).GetInt(),
- Username: c.Get(common.PostGreSQLUsername).GetString(),
- Password: c.Get(common.PostGreSQLPassword).GetString(),
- Database: c.Get(common.PostGreSQLDatabase).GetString(),
- SSLMode: c.Get(common.PostGreSQLSSLMode).GetString(),
+ Host: c.Get(common.PostGreSQLHOST).GetString(),
+ Port: c.Get(common.PostGreSQLPort).GetInt(),
+ Username: c.Get(common.PostGreSQLUsername).GetString(),
+ Password: c.Get(common.PostGreSQLPassword).GetString(),
+ Database: c.Get(common.PostGreSQLDatabase).GetString(),
+ SSLMode: c.Get(common.PostGreSQLSSLMode).GetString(),
+ MaxIdleConns: c.Get(common.PostGreSQLMaxIdleConns).GetInt(),
+ MaxOpenConns: c.Get(common.PostGreSQLMaxOpenConns).GetInt(),
},
}
}
diff --git a/src/common/config/metadata/metadatalist.go b/src/common/config/metadata/metadatalist.go
index 202f426b7..7106a38c6 100644
--- a/src/common/config/metadata/metadatalist.go
+++ b/src/common/config/metadata/metadatalist.go
@@ -47,6 +47,7 @@ const (
HTTPAuthGroup = "http_auth"
OIDCGroup = "oidc"
DatabaseGroup = "database"
+ QuotaGroup = "quota"
// Put all config items that do not belong to an existing group into basic
BasicGroup = "basic"
ClairGroup = "clair"
@@ -74,6 +75,7 @@ var (
{Name: common.ClairURL, Scope: SystemScope, Group: ClairGroup, EnvKey: "CLAIR_URL", DefaultValue: "http://clair:6060", ItemType: &StringType{}, Editable: false},
{Name: common.CoreURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "CORE_URL", DefaultValue: "http://core:8080", ItemType: &StringType{}, Editable: false},
+ {Name: common.CoreLocalURL, Scope: SystemScope, Group: BasicGroup, EnvKey: "CORE_LOCAL_URL", DefaultValue: "http://127.0.0.1:8080", ItemType: &StringType{}, Editable: false},
{Name: common.DatabaseType, Scope: SystemScope, Group: BasicGroup, EnvKey: "DATABASE_TYPE", DefaultValue: "postgresql", ItemType: &StringType{}, Editable: false},
{Name: common.EmailFrom, Scope: UserScope, Group: EmailGroup, EnvKey: "EMAIL_FROM", DefaultValue: "admin ", ItemType: &StringType{}, Editable: false},
@@ -91,7 +93,7 @@ var (
{Name: common.LDAPBaseDN, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_BASE_DN", DefaultValue: "", ItemType: &NonEmptyStringType{}, Editable: false},
{Name: common.LDAPFilter, Scope: UserScope, Group: LdapBasicGroup, EnvKey: "LDAP_FILTER", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPGroupBaseDN, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_BASE_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
- {Name: common.LdapGroupAdminDn, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_ADMIN_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
+ {Name: common.LDAPGroupAdminDn, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_ADMIN_DN", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPGroupAttributeName, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_GID", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPGroupSearchFilter, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_FILTER", DefaultValue: "", ItemType: &StringType{}, Editable: false},
{Name: common.LDAPGroupSearchScope, Scope: UserScope, Group: LdapGroupGroup, EnvKey: "LDAP_GROUP_SCOPE", DefaultValue: "2", ItemType: &LdapScopeType{}, Editable: false},
@@ -114,6 +116,8 @@ var (
{Name: common.PostGreSQLPort, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_PORT", DefaultValue: "5432", ItemType: &PortType{}, Editable: false},
{Name: common.PostGreSQLSSLMode, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_SSLMODE", DefaultValue: "disable", ItemType: &StringType{}, Editable: false},
{Name: common.PostGreSQLUsername, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_USERNAME", DefaultValue: "postgres", ItemType: &StringType{}, Editable: false},
+ {Name: common.PostGreSQLMaxIdleConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_IDLE_CONNS", DefaultValue: "2", ItemType: &IntType{}, Editable: false},
+ {Name: common.PostGreSQLMaxOpenConns, Scope: SystemScope, Group: DatabaseGroup, EnvKey: "POSTGRESQL_MAX_OPEN_CONNS", DefaultValue: "0", ItemType: &IntType{}, Editable: false},
{Name: common.ProjectCreationRestriction, Scope: UserScope, Group: BasicGroup, EnvKey: "PROJECT_CREATION_RESTRICTION", DefaultValue: common.ProCrtRestrEveryone, ItemType: &ProjectCreationRestrictionType{}, Editable: false},
{Name: common.ReadOnly, Scope: UserScope, Group: BasicGroup, EnvKey: "READ_ONLY", DefaultValue: "false", ItemType: &BoolType{}, Editable: false},
@@ -133,7 +137,7 @@ var (
{Name: common.HTTPAuthProxyEndpoint, Scope: UserScope, Group: HTTPAuthGroup, ItemType: &StringType{}},
{Name: common.HTTPAuthProxyTokenReviewEndpoint, Scope: UserScope, Group: HTTPAuthGroup, ItemType: &StringType{}},
{Name: common.HTTPAuthProxyVerifyCert, Scope: UserScope, Group: HTTPAuthGroup, DefaultValue: "true", ItemType: &BoolType{}},
- {Name: common.HTTPAuthProxyAlwaysOnboard, Scope: UserScope, Group: HTTPAuthGroup, DefaultValue: "false", ItemType: &BoolType{}},
+ {Name: common.HTTPAuthProxySkipSearch, Scope: UserScope, Group: HTTPAuthGroup, DefaultValue: "false", ItemType: &BoolType{}},
{Name: common.OIDCName, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
{Name: common.OIDCEndpoint, Scope: UserScope, Group: OIDCGroup, ItemType: &StringType{}},
@@ -147,5 +151,10 @@ var (
{Name: common.WithNotary, Scope: SystemScope, Group: BasicGroup, EnvKey: "WITH_NOTARY", DefaultValue: "false", ItemType: &BoolType{}, Editable: true},
// the unit of expiration is minute, 43200 minutes = 30 days
{Name: common.RobotTokenDuration, Scope: UserScope, Group: BasicGroup, EnvKey: "ROBOT_TOKEN_DURATION", DefaultValue: "43200", ItemType: &IntType{}, Editable: true},
+ {Name: common.NotificationEnable, Scope: UserScope, Group: BasicGroup, EnvKey: "NOTIFICATION_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, Editable: true},
+
+ {Name: common.QuotaPerProjectEnable, Scope: UserScope, Group: QuotaGroup, EnvKey: "QUOTA_PER_PROJECT_ENABLE", DefaultValue: "true", ItemType: &BoolType{}, Editable: true},
+ {Name: common.CountPerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "COUNT_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true},
+ {Name: common.StoragePerProject, Scope: UserScope, Group: QuotaGroup, EnvKey: "STORAGE_PER_PROJECT", DefaultValue: "-1", ItemType: &QuotaType{}, Editable: true},
}
)
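+
+// NOTE (database/sql semantics, for context): the POSTGRESQL_MAX_IDLE_CONNS
+// default of 2 mirrors the stdlib default, and POSTGRESQL_MAX_OPEN_CONNS=0
+// means the pool places no cap on open connections; both values are assumed
+// to reach the pool via SetMaxIdleConns / SetMaxOpenConns in the db layer.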
diff --git a/src/common/config/metadata/type.go b/src/common/config/metadata/type.go
index 6ed790c97..745f30868 100644
--- a/src/common/config/metadata/type.go
+++ b/src/common/config/metadata/type.go
@@ -18,9 +18,10 @@ package metadata
import (
"encoding/json"
"fmt"
- "github.com/goharbor/harbor/src/common"
"strconv"
"strings"
+
+ "github.com/goharbor/harbor/src/common"
)
// Type - Use this interface to define and encapsulate the behavior of validation and transformation
@@ -186,3 +187,21 @@ func (t *MapType) get(str string) (interface{}, error) {
err := json.Unmarshal([]byte(str), &result)
return result, err
}
+
+// QuotaType ...
+type QuotaType struct {
+ Int64Type
+}
+
+func (t *QuotaType) validate(str string) error {
+ val, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return err
+ }
+
+ if val <= 0 && val != -1 {
+ return fmt.Errorf("quota value should be -1 or great than zero")
+ }
+
+ return nil
+}
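+
+// A minimal validation sketch (illustrative only, not part of the change):
+//
+//	t := &QuotaType{}
+//	t.validate("-1") // nil: -1 means unlimited quota
+//	t.validate("10") // nil: any positive value is accepted
+//	t.validate("0")  // error: quota value should be -1 or greater than zero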
diff --git a/src/common/config/store/driver/db.go b/src/common/config/store/driver/db.go
index 18fe703e0..97b6568cb 100644
--- a/src/common/config/store/driver/db.go
+++ b/src/common/config/store/driver/db.go
@@ -40,7 +40,7 @@ func (d *Database) Load() (map[string]interface{}, error) {
itemMetadata, ok := metadata.Instance().GetByName(item.Key)
if !ok {
- log.Warningf("failed to get metadata, key:%v, error:%v, skip to load item", item.Key, err)
+ log.Debugf("failed to get metadata, key:%v, error:%v, skip to load item", item.Key, err)
continue
}
if itemMetadata.Scope == metadata.SystemScope {
diff --git a/src/common/const.go b/src/common/const.go
old mode 100644
new mode 100755
index 532e7960f..dbb8dec57
--- a/src/common/const.go
+++ b/src/common/const.go
@@ -53,8 +53,11 @@ const (
PostGreSQLPassword = "postgresql_password"
PostGreSQLDatabase = "postgresql_database"
PostGreSQLSSLMode = "postgresql_sslmode"
+ PostGreSQLMaxIdleConns = "postgresql_max_idle_conns"
+ PostGreSQLMaxOpenConns = "postgresql_max_open_conns"
SelfRegistration = "self_registration"
CoreURL = "core_url"
+ CoreLocalURL = "core_local_url"
JobServiceURL = "jobservice_url"
LDAPURL = "ldap_url"
LDAPSearchDN = "ldap_search_dn"
@@ -100,7 +103,7 @@ const (
HTTPAuthProxyEndpoint = "http_authproxy_endpoint"
HTTPAuthProxyTokenReviewEndpoint = "http_authproxy_tokenreview_endpoint"
HTTPAuthProxyVerifyCert = "http_authproxy_verify_cert"
- HTTPAuthProxyAlwaysOnboard = "http_authproxy_always_onboard"
+ HTTPAuthProxySkipSearch = "http_authproxy_skip_search"
OIDCName = "oidc_name"
OIDCEndpoint = "oidc_endpoint"
OIDCCLientID = "oidc_client_id"
@@ -120,8 +123,9 @@ const (
NotaryURL = "notary_url"
DefaultCoreEndpoint = "http://core:8080"
DefaultNotaryEndpoint = "http://notary-server:4443"
- LdapGroupType = 1
- LdapGroupAdminDn = "ldap_group_admin_dn"
+ LDAPGroupType = 1
+ HTTPGroupType = 2
+ LDAPGroupAdminDn = "ldap_group_admin_dn"
LDAPGroupMembershipAttribute = "ldap_group_membership_attribute"
DefaultRegistryControllerEndpoint = "http://registryctl:8080"
WithChartMuseum = "with_chartmuseum"
@@ -141,4 +145,12 @@ const (
OIDCLoginPath = "/c/oidc/login"
ChartUploadCtxKey = contextKey("chart_upload_event")
+
+ // Global notification enable configuration
+ NotificationEnable = "notification_enable"
+
+ // Quota setting items for project
+ QuotaPerProjectEnable = "quota_per_project_enable"
+ CountPerProject = "count_per_project"
+ StoragePerProject = "storage_per_project"
)
diff --git a/src/common/dao/artifact.go b/src/common/dao/artifact.go
new file mode 100644
index 000000000..34663b5cd
--- /dev/null
+++ b/src/common/dao/artifact.go
@@ -0,0 +1,142 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "strings"
+ "time"
+
+ "github.com/astaxie/beego/orm"
+ "github.com/goharbor/harbor/src/common/models"
+)
+
+// AddArtifact ...
+func AddArtifact(af *models.Artifact) (int64, error) {
+ now := time.Now()
+ af.CreationTime = now
+ af.PushTime = now
+
+ id, err := GetOrmer().Insert(af)
+ if err != nil {
+ if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
+ return 0, ErrDupRows
+ }
+ return 0, err
+ }
+ return id, nil
+}
+
+// UpdateArtifact ...
+func UpdateArtifact(af *models.Artifact) error {
+ _, err := GetOrmer().Update(af)
+ return err
+}
+
+// UpdateArtifactDigest ...
+func UpdateArtifactDigest(af *models.Artifact) error {
+ _, err := GetOrmer().Update(af, "digest")
+ return err
+}
+
+// UpdateArtifactPullTime updates the pull time of the artifact.
+func UpdateArtifactPullTime(af *models.Artifact) error {
+ _, err := GetOrmer().Update(af, "pull_time")
+ return err
+}
+
+// DeleteArtifact ...
+func DeleteArtifact(id int64) error {
+
+ _, err := GetOrmer().QueryTable(&models.Artifact{}).Filter("ID", id).Delete()
+ return err
+}
+
+// DeleteArtifactByDigest ...
+func DeleteArtifactByDigest(projectID int64, repo, digest string) error {
+ _, err := GetOrmer().Raw(`delete from artifact where project_id = ? and repo = ? and digest = ? `,
+ projectID, repo, digest).Exec()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// DeleteArtifactByTag ...
+func DeleteArtifactByTag(projectID int64, repo, tag string) error {
+ _, err := GetOrmer().Raw(`delete from artifact where project_id = ? and repo = ? and tag = ? `,
+ projectID, repo, tag).Exec()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListArtifacts lists artifacts according to the query conditions
+func ListArtifacts(query *models.ArtifactQuery) ([]*models.Artifact, error) {
+ qs := getArtifactQuerySetter(query)
+ if query.Size > 0 {
+ qs = qs.Limit(query.Size)
+ if query.Page > 0 {
+ qs = qs.Offset((query.Page - 1) * query.Size)
+ }
+ }
+ afs := []*models.Artifact{}
+ _, err := qs.All(&afs)
+ return afs, err
+}
+
+// GetArtifact by repository and tag
+func GetArtifact(repo, tag string) (*models.Artifact, error) {
+ artifact := &models.Artifact{}
+ err := GetOrmer().QueryTable(&models.Artifact{}).
+ Filter("Repo", repo).
+ Filter("Tag", tag).One(artifact)
+ if err != nil {
+ if err == orm.ErrNoRows {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return artifact, nil
+}
+
+// GetTotalOfArtifacts returns total of artifacts
+func GetTotalOfArtifacts(query ...*models.ArtifactQuery) (int64, error) {
+ var qs orm.QuerySeter
+ if len(query) > 0 {
+ qs = getArtifactQuerySetter(query[0])
+ } else {
+ qs = GetOrmer().QueryTable(&models.Artifact{})
+ }
+
+ return qs.Count()
+}
+
+func getArtifactQuerySetter(query *models.ArtifactQuery) orm.QuerySeter {
+ qs := GetOrmer().QueryTable(&models.Artifact{})
+ if query.PID != 0 {
+ qs = qs.Filter("PID", query.PID)
+ }
+ if len(query.Repo) > 0 {
+ qs = qs.Filter("Repo", query.Repo)
+ }
+ if len(query.Tag) > 0 {
+ qs = qs.Filter("Tag", query.Tag)
+ }
+ if len(query.Digest) > 0 {
+ qs = qs.Filter("Digest", query.Digest)
+ }
+ return qs
+}
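+
+// Illustrative usage (hypothetical values): Page/Size translate into
+// OFFSET/LIMIT, so Page: 2 with Size: 10 skips the first ten matches.
+//
+//	afs, _ := ListArtifacts(&models.ArtifactQuery{
+//		PID:  1,
+//		Repo: "library/hello-world",
+//		Page: 2,
+//		Size: 10,
+//	})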
diff --git a/src/common/dao/artifact_blob.go b/src/common/dao/artifact_blob.go
new file mode 100644
index 000000000..f1bcabb56
--- /dev/null
+++ b/src/common/dao/artifact_blob.go
@@ -0,0 +1,110 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/astaxie/beego/orm"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/pkg/errors"
+)
+
+// AddArtifactNBlob ...
+func AddArtifactNBlob(afnb *models.ArtifactAndBlob) (int64, error) {
+ now := time.Now()
+ afnb.CreationTime = now
+ id, err := GetOrmer().Insert(afnb)
+ if err != nil {
+ if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
+ return 0, ErrDupRows
+ }
+ return 0, err
+ }
+ return id, nil
+}
+
+// AddArtifactNBlobs ...
+func AddArtifactNBlobs(afnbs []*models.ArtifactAndBlob) error {
+ o := orm.NewOrm()
+ err := o.Begin()
+ if err != nil {
+ return err
+ }
+
+ var errInsertMultiple error
+ total := len(afnbs)
+ successNums, err := o.InsertMulti(total, afnbs)
+ if err != nil {
+ errInsertMultiple = err
+ if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
+ errInsertMultiple = errors.Wrap(errInsertMultiple, ErrDupRows.Error())
+ }
+ err := o.Rollback()
+ if err != nil {
+ log.Errorf("fail to rollback when to insert multiple artifact and blobs, %v", err)
+ errInsertMultiple = errors.Wrap(errInsertMultiple, err.Error())
+ }
+ return errInsertMultiple
+ }
+
+ // only part of the rows were inserted; treat this as a failure.
+ if successNums != int64(total) {
+ errInsertMultiple = errors.New("not all artifacts and blobs were inserted successfully")
+ err := o.Rollback()
+ if err != nil {
+ log.Errorf("fail to rollback when to insert multiple artifact and blobs, %v", err)
+ errInsertMultiple = errors.Wrap(errInsertMultiple, err.Error())
+ }
+ return errInsertMultiple
+ }
+
+ err = o.Commit()
+ if err != nil {
+ log.Errorf("fail to commit when to insert multiple artifact and blobs, %v", err)
+ return fmt.Errorf("fail to commit when to insert multiple artifact and blobs, %v", err)
+ }
+
+ return nil
+}
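+
+// NOTE: InsertMulti reports how many rows were actually written, so a
+// partial write (e.g. 2 of 3 rows) is handled like a driver error: the
+// transaction is rolled back and nothing is persisted; only a full
+// success reaches Commit.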
+
+// DeleteArtifactAndBlobByDigest ...
+func DeleteArtifactAndBlobByDigest(digest string) error {
+ _, err := GetOrmer().Raw(`delete from artifact_blob where digest_af = ? `, digest).Exec()
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// CountSizeOfArtifact ...
+func CountSizeOfArtifact(digest string) (int64, error) {
+ var res []orm.Params
+ num, err := GetOrmer().Raw(`SELECT sum(bb.size) FROM artifact_blob afnb LEFT JOIN blob bb ON afnb.digest_blob = bb.digest WHERE afnb.digest_af = ? `, digest).Values(&res)
+ if err != nil {
+ return -1, err
+ }
+ if num > 0 {
+ size, err := strconv.ParseInt(res[0]["sum"].(string), 0, 64)
+ if err != nil {
+ return -1, err
+ }
+ return size, nil
+ }
+ return -1, err
+}
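+
+// Worked example (matches the fixtures in the tests below): an artifact
+// whose manifest references blobs of 100, 200 and 300 bytes makes the
+// LEFT JOIN produce sum(bb.size) = 600, the value returned as the size.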
diff --git a/src/common/dao/artifact_blob_test.go b/src/common/dao/artifact_blob_test.go
new file mode 100644
index 000000000..3da44748b
--- /dev/null
+++ b/src/common/dao/artifact_blob_test.go
@@ -0,0 +1,131 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "testing"
+
+ "github.com/goharbor/harbor/src/common/models"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAddArtifactNBlob(t *testing.T) {
+ afnb := &models.ArtifactAndBlob{
+ DigestAF: "vvvv",
+ DigestBlob: "aaaa",
+ }
+
+ // add
+ id, err := AddArtifactNBlob(afnb)
+ require.Nil(t, err)
+ afnb.ID = id
+ assert.Equal(t, id, int64(1))
+}
+
+func TestAddArtifactNBlobs(t *testing.T) {
+ afnb1 := &models.ArtifactAndBlob{
+ DigestAF: "zzzz",
+ DigestBlob: "zzza",
+ }
+ afnb2 := &models.ArtifactAndBlob{
+ DigestAF: "zzzz",
+ DigestBlob: "zzzb",
+ }
+ afnb3 := &models.ArtifactAndBlob{
+ DigestAF: "zzzz",
+ DigestBlob: "zzzc",
+ }
+
+ var afnbs []*models.ArtifactAndBlob
+ afnbs = append(afnbs, afnb1)
+ afnbs = append(afnbs, afnb2)
+ afnbs = append(afnbs, afnb3)
+
+ // add
+ err := AddArtifactNBlobs(afnbs)
+ require.Nil(t, err)
+}
+
+func TestDeleteArtifactAndBlobByDigest(t *testing.T) {
+ afnb := &models.ArtifactAndBlob{
+ DigestAF: "vvvv",
+ DigestBlob: "vvva",
+ }
+
+ // add
+ _, err := AddArtifactNBlob(afnb)
+ require.Nil(t, err)
+
+ // delete
+ err = DeleteArtifactAndBlobByDigest(afnb.DigestAF)
+ require.Nil(t, err)
+}
+
+func TestCountSizeOfArtifact(t *testing.T) {
+
+ afnb1 := &models.ArtifactAndBlob{
+ DigestAF: "xxxx",
+ DigestBlob: "aaaa",
+ }
+ afnb2 := &models.ArtifactAndBlob{
+ DigestAF: "xxxx",
+ DigestBlob: "aaab",
+ }
+ afnb3 := &models.ArtifactAndBlob{
+ DigestAF: "xxxx",
+ DigestBlob: "aaac",
+ }
+
+ var afnbs []*models.ArtifactAndBlob
+ afnbs = append(afnbs, afnb1)
+ afnbs = append(afnbs, afnb2)
+ afnbs = append(afnbs, afnb3)
+
+ err := AddArtifactNBlobs(afnbs)
+ require.Nil(t, err)
+
+ blob1 := &models.Blob{
+ Digest: "aaaa",
+ ContentType: "v2.blob",
+ Size: 100,
+ }
+
+ _, err = AddBlob(blob1)
+ require.Nil(t, err)
+
+ blob2 := &models.Blob{
+ Digest: "aaab",
+ ContentType: "v2.blob",
+ Size: 200,
+ }
+
+ _, err = AddBlob(blob2)
+ require.Nil(t, err)
+
+ blob3 := &models.Blob{
+ Digest: "aaac",
+ ContentType: "v2.blob",
+ Size: 300,
+ }
+
+ _, err = AddBlob(blob3)
+ require.Nil(t, err)
+
+ imageSize, err := CountSizeOfArtifact("xxxx")
+ require.Nil(t, err)
+ require.Equal(t, imageSize, int64(600))
+}
diff --git a/src/common/dao/artifact_test.go b/src/common/dao/artifact_test.go
new file mode 100644
index 000000000..a7889375c
--- /dev/null
+++ b/src/common/dao/artifact_test.go
@@ -0,0 +1,184 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAddArtifact(t *testing.T) {
+ af := &models.Artifact{
+ PID: 1,
+ Repo: "hello-world",
+ Tag: "latest",
+ Digest: "1234abcd",
+ Kind: "image",
+ }
+
+ // add
+ id, err := AddArtifact(af)
+ require.Nil(t, err)
+ af.ID = id
+ assert.Equal(t, id, int64(1))
+
+}
+
+func TestGetArtifact(t *testing.T) {
+ repo := "hello-world"
+ tag := "latest"
+ artifact, err := GetArtifact(repo, tag)
+ require.Nil(t, err)
+ require.NotNil(t, artifact)
+ assert.Equal(t, repo, artifact.Repo)
+ assert.Equal(t, tag, artifact.Tag)
+}
+
+func TestUpdateArtifactDigest(t *testing.T) {
+ af := &models.Artifact{
+ PID: 1,
+ Repo: "hello-world",
+ Tag: "v2.0",
+ Digest: "4321abcd",
+ Kind: "image",
+ }
+
+ // add
+ _, err := AddArtifact(af)
+ require.Nil(t, err)
+
+ af.Digest = "update_4321abcd"
+ require.Nil(t, UpdateArtifactDigest(af))
+ assert.Equal(t, af.Digest, "update_4321abcd")
+}
+
+func TestUpdateArtifactPullTime(t *testing.T) {
+ timeNow := time.Now()
+ af := &models.Artifact{
+ PID: 1,
+ Repo: "TestUpdateArtifactPullTime",
+ Tag: "v1.0",
+ Digest: "4321abcd",
+ Kind: "image",
+ PullTime: timeNow,
+ }
+
+ // add
+ _, err := AddArtifact(af)
+ require.Nil(t, err)
+
+ time.Sleep(time.Second * 1)
+
+ af.PullTime = time.Now()
+ require.Nil(t, UpdateArtifactPullTime(af))
+ assert.NotEqual(t, timeNow, af.PullTime)
+}
+
+func TestDeleteArtifact(t *testing.T) {
+ af := &models.Artifact{
+ PID: 1,
+ Repo: "hello-world",
+ Tag: "v1.0",
+ Digest: "1234abcd",
+ Kind: "image",
+ }
+ // add
+ id, err := AddArtifact(af)
+ require.Nil(t, err)
+
+ // delete
+ err = DeleteArtifact(id)
+ require.Nil(t, err)
+}
+
+func TestDeleteArtifactByDigest(t *testing.T) {
+ af := &models.Artifact{
+ PID: 1,
+ Repo: "hello-world",
+ Tag: "v1.1",
+ Digest: "TestDeleteArtifactByDigest",
+ Kind: "image",
+ }
+ // add
+ _, err := AddArtifact(af)
+ require.Nil(t, err)
+
+ // delete
+ err = DeleteArtifactByDigest(af.PID, af.Repo, af.Digest)
+ require.Nil(t, err)
+}
+
+func TestDeleteArtifactByTag(t *testing.T) {
+ af := &models.Artifact{
+ PID: 1,
+ Repo: "hello-world",
+ Tag: "v1.2",
+ Digest: "TestDeleteArtifactByTag",
+ Kind: "image",
+ }
+ // add
+ _, err := AddArtifact(af)
+ require.Nil(t, err)
+
+ // delete
+ err = DeleteArtifactByTag(1, "hello-world", "v1.2")
+ require.Nil(t, err)
+}
+
+func TestListArtifacts(t *testing.T) {
+ af := &models.Artifact{
+ PID: 1,
+ Repo: "hello-world",
+ Tag: "v3.0",
+ Digest: "TestListArtifacts",
+ Kind: "image",
+ }
+ // add
+ _, err := AddArtifact(af)
+ require.Nil(t, err)
+
+ afs, err := ListArtifacts(&models.ArtifactQuery{
+ PID: 1,
+ Repo: "hello-world",
+ Tag: "v3.0",
+ })
+ require.Nil(t, err)
+ assert.Equal(t, 1, len(afs))
+}
+
+func TestGetTotalOfArtifacts(t *testing.T) {
+ af := &models.Artifact{
+ PID: 2,
+ Repo: "hello-world",
+ Tag: "v3.0",
+ Digest: "TestGetTotalOfArtifacts",
+ Kind: "image",
+ }
+ // add
+ _, err := AddArtifact(af)
+ require.Nil(t, err)
+
+ total, err := GetTotalOfArtifacts(&models.ArtifactQuery{
+ PID: 2,
+ Repo: "hello-world",
+ Tag: "v3.0",
+ })
+ require.Nil(t, err)
+ assert.Equal(t, int64(1), total)
+}
diff --git a/src/common/dao/base.go b/src/common/dao/base.go
index 3e04867da..43ded29ef 100644
--- a/src/common/dao/base.go
+++ b/src/common/dao/base.go
@@ -121,12 +121,16 @@ func getDatabase(database *models.Database) (db Database, err error) {
switch database.Type {
case "", "postgresql":
- db = NewPGSQL(database.PostGreSQL.Host,
+ db = NewPGSQL(
+ database.PostGreSQL.Host,
strconv.Itoa(database.PostGreSQL.Port),
database.PostGreSQL.Username,
database.PostGreSQL.Password,
database.PostGreSQL.Database,
- database.PostGreSQL.SSLMode)
+ database.PostGreSQL.SSLMode,
+ database.PostGreSQL.MaxIdleConns,
+ database.PostGreSQL.MaxOpenConns,
+ )
default:
err = fmt.Errorf("invalid database: %s", database.Type)
}
@@ -139,6 +143,8 @@ var once sync.Once
// GetOrmer :set ormer singleton
func GetOrmer() orm.Ormer {
once.Do(func() {
+ // override the default rows limit (1000) so queries with no explicit limit return all records
+ orm.DefaultRowsLimit = -1
globalOrm = orm.NewOrm()
})
return globalOrm
@@ -167,11 +173,13 @@ func ClearTable(table string) error {
return err
}
-func paginateForRawSQL(sql string, limit, offset int64) string {
+// PaginateForRawSQL ...
+func PaginateForRawSQL(sql string, limit, offset int64) string {
return fmt.Sprintf("%s limit %d offset %d", sql, limit, offset)
}
-func paginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter {
+// PaginateForQuerySetter ...
+func PaginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter {
if size > 0 {
qs = qs.Limit(size)
if page > 0 {
@@ -183,7 +191,34 @@ func paginateForQuerySetter(qs orm.QuerySeter, page, size int64) orm.QuerySeter
// Escape ..
func Escape(str string) string {
+ str = strings.Replace(str, `\`, `\\`, -1)
str = strings.Replace(str, `%`, `\%`, -1)
str = strings.Replace(str, `_`, `\_`, -1)
return str
}
+
+// WithTransaction helper for transaction
+func WithTransaction(handler func(o orm.Ormer) error) error {
+ o := orm.NewOrm()
+
+ if err := o.Begin(); err != nil {
+ log.Errorf("begin transaction failed: %v", err)
+ return err
+ }
+
+ if err := handler(o); err != nil {
+ if e := o.Rollback(); e != nil {
+ log.Errorf("rollback transaction failed: %v", e)
+ return e
+ }
+
+ return err
+ }
+
+ if err := o.Commit(); err != nil {
+ log.Errorf("commit transaction failed: %v", err)
+ return err
+ }
+
+ return nil
+}
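+
+// A minimal usage sketch (hypothetical handler, not part of this change):
+// all writes inside the closure commit together or roll back together.
+//
+//	err := WithTransaction(func(o orm.Ormer) error {
+//		if _, err := o.Insert(af); err != nil {
+//			return err // rolls the transaction back
+//		}
+//		_, err := o.InsertMulti(len(afnbs), afnbs)
+//		return err // nil commits
+//	})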
diff --git a/src/common/dao/blob.go b/src/common/dao/blob.go
new file mode 100644
index 000000000..ddcca42e1
--- /dev/null
+++ b/src/common/dao/blob.go
@@ -0,0 +1,136 @@
+package dao
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+)
+
+// AddBlob ...
+func AddBlob(blob *models.Blob) (int64, error) {
+ now := time.Now()
+ blob.CreationTime = now
+ id, err := GetOrmer().Insert(blob)
+ if err != nil {
+ if strings.Contains(err.Error(), "duplicate key value violates unique constraint") {
+ return 0, ErrDupRows
+ }
+ return 0, err
+ }
+ return id, nil
+}
+
+// GetOrCreateBlob returns the blob by digest, creating it if it does not exist
+func GetOrCreateBlob(blob *models.Blob) (bool, *models.Blob, error) {
+ blob.CreationTime = time.Now()
+
+ created, id, err := GetOrmer().ReadOrCreate(blob, "digest")
+ if err != nil {
+ return false, nil, err
+ }
+
+ blob.ID = id
+
+ return created, blob, nil
+}
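+
+// NOTE (assumed beego ORM semantics): ReadOrCreate matches on the digest
+// column only, so repeated pushes of the same layer reuse a single blob
+// row; the returned bool reports whether this call inserted it.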
+
+// GetBlob ...
+func GetBlob(digest string) (*models.Blob, error) {
+ o := GetOrmer()
+ qs := o.QueryTable(&models.Blob{})
+ qs = qs.Filter("Digest", digest)
+ b := []*models.Blob{}
+ _, err := qs.All(&b)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get blob for digest %s, error: %v", digest, err)
+ }
+ if len(b) == 0 {
+ log.Infof("No blob found for digest %s, returning empty.", digest)
+ return &models.Blob{}, nil
+ } else if len(b) > 1 {
+ log.Infof("Multiple blob found for digest %s", digest)
+ return &models.Blob{}, fmt.Errorf("Multiple blob found for digest %s", digest)
+ }
+ return b[0], nil
+}
+
+// DeleteBlob ...
+func DeleteBlob(digest string) error {
+ o := GetOrmer()
+ _, err := o.QueryTable("blob").Filter("digest", digest).Delete()
+ return err
+}
+
+// GetBlobsByArtifact returns blobs of artifact
+func GetBlobsByArtifact(artifactDigest string) ([]*models.Blob, error) {
+ sql := `SELECT * FROM blob WHERE digest IN (SELECT digest_blob FROM artifact_blob WHERE digest_af = ?)`
+
+ var blobs []*models.Blob
+ if _, err := GetOrmer().Raw(sql, artifactDigest).QueryRows(&blobs); err != nil {
+ return nil, err
+ }
+
+ return blobs, nil
+}
+
+// GetExclusiveBlobs returns layers of repository:tag which are not shared with other repositories in the project
+func GetExclusiveBlobs(projectID int64, repository, digest string) ([]*models.Blob, error) {
+ blobs, err := GetBlobsByArtifact(digest)
+ if err != nil {
+ return nil, err
+ }
+
+ sql := fmt.Sprintf(`
+SELECT
+ DISTINCT b.digest_blob AS digest
+FROM
+ (
+ SELECT
+ digest
+ FROM
+ artifact
+ WHERE
+ (
+ project_id = ?
+ AND repo != ?
+ )
+ OR (
+ project_id = ?
+ AND digest != ?
+ )
+ ) AS a
+ LEFT JOIN artifact_blob b ON a.digest = b.digest_af
+ AND b.digest_blob IN (%s)`, ParamPlaceholderForIn(len(blobs)-1))
+
+ params := []interface{}{projectID, repository, projectID, digest}
+ for _, blob := range blobs {
+ if blob.Digest != digest {
+ params = append(params, blob.Digest)
+ }
+ }
+
+ var rows []struct {
+ Digest string
+ }
+
+ if _, err := GetOrmer().Raw(sql, params...).QueryRows(&rows); err != nil {
+ return nil, err
+ }
+
+ shared := map[string]bool{}
+ for _, row := range rows {
+ shared[row.Digest] = true
+ }
+
+ var exclusive []*models.Blob
+ for _, blob := range blobs {
+ if blob.Digest != digest && !shared[blob.Digest] {
+ exclusive = append(exclusive, blob)
+ }
+ }
+
+ return exclusive, nil
+}
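+
+// For illustration, a hypothetical call: when no other artifact in the
+// project references the image's layers, all of them come back:
+//
+//	blobs, err := GetExclusiveBlobs(projectID, "myproj/mysql", manifestDigest)
+//	// blobs: the layers referenced only by this artifact (manifest excluded)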
diff --git a/src/common/dao/blob_test.go b/src/common/dao/blob_test.go
new file mode 100644
index 000000000..26dc5e492
--- /dev/null
+++ b/src/common/dao/blob_test.go
@@ -0,0 +1,222 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+func TestAddBlob(t *testing.T) {
+ blob := &models.Blob{
+ Digest: "1234abcd",
+ ContentType: "v2.blob",
+ Size: 1523,
+ }
+
+ // add
+ _, err := AddBlob(blob)
+ require.Nil(t, err)
+}
+
+func TestGetBlob(t *testing.T) {
+ blob := &models.Blob{
+ Digest: "12345abcde",
+ ContentType: "v2.blob",
+ Size: 453,
+ }
+
+ // add
+ id, err := AddBlob(blob)
+ require.Nil(t, err)
+ blob.ID = id
+
+ blob2, err := GetBlob("12345abcde")
+ require.Nil(t, err)
+ assert.Equal(t, blob.Digest, blob2.Digest)
+
+}
+
+func TestDeleteBlob(t *testing.T) {
+ blob := &models.Blob{
+ Digest: "123456abcdef",
+ ContentType: "v2.blob",
+ Size: 4543,
+ }
+ id, err := AddBlob(blob)
+ require.Nil(t, err)
+ blob.ID = id
+ err = DeleteBlob(blob.Digest)
+ require.Nil(t, err)
+}
+
+func prepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) (string, error) {
+ digest := digest.FromString(strings.Join(layerDigests, ":")).String()
+ artifact := &models.Artifact{PID: projectID, Repo: projectName + "/" + name, Digest: digest, Tag: tag}
+ if _, err := AddArtifact(artifact); err != nil {
+ return "", err
+ }
+
+ var afnbs []*models.ArtifactAndBlob
+
+ blobDigests := append([]string{digest}, layerDigests...)
+ for _, blobDigest := range blobDigests {
+ blob := &models.Blob{Digest: blobDigest, Size: 1}
+ if _, _, err := GetOrCreateBlob(blob); err != nil {
+ return "", err
+ }
+
+ afnbs = append(afnbs, &models.ArtifactAndBlob{DigestAF: digest, DigestBlob: blobDigest})
+ }
+
+ total, err := GetTotalOfArtifacts(&models.ArtifactQuery{Digest: digest})
+ if err != nil {
+ return "", err
+ }
+
+ if total == 1 {
+ if err := AddArtifactNBlobs(afnbs); err != nil {
+ return "", err
+ }
+ }
+
+ return digest, nil
+}
+
+func withProject(f func(int64, string)) {
+ projectName := utils.GenerateRandomString()
+
+ projectID, err := AddProject(models.Project{
+ Name: projectName,
+ OwnerID: 1,
+ })
+ if err != nil {
+ panic(err)
+ }
+
+ defer func() {
+ DeleteProject(projectID)
+ }()
+
+ f(projectID, projectName)
+}
+
+type GetExclusiveBlobsSuite struct {
+ suite.Suite
+}
+
+func (suite *GetExclusiveBlobsSuite) mustPrepareImage(projectID int64, projectName, name, tag string, layerDigests ...string) string {
+ digest, err := prepareImage(projectID, projectName, name, tag, layerDigests...)
+ suite.Nil(err)
+
+ return digest
+}
+
+func (suite *GetExclusiveBlobsSuite) TestInSameRepository() {
+ withProject(func(projectID int64, projectName string) {
+ digest1 := digest.FromString(utils.GenerateRandomString()).String()
+ digest2 := digest.FromString(utils.GenerateRandomString()).String()
+ digest3 := digest.FromString(utils.GenerateRandomString()).String()
+
+ manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
+ if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
+ suite.Len(blobs, 2)
+ }
+
+ manifest2 := suite.mustPrepareImage(projectID, projectName, "mysql", "8.0", digest1, digest2)
+ if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
+ suite.Len(blobs, 2)
+ }
+
+ manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3)
+ if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
+ suite.Len(blobs, 0)
+ }
+ if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
+ suite.Len(blobs, 0)
+ }
+ if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
+ suite.Len(blobs, 1)
+ suite.Equal(digest3, blobs[0].Digest)
+ }
+ })
+}
+
+func (suite *GetExclusiveBlobsSuite) TestInDifferentRepositories() {
+ withProject(func(projectID int64, projectName string) {
+ digest1 := digest.FromString(utils.GenerateRandomString()).String()
+ digest2 := digest.FromString(utils.GenerateRandomString()).String()
+ digest3 := digest.FromString(utils.GenerateRandomString()).String()
+
+ manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
+ if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
+ suite.Len(blobs, 2)
+ }
+
+ manifest2 := suite.mustPrepareImage(projectID, projectName, "mariadb", "latest", digest1, digest2)
+ if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
+ suite.Len(blobs, 0)
+ }
+ if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mariadb", manifest2); suite.Nil(err) {
+ suite.Len(blobs, 0)
+ }
+
+ manifest3 := suite.mustPrepareImage(projectID, projectName, "mysql", "dev", digest1, digest2, digest3)
+ if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
+ suite.Len(blobs, 0)
+ }
+ if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest2); suite.Nil(err) {
+ suite.Len(blobs, 0)
+ }
+ if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest3); suite.Nil(err) {
+ suite.Len(blobs, 1)
+ suite.Equal(digest3, blobs[0].Digest)
+ }
+ })
+}
+
+func (suite *GetExclusiveBlobsSuite) TestInDifferentProjects() {
+ withProject(func(projectID int64, projectName string) {
+ digest1 := digest.FromString(utils.GenerateRandomString()).String()
+ digest2 := digest.FromString(utils.GenerateRandomString()).String()
+
+ manifest1 := suite.mustPrepareImage(projectID, projectName, "mysql", "latest", digest1, digest2)
+ if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
+ suite.Len(blobs, 2)
+ }
+
+ withProject(func(id int64, name string) {
+ manifest2 := suite.mustPrepareImage(id, name, "mysql", "latest", digest1, digest2)
+ if blobs, err := GetExclusiveBlobs(projectID, projectName+"/mysql", manifest1); suite.Nil(err) {
+ suite.Len(blobs, 2)
+ }
+ if blobs, err := GetExclusiveBlobs(id, name+"/mysql", manifest2); suite.Nil(err) {
+ suite.Len(blobs, 2)
+ }
+ })
+
+ })
+}
+
+func TestRunGetExclusiveBlobsSuite(t *testing.T) {
+ suite.Run(t, new(GetExclusiveBlobsSuite))
+}
diff --git a/src/common/dao/config.go b/src/common/dao/config.go
index 65ec6e195..eea49cb30 100644
--- a/src/common/dao/config.go
+++ b/src/common/dao/config.go
@@ -54,7 +54,7 @@ func GetConfigEntries() ([]*models.ConfigEntry, error) {
func SaveConfigEntries(entries []models.ConfigEntry) error {
o := GetOrmer()
for _, entry := range entries {
- if entry.Key == common.LdapGroupAdminDn {
+ if entry.Key == common.LDAPGroupAdminDn {
entry.Value = utils.TrimLower(entry.Value)
}
tempEntry := models.ConfigEntry{}
diff --git a/src/common/dao/cve_whitelist.go b/src/common/dao/cve_whitelist.go
new file mode 100644
index 000000000..645a1c076
--- /dev/null
+++ b/src/common/dao/cve_whitelist.go
@@ -0,0 +1,64 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+)
+
+// CreateCVEWhitelist creates the CVE whitelist
+func CreateCVEWhitelist(l models.CVEWhitelist) (int64, error) {
+ o := GetOrmer()
+ itemsBytes, _ := json.Marshal(l.Items)
+ l.ItemsText = string(itemsBytes)
+ return o.Insert(&l)
+}
+
+// UpdateCVEWhitelist updates the CVE whitelist in the DB
+func UpdateCVEWhitelist(l models.CVEWhitelist) (int64, error) {
+ o := GetOrmer()
+ itemsBytes, _ := json.Marshal(l.Items)
+ l.ItemsText = string(itemsBytes)
+ id, err := o.InsertOrUpdate(&l, "project_id")
+ return id, err
+}
+
+// GetCVEWhitelist returns the CVE whitelist of the project with the given project ID
+func GetCVEWhitelist(pid int64) (*models.CVEWhitelist, error) {
+ o := GetOrmer()
+ qs := o.QueryTable(&models.CVEWhitelist{})
+ qs = qs.Filter("ProjectID", pid)
+ r := []*models.CVEWhitelist{}
+ _, err := qs.All(&r)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get CVE whitelist for project %d, error: %v", pid, err)
+ }
+ if len(r) == 0 {
+ return nil, nil
+ } else if len(r) > 1 {
+ log.Infof("Multiple CVE whitelists found for project %d, length: %d, returning first element.", pid, len(r))
+ }
+ items := []models.CVEWhitelistItem{}
+ err = json.Unmarshal([]byte(r[0].ItemsText), &items)
+ if err != nil {
+ log.Errorf("Failed to decode item list, err: %v, text: %s", err, r[0].ItemsText)
+ return nil, err
+ }
+ r[0].Items = items
+ return r[0], nil
+}
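+
+// For illustration, a round trip with hypothetical values:
+//
+//	_, err := UpdateCVEWhitelist(models.CVEWhitelist{
+//		ProjectID: 3,
+//		Items:     []models.CVEWhitelistItem{{CVEID: "CVE-2019-10164"}},
+//	})
+//	wl, err := GetCVEWhitelist(3) // wl.Items decoded from wl.ItemsText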
diff --git a/src/common/dao/cve_whitelist_test.go b/src/common/dao/cve_whitelist_test.go
new file mode 100644
index 000000000..099409de5
--- /dev/null
+++ b/src/common/dao/cve_whitelist_test.go
@@ -0,0 +1,55 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "testing"
+)
+
+func TestUpdateAndGetCVEWhitelist(t *testing.T) {
+ require.Nil(t, ClearTable("cve_whitelist"))
+ l2, err := GetCVEWhitelist(5)
+ assert.Nil(t, err)
+ assert.Nil(t, l2)
+
+ longList := []models.CVEWhitelistItem{}
+ for i := 0; i < 50; i++ {
+ longList = append(longList, models.CVEWhitelistItem{CVEID: "CVE-1999-0067"})
+ }
+
+ e := int64(1573254000)
+ in1 := models.CVEWhitelist{ProjectID: 3, Items: longList, ExpiresAt: &e}
+ _, err = UpdateCVEWhitelist(in1)
+ require.Nil(t, err)
+ // assert.Equal(t, int64(1), n)
+ out1, err := GetCVEWhitelist(3)
+ require.Nil(t, err)
+ assert.Equal(t, int64(3), out1.ProjectID)
+ assert.Equal(t, longList, out1.Items)
+ assert.Equal(t, e, *out1.ExpiresAt)
+
+ sysCVEs := []models.CVEWhitelistItem{
+ {CVEID: "CVE-2019-10164"},
+ {CVEID: "CVE-2017-12345"},
+ }
+ in3 := models.CVEWhitelist{Items: sysCVEs}
+ _, err = UpdateCVEWhitelist(in3)
+ require.Nil(t, err)
+
+ require.Nil(t, ClearTable("cve_whitelist"))
+}
diff --git a/src/common/dao/dao_test.go b/src/common/dao/dao_test.go
index 646634226..bc070245a 100644
--- a/src/common/dao/dao_test.go
+++ b/src/common/dao/dao_test.go
@@ -47,8 +47,8 @@ func cleanByUser(username string) {
o := GetOrmer()
o.Begin()
- err = execUpdate(o, `delete
- from project_member
+ err = execUpdate(o, `delete
+ from project_member
where entity_id = (
select user_id
from harbor_user
@@ -59,7 +59,7 @@ func cleanByUser(username string) {
log.Error(err)
}
- err = execUpdate(o, `delete
+ err = execUpdate(o, `delete
from project_member
where project_id = (
select project_id
@@ -71,8 +71,8 @@ func cleanByUser(username string) {
log.Error(err)
}
- err = execUpdate(o, `delete
- from access_log
+ err = execUpdate(o, `delete
+ from access_log
where username = ?
`, username)
if err != nil {
@@ -80,7 +80,7 @@ func cleanByUser(username string) {
log.Error(err)
}
- err = execUpdate(o, `delete
+ err = execUpdate(o, `delete
from access_log
where project_id = (
select project_id
@@ -302,9 +302,6 @@ func TestListUsers(t *testing.T) {
if err != nil {
t.Errorf("Error occurred in ListUsers: %v", err)
}
- if len(users) != 1 {
- t.Errorf("Expect one user in list, but the acutal length is %d, the list: %+v", len(users), users)
- }
users2, err := ListUsers(&models.UserQuery{Username: username})
if len(users2) != 1 {
t.Errorf("Expect one user in list, but the acutal length is %d, the list: %+v", len(users), users)
@@ -1035,3 +1032,53 @@ func TestIsDupRecError(t *testing.T) {
assert.True(t, isDupRecErr(fmt.Errorf("pq: duplicate key value violates unique constraint \"properties_k_key\"")))
assert.False(t, isDupRecErr(fmt.Errorf("other error")))
}
+
+func TestWithTransaction(t *testing.T) {
+ reference := "transaction"
+
+ quota := models.Quota{
+ Reference: reference,
+ ReferenceID: "1",
+ Hard: "{}",
+ }
+
+ failed := func(o orm.Ormer) error {
+		o.Insert(&quota)
+
+ return fmt.Errorf("failed")
+ }
+
+ var quotaID int64
+ success := func(o orm.Ormer) error {
+		id, err := o.Insert(&quota)
+ if err != nil {
+ return err
+ }
+
+ quotaID = id
+ return nil
+ }
+
+ assert := assert.New(t)
+
+ if assert.Error(WithTransaction(failed)) {
+ var quota models.Quota
+ quota.Reference = reference
+ quota.ReferenceID = "1"
+		err := GetOrmer().Read(&quota, "reference", "reference_id")
+ assert.Error(err)
+ assert.False(quota.ID != 0)
+ }
+
+ if assert.Nil(WithTransaction(success)) {
+ var quota models.Quota
+ quota.Reference = reference
+ quota.ReferenceID = "1"
+		err := GetOrmer().Read(&quota, "reference", "reference_id")
+ assert.Nil(err)
+ assert.True(quota.ID != 0)
+ assert.Equal(quotaID, quota.ID)
+
+ GetOrmer().Delete(&models.Quota{ID: quotaID}, "id")
+ }
+}
diff --git a/src/common/dao/group/usergroup.go b/src/common/dao/group/usergroup.go
index e0aa1d226..a6eedfec1 100644
--- a/src/common/dao/group/usergroup.go
+++ b/src/common/dao/group/usergroup.go
@@ -18,23 +18,35 @@ import (
"strings"
"time"
- "github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/utils"
+ "fmt"
+
+ "github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/pkg/errors"
)
+// ErrGroupNameDup ...
+var ErrGroupNameDup = errors.New("duplicated user group name")
+
// AddUserGroup - Add User Group
func AddUserGroup(userGroup models.UserGroup) (int, error) {
+ userGroupList, err := QueryUserGroup(models.UserGroup{GroupName: userGroup.GroupName, GroupType: common.HTTPGroupType})
+ if err != nil {
+		return 0, err
+ }
+ if len(userGroupList) > 0 {
+ return 0, ErrGroupNameDup
+ }
o := dao.GetOrmer()
-
sql := "insert into user_group (group_name, group_type, ldap_group_dn, creation_time, update_time) values (?, ?, ?, ?, ?) RETURNING id"
var id int
now := time.Now()
- err := o.Raw(sql, userGroup.GroupName, userGroup.GroupType, utils.TrimLower(userGroup.LdapGroupDN), now, now).QueryRow(&id)
+ err = o.Raw(sql, userGroup.GroupName, userGroup.GroupType, utils.TrimLower(userGroup.LdapGroupDN), now, now).QueryRow(&id)
if err != nil {
return 0, err
}
@@ -47,10 +59,10 @@ func QueryUserGroup(query models.UserGroup) ([]*models.UserGroup, error) {
o := dao.GetOrmer()
sql := `select id, group_name, group_type, ldap_group_dn from user_group where 1=1 `
sqlParam := make([]interface{}, 1)
- groups := []*models.UserGroup{}
+ var groups []*models.UserGroup
if len(query.GroupName) != 0 {
- sql += ` and group_name like ? `
- sqlParam = append(sqlParam, `%`+dao.Escape(query.GroupName)+`%`)
+ sql += ` and group_name = ? `
+ sqlParam = append(sqlParam, query.GroupName)
}
if query.GroupType != 0 {
@@ -86,6 +98,27 @@ func GetUserGroup(id int) (*models.UserGroup, error) {
return nil, nil
}
+// GetGroupIDByGroupName - Return the group IDs for the given group names. The result may
+// contain fewer IDs than names when some of the groups do not exist.
+func GetGroupIDByGroupName(groupName []string, groupType int) ([]int, error) {
+ var retGroupID []int
+ var conditions []string
+ if len(groupName) == 0 {
+ return retGroupID, nil
+ }
+ for _, gName := range groupName {
+ con := "'" + gName + "'"
+ conditions = append(conditions, con)
+ }
+ sql := fmt.Sprintf("select id from user_group where group_name in ( %s ) and group_type = %v", strings.Join(conditions, ","), groupType)
+ o := dao.GetOrmer()
+ cnt, err := o.Raw(sql).QueryRows(&retGroupID)
+ if err != nil {
+ return retGroupID, err
+ }
+ log.Debugf("Found rows %v", cnt)
+ return retGroupID, nil
+}
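+
+// For illustration, with hypothetical group names:
+//
+//	ids, err := GetGroupIDByGroupName([]string{"dev", "qa"}, common.HTTPGroupType)
+//	// ids may hold fewer than two entries if either group does not exist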
+
// DeleteUserGroup ...
func DeleteUserGroup(id int) error {
userGroup := models.UserGroup{ID: id}
@@ -111,11 +144,7 @@ func UpdateUserGroupName(id int, groupName string) error {
return err
}
-// OnBoardUserGroup will check if a usergroup exists in usergroup table, if not insert the usergroup and
-// put the id in the pointer of usergroup model, if it does exist, return the usergroup's profile.
-// This is used for ldap and uaa authentication, such the usergroup can have an ID in Harbor.
-// the keyAttribute and combinedKeyAttribute are key columns used to check duplicate usergroup in harbor
-func OnBoardUserGroup(g *models.UserGroup, keyAttribute string, combinedKeyAttributes ...string) error {
+func onBoardCommonUserGroup(g *models.UserGroup, keyAttribute string, combinedKeyAttributes ...string) error {
g.LdapGroupDN = utils.TrimLower(g.LdapGroupDN)
o := dao.GetOrmer()
@@ -140,19 +169,11 @@ func OnBoardUserGroup(g *models.UserGroup, keyAttribute string, combinedKeyAttri
return nil
}
-// GetGroupDNQueryCondition get the part of IN ('XXX', 'XXX') condition
-func GetGroupDNQueryCondition(userGroupList []*models.UserGroup) string {
- result := make([]string, 0)
- count := 0
- for _, userGroup := range userGroupList {
- if userGroup.GroupType == common.LdapGroupType {
- result = append(result, "'"+userGroup.LdapGroupDN+"'")
- count++
- }
+// OnBoardUserGroup checks whether the user group exists in the user_group table;
+// if not, it inserts the group and stores the new ID in the passed-in model,
+// otherwise it fills the model with the existing group's profile.
+func OnBoardUserGroup(g *models.UserGroup) error {
+ if g.GroupType == common.LDAPGroupType {
+ return onBoardCommonUserGroup(g, "LdapGroupDN", "GroupType")
}
- // No LDAP Group found
- if count == 0 {
- return ""
- }
- return strings.Join(result, ",")
+ return onBoardCommonUserGroup(g, "GroupName", "GroupType")
}
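+
+// For illustration, a hypothetical onboarding call; the group type now picks
+// the duplicate-check key columns, so callers no longer pass them:
+//
+//	g := &models.UserGroup{GroupName: "devs", GroupType: common.HTTPGroupType}
+//	if err := OnBoardUserGroup(g); err == nil {
+//		_ = g.ID // populated whether the group was created or already existed
+//	}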
diff --git a/src/common/dao/group/usergroup_test.go b/src/common/dao/group/usergroup_test.go
index 91603e64d..2b7952ef9 100644
--- a/src/common/dao/group/usergroup_test.go
+++ b/src/common/dao/group/usergroup_test.go
@@ -17,6 +17,7 @@ package group
import (
"fmt"
"os"
+ "reflect"
"testing"
"github.com/goharbor/harbor/src/common"
@@ -46,20 +47,30 @@ func TestMain(m *testing.M) {
// Extract to test utils
initSqls := []string{
"insert into harbor_user (username, email, password, realname) values ('member_test_01', 'member_test_01@example.com', '123456', 'member_test_01')",
+ "insert into harbor_user (username, email, password, realname) values ('grouptestu09', 'grouptestu09@example.com', '123456', 'grouptestu09')",
"insert into project (name, owner_id) values ('member_test_01', 1)",
+ `insert into project (name, owner_id) values ('group_project2', 1)`,
+ `insert into project (name, owner_id) values ('group_project_private', 1)`,
"insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01', 1, 'cn=harbor_users,ou=sample,ou=vmware,dc=harbor,dc=com')",
+ "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_http_group', 2, '')",
+ "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_myhttp_group', 2, '')",
"update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
+ "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_http_group'), 'g', 4)",
+ "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_myhttp_group'), 'g', 4)",
}
clearSqls := []string{
"delete from project where name='member_test_01'",
- "delete from harbor_user where username='member_test_01' or username='pm_sample'",
+ "delete from project where name='group_project2'",
+ "delete from project where name='group_project_private'",
+ "delete from harbor_user where username='member_test_01' or username='pm_sample' or username='grouptestu09'",
"delete from user_group",
"delete from project_member",
}
- dao.PrepareTestData(clearSqls, initSqls)
+ dao.ExecuteBatchSQL(initSqls)
+ defer dao.ExecuteBatchSQL(clearSqls)
result = m.Run()
@@ -80,7 +91,7 @@ func TestAddUserGroup(t *testing.T) {
want int
wantErr bool
}{
- {"Insert an ldap user group", args{userGroup: models.UserGroup{GroupName: "sample_group", GroupType: common.LdapGroupType, LdapGroupDN: "sample_ldap_dn_string"}}, 0, false},
+ {"Insert an ldap user group", args{userGroup: models.UserGroup{GroupName: "sample_group", GroupType: common.LDAPGroupType, LdapGroupDN: "sample_ldap_dn_string"}}, 0, false},
{"Insert other user group", args{userGroup: models.UserGroup{GroupName: "other_group", GroupType: 3, LdapGroupDN: "other information"}}, 0, false},
}
for _, tt := range tests {
@@ -108,8 +119,8 @@ func TestQueryUserGroup(t *testing.T) {
wantErr bool
}{
{"Query all user group", args{query: models.UserGroup{GroupName: "test_group_01"}}, 1, false},
- {"Query all ldap group", args{query: models.UserGroup{GroupType: common.LdapGroupType}}, 2, false},
- {"Query ldap group with group property", args{query: models.UserGroup{GroupType: common.LdapGroupType, LdapGroupDN: "CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com"}}, 1, false},
+ {"Query all ldap group", args{query: models.UserGroup{GroupType: common.LDAPGroupType}}, 2, false},
+ {"Query ldap group with group property", args{query: models.UserGroup{GroupType: common.LDAPGroupType, LdapGroupDN: "CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com"}}, 1, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
@@ -126,7 +137,7 @@ func TestQueryUserGroup(t *testing.T) {
}
func TestGetUserGroup(t *testing.T) {
- userGroup := models.UserGroup{GroupName: "insert_group", GroupType: common.LdapGroupType, LdapGroupDN: "ldap_dn_string"}
+ userGroup := models.UserGroup{GroupName: "insert_group", GroupType: common.LDAPGroupType, LdapGroupDN: "ldap_dn_string"}
result, err := AddUserGroup(userGroup)
if err != nil {
t.Errorf("Error occurred when AddUserGroup: %v", err)
@@ -175,7 +186,7 @@ func TestUpdateUserGroup(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- fmt.Printf("id=%v", createdUserGroupID)
+ fmt.Printf("id=%v\n", createdUserGroupID)
if err := UpdateUserGroupName(tt.args.id, tt.args.groupName); (err != nil) != tt.wantErr {
t.Errorf("UpdateUserGroup() error = %v, wantErr %v", err, tt.wantErr)
userGroup, err := GetUserGroup(tt.args.id)
@@ -231,65 +242,30 @@ func TestOnBoardUserGroup(t *testing.T) {
args{g: &models.UserGroup{
GroupName: "harbor_example",
LdapGroupDN: "cn=harbor_example,ou=groups,dc=example,dc=com",
- GroupType: common.LdapGroupType}},
+ GroupType: common.LDAPGroupType}},
false},
{"OnBoardUserGroup second time",
args{g: &models.UserGroup{
GroupName: "harbor_example",
LdapGroupDN: "cn=harbor_example,ou=groups,dc=example,dc=com",
- GroupType: common.LdapGroupType}},
+ GroupType: common.LDAPGroupType}},
+ false},
+ {"OnBoardUserGroup HTTP user group",
+ args{g: &models.UserGroup{
+ GroupName: "test_myhttp_group",
+ GroupType: common.HTTPGroupType}},
false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- if err := OnBoardUserGroup(tt.args.g, "LdapGroupDN", "GroupType"); (err != nil) != tt.wantErr {
+ if err := OnBoardUserGroup(tt.args.g); (err != nil) != tt.wantErr {
t.Errorf("OnBoardUserGroup() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
}
-func TestGetGroupDNQueryCondition(t *testing.T) {
- userGroupList := []*models.UserGroup{
- {
- GroupName: "sample1",
- GroupType: 1,
- LdapGroupDN: "cn=sample1_users,ou=groups,dc=example,dc=com",
- },
- {
- GroupName: "sample2",
- GroupType: 1,
- LdapGroupDN: "cn=sample2_users,ou=groups,dc=example,dc=com",
- },
- {
- GroupName: "sample3",
- GroupType: 0,
- LdapGroupDN: "cn=sample3_users,ou=groups,dc=example,dc=com",
- },
- }
-
- groupQueryConditions := GetGroupDNQueryCondition(userGroupList)
- expectedConditions := `'cn=sample1_users,ou=groups,dc=example,dc=com','cn=sample2_users,ou=groups,dc=example,dc=com'`
- if groupQueryConditions != expectedConditions {
- t.Errorf("Failed to GetGroupDNQueryCondition, expected %v, actual %v", expectedConditions, groupQueryConditions)
- }
- var userGroupList2 []*models.UserGroup
- groupQueryCondition2 := GetGroupDNQueryCondition(userGroupList2)
- if len(groupQueryCondition2) > 0 {
- t.Errorf("Failed to GetGroupDNQueryCondition, expected %v, actual %v", "", groupQueryCondition2)
- }
- groupQueryCondition3 := GetGroupDNQueryCondition(nil)
- if len(groupQueryCondition3) > 0 {
- t.Errorf("Failed to GetGroupDNQueryCondition, expected %v, actual %v", "", groupQueryCondition3)
- }
-}
func TestGetGroupProjects(t *testing.T) {
- userID, err := dao.Register(models.User{
- Username: "grouptestu09",
- Email: "grouptest09@example.com",
- Password: "Harbor123456",
- })
- defer dao.DeleteUser(int(userID))
projectID1, err := dao.AddProject(models.Project{
Name: "grouptest01",
OwnerID: 1,
@@ -307,7 +283,7 @@ func TestGetGroupProjects(t *testing.T) {
}
defer dao.DeleteProject(projectID2)
groupID, err := AddUserGroup(models.UserGroup{
- GroupName: "test_group_01",
+ GroupName: "test_group_03",
GroupType: 1,
LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com",
})
@@ -322,8 +298,7 @@ func TestGetGroupProjects(t *testing.T) {
})
defer project.DeleteProjectMemberByID(pmid)
type args struct {
- groupDNCondition string
- query *models.ProjectQueryParam
+ query *models.ProjectQueryParam
}
member := &models.MemberQuery{
Name: "grouptestu09",
@@ -335,19 +310,17 @@ func TestGetGroupProjects(t *testing.T) {
wantErr bool
}{
{"Query with group DN",
- args{"'cn=harbor_users,ou=groups,dc=example,dc=com'",
- &models.ProjectQueryParam{
- Member: member,
- }},
+ args{&models.ProjectQueryParam{
+ Member: member,
+ }},
1, false},
{"Query without group DN",
- args{"",
- &models.ProjectQueryParam{}},
+ args{&models.ProjectQueryParam{}},
1, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, err := dao.GetGroupProjects(tt.args.groupDNCondition, tt.args.query)
+ got, err := dao.GetGroupProjects([]int{groupID}, tt.args.query)
if (err != nil) != tt.wantErr {
t.Errorf("GetGroupProjects() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -377,7 +350,7 @@ func TestGetTotalGroupProjects(t *testing.T) {
}
defer dao.DeleteProject(projectID2)
groupID, err := AddUserGroup(models.UserGroup{
- GroupName: "test_group_01",
+ GroupName: "test_group_05",
GroupType: 1,
LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com",
})
@@ -392,8 +365,7 @@ func TestGetTotalGroupProjects(t *testing.T) {
})
defer project.DeleteProjectMemberByID(pmid)
type args struct {
- groupDNCondition string
- query *models.ProjectQueryParam
+ query *models.ProjectQueryParam
}
tests := []struct {
name string
@@ -401,18 +373,16 @@ func TestGetTotalGroupProjects(t *testing.T) {
wantSize int
wantErr bool
}{
- {"Query with group DN",
- args{"'cn=harbor_users,ou=groups,dc=example,dc=com'",
- &models.ProjectQueryParam{}},
+ {"Query with group ID",
+ args{&models.ProjectQueryParam{}},
1, false},
- {"Query without group DN",
- args{"",
- &models.ProjectQueryParam{}},
+ {"Query without group ID",
+ args{&models.ProjectQueryParam{}},
1, false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, err := dao.GetTotalGroupProjects(tt.args.groupDNCondition, tt.args.query)
+ got, err := dao.GetTotalGroupProjects([]int{groupID}, tt.args.query)
if (err != nil) != tt.wantErr {
t.Errorf("GetGroupProjects() error = %v, wantErr %v", err, tt.wantErr)
return
@@ -423,3 +393,90 @@ func TestGetTotalGroupProjects(t *testing.T) {
})
}
}
+func TestGetRolesByLDAPGroup(t *testing.T) {
+
+ userGroupList, err := QueryUserGroup(models.UserGroup{LdapGroupDN: "cn=harbor_users,ou=sample,ou=vmware,dc=harbor,dc=com", GroupType: 1})
+ if err != nil || len(userGroupList) < 1 {
+ t.Errorf("failed to query user group, err %v", err)
+ }
+ gl2, err2 := GetGroupIDByGroupName([]string{"test_http_group", "test_myhttp_group"}, common.HTTPGroupType)
+ if err2 != nil || len(gl2) != 2 {
+		t.Errorf("failed to query http user group, err %v", err2)
+ }
+ project, err := dao.GetProjectByName("member_test_01")
+ if err != nil {
+ t.Errorf("Error occurred when Get project by name: %v", err)
+ }
+ privateProject, err := dao.GetProjectByName("group_project_private")
+ if err != nil {
+ t.Errorf("Error occurred when Get project by name: %v", err)
+ }
+
+ type args struct {
+ projectID int64
+ groupIDs []int
+ }
+ tests := []struct {
+ name string
+ args args
+ wantSize int
+ wantErr bool
+ }{
+ {"Check normal", args{projectID: project.ProjectID, groupIDs: []int{userGroupList[0].ID, gl2[0], gl2[1]}}, 2, false},
+ {"Check non exist", args{projectID: privateProject.ProjectID, groupIDs: []int{9999}}, 0, false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := dao.GetRolesByGroupID(tt.args.projectID, tt.args.groupIDs)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("TestGetRolesByLDAPGroup() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if len(got) != tt.wantSize {
+ t.Errorf("TestGetRolesByLDAPGroup() = %v, want %v", len(got), tt.wantSize)
+ }
+ })
+ }
+}
+
+func TestGetGroupIDByGroupName(t *testing.T) {
+ groupList, err := QueryUserGroup(models.UserGroup{GroupName: "test_http_group", GroupType: 2})
+ if err != nil {
+ t.Error(err)
+ }
+	if len(groupList) == 0 {
+		t.Fatal("user group test_http_group not found")
+	}
+ groupList2, err := QueryUserGroup(models.UserGroup{GroupName: "test_myhttp_group", GroupType: 2})
+ if err != nil {
+ t.Error(err)
+ }
+	if len(groupList2) == 0 {
+		t.Fatal("user group test_myhttp_group not found")
+	}
+ var expectGroupID []int
+ type args struct {
+ groupName []string
+ }
+ tests := []struct {
+ name string
+ args args
+ want []int
+ wantErr bool
+ }{
+ {"empty query", args{groupName: []string{}}, expectGroupID, false},
+ {"normal query", args{groupName: []string{"test_http_group", "test_myhttp_group"}}, []int{groupList[0].ID, groupList2[0].ID}, false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := GetGroupIDByGroupName(tt.args.groupName, common.HTTPGroupType)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("GetHTTPGroupIDByGroupName() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("GetHTTPGroupIDByGroupName() = %#v, want %#v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/src/common/dao/notification/notification_job.go b/src/common/dao/notification/notification_job.go
new file mode 100755
index 000000000..1bd8c5039
--- /dev/null
+++ b/src/common/dao/notification/notification_job.go
@@ -0,0 +1,122 @@
+package notification
+
+import (
+ "fmt"
+
+ "github.com/astaxie/beego/orm"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/pkg/errors"
+)
+
+// UpdateNotificationJob updates the notification job
+func UpdateNotificationJob(job *models.NotificationJob, props ...string) (int64, error) {
+ if job == nil {
+ return 0, errors.New("nil job")
+ }
+
+ if job.ID == 0 {
+ return 0, fmt.Errorf("notification job ID is empty")
+ }
+
+ o := dao.GetOrmer()
+ return o.Update(job, props...)
+}
+
+// AddNotificationJob inserts a new notification job into the DB
+func AddNotificationJob(job *models.NotificationJob) (int64, error) {
+ if job == nil {
+ return 0, errors.New("nil job")
+ }
+ o := dao.GetOrmer()
+ if len(job.Status) == 0 {
+ job.Status = models.JobPending
+ }
+ return o.Insert(job)
+}
+
+// GetNotificationJob ...
+func GetNotificationJob(id int64) (*models.NotificationJob, error) {
+ o := dao.GetOrmer()
+ j := &models.NotificationJob{
+ ID: id,
+ }
+ err := o.Read(j)
+ if err == orm.ErrNoRows {
+ return nil, nil
+ }
+	return j, err
+}
+
+// GetTotalCountOfNotificationJobs ...
+func GetTotalCountOfNotificationJobs(query ...*models.NotificationJobQuery) (int64, error) {
+ qs := notificationJobQueryConditions(query...)
+ return qs.Count()
+}
+
+// GetNotificationJobs ...
+func GetNotificationJobs(query ...*models.NotificationJobQuery) ([]*models.NotificationJob, error) {
+ var jobs []*models.NotificationJob
+
+ qs := notificationJobQueryConditions(query...)
+ if len(query) > 0 && query[0] != nil {
+ qs = dao.PaginateForQuerySetter(qs, query[0].Page, query[0].Size)
+ }
+
+ qs = qs.OrderBy("-UpdateTime")
+
+ _, err := qs.All(&jobs)
+ return jobs, err
+}
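+
+// For illustration, a hypothetical filtered, paginated listing:
+//
+//	jobs, err := GetNotificationJobs(&models.NotificationJobQuery{
+//		PolicyID:   111,
+//		EventTypes: []string{"pushImage"},
+//		Page:       1,
+//		Size:       10,
+//	})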
+
+// GetLastTriggerJobsGroupByEventType returns, for each event type, the most recently triggered notification job of the policy
+func GetLastTriggerJobsGroupByEventType(policyID int64) ([]*models.NotificationJob, error) {
+ o := dao.GetOrmer()
+ // get jobs last triggered(created) group by event_type. postgres group by usage reference:
+ // https://stackoverflow.com/questions/13325583/postgresql-max-and-group-by
+ sql := `select distinct on (event_type) event_type, id, creation_time, status, notify_type, job_uuid, update_time,
+ creation_time, job_detail from notification_job where policy_id = ?
+ order by event_type, id desc, creation_time, status, notify_type, job_uuid, update_time, creation_time, job_detail`
+
+ jobs := []*models.NotificationJob{}
+ _, err := o.Raw(sql, policyID).QueryRows(&jobs)
+ if err != nil {
+ log.Errorf("query last trigger info group by event type failed: %v", err)
+ return nil, err
+ }
+
+ return jobs, nil
+}
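+
+// For illustration, the shape the query relies on: SELECT DISTINCT ON
+// (event_type) ... ORDER BY event_type, id DESC keeps, for each event_type,
+// the row with the largest id, i.e. the most recently created job of that type.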
+
+// DeleteNotificationJob ...
+func DeleteNotificationJob(id int64) error {
+ o := dao.GetOrmer()
+ _, err := o.Delete(&models.NotificationJob{ID: id})
+ return err
+}
+
+// DeleteAllNotificationJobsByPolicyID ...
+func DeleteAllNotificationJobsByPolicyID(policyID int64) (int64, error) {
+ o := dao.GetOrmer()
+ return o.Delete(&models.NotificationJob{PolicyID: policyID}, "policy_id")
+}
+
+func notificationJobQueryConditions(query ...*models.NotificationJobQuery) orm.QuerySeter {
+ qs := dao.GetOrmer().QueryTable(&models.NotificationJob{})
+ if len(query) == 0 || query[0] == nil {
+ return qs
+ }
+
+ q := query[0]
+ if q.PolicyID != 0 {
+ qs = qs.Filter("PolicyID", q.PolicyID)
+ }
+ if len(q.Statuses) > 0 {
+ qs = qs.Filter("Status__in", q.Statuses)
+ }
+ if len(q.EventTypes) > 0 {
+ qs = qs.Filter("EventType__in", q.EventTypes)
+ }
+ return qs
+}
diff --git a/src/common/dao/notification/notification_job_test.go b/src/common/dao/notification/notification_job_test.go
new file mode 100644
index 000000000..0f7b97750
--- /dev/null
+++ b/src/common/dao/notification/notification_job_test.go
@@ -0,0 +1,263 @@
+package notification
+
+import (
+ "testing"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ testJob1 = &models.NotificationJob{
+ PolicyID: 1111,
+ EventType: "pushImage",
+ NotifyType: "http",
+ Status: "pending",
+ JobDetail: "{\"type\":\"pushImage\",\"occur_at\":1563536782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}",
+ UUID: "00000000",
+ }
+ testJob2 = &models.NotificationJob{
+ PolicyID: 111,
+ EventType: "pullImage",
+ NotifyType: "http",
+ Status: "",
+ JobDetail: "{\"type\":\"pushImage\",\"occur_at\":1563537782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}",
+ UUID: "00000000",
+ }
+ testJob3 = &models.NotificationJob{
+ PolicyID: 111,
+ EventType: "deleteImage",
+ NotifyType: "http",
+ Status: "pending",
+ JobDetail: "{\"type\":\"pushImage\",\"occur_at\":1563538782,\"event_data\":{\"resources\":[{\"digest\":\"sha256:bf1684a6e3676389ec861c602e97f27b03f14178e5bc3f70dce198f9f160cce9\",\"tag\":\"v1.0\",\"resource_url\":\"10.194.32.23/myproj/alpine:v1.0\"}],\"repository\":{\"date_created\":1563505587,\"name\":\"alpine\",\"namespace\":\"myproj\",\"repo_full_name\":\"myproj/alpine\",\"repo_type\":\"private\"}},\"operator\":\"admin\"}",
+ UUID: "00000000",
+ }
+)
+
+func TestAddNotificationJob(t *testing.T) {
+ tests := []struct {
+ name string
+ job *models.NotificationJob
+ want int64
+ wantErr bool
+ }{
+ {name: "AddNotificationJob nil", job: nil, wantErr: true},
+ {name: "AddNotificationJob 1", job: testJob1, want: 1},
+ {name: "AddNotificationJob 2", job: testJob2, want: 2},
+ {name: "AddNotificationJob 3", job: testJob3, want: 3},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := AddNotificationJob(tt.job)
+ if tt.wantErr {
+ require.NotNil(t, err, "wantErr: %s", err)
+ return
+ }
+ require.Nil(t, err)
+ assert.Equal(t, tt.want, got)
+ })
+ }
+}
+
+func TestGetTotalCountOfNotificationJobs(t *testing.T) {
+ type args struct {
+ query *models.NotificationJobQuery
+ }
+ tests := []struct {
+ name string
+ args args
+ want int64
+ wantErr bool
+ }{
+ {
+ name: "GetTotalCountOfNotificationJobs 1",
+ args: args{
+ query: &models.NotificationJobQuery{
+ PolicyID: 111,
+ },
+ },
+ want: 2,
+ },
+ {
+ name: "GetTotalCountOfNotificationJobs 2",
+ args: args{},
+ want: 3,
+ },
+ {
+ name: "GetTotalCountOfNotificationJobs 3",
+ args: args{
+ query: &models.NotificationJobQuery{
+ Statuses: []string{"pending"},
+ },
+ },
+ want: 3,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := GetTotalCountOfNotificationJobs(tt.args.query)
+ if tt.wantErr {
+ require.NotNil(t, err, "wantErr: %s", err)
+ return
+ }
+ require.Nil(t, err)
+ assert.Equal(t, tt.want, got)
+ })
+ }
+}
+
+func TestGetLastTriggerJobsGroupByEventType(t *testing.T) {
+ type args struct {
+ policyID int64
+ }
+ tests := []struct {
+ name string
+ args args
+ want []*models.NotificationJob
+ wantErr bool
+ }{
+ {
+ name: "GetLastTriggerJobsGroupByEventType",
+ args: args{
+ policyID: 111,
+ },
+ want: []*models.NotificationJob{
+ testJob2,
+ testJob3,
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := GetLastTriggerJobsGroupByEventType(tt.args.policyID)
+ if tt.wantErr {
+ require.NotNil(t, err, "wantErr: %s", err)
+ return
+ }
+ require.Nil(t, err)
+ assert.Equal(t, len(tt.want), len(got))
+ })
+ }
+
+}
+
+func TestUpdateNotificationJob(t *testing.T) {
+ type args struct {
+ job *models.NotificationJob
+ props []string
+ }
+ tests := []struct {
+ name string
+ args args
+ want int64
+ wantErr bool
+ }{
+ {name: "UpdateNotificationJob Want Error 1", args: args{job: nil}, wantErr: true},
+ {name: "UpdateNotificationJob Want Error 2", args: args{job: &models.NotificationJob{ID: 0}}, wantErr: true},
+ {
+ name: "UpdateNotificationJob 1",
+ args: args{
+ job: &models.NotificationJob{ID: 1, UUID: "111111111111111"},
+ props: []string{"UUID"},
+ },
+ },
+ {
+ name: "UpdateNotificationJob 2",
+ args: args{
+ job: &models.NotificationJob{ID: 2, UUID: "222222222222222"},
+ props: []string{"UUID"},
+ },
+ },
+ {
+ name: "UpdateNotificationJob 3",
+ args: args{
+ job: &models.NotificationJob{ID: 3, UUID: "333333333333333"},
+ props: []string{"UUID"},
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ _, err := UpdateNotificationJob(tt.args.job, tt.args.props...)
+ if tt.wantErr {
+ require.NotNil(t, err, "Error: %s", err)
+ return
+ }
+
+ require.Nil(t, err)
+ gotJob, err := GetNotificationJob(tt.args.job.ID)
+
+ require.Nil(t, err)
+ assert.Equal(t, tt.args.job.UUID, gotJob.UUID)
+ })
+ }
+}
+
+func TestDeleteNotificationJob(t *testing.T) {
+ type args struct {
+ id int64
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {name: "DeleteNotificationJob 1", args: args{id: 1}},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := DeleteNotificationJob(tt.args.id)
+
+ if tt.wantErr {
+ require.NotNil(t, err, "Error: %s", err)
+ return
+ }
+
+ require.Nil(t, err)
+ job, err := GetNotificationJob(tt.args.id)
+
+ require.Nil(t, err)
+ assert.Nil(t, job)
+ })
+ }
+}
+
+func TestDeleteAllNotificationJobs(t *testing.T) {
+ type args struct {
+ policyID int64
+ query []*models.NotificationJobQuery
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "DeleteAllNotificationJobs 1",
+ args: args{
+ policyID: 111,
+ query: []*models.NotificationJobQuery{
+ {PolicyID: 111},
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ _, err := DeleteAllNotificationJobsByPolicyID(tt.args.policyID)
+
+ if tt.wantErr {
+ require.NotNil(t, err, "Error: %s", err)
+ return
+ }
+
+ require.Nil(t, err)
+ jobs, err := GetNotificationJobs(tt.args.query...)
+
+ require.Nil(t, err)
+ assert.Equal(t, 0, len(jobs))
+ })
+ }
+}
diff --git a/src/common/dao/notification/notification_policy.go b/src/common/dao/notification/notification_policy.go
new file mode 100755
index 000000000..58bf8a52c
--- /dev/null
+++ b/src/common/dao/notification/notification_policy.go
@@ -0,0 +1,69 @@
+package notification
+
+import (
+ "github.com/astaxie/beego/orm"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/pkg/errors"
+)
+
+// GetNotificationPolicy returns the notification policy by id
+func GetNotificationPolicy(id int64) (*models.NotificationPolicy, error) {
+ policy := new(models.NotificationPolicy)
+ o := dao.GetOrmer()
+ err := o.QueryTable(policy).Filter("id", id).One(policy)
+ if err == orm.ErrNoRows {
+ return nil, nil
+ }
+ return policy, err
+}
+
+// GetNotificationPolicyByName returns the notification policy by name within the project
+func GetNotificationPolicyByName(name string, projectID int64) (*models.NotificationPolicy, error) {
+ policy := new(models.NotificationPolicy)
+ o := dao.GetOrmer()
+ err := o.QueryTable(policy).Filter("name", name).Filter("projectID", projectID).One(policy)
+ if err == orm.ErrNoRows {
+ return nil, nil
+ }
+ return policy, err
+}
+
+// GetNotificationPolicies returns all notification policies in the project
+func GetNotificationPolicies(projectID int64) ([]*models.NotificationPolicy, error) {
+ var policies []*models.NotificationPolicy
+ qs := dao.GetOrmer().QueryTable(new(models.NotificationPolicy)).Filter("ProjectID", projectID)
+
+ _, err := qs.All(&policies)
+ if err != nil {
+ return nil, err
+ }
+	return policies, nil
+}
+
+// AddNotificationPolicy inserts a new notification policy into the DB
+func AddNotificationPolicy(policy *models.NotificationPolicy) (int64, error) {
+ if policy == nil {
+ return 0, errors.New("nil policy")
+ }
+ o := dao.GetOrmer()
+ return o.Insert(policy)
+}
+
+// UpdateNotificationPolicy updates the specified notification policy
+func UpdateNotificationPolicy(policy *models.NotificationPolicy) error {
+ if policy == nil {
+ return errors.New("nil policy")
+ }
+ o := dao.GetOrmer()
+ _, err := o.Update(policy)
+ return err
+}
+
+// DeleteNotificationPolicy deletes the notification policy by id
+func DeleteNotificationPolicy(id int64) error {
+ o := dao.GetOrmer()
+ _, err := o.Delete(&models.NotificationPolicy{ID: id})
+ return err
+}
diff --git a/src/common/dao/notification/notification_policy_test.go b/src/common/dao/notification/notification_policy_test.go
new file mode 100644
index 000000000..756a01c7d
--- /dev/null
+++ b/src/common/dao/notification/notification_policy_test.go
@@ -0,0 +1,291 @@
+package notification
+
+import (
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ testPly1 = &models.NotificationPolicy{
+ Name: "webhook test policy1",
+ Description: "webhook test policy1 description",
+ ProjectID: 111,
+ TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
+ EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
+ Creator: "no one",
+ CreationTime: time.Now(),
+ UpdateTime: time.Now(),
+ Enabled: true,
+ }
+)
+
+var (
+ testPly2 = &models.NotificationPolicy{
+ Name: "webhook test policy2",
+ Description: "webhook test policy2 description",
+ ProjectID: 222,
+ TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
+ EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
+ Creator: "no one",
+ CreationTime: time.Now(),
+ UpdateTime: time.Now(),
+ Enabled: true,
+ }
+)
+
+var (
+ testPly3 = &models.NotificationPolicy{
+ Name: "webhook test policy3",
+ Description: "webhook test policy3 description",
+ ProjectID: 333,
+ TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
+ EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
+ Creator: "no one",
+ CreationTime: time.Now(),
+ UpdateTime: time.Now(),
+ Enabled: true,
+ }
+)
+
+func TestAddNotificationPolicy(t *testing.T) {
+ tests := []struct {
+ name string
+ policy *models.NotificationPolicy
+ want int64
+ wantErr bool
+ }{
+ {name: "AddNotificationPolicy nil", policy: nil, wantErr: true},
+ {name: "AddNotificationPolicy 1", policy: testPly1, want: 1},
+ {name: "AddNotificationPolicy 2", policy: testPly2, want: 2},
+ {name: "AddNotificationPolicy 3", policy: testPly3, want: 3},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := AddNotificationPolicy(tt.policy)
+
+ if tt.wantErr {
+ require.NotNil(t, err, "wantErr: %s", err)
+ return
+ }
+ require.Nil(t, err)
+ assert.Equal(t, tt.want, got)
+ })
+ }
+}
+
+func TestGetNotificationPolicies(t *testing.T) {
+ tests := []struct {
+ name string
+ projectID int64
+ wantPolicies []*models.NotificationPolicy
+ wantErr bool
+ }{
+ {name: "GetNotificationPolicies nil", projectID: 0, wantPolicies: []*models.NotificationPolicy{}},
+ {name: "GetNotificationPolicies 1", projectID: 111, wantPolicies: []*models.NotificationPolicy{testPly1}},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotPolicies, err := GetNotificationPolicies(tt.projectID)
+ if tt.wantErr {
+ require.NotNil(t, err, "wantErr: %s", err)
+ return
+ }
+
+ require.Nil(t, err)
+ for i, gotPolicy := range gotPolicies {
+ assert.Equal(t, tt.wantPolicies[i].Name, gotPolicy.Name)
+ assert.Equal(t, tt.wantPolicies[i].ID, gotPolicy.ID)
+ assert.Equal(t, tt.wantPolicies[i].EventTypesDB, gotPolicy.EventTypesDB)
+ assert.Equal(t, tt.wantPolicies[i].TargetsDB, gotPolicy.TargetsDB)
+ assert.Equal(t, tt.wantPolicies[i].Creator, gotPolicy.Creator)
+ assert.Equal(t, tt.wantPolicies[i].Enabled, gotPolicy.Enabled)
+ assert.Equal(t, tt.wantPolicies[i].Description, gotPolicy.Description)
+ }
+ })
+ }
+}
+
+func TestGetNotificationPolicy(t *testing.T) {
+ tests := []struct {
+ name string
+ id int64
+ wantPolicy *models.NotificationPolicy
+ wantErr bool
+ }{
+ {name: "GetRepPolicy 1", id: 1, wantPolicy: testPly1},
+ {name: "GetRepPolicy 2", id: 2, wantPolicy: testPly2},
+ {name: "GetRepPolicy 3", id: 3, wantPolicy: testPly3},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotPolicy, err := GetNotificationPolicy(tt.id)
+ if tt.wantErr {
+ require.NotNil(t, err, "wantErr: %s", err)
+ return
+ }
+ require.Nil(t, err)
+ assert.Equal(t, tt.wantPolicy.Name, gotPolicy.Name)
+ assert.Equal(t, tt.wantPolicy.ID, gotPolicy.ID)
+ assert.Equal(t, tt.wantPolicy.EventTypesDB, gotPolicy.EventTypesDB)
+ assert.Equal(t, tt.wantPolicy.TargetsDB, gotPolicy.TargetsDB)
+ assert.Equal(t, tt.wantPolicy.Creator, gotPolicy.Creator)
+ assert.Equal(t, tt.wantPolicy.Enabled, gotPolicy.Enabled)
+ assert.Equal(t, tt.wantPolicy.Description, gotPolicy.Description)
+ })
+ }
+}
+
+func TestGetNotificationPolicyByName(t *testing.T) {
+ type args struct {
+ name string
+ projectID int64
+ }
+ tests := []struct {
+ name string
+ args args
+ wantPolicy *models.NotificationPolicy
+ wantErr bool
+ }{
+ {name: "GetNotificationPolicyByName 1", args: args{name: testPly1.Name, projectID: testPly1.ProjectID}, wantPolicy: testPly1},
+ {name: "GetNotificationPolicyByName 2", args: args{name: testPly2.Name, projectID: testPly2.ProjectID}, wantPolicy: testPly2},
+ {name: "GetNotificationPolicyByName 3", args: args{name: testPly3.Name, projectID: testPly3.ProjectID}, wantPolicy: testPly3},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotPolicy, err := GetNotificationPolicyByName(tt.args.name, tt.args.projectID)
+ if tt.wantErr {
+ require.NotNil(t, err, "wantErr: %s", err)
+ return
+ }
+ require.Nil(t, err)
+ assert.Equal(t, tt.wantPolicy.Name, gotPolicy.Name)
+ assert.Equal(t, tt.wantPolicy.ID, gotPolicy.ID)
+ assert.Equal(t, tt.wantPolicy.EventTypesDB, gotPolicy.EventTypesDB)
+ assert.Equal(t, tt.wantPolicy.TargetsDB, gotPolicy.TargetsDB)
+ assert.Equal(t, tt.wantPolicy.Creator, gotPolicy.Creator)
+ assert.Equal(t, tt.wantPolicy.Enabled, gotPolicy.Enabled)
+ assert.Equal(t, tt.wantPolicy.Description, gotPolicy.Description)
+ })
+ }
+
+}
+
+func TestUpdateNotificationPolicy(t *testing.T) {
+ type args struct {
+ policy *models.NotificationPolicy
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "UpdateNotificationPolicy nil",
+ args: args{
+ policy: nil,
+ },
+ wantErr: true,
+ },
+
+ {
+ name: "UpdateNotificationPolicy 1",
+ args: args{
+ policy: &models.NotificationPolicy{
+ ID: 1,
+ Name: "webhook test policy1 new",
+ Description: "webhook test policy1 description new",
+ ProjectID: 111,
+ TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
+ EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
+ Creator: "no one",
+ CreationTime: time.Now(),
+ UpdateTime: time.Now(),
+ Enabled: true,
+ },
+ },
+ },
+ {
+ name: "UpdateNotificationPolicy 2",
+ args: args{
+ policy: &models.NotificationPolicy{
+ ID: 2,
+ Name: "webhook test policy2 new",
+ Description: "webhook test policy2 description new",
+ ProjectID: 222,
+ TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
+ EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
+ Creator: "no one",
+ CreationTime: time.Now(),
+ UpdateTime: time.Now(),
+ Enabled: true,
+ },
+ },
+ },
+ {
+ name: "UpdateNotificationPolicy 3",
+ args: args{
+ policy: &models.NotificationPolicy{
+ ID: 3,
+ Name: "webhook test policy3 new",
+ Description: "webhook test policy3 description new",
+ ProjectID: 333,
+ TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\",\"token\":\"xxxxxxxxx\",\"skip_cert_verify\":true}]",
+ EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\",\"uploadChart\",\"deleteChart\",\"downloadChart\",\"scanningFailed\",\"scanningCompleted\"]",
+ Creator: "no one",
+ CreationTime: time.Now(),
+ UpdateTime: time.Now(),
+ Enabled: true,
+ },
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := UpdateNotificationPolicy(tt.args.policy)
+
+ if tt.wantErr {
+ require.NotNil(t, err, "Error: %s", err)
+ return
+ }
+
+ require.Nil(t, err)
+ gotPolicy, err := GetNotificationPolicy(tt.args.policy.ID)
+
+ require.Nil(t, err)
+ assert.Equal(t, tt.args.policy.Description, gotPolicy.Description)
+ assert.Equal(t, tt.args.policy.Name, gotPolicy.Name)
+ })
+ }
+
+}
+
+func TestDeleteNotificationPolicy(t *testing.T) {
+ tests := []struct {
+ name string
+ id int64
+ wantErr bool
+ }{
+ {name: "DeleteNotificationPolicy 1", id: 1, wantErr: false},
+ {name: "DeleteNotificationPolicy 2", id: 2, wantErr: false},
+ {name: "DeleteNotificationPolicy 3", id: 3, wantErr: false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := DeleteNotificationPolicy(tt.id)
+ if tt.wantErr {
+ require.NotNil(t, err, "wantErr: %s", err)
+ return
+ }
+ require.Nil(t, err)
+ policy, err := GetNotificationPolicy(tt.id)
+ require.Nil(t, err)
+ assert.Nil(t, policy)
+ })
+ }
+}
diff --git a/src/common/dao/notification/notification_test.go b/src/common/dao/notification/notification_test.go
new file mode 100644
index 000000000..2912e75f9
--- /dev/null
+++ b/src/common/dao/notification/notification_test.go
@@ -0,0 +1,13 @@
+package notification
+
+import (
+ "os"
+ "testing"
+
+ "github.com/goharbor/harbor/src/common/dao"
+)
+
+func TestMain(m *testing.M) {
+ dao.PrepareTestForPostgresSQL()
+ os.Exit(m.Run())
+}
diff --git a/src/common/dao/pgsql.go b/src/common/dao/pgsql.go
index e1b3da6cb..bf98c6b08 100644
--- a/src/common/dao/pgsql.go
+++ b/src/common/dao/pgsql.go
@@ -31,12 +31,14 @@ import (
const defaultMigrationPath = "migrations/postgresql/"
type pgsql struct {
- host string
- port string
- usr string
- pwd string
- database string
- sslmode string
+ host string
+ port string
+ usr string
+ pwd string
+ database string
+ sslmode string
+ maxIdleConns int
+ maxOpenConns int
}
// Name returns the name of PostgreSQL
@@ -51,17 +53,19 @@ func (p *pgsql) String() string {
}
// NewPGSQL returns an instance of postgres
-func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string) Database {
+func NewPGSQL(host string, port string, usr string, pwd string, database string, sslmode string, maxIdleConns int, maxOpenConns int) Database {
if len(sslmode) == 0 {
sslmode = "disable"
}
return &pgsql{
- host: host,
- port: port,
- usr: usr,
- pwd: pwd,
- database: database,
- sslmode: sslmode,
+ host: host,
+ port: port,
+ usr: usr,
+ pwd: pwd,
+ database: database,
+ sslmode: sslmode,
+ maxIdleConns: maxIdleConns,
+ maxOpenConns: maxOpenConns,
}
}
@@ -82,7 +86,7 @@ func (p *pgsql) Register(alias ...string) error {
info := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
p.host, p.port, p.usr, p.pwd, p.database, p.sslmode)
- return orm.RegisterDataBase(an, "postgres", info)
+ return orm.RegisterDataBase(an, "postgres", info, p.maxIdleConns, p.maxOpenConns)
}
// UpgradeSchema calls migrate tool to upgrade schema to the latest based on the SQL scripts.
diff --git a/src/common/dao/pro_meta.go b/src/common/dao/pro_meta.go
index d4a9c4e6f..a6593e2ef 100644
--- a/src/common/dao/pro_meta.go
+++ b/src/common/dao/pro_meta.go
@@ -44,7 +44,7 @@ func DeleteProjectMetadata(projectID int64, name ...string) error {
params = append(params, projectID)
if len(name) > 0 {
- sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name)))
+ sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name)))
params = append(params, name)
}
@@ -74,7 +74,7 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad
params = append(params, projectID)
if len(name) > 0 {
- sql += fmt.Sprintf(` and name in ( %s )`, paramPlaceholder(len(name)))
+ sql += fmt.Sprintf(` and name in ( %s )`, ParamPlaceholderForIn(len(name)))
params = append(params, name)
}
@@ -82,7 +82,9 @@ func GetProjectMetadata(projectID int64, name ...string) ([]*models.ProjectMetad
return proMetas, err
}
-func paramPlaceholder(n int) string {
+// ParamPlaceholderForIn returns a string that contains placeholders for sql keyword "in"
+// e.g. n=3, returns "?,?,?"
+func ParamPlaceholderForIn(n int) string {
placeholders := []string{}
for i := 0; i < n; i++ {
placeholders = append(placeholders, "?")
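
Note: the newly exported helper only produces comma-separated `?` placeholders. A minimal standalone sketch (standard library only; table and column names are hypothetical) of how a caller composes it into an `in` clause:

package main

import (
	"fmt"
	"strings"
)

// paramPlaceholderForIn mirrors dao.ParamPlaceholderForIn: n=3 yields "?,?,?".
func paramPlaceholderForIn(n int) string {
	placeholders := []string{}
	for i := 0; i < n; i++ {
		placeholders = append(placeholders, "?")
	}
	return strings.Join(placeholders, ",")
}

func main() {
	names := []string{"public", "enable_content_trust"}
	sql := fmt.Sprintf(`select * from project_metadata where project_id = ? and name in ( %s )`,
		paramPlaceholderForIn(len(names)))
	fmt.Println(sql) // ... and name in ( ?,? )
}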
diff --git a/src/common/dao/project.go b/src/common/dao/project.go
index 423b6b23b..e027ec221 100644
--- a/src/common/dao/project.go
+++ b/src/common/dao/project.go
@@ -156,19 +156,21 @@ func GetProjects(query *models.ProjectQueryParam) ([]*models.Project, error) {
// GetGroupProjects - Get user's all projects, including user is the user member of this project
// and the user is in the group which is a group member of this project.
-func GetGroupProjects(groupDNCondition string, query *models.ProjectQueryParam) ([]*models.Project, error) {
+func GetGroupProjects(groupIDs []int, query *models.ProjectQueryParam) ([]*models.Project, error) {
sql, params := projectQueryConditions(query)
sql = `select distinct p.project_id, p.name, p.owner_id,
p.creation_time, p.update_time ` + sql
- if len(groupDNCondition) > 0 {
+ groupIDCondition := JoinNumberConditions(groupIDs)
+ if len(groupIDs) > 0 {
sql = fmt.Sprintf(
`%s union select distinct p.project_id, p.name, p.owner_id, p.creation_time, p.update_time
from project p
left join project_member pm on p.project_id = pm.project_id
- left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g' and ug.group_type = 1
- where ug.ldap_group_dn in ( %s ) order by name`,
- sql, groupDNCondition)
+ left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g'
+ where ug.id in ( %s )`,
+ sql, groupIDCondition)
}
+ sql = sql + ` order by name`
sqlStr, queryParams := CreatePagination(query, sql, params)
log.Debugf("query sql:%v", sql)
var projects []*models.Project
@@ -178,10 +180,11 @@ func GetGroupProjects(groupDNCondition string, query *models.ProjectQueryParam)
// GetTotalGroupProjects - Get the total count of projects, including user is the member of this project and the
// user is in the group, which is the group member of this project.
-func GetTotalGroupProjects(groupDNCondition string, query *models.ProjectQueryParam) (int, error) {
+func GetTotalGroupProjects(groupIDs []int, query *models.ProjectQueryParam) (int, error) {
var sql string
sqlCondition, params := projectQueryConditions(query)
- if len(groupDNCondition) == 0 {
+ groupIDCondition := JoinNumberConditions(groupIDs)
+ if len(groupIDs) == 0 {
sql = `select count(1) ` + sqlCondition
} else {
sql = fmt.Sprintf(
@@ -189,9 +192,9 @@ func GetTotalGroupProjects(groupDNCondition string, query *models.ProjectQueryPa
from ( select p.project_id %s union select p.project_id
from project p
left join project_member pm on p.project_id = pm.project_id
- left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g' and ug.group_type = 1
- where ug.ldap_group_dn in ( %s )) t`,
- sqlCondition, groupDNCondition)
+ left join user_group ug on ug.id = pm.entity_id and pm.entity_type = 'g'
+ where ug.id in ( %s )) t`,
+ sqlCondition, groupIDCondition)
}
log.Debugf("query sql:%v", sql)
var count int
@@ -257,7 +260,7 @@ func projectQueryConditions(query *models.ProjectQueryParam) (string, []interfac
}
if len(query.ProjectIDs) > 0 {
sql += fmt.Sprintf(` and p.project_id in ( %s )`,
- paramPlaceholder(len(query.ProjectIDs)))
+ ParamPlaceholderForIn(len(query.ProjectIDs)))
params = append(params, query.ProjectIDs)
}
return sql, params
@@ -291,29 +294,24 @@ func DeleteProject(id int64) error {
return err
}
-// GetRolesByLDAPGroup - Get Project roles of the
-// specified group DN is a member of current project
-func GetRolesByLDAPGroup(projectID int64, groupDNCondition string) ([]int, error) {
+// GetRolesByGroupID - Get Project roles of the
+// specified group is a member of current project
+func GetRolesByGroupID(projectID int64, groupIDs []int) ([]int, error) {
var roles []int
- if len(groupDNCondition) == 0 {
+ if len(groupIDs) == 0 {
return roles, nil
}
+ groupIDCondition := JoinNumberConditions(groupIDs)
o := GetOrmer()
- // Because an LDAP user can be memberof multiple groups,
- // the role is in descent order (1-admin, 2-developer, 3-guest, 4-master), use min to select the max privilege role.
sql := fmt.Sprintf(
- `select min(pm.role) from project_member pm
+ `select distinct pm.role from project_member pm
left join user_group ug on pm.entity_type = 'g' and pm.entity_id = ug.id
- where ug.ldap_group_dn in ( %s ) and pm.project_id = ? `,
- groupDNCondition)
- log.Debugf("sql:%v", sql)
+ where ug.id in ( %s ) and pm.project_id = ?`,
+ groupIDCondition)
+ log.Debugf("sql for GetRolesByGroupID(project ID: %d, group ids: %v):%v", projectID, groupIDs, sql)
if _, err := o.Raw(sql, projectID).QueryRows(&roles); err != nil {
- log.Warningf("Error in GetRolesByLDAPGroup, error: %v", err)
+ log.Warningf("Error in GetRolesByGroupID, error: %v", err)
return nil, err
}
- // If there is no row selected, the min returns an empty row, to avoid return 0 as role
- if len(roles) == 1 && roles[0] == 0 {
- return []int{}, nil
- }
return roles, nil
}
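
Note: the switch from DN strings to numeric group IDs also changes how the `in` condition is built. JoinNumberConditions inlines the integers directly into the SQL text, which stays injection-safe because the inputs are ints rather than user-supplied strings. A sketch of the resulting query text (group IDs made up):

package main

import (
	"fmt"
	"strings"
)

// joinNumberConditions mirrors dao.JoinNumberConditions: []int{4, 7, 9} -> "4,7,9".
func joinNumberConditions(ids []int) string {
	return strings.Trim(strings.Replace(fmt.Sprint(ids), " ", ",", -1), "[]")
}

func main() {
	groupIDs := []int{4, 7, 9}
	sql := fmt.Sprintf(
		`select distinct pm.role from project_member pm
		left join user_group ug on pm.entity_type = 'g' and pm.entity_id = ug.id
		where ug.id in ( %s ) and pm.project_id = ?`,
		joinNumberConditions(groupIDs))
	fmt.Println(sql) // ... where ug.id in ( 4,7,9 ) and pm.project_id = ?
}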
diff --git a/src/common/dao/project/projectmember.go b/src/common/dao/project/projectmember.go
index f9a81e706..081b036f0 100644
--- a/src/common/dao/project/projectmember.go
+++ b/src/common/dao/project/projectmember.go
@@ -30,13 +30,13 @@ func GetProjectMember(queryMember models.Member) ([]*models.Member, error) {
}
o := dao.GetOrmer()
- sql := ` select a.* from (select pm.id as id, pm.project_id as project_id, ug.id as entity_id, ug.group_name as entity_name, ug.creation_time, ug.update_time, r.name as rolename,
- r.role_id as role, pm.entity_type as entity_type from user_group ug join project_member pm
+ sql := ` select a.* from (select pm.id as id, pm.project_id as project_id, ug.id as entity_id, ug.group_name as entity_name, ug.creation_time, ug.update_time, r.name as rolename,
+ r.role_id as role, pm.entity_type as entity_type from user_group ug join project_member pm
on pm.project_id = ? and ug.id = pm.entity_id join role r on pm.role = r.role_id where pm.entity_type = 'g'
union
- select pm.id as id, pm.project_id as project_id, u.user_id as entity_id, u.username as entity_name, u.creation_time, u.update_time, r.name as rolename,
- r.role_id as role, pm.entity_type as entity_type from harbor_user u join project_member pm
- on pm.project_id = ? and u.user_id = pm.entity_id
+ select pm.id as id, pm.project_id as project_id, u.user_id as entity_id, u.username as entity_name, u.creation_time, u.update_time, r.name as rolename,
+ r.role_id as role, pm.entity_type as entity_type from harbor_user u join project_member pm
+ on pm.project_id = ? and u.user_id = pm.entity_id
join role r on pm.role = r.role_id where u.deleted = false and pm.entity_type = 'u') as a where a.project_id = ? `
queryParam := make([]interface{}, 1)
@@ -70,6 +70,27 @@ func GetProjectMember(queryMember models.Member) ([]*models.Member, error) {
return members, err
}
+// GetTotalOfProjectMembers returns total of project members
+func GetTotalOfProjectMembers(projectID int64, roles ...int) (int64, error) {
+ log.Debugf("Query condition %+v", projectID)
+ if projectID == 0 {
+ return 0, fmt.Errorf("failed to get total of project members, project id required %v", projectID)
+ }
+
+ sql := "SELECT COUNT(1) FROM project_member WHERE project_id = ?"
+
+ queryParam := []interface{}{projectID}
+
+ if len(roles) > 0 {
+ sql += " AND role = ?"
+ queryParam = append(queryParam, roles[0])
+ }
+
+ var count int64
+ err := dao.GetOrmer().Raw(sql, queryParam).QueryRow(&count)
+ return count, err
+}
+
// AddProjectMember inserts a record to table project_member
func AddProjectMember(member models.Member) (int, error) {
@@ -120,23 +141,23 @@ func DeleteProjectMemberByID(pmid int) error {
// SearchMemberByName search members of the project by entity_name
func SearchMemberByName(projectID int64, entityName string) ([]*models.Member, error) {
o := dao.GetOrmer()
- sql := `select pm.id, pm.project_id,
- u.username as entity_name,
+ sql := `select pm.id, pm.project_id,
+ u.username as entity_name,
r.name as rolename,
- pm.role, pm.entity_id, pm.entity_type
+ pm.role, pm.entity_id, pm.entity_type
from project_member pm
left join harbor_user u on pm.entity_id = u.user_id and pm.entity_type = 'u'
left join role r on pm.role = r.role_id
- where u.deleted = false and pm.project_id = ? and u.username like ?
+ where u.deleted = false and pm.project_id = ? and u.username like ?
union
- select pm.id, pm.project_id,
- ug.group_name as entity_name,
+ select pm.id, pm.project_id,
+ ug.group_name as entity_name,
r.name as rolename,
- pm.role, pm.entity_id, pm.entity_type
+ pm.role, pm.entity_id, pm.entity_type
from project_member pm
left join user_group ug on pm.entity_id = ug.id and pm.entity_type = 'g'
left join role r on pm.role = r.role_id
- where pm.project_id = ? and ug.group_name like ?
+ where pm.project_id = ? and ug.group_name like ?
order by entity_name `
queryParam := make([]interface{}, 4)
queryParam = append(queryParam, projectID)
@@ -148,16 +169,3 @@ func SearchMemberByName(projectID int64, entityName string) ([]*models.Member, e
_, err := o.Raw(sql, queryParam).QueryRows(&members)
return members, err
}
-
-// GetRolesByGroup -- Query group roles
-func GetRolesByGroup(projectID int64, groupDNCondition string) []int {
- var roles []int
- o := dao.GetOrmer()
- sql := `select role from project_member pm
- left join user_group ug on pm.project_id = ?
- where ug.group_type = 1 and ug.ldap_group_dn in (` + groupDNCondition + `)`
- if _, err := o.Raw(sql, projectID).QueryRows(&roles); err != nil {
- return roles
- }
- return roles
-}
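
Note on the new GetTotalOfProjectMembers: although roles is variadic, only the first role is applied to the filter, so callers should pass at most one. A simplified sketch of the query assembly (not the DAO itself):

package main

import "fmt"

// buildTotalQuery mirrors GetTotalOfProjectMembers' SQL assembly: a COUNT over
// project_member, optionally narrowed to the first role passed.
func buildTotalQuery(projectID int64, roles ...int) (string, []interface{}) {
	sql := "SELECT COUNT(1) FROM project_member WHERE project_id = ?"
	params := []interface{}{projectID}
	if len(roles) > 0 {
		sql += " AND role = ?"
		params = append(params, roles[0]) // only the first role is honored
	}
	return sql, params
}

func main() {
	sql, params := buildTotalQuery(1, 2)
	fmt.Println(sql, params) // ... WHERE project_id = ? AND role = ? [1 2]
}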
diff --git a/src/common/dao/project/projectmember_test.go b/src/common/dao/project/projectmember_test.go
index 66de3b6a8..fadb598b2 100644
--- a/src/common/dao/project/projectmember_test.go
+++ b/src/common/dao/project/projectmember_test.go
@@ -51,11 +51,18 @@ func TestMain(m *testing.M) {
"update project set owner_id = (select user_id from harbor_user where username = 'member_test_01') where name = 'member_test_01'",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select user_id from harbor_user where username = 'member_test_01'), 'u', 1)",
"insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_01') , (select id from user_group where group_name = 'test_group_01'), 'g', 1)",
+
+ "insert into harbor_user (username, email, password, realname) values ('member_test_02', 'member_test_02@example.com', '123456', 'member_test_02')",
+ "insert into project (name, owner_id) values ('member_test_02', 1)",
+ "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_02', 1, 'CN=harbor_users,OU=sample,OU=vmware,DC=harbor,DC=com')",
+ "update project set owner_id = (select user_id from harbor_user where username = 'member_test_02') where name = 'member_test_02'",
+ "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_02') , (select user_id from harbor_user where username = 'member_test_02'), 'u', 1)",
+ "insert into project_member (project_id, entity_id, entity_type, role) values ( (select project_id from project where name = 'member_test_02') , (select id from user_group where group_name = 'test_group_02'), 'g', 1)",
}
clearSqls := []string{
- "delete from project where name='member_test_01'",
- "delete from harbor_user where username='member_test_01' or username='pm_sample'",
+ "delete from project where name='member_test_01' or name='member_test_02'",
+ "delete from harbor_user where username='member_test_01' or username='member_test_02' or username='pm_sample'",
"delete from user_group",
"delete from project_member",
}
@@ -285,6 +292,39 @@ func TestGetProjectMember(t *testing.T) {
}
}
+
+func TestGetTotalOfProjectMembers(t *testing.T) {
+ currentProject, _ := dao.GetProjectByName("member_test_02")
+
+ type args struct {
+ projectID int64
+ roles []int
+ }
+ tests := []struct {
+ name string
+ args args
+ want int64
+ wantErr bool
+ }{
+ {"Get total of project admin", args{currentProject.ProjectID, []int{common.RoleProjectAdmin}}, 2, false},
+ {"Get total of master", args{currentProject.ProjectID, []int{common.RoleMaster}}, 0, false},
+ {"Get total of developer", args{currentProject.ProjectID, []int{common.RoleDeveloper}}, 0, false},
+ {"Get total of guest", args{currentProject.ProjectID, []int{common.RoleGuest}}, 0, false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := GetTotalOfProjectMembers(tt.args.projectID, tt.args.roles...)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("GetTotalOfProjectMembers() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if got != tt.want {
+ t.Errorf("GetTotalOfProjectMembers() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
func PrepareGroupTest() {
initSqls := []string{
`insert into user_group (group_name, group_type, ldap_group_dn) values ('harbor_group_01', 1, 'cn=harbor_user,dc=example,dc=com')`,
@@ -305,30 +345,3 @@ func PrepareGroupTest() {
}
dao.PrepareTestData(clearSqls, initSqls)
}
-func TestGetRolesByGroup(t *testing.T) {
- PrepareGroupTest()
-
- project, err := dao.GetProjectByName("group_project")
- if err != nil {
- t.Errorf("Error occurred when GetProjectByName : %v", err)
- }
- type args struct {
- projectID int64
- groupDNCondition string
- }
- tests := []struct {
- name string
- args args
- want []int
- }{
- {"Query group with role", args{project.ProjectID, "'cn=harbor_user,dc=example,dc=com'"}, []int{2}},
- {"Query group no role", args{project.ProjectID, "'cn=another_user,dc=example,dc=com'"}, []int{}},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- if got := GetRolesByGroup(tt.args.projectID, tt.args.groupDNCondition); !dao.ArrayEqual(got, tt.want) {
- t.Errorf("GetRolesByGroup() = %v, want %v", got, tt.want)
- }
- })
- }
-}
diff --git a/src/common/dao/project_blob.go b/src/common/dao/project_blob.go
new file mode 100644
index 000000000..b6ade9938
--- /dev/null
+++ b/src/common/dao/project_blob.go
@@ -0,0 +1,122 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/models"
+)
+
+// AddBlobToProject ...
+func AddBlobToProject(blobID, projectID int64) (int64, error) {
+ pb := &models.ProjectBlob{
+ BlobID: blobID,
+ ProjectID: projectID,
+ CreationTime: time.Now(),
+ }
+
+ _, id, err := GetOrmer().ReadOrCreate(pb, "blob_id", "project_id")
+ return id, err
+}
+
+// AddBlobsToProject ...
+func AddBlobsToProject(projectID int64, blobs ...*models.Blob) (int64, error) {
+ if len(blobs) == 0 {
+ return 0, nil
+ }
+
+ now := time.Now()
+
+ var projectBlobs []*models.ProjectBlob
+ for _, blob := range blobs {
+ projectBlobs = append(projectBlobs, &models.ProjectBlob{
+ BlobID: blob.ID,
+ ProjectID: projectID,
+ CreationTime: now,
+ })
+ }
+
+ return GetOrmer().InsertMulti(len(projectBlobs), projectBlobs)
+}
+
+// RemoveBlobsFromProject ...
+func RemoveBlobsFromProject(projectID int64, blobs ...*models.Blob) error {
+ var blobIDs []interface{}
+ for _, blob := range blobs {
+ blobIDs = append(blobIDs, blob.ID)
+ }
+
+ if len(blobIDs) == 0 {
+ return nil
+ }
+
+ sql := fmt.Sprintf(`DELETE FROM project_blob WHERE blob_id IN (%s)`, ParamPlaceholderForIn(len(blobIDs)))
+
+ _, err := GetOrmer().Raw(sql, blobIDs).Exec()
+ return err
+}
+
+// HasBlobInProject ...
+func HasBlobInProject(projectID int64, digest string) (bool, error) {
+ sql := `SELECT COUNT(*) FROM project_blob JOIN blob ON project_blob.blob_id = blob.id AND project_id = ? AND digest = ?`
+
+ var count int64
+ if err := GetOrmer().Raw(sql, projectID, digest).QueryRow(&count); err != nil {
+ return false, err
+ }
+
+ return count > 0, nil
+}
+
+// GetBlobsNotInProject returns blobs not in project
+func GetBlobsNotInProject(projectID int64, blobDigests ...string) ([]*models.Blob, error) {
+ if len(blobDigests) == 0 {
+ return nil, nil
+ }
+
+ sql := fmt.Sprintf("SELECT * FROM blob WHERE id NOT IN (SELECT blob_id FROM project_blob WHERE project_id = ?) AND digest IN (%s)",
+ ParamPlaceholderForIn(len(blobDigests)))
+
+ params := []interface{}{projectID}
+ for _, digest := range blobDigests {
+ params = append(params, digest)
+ }
+
+ var blobs []*models.Blob
+ if _, err := GetOrmer().Raw(sql, params...).QueryRows(&blobs); err != nil {
+ return nil, err
+ }
+
+ return blobs, nil
+}
+
+// CountSizeOfProject ...
+func CountSizeOfProject(pid int64) (int64, error) {
+ var blobs []models.Blob
+
+ _, err := GetOrmer().Raw(`SELECT bb.id, bb.digest, bb.content_type, bb.size, bb.creation_time FROM project_blob pb LEFT JOIN blob bb ON pb.blob_id = bb.id WHERE pb.project_id = ? `, pid).QueryRows(&blobs)
+ if err != nil {
+ return 0, err
+ }
+
+ var size int64
+ for _, blob := range blobs {
+ size += blob.Size
+ }
+
+ return size, err
+}
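
Note: CountSizeOfProject fetches every blob joined through project_blob and sums the sizes in Go rather than asking the database for SUM(bb.size). A minimal in-memory sketch of that aggregation step:

package main

import "fmt"

// blob mirrors the size-bearing fields of models.Blob.
type blob struct {
	Digest string
	Size   int64
}

// countSize mirrors the aggregation CountSizeOfProject performs after the join.
func countSize(blobs []blob) int64 {
	var size int64
	for _, b := range blobs {
		size += b.Size
	}
	return size
}

func main() {
	blobs := []blob{{"sha256:aaa", 101}, {"sha256:bbb", 202}}
	fmt.Println(countSize(blobs)) // 303, the value TestCountSizeOfProject expects
}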
diff --git a/src/common/dao/project_blob_test.go b/src/common/dao/project_blob_test.go
new file mode 100644
index 000000000..3d3643aee
--- /dev/null
+++ b/src/common/dao/project_blob_test.go
@@ -0,0 +1,68 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "testing"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHasBlobInProject(t *testing.T) {
+ _, blob, err := GetOrCreateBlob(&models.Blob{
+ Digest: digest.FromString(utils.GenerateRandomString()).String(),
+ Size: 100,
+ })
+ require.Nil(t, err)
+
+ _, err = AddBlobToProject(blob.ID, 1)
+ require.Nil(t, err)
+
+ has, err := HasBlobInProject(1, blob.Digest)
+ require.Nil(t, err)
+ assert.True(t, has)
+}
+
+func TestCountSizeOfProject(t *testing.T) {
+ id1, err := AddBlob(&models.Blob{
+ Digest: "CountSizeOfProject_blob1",
+ Size: 101,
+ })
+ require.Nil(t, err)
+
+ id2, err := AddBlob(&models.Blob{
+ Digest: "CountSizeOfProject_blob2",
+ Size: 202,
+ })
+ require.Nil(t, err)
+
+ pid1, err := AddProject(models.Project{
+ Name: "CountSizeOfProject_project1",
+ OwnerID: 1,
+ })
+ require.Nil(t, err)
+
+ _, err = AddBlobToProject(id1, pid1)
+ require.Nil(t, err)
+ _, err = AddBlobToProject(id2, pid1)
+ require.Nil(t, err)
+
+ pSize, err := CountSizeOfProject(pid1)
+ require.Nil(t, err)
+ assert.Equal(t, int64(303), pSize)
+}
diff --git a/src/common/dao/project_test.go b/src/common/dao/project_test.go
index 7358840b9..b35200047 100644
--- a/src/common/dao/project_test.go
+++ b/src/common/dao/project_test.go
@@ -118,124 +118,6 @@ func Test_projectQueryConditions(t *testing.T) {
}
}
-func TestGetGroupProjects(t *testing.T) {
- prepareGroupTest()
- query := &models.ProjectQueryParam{Member: &models.MemberQuery{Name: "sample_group"}}
- type args struct {
- groupDNCondition string
- query *models.ProjectQueryParam
- }
- tests := []struct {
- name string
- args args
- wantSize int
- wantErr bool
- }{
- {"Verify correct sql", args{groupDNCondition: "'cn=harbor_user,dc=example,dc=com'", query: query}, 1, false},
- {"Verify missed sql", args{groupDNCondition: "'cn=another_user,dc=example,dc=com'", query: query}, 0, false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := GetGroupProjects(tt.args.groupDNCondition, tt.args.query)
- if (err != nil) != tt.wantErr {
- t.Errorf("GetGroupProjects() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if len(got) != tt.wantSize {
- t.Errorf("GetGroupProjects() = %v, want %v", got, tt.wantSize)
- }
- })
- }
-}
-
-func prepareGroupTest() {
- initSqls := []string{
- `insert into user_group (group_name, group_type, ldap_group_dn) values ('harbor_group_01', 1, 'cn=harbor_user,dc=example,dc=com')`,
- `insert into harbor_user (username, email, password, realname) values ('sample01', 'sample01@example.com', 'harbor12345', 'sample01')`,
- `insert into project (name, owner_id) values ('group_project', 1)`,
- `insert into project (name, owner_id) values ('group_project_private', 1)`,
- `insert into project_metadata (project_id, name, value) values ((select project_id from project where name = 'group_project'), 'public', 'false')`,
- `insert into project_metadata (project_id, name, value) values ((select project_id from project where name = 'group_project_private'), 'public', 'false')`,
- `insert into project_member (project_id, entity_id, entity_type, role) values ((select project_id from project where name = 'group_project'), (select id from user_group where group_name = 'harbor_group_01'),'g', 2)`,
- }
-
- clearSqls := []string{
- `delete from project_metadata where project_id in (select project_id from project where name in ('group_project', 'group_project_private'))`,
- `delete from project where name in ('group_project', 'group_project_private')`,
- `delete from project_member where project_id in (select project_id from project where name in ('group_project', 'group_project_private'))`,
- `delete from user_group where group_name = 'harbor_group_01'`,
- `delete from harbor_user where username = 'sample01'`,
- }
- PrepareTestData(clearSqls, initSqls)
-}
-
-func TestGetTotalGroupProjects(t *testing.T) {
- prepareGroupTest()
- query := &models.ProjectQueryParam{Member: &models.MemberQuery{Name: "sample_group"}}
- type args struct {
- groupDNCondition string
- query *models.ProjectQueryParam
- }
- tests := []struct {
- name string
- args args
- want int
- wantErr bool
- }{
- {"Verify correct sql", args{groupDNCondition: "'cn=harbor_user,dc=example,dc=com'", query: query}, 1, false},
- {"Verify missed sql", args{groupDNCondition: "'cn=another_user,dc=example,dc=com'", query: query}, 0, false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := GetTotalGroupProjects(tt.args.groupDNCondition, tt.args.query)
- if (err != nil) != tt.wantErr {
- t.Errorf("GetTotalGroupProjects() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if got != tt.want {
- t.Errorf("GetTotalGroupProjects() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestGetRolesByLDAPGroup(t *testing.T) {
- prepareGroupTest()
- project, err := GetProjectByName("group_project")
- if err != nil {
- t.Errorf("Error occurred when Get project by name: %v", err)
- }
- privateProject, err := GetProjectByName("group_project_private")
- if err != nil {
- t.Errorf("Error occurred when Get project by name: %v", err)
- }
- type args struct {
- projectID int64
- groupDNCondition string
- }
- tests := []struct {
- name string
- args args
- wantSize int
- wantErr bool
- }{
- {"Check normal", args{project.ProjectID, "'cn=harbor_user,dc=example,dc=com'"}, 1, false},
- {"Check non exist", args{privateProject.ProjectID, "'cn=not_harbor_user,dc=example,dc=com'"}, 0, false},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- got, err := GetRolesByLDAPGroup(tt.args.projectID, tt.args.groupDNCondition)
- if (err != nil) != tt.wantErr {
- t.Errorf("TestGetRolesByLDAPGroup() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if len(got) != tt.wantSize {
- t.Errorf("TestGetRolesByLDAPGroup() = %v, want %v", len(got), tt.wantSize)
- }
- })
- }
-}
-
func TestProjetExistsByName(t *testing.T) {
name := "project_exist_by_name_test"
exist := ProjectExistsByName(name)
diff --git a/src/common/dao/quota.go b/src/common/dao/quota.go
new file mode 100644
index 000000000..c86c53797
--- /dev/null
+++ b/src/common/dao/quota.go
@@ -0,0 +1,235 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/astaxie/beego/orm"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/quota/driver"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/pkg/types"
+)
+
+var (
+ quotaOrderMap = map[string]string{
+ "creation_time": "b.creation_time asc",
+ "+creation_time": "b.creation_time asc",
+ "-creation_time": "b.creation_time desc",
+ "update_time": "b.update_time asc",
+ "+update_time": "b.update_time asc",
+ "-update_time": "b.update_time desc",
+ }
+)
+
+// AddQuota adds quota to the database.
+func AddQuota(quota models.Quota) (int64, error) {
+ now := time.Now()
+ quota.CreationTime = now
+ quota.UpdateTime = now
+ return GetOrmer().Insert("a)
+}
+
+// GetQuota returns quota by id.
+func GetQuota(id int64) (*models.Quota, error) {
+ q := models.Quota{ID: id}
+ err := GetOrmer().Read(&q, "ID")
+ if err == orm.ErrNoRows {
+ return nil, nil
+ }
+ return &q, err
+}
+
+// UpdateQuota updates the quota.
+func UpdateQuota(quota models.Quota) error {
+ quota.UpdateTime = time.Now()
+ _, err := GetOrmer().Update("a)
+ return err
+}
+
+// Quota is the quota model used by the API
+type Quota struct {
+ ID int64 `orm:"pk;auto;column(id)" json:"id"`
+ Ref driver.RefObject `json:"ref"`
+ Reference string `orm:"column(reference)" json:"-"`
+ ReferenceID string `orm:"column(reference_id)" json:"-"`
+ Hard string `orm:"column(hard);type(jsonb)" json:"-"`
+ Used string `orm:"column(used);type(jsonb)" json:"-"`
+ CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
+ UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
+}
+
+// MarshalJSON ...
+func (q *Quota) MarshalJSON() ([]byte, error) {
+ hard, err := types.NewResourceList(q.Hard)
+ if err != nil {
+ return nil, err
+ }
+
+ used, err := types.NewResourceList(q.Used)
+ if err != nil {
+ return nil, err
+ }
+
+ type Alias Quota
+ return json.Marshal(&struct {
+ *Alias
+ Hard types.ResourceList `json:"hard"`
+ Used types.ResourceList `json:"used"`
+ }{
+ Alias: (*Alias)(q),
+ Hard: hard,
+ Used: used,
+ })
+}
+
+// ListQuotas returns quotas by query.
+func ListQuotas(query ...*models.QuotaQuery) ([]*Quota, error) {
+ condition, params := quotaQueryConditions(query...)
+
+ sql := fmt.Sprintf(`
+SELECT
+ a.id,
+ a.reference,
+ a.reference_id,
+ a.hard,
+ b.used,
+ b.creation_time,
+ b.update_time
+FROM
+ quota AS a
+ JOIN quota_usage AS b ON a.id = b.id %s`, condition)
+
+ orderBy := quotaOrderBy(query...)
+ if orderBy != "" {
+ sql += ` order by ` + orderBy
+ }
+
+ if len(query) > 0 && query[0] != nil {
+ page, size := query[0].Page, query[0].Size
+ if size > 0 {
+ sql += ` limit ?`
+ params = append(params, size)
+ if page > 0 {
+ sql += ` offset ?`
+ params = append(params, size*(page-1))
+ }
+ }
+ }
+
+ var quotas []*Quota
+ if _, err := GetOrmer().Raw(sql, params).QueryRows("as); err != nil {
+ return nil, err
+ }
+
+ for _, quota := range quotas {
+ d, ok := driver.Get(quota.Reference)
+ if !ok {
+ continue
+ }
+
+ ref, err := d.Load(quota.ReferenceID)
+ if err != nil {
+ log.Warning(fmt.Sprintf("Load quota reference object (%s, %s) failed: %v", quota.Reference, quota.ReferenceID, err))
+ continue
+ }
+
+ quota.Ref = ref
+ }
+
+ return quotas, nil
+}
+
+// GetTotalOfQuotas returns total of quotas
+func GetTotalOfQuotas(query ...*models.QuotaQuery) (int64, error) {
+ condition, params := quotaQueryConditions(query...)
+ sql := fmt.Sprintf("SELECT COUNT(1) FROM quota AS a JOIN quota_usage AS b ON a.id = b.id %s", condition)
+
+ var count int64
+ if err := GetOrmer().Raw(sql, params).QueryRow(&count); err != nil {
+ return 0, err
+ }
+
+ return count, nil
+}
+
+func quotaQueryConditions(query ...*models.QuotaQuery) (string, []interface{}) {
+ params := []interface{}{}
+ sql := ""
+ if len(query) == 0 || query[0] == nil {
+ return sql, params
+ }
+
+ sql += `WHERE 1=1 `
+
+ q := query[0]
+ if q.ID != 0 {
+ sql += `AND a.id = ? `
+ params = append(params, q.ID)
+ }
+ if q.Reference != "" {
+ sql += `AND a.reference = ? `
+ params = append(params, q.Reference)
+ }
+ if q.ReferenceID != "" {
+ sql += `AND a.reference_id = ? `
+ params = append(params, q.ReferenceID)
+ }
+
+ if len(q.ReferenceIDs) != 0 {
+ sql += fmt.Sprintf(`AND a.reference_id IN (%s) `, ParamPlaceholderForIn(len(q.ReferenceIDs)))
+ params = append(params, q.ReferenceIDs)
+ }
+
+ return sql, params
+}
+
+func castQuantity(field string) string {
+ // cast -1 to max int64 when order by field
+ return fmt.Sprintf("CAST( (CASE WHEN (%[1]s) IS NULL THEN '0' WHEN (%[1]s) = '-1' THEN '9223372036854775807' ELSE (%[1]s) END) AS BIGINT )", field)
+}
+
+func quotaOrderBy(query ...*models.QuotaQuery) string {
+ orderBy := "b.creation_time DESC"
+
+ if len(query) > 0 && query[0] != nil && query[0].Sort != "" {
+ if val, ok := quotaOrderMap[query[0].Sort]; ok {
+ orderBy = val
+ } else {
+ sort := query[0].Sort
+
+ order := "ASC"
+ if sort[0] == '-' {
+ order = "DESC"
+ sort = sort[1:]
+ }
+
+ prefix := []string{"hard.", "used."}
+ for _, p := range prefix {
+ if strings.HasPrefix(sort, p) {
+ field := fmt.Sprintf("%s->>'%s'", strings.TrimSuffix(p, "."), strings.TrimPrefix(sort, p))
+ orderBy = fmt.Sprintf("(%s) %s", castQuantity(field), order)
+ break
+ }
+ }
+ }
+ }
+
+ return orderBy
+}
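
Note: the sorting logic is worth a worked example. Known keys resolve through quotaOrderMap, while a "hard." or "used." prefix becomes a jsonb extraction wrapped in castQuantity, so that NULL sorts as 0 and the "unlimited" sentinel -1 sorts as max int64 (last in ascending order). A standalone sketch of the transformation for sort key "-hard.storage":

package main

import "fmt"

// castQuantity mirrors the helper above: NULL -> 0, the unlimited sentinel
// -1 -> max int64, everything else cast to BIGINT for comparison.
func castQuantity(field string) string {
	return fmt.Sprintf("CAST( (CASE WHEN (%[1]s) IS NULL THEN '0' WHEN (%[1]s) = '-1' THEN '9223372036854775807' ELSE (%[1]s) END) AS BIGINT )", field)
}

func main() {
	// "-hard.storage" strips to field "storage" of the jsonb column "hard",
	// ordered descending.
	field := "hard->>'storage'"
	fmt.Printf("(%s) DESC\n", castQuantity(field))
}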
diff --git a/src/common/dao/quota_test.go b/src/common/dao/quota_test.go
new file mode 100644
index 000000000..21daf10b9
--- /dev/null
+++ b/src/common/dao/quota_test.go
@@ -0,0 +1,143 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/stretchr/testify/suite"
+)
+
+var (
+ quotaReference = "dao"
+ quotaUserReference = "user"
+ quotaHard = models.QuotaHard{"storage": 1024}
+ quotaHardLarger = models.QuotaHard{"storage": 2048}
+)
+
+type QuotaDaoSuite struct {
+ suite.Suite
+}
+
+func (suite *QuotaDaoSuite) equalHard(quota1 *models.Quota, quota2 *models.Quota) {
+ hard1, err := quota1.GetHard()
+ suite.Nil(err, "hard1 invalid")
+
+ hard2, err := quota2.GetHard()
+ suite.Nil(err, "hard2 invalid")
+
+ suite.Equal(hard1, hard2)
+}
+
+func (suite *QuotaDaoSuite) TearDownTest() {
+ ClearTable("quota")
+ ClearTable("quota_usage")
+}
+
+func (suite *QuotaDaoSuite) TestAddQuota() {
+ _, err1 := AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "1", Hard: quotaHard.String()})
+ suite.Nil(err1)
+
+ // Will fail because reference and reference_id must be unique in db
+ _, err2 := AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "1", Hard: quotaHard.String()})
+ suite.Error(err2)
+
+ _, err3 := AddQuota(models.Quota{Reference: quotaUserReference, ReferenceID: "1", Hard: quotaHard.String()})
+ suite.Nil(err3)
+}
+
+func (suite *QuotaDaoSuite) TestGetQuota() {
+ quota1 := models.Quota{Reference: quotaReference, ReferenceID: "1", Hard: quotaHard.String()}
+ id, err := AddQuota(quota1)
+ suite.Nil(err)
+
+ // Get the new added quota
+ quota2, err := GetQuota(id)
+ suite.Nil(err)
+ suite.NotNil(quota2)
+
+ // Getting the quota with id 10000 returns nil (not found)
+ quota3, err := GetQuota(10000)
+ suite.Nil(err)
+ suite.Nil(quota3)
+}
+
+func (suite *QuotaDaoSuite) TestUpdateQuota() {
+ quota1 := models.Quota{Reference: quotaReference, ReferenceID: "1", Hard: quotaHard.String()}
+ id, err := AddQuota(quota1)
+ suite.Nil(err)
+
+ // Get the new added quota
+ quota2, err := GetQuota(id)
+ suite.Nil(err)
+ suite.equalHard("a1, quota2)
+
+ // Update the quota
+ quota2.SetHard(quotaHardLarger)
+ time.Sleep(time.Millisecond * 10) // Ensure that UpdateTime changed
+ suite.Nil(UpdateQuota(*quota2))
+
+ // Get the updated quota
+ quota3, err := GetQuota(id)
+ suite.Nil(err)
+ suite.equalHard(quota2, quota3)
+ suite.NotEqual(quota2.UpdateTime, quota3.UpdateTime)
+}
+
+func (suite *QuotaDaoSuite) TestListQuotas() {
+ id1, _ := AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "1", Hard: quotaHard.String()})
+ AddQuotaUsage(models.QuotaUsage{ID: id1, Reference: quotaReference, ReferenceID: "1", Used: "{}"})
+
+ id2, _ := AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "2", Hard: quotaHard.String()})
+ AddQuotaUsage(models.QuotaUsage{ID: id2, Reference: quotaReference, ReferenceID: "2", Used: "{}"})
+
+ id3, _ := AddQuota(models.Quota{Reference: quotaUserReference, ReferenceID: "1", Hard: quotaHardLarger.String()})
+ AddQuotaUsage(models.QuotaUsage{ID: id3, Reference: quotaUserReference, ReferenceID: "1", Used: "{}"})
+
+ id4, _ := AddQuota(models.Quota{Reference: quotaReference, ReferenceID: "3", Hard: quotaHard.String()})
+ AddQuotaUsage(models.QuotaUsage{ID: id4, Reference: quotaReference, ReferenceID: "3", Used: "{}"})
+
+ // List all the quotas
+ quotas, err := ListQuotas()
+ suite.Nil(err)
+ suite.Equal(4, len(quotas))
+ suite.Equal(quotaReference, quotas[0].Reference)
+
+ // List quotas filter by reference
+ quotas, err = ListQuotas(&models.QuotaQuery{Reference: quotaReference})
+ suite.Nil(err)
+ suite.Equal(3, len(quotas))
+
+ // List quotas filter by reference ids
+ quotas, err = ListQuotas(&models.QuotaQuery{Reference: quotaReference, ReferenceIDs: []string{"1", "2"}})
+ suite.Nil(err)
+ suite.Equal(2, len(quotas))
+
+ // List quotas by pagination
+ quotas, err = ListQuotas(&models.QuotaQuery{Pagination: models.Pagination{Size: 2}})
+ suite.Nil(err)
+ suite.Equal(2, len(quotas))
+
+ // List quotas by sorting
+ quotas, err = ListQuotas(&models.QuotaQuery{Sorting: models.Sorting{Sort: "-hard.storage"}})
+ suite.Nil(err)
+ suite.Equal(quotaUserReference, quotas[0].Reference)
+}
+
+func TestRunQuotaDaoSuite(t *testing.T) {
+ suite.Run(t, new(QuotaDaoSuite))
+}
diff --git a/src/common/dao/quota_usage.go b/src/common/dao/quota_usage.go
new file mode 100644
index 000000000..d8b55db9b
--- /dev/null
+++ b/src/common/dao/quota_usage.go
@@ -0,0 +1,144 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/astaxie/beego/orm"
+ "github.com/goharbor/harbor/src/common/models"
+)
+
+var (
+ quotaUsageOrderMap = map[string]string{
+ "id": "id asc",
+ "+id": "id asc",
+ "-id": "id desc",
+ "creation_time": "creation_time asc",
+ "+creation_time": "creation_time asc",
+ "-creation_time": "creation_time desc",
+ "update_time": "update_time asc",
+ "+update_time": "update_time asc",
+ "-update_time": "update_time desc",
+ }
+)
+
+// AddQuotaUsage adds quota usage to the database.
+func AddQuotaUsage(quotaUsage models.QuotaUsage) (int64, error) {
+ now := time.Now()
+ quotaUsage.CreationTime = now
+ quotaUsage.UpdateTime = now
+ return GetOrmer().Insert("aUsage)
+}
+
+// GetQuotaUsage returns quota usage by id.
+func GetQuotaUsage(id int64) (*models.QuotaUsage, error) {
+ q := models.QuotaUsage{ID: id}
+ err := GetOrmer().Read(&q, "ID")
+ if err == orm.ErrNoRows {
+ return nil, nil
+ }
+ return &q, err
+}
+
+// UpdateQuotaUsage updates the quota usage.
+func UpdateQuotaUsage(quotaUsage models.QuotaUsage) error {
+ quotaUsage.UpdateTime = time.Now()
+ _, err := GetOrmer().Update("aUsage)
+ return err
+}
+
+// ListQuotaUsages returns quota usages by query.
+func ListQuotaUsages(query ...*models.QuotaUsageQuery) ([]*models.QuotaUsage, error) {
+ condition, params := quotaUsageQueryConditions(query...)
+ sql := fmt.Sprintf(`select * %s`, condition)
+
+ orderBy := quotaUsageOrderBy(query...)
+ if orderBy != "" {
+ sql += ` order by ` + orderBy
+ }
+
+ if len(query) > 0 && query[0] != nil {
+ page, size := query[0].Page, query[0].Size
+ if size > 0 {
+ sql += ` limit ?`
+ params = append(params, size)
+ if page > 0 {
+ sql += ` offset ?`
+ params = append(params, size*(page-1))
+ }
+ }
+ }
+
+ var quotaUsages []*models.QuotaUsage
+ if _, err := GetOrmer().Raw(sql, params).QueryRows("aUsages); err != nil {
+ return nil, err
+ }
+
+ return quotaUsages, nil
+}
+
+func quotaUsageQueryConditions(query ...*models.QuotaUsageQuery) (string, []interface{}) {
+ params := []interface{}{}
+ sql := `from quota_usage `
+ if len(query) == 0 || query[0] == nil {
+ return sql, params
+ }
+
+ sql += `where 1=1 `
+
+ q := query[0]
+ if q.Reference != "" {
+ sql += `and reference = ? `
+ params = append(params, q.Reference)
+ }
+ if q.ReferenceID != "" {
+ sql += `and reference_id = ? `
+ params = append(params, q.ReferenceID)
+ }
+ if len(q.ReferenceIDs) != 0 {
+ sql += fmt.Sprintf(`and reference_id in (%s) `, ParamPlaceholderForIn(len(q.ReferenceIDs)))
+ params = append(params, q.ReferenceIDs)
+ }
+
+ return sql, params
+}
+
+func quotaUsageOrderBy(query ...*models.QuotaUsageQuery) string {
+ orderBy := ""
+
+ if len(query) > 0 && query[0] != nil && query[0].Sort != "" {
+ if val, ok := quotaUsageOrderMap[query[0].Sort]; ok {
+ orderBy = val
+ } else {
+ sort := query[0].Sort
+
+ order := "asc"
+ if sort[0] == '-' {
+ order = "desc"
+ sort = sort[1:]
+ }
+
+ prefix := "used."
+ if strings.HasPrefix(sort, prefix) {
+ orderBy = fmt.Sprintf("used->>'%s' %s", strings.TrimPrefix(sort, prefix), order)
+ }
+ }
+ }
+
+ return orderBy
+}
diff --git a/src/common/dao/quota_usage_test.go b/src/common/dao/quota_usage_test.go
new file mode 100644
index 000000000..40ff14124
--- /dev/null
+++ b/src/common/dao/quota_usage_test.go
@@ -0,0 +1,135 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/stretchr/testify/suite"
+)
+
+var (
+ quotaUsageReference = "project"
+ quotaUsageUserReference = "user"
+ quotaUsageUsed = models.QuotaUsed{"storage": 1024}
+ quotaUsageUsedLarger = models.QuotaUsed{"storage": 2048}
+)
+
+type QuotaUsageDaoSuite struct {
+ suite.Suite
+}
+
+func (suite *QuotaUsageDaoSuite) equalUsed(usage1 *models.QuotaUsage, usage2 *models.QuotaUsage) {
+ used1, err := usage1.GetUsed()
+ suite.Nil(err, "used1 invalid")
+
+ used2, err := usage2.GetUsed()
+ suite.Nil(err, "used2 invalid")
+
+ suite.Equal(used1, used2)
+}
+
+func (suite *QuotaUsageDaoSuite) TearDownTest() {
+ ClearTable("quota_usage")
+}
+
+func (suite *QuotaUsageDaoSuite) TestAddQuotaUsage() {
+ _, err1 := AddQuotaUsage(models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "1", Used: quotaUsageUsed.String()})
+ suite.Nil(err1)
+
+ // Will fail because reference and reference_id must be unique in db
+ _, err2 := AddQuotaUsage(models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "1", Used: quotaUsageUsed.String()})
+ suite.Error(err2)
+
+ _, err3 := AddQuotaUsage(models.QuotaUsage{Reference: quotaUsageUserReference, ReferenceID: "1", Used: quotaUsageUsed.String()})
+ suite.Nil(err3)
+}
+
+func (suite *QuotaUsageDaoSuite) TestGetQuotaUsage() {
+ quotaUsage1 := models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "1", Used: quotaUsageUsed.String()}
+ id, err := AddQuotaUsage(quotaUsage1)
+ suite.Nil(err)
+
+ // Get the new added quotaUsage
+ quotaUsage2, err := GetQuotaUsage(id)
+ suite.Nil(err)
+ suite.NotNil(quotaUsage2)
+
+ // Getting the quotaUsage with id 10000 returns nil (not found)
+ quotaUsage3, err := GetQuotaUsage(10000)
+ suite.Nil(err)
+ suite.Nil(quotaUsage3)
+}
+
+func (suite *QuotaUsageDaoSuite) TestUpdateQuotaUsage() {
+ quotaUsage1 := models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "1", Used: quotaUsageUsed.String()}
+ id, err := AddQuotaUsage(quotaUsage1)
+ suite.Nil(err)
+
+ // Get the new added quotaUsage
+ quotaUsage2, err := GetQuotaUsage(id)
+ suite.Nil(err)
+ suite.equalUsed("aUsage1, quotaUsage2)
+
+ // Update the quotaUsage
+ quotaUsage2.SetUsed(quotaUsageUsedLarger)
+ time.Sleep(time.Millisecond * 10) // Ensure that UpdateTime changed
+ suite.Nil(UpdateQuotaUsage(*quotaUsage2))
+
+ // Get the updated quotaUsage
+ quotaUsage3, err := GetQuotaUsage(id)
+ suite.Nil(err)
+ suite.equalUsed(quotaUsage2, quotaUsage3)
+ suite.NotEqual(quotaUsage2.UpdateTime, quotaUsage3.UpdateTime)
+}
+
+func (suite *QuotaUsageDaoSuite) TestListQuotaUsages() {
+ AddQuotaUsage(models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "1", Used: quotaUsageUsed.String()})
+ AddQuotaUsage(models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "2", Used: quotaUsageUsed.String()})
+ AddQuotaUsage(models.QuotaUsage{Reference: quotaUsageReference, ReferenceID: "3", Used: quotaUsageUsed.String()})
+ AddQuotaUsage(models.QuotaUsage{Reference: quotaUsageUserReference, ReferenceID: "1", Used: quotaUsageUsedLarger.String()})
+
+ // List all the quotaUsages
+ quotaUsages, err := ListQuotaUsages()
+ suite.Nil(err)
+ suite.Equal(4, len(quotaUsages))
+ suite.Equal(quotaUsageReference, quotaUsages[0].Reference)
+
+ // List quotaUsages filter by reference
+ quotaUsages, err = ListQuotaUsages(&models.QuotaUsageQuery{Reference: quotaUsageReference})
+ suite.Nil(err)
+ suite.Equal(3, len(quotaUsages))
+
+ // List quotaUsages filter by reference ids
+ quotaUsages, err = ListQuotaUsages(&models.QuotaUsageQuery{Reference: quotaUsageReference, ReferenceIDs: []string{"1", "2"}})
+ suite.Nil(err)
+ suite.Equal(2, len(quotaUsages))
+
+ // List quotaUsages by pagination
+ quotaUsages, err = ListQuotaUsages(&models.QuotaUsageQuery{Pagination: models.Pagination{Size: 2}})
+ suite.Nil(err)
+ suite.Equal(2, len(quotaUsages))
+
+ // List quotaUsages by sorting
+ quotaUsages, err = ListQuotaUsages(&models.QuotaUsageQuery{Sorting: models.Sorting{Sort: "-used.storage"}})
+ suite.Nil(err)
+ suite.Equal(quotaUsageUserReference, quotaUsages[0].Reference)
+}
+
+func TestRunQuotaUsageDaoSuite(t *testing.T) {
+ suite.Run(t, new(QuotaUsageDaoSuite))
+}
diff --git a/src/common/dao/repository.go b/src/common/dao/repository.go
index c05a46899..abb859525 100644
--- a/src/common/dao/repository.go
+++ b/src/common/dao/repository.go
@@ -178,7 +178,7 @@ func repositoryQueryConditions(query ...*models.RepositoryQuery) (string, []inte
if len(q.ProjectIDs) > 0 {
sql += fmt.Sprintf(`and r.project_id in ( %s ) `,
- paramPlaceholder(len(q.ProjectIDs)))
+ ParamPlaceholderForIn(len(q.ProjectIDs)))
params = append(params, q.ProjectIDs)
}
diff --git a/src/common/dao/scan_job.go b/src/common/dao/scan_job.go
index 6aa151bc7..fe4aa6ab9 100644
--- a/src/common/dao/scan_job.go
+++ b/src/common/dao/scan_job.go
@@ -15,12 +15,11 @@
package dao
import (
+ "encoding/json"
+ "fmt"
"github.com/astaxie/beego/orm"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
-
- "encoding/json"
- "fmt"
"time"
)
diff --git a/src/common/dao/testutils.go b/src/common/dao/testutils.go
index 95d6c3ab7..910d5af72 100644
--- a/src/common/dao/testutils.go
+++ b/src/common/dao/testutils.go
@@ -120,6 +120,19 @@ func PrepareTestData(clearSqls []string, initSqls []string) {
}
}
+// ExecuteBatchSQL ...
+func ExecuteBatchSQL(sqls []string) {
+ o := GetOrmer()
+
+ for _, sql := range sqls {
+ fmt.Printf("Exec sql:%v\n", sql)
+ _, err := o.Raw(sql).Exec()
+ if err != nil {
+ fmt.Printf("failed to execute batch sql, sql:%v, error: %v", sql, err)
+ }
+ }
+}
+
// ArrayEqual ...
func ArrayEqual(arrayA, arrayB []int) bool {
if len(arrayA) != len(arrayB) {
diff --git a/src/common/dao/user.go b/src/common/dao/user.go
index 9349c3477..04e79d066 100644
--- a/src/common/dao/user.go
+++ b/src/common/dao/user.go
@@ -234,6 +234,14 @@ func OnBoardUser(u *models.User) error {
}
if created {
u.UserID = int(id)
+ // the current orm framework doesn't support fetching a pointer or sql.NullString with QueryRow
+ // https://github.com/astaxie/beego/issues/3767
+ if len(u.Email) == 0 {
+ _, err = o.Raw("update harbor_user set email = null where user_id = ? ", id).Exec()
+ if err != nil {
+ return err
+ }
+ }
} else {
existing, err := GetUser(*u)
if err != nil {
diff --git a/src/common/dao/user_test.go b/src/common/dao/user_test.go
index ff48b27ec..2b3029c17 100644
--- a/src/common/dao/user_test.go
+++ b/src/common/dao/user_test.go
@@ -90,3 +90,23 @@ func TestOnBoardUser(t *testing.T) {
assert.True(u.UserID == id)
CleanUser(int64(id))
}
+func TestOnBoardUser_EmptyEmail(t *testing.T) {
+ assert := assert.New(t)
+ u := &models.User{
+ Username: "empty_email",
+ Password: "password1",
+ Realname: "empty_email",
+ }
+ err := OnBoardUser(u)
+ assert.Nil(err)
+ id := u.UserID
+ assert.True(id > 0)
+ err = OnBoardUser(u)
+ assert.Nil(err)
+ assert.True(u.UserID == id)
+ assert.Equal("", u.Email)
+
+ user, err := GetUser(models.User{Username: "empty_email"})
+ assert.Equal("", user.Email)
+ CleanUser(int64(id))
+}
diff --git a/src/common/dao/utils.go b/src/common/dao/utils.go
new file mode 100644
index 000000000..489f43e45
--- /dev/null
+++ b/src/common/dao/utils.go
@@ -0,0 +1,11 @@
+package dao
+
+import (
+ "fmt"
+ "strings"
+)
+
+// JoinNumberConditions - joins number conditions into a comma-separated string, used in sql "in" queries
+func JoinNumberConditions(ids []int) string {
+ return strings.Trim(strings.Replace(fmt.Sprint(ids), " ", ",", -1), "[]")
+}
diff --git a/src/common/dao/utils_test.go b/src/common/dao/utils_test.go
new file mode 100644
index 000000000..78f2f4a3a
--- /dev/null
+++ b/src/common/dao/utils_test.go
@@ -0,0 +1,24 @@
+package dao
+
+import "testing"
+
+func TestJoinNumberConditions(t *testing.T) {
+ type args struct {
+ ids []int
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ }{
+ {name: "normal test", args: args{[]int{1, 2, 3}}, want: "1,2,3"},
+ {name: "dummy test", args: args{[]int{}}, want: ""},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := JoinNumberConditions(tt.args.ids); got != tt.want {
+ t.Errorf("JoinNumberConditions() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/src/common/http/client.go b/src/common/http/client.go
index 533212dc0..7699e33f2 100644
--- a/src/common/http/client.go
+++ b/src/common/http/client.go
@@ -16,6 +16,7 @@ package http
import (
"bytes"
+ "crypto/tls"
"encoding/json"
"errors"
"io"
@@ -35,6 +36,36 @@ type Client struct {
client *http.Client
}
+var defaultHTTPTransport, secureHTTPTransport, insecureHTTPTransport *http.Transport
+
+func init() {
+ defaultHTTPTransport = &http.Transport{}
+
+ secureHTTPTransport = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: false,
+ },
+ }
+ insecureHTTPTransport = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ TLSClientConfig: &tls.Config{
+ InsecureSkipVerify: true,
+ },
+ }
+}
+
+// GetHTTPTransport returns a shared http.Transport based on the insecure configuration
+func GetHTTPTransport(insecure ...bool) *http.Transport {
+ if len(insecure) == 0 {
+ return defaultHTTPTransport
+ }
+ if insecure[0] {
+ return insecureHTTPTransport
+ }
+ return secureHTTPTransport
+}
+
// NewClient creates an instance of Client.
// Use net/http.Client as the default value if c is nil.
// Modifiers modify the request before sending it.
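
Note: the point of the three package-level transports is connection reuse — callers share one transport per TLS mode instead of allocating a new one per client. A simplified standalone sketch (the real helper also keeps a separate default transport for the no-argument case):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

var (
	secureTransport   = &http.Transport{Proxy: http.ProxyFromEnvironment, TLSClientConfig: &tls.Config{}}
	insecureTransport = &http.Transport{Proxy: http.ProxyFromEnvironment, TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
)

// getHTTPTransport mirrors the new helper: pick the shared transport by an
// optional insecure flag.
func getHTTPTransport(insecure ...bool) *http.Transport {
	if len(insecure) > 0 && insecure[0] {
		return insecureTransport
	}
	return secureTransport
}

func main() {
	client := &http.Client{Transport: getHTTPTransport(true)}
	t := client.Transport.(*http.Transport)
	fmt.Println(t.TLSClientConfig.InsecureSkipVerify) // true
}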
diff --git a/src/common/http/client_test.go b/src/common/http/client_test.go
new file mode 100644
index 000000000..09f576c97
--- /dev/null
+++ b/src/common/http/client_test.go
@@ -0,0 +1,14 @@
+package http
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGetHTTPTransport(t *testing.T) {
+ transport := GetHTTPTransport(true)
+ assert.True(t, transport.TLSClientConfig.InsecureSkipVerify)
+ transport = GetHTTPTransport(false)
+ assert.False(t, transport.TLSClientConfig.InsecureSkipVerify)
+}
diff --git a/src/common/job/client.go b/src/common/job/client.go
index 51ce18301..01f3c18e2 100644
--- a/src/common/job/client.go
+++ b/src/common/job/client.go
@@ -11,9 +11,16 @@ import (
commonhttp "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/http/modifier/auth"
"github.com/goharbor/harbor/src/common/job/models"
+ "github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/jobservice/job"
)
+var (
+ // GlobalClient is an instance of the default client that can be used globally
+ // Notes: the client needs to be initialized before can be used
+ GlobalClient Client
+)
+
// Client wraps interface to access jobservice.
type Client interface {
SubmitJob(*models.JobData) (string, error)
@@ -29,6 +36,11 @@ type DefaultClient struct {
client *commonhttp.Client
}
+// Init the GlobalClient
+func Init() {
+ GlobalClient = NewDefaultClient(config.InternalJobServiceURL(), config.CoreSecret())
+}
+
// NewDefaultClient creates a default client based on endpoint and secret.
func NewDefaultClient(endpoint, secret string) *DefaultClient {
var c *commonhttp.Client
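
Note: GlobalClient introduces a package-level singleton — Init() must run during startup, after configuration is loaded (it reads the internal jobservice URL and core secret), before any caller submits a job. The pattern in miniature, with hypothetical names and endpoint:

package main

import "fmt"

// client stands in for the jobservice Client interface.
type client interface {
	SubmitJob(kind string) (string, error)
}

type defaultClient struct{ endpoint string }

func (c *defaultClient) SubmitJob(kind string) (string, error) {
	// stub: the real client POSTs the job data to the jobservice endpoint
	return "id-" + kind, nil
}

// globalClient mirrors job.GlobalClient: nil until initialized.
var globalClient client

// initClient mirrors job.Init(): wire the singleton from configuration.
func initClient() {
	globalClient = &defaultClient{endpoint: "http://jobservice:8080"}
}

func main() {
	initClient()
	id, _ := globalClient.SubmitJob("GARBAGE_COLLECTION")
	fmt.Println(id)
}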
diff --git a/src/common/models/artifact.go b/src/common/models/artifact.go
new file mode 100644
index 000000000..fa6760702
--- /dev/null
+++ b/src/common/models/artifact.go
@@ -0,0 +1,32 @@
+package models
+
+import (
+ "time"
+)
+
+// Artifact holds the details of an artifact.
+type Artifact struct {
+ ID int64 `orm:"pk;auto;column(id)" json:"id"`
+ PID int64 `orm:"column(project_id)" json:"project_id"`
+ Repo string `orm:"column(repo)" json:"repo"`
+ Tag string `orm:"column(tag)" json:"tag"`
+ Digest string `orm:"column(digest)" json:"digest"`
+ Kind string `orm:"column(kind)" json:"kind"`
+ PushTime time.Time `orm:"column(push_time)" json:"push_time"`
+ PullTime time.Time `orm:"column(pull_time)" json:"pull_time"`
+ CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
+}
+
+// TableName ...
+func (af *Artifact) TableName() string {
+ return "artifact"
+}
+
+// ArtifactQuery ...
+type ArtifactQuery struct {
+ PID int64
+ Repo string
+ Tag string
+ Digest string
+ Pagination
+}
diff --git a/src/common/models/artifact_blob.go b/src/common/models/artifact_blob.go
new file mode 100644
index 000000000..a402306ee
--- /dev/null
+++ b/src/common/models/artifact_blob.go
@@ -0,0 +1,18 @@
+package models
+
+import (
+ "time"
+)
+
+// ArtifactAndBlob holds the relationship between manifest and blob.
+type ArtifactAndBlob struct {
+ ID int64 `orm:"pk;auto;column(id)" json:"id"`
+ DigestAF string `orm:"column(digest_af)" json:"digest_af"`
+ DigestBlob string `orm:"column(digest_blob)" json:"digest_blob"`
+ CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
+}
+
+// TableName ...
+func (afb *ArtifactAndBlob) TableName() string {
+ return "artifact_blob"
+}
diff --git a/src/common/models/base.go b/src/common/models/base.go
index be8877cb8..de04d0285 100644
--- a/src/common/models/base.go
+++ b/src/common/models/base.go
@@ -36,5 +36,15 @@ func init() {
new(AdminJob),
new(JobLog),
new(Robot),
- new(OIDCUser))
+ new(OIDCUser),
+ new(NotificationPolicy),
+ new(NotificationJob),
+ new(Blob),
+ new(ProjectBlob),
+ new(Artifact),
+ new(ArtifactAndBlob),
+ new(CVEWhitelist),
+ new(Quota),
+ new(QuotaUsage),
+ )
}
diff --git a/src/common/models/blob.go b/src/common/models/blob.go
new file mode 100644
index 000000000..71a3c9b67
--- /dev/null
+++ b/src/common/models/blob.go
@@ -0,0 +1,19 @@
+package models
+
+import (
+ "time"
+)
+
+// Blob holds the details of a blob.
+type Blob struct {
+ ID int64 `orm:"pk;auto;column(id)" json:"id"`
+ Digest string `orm:"column(digest)" json:"digest"`
+ ContentType string `orm:"column(content_type)" json:"content_type"`
+ Size int64 `orm:"column(size)" json:"size"`
+ CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
+}
+
+// TableName ...
+func (b *Blob) TableName() string {
+ return "blob"
+}
diff --git a/src/common/models/config.go b/src/common/models/config.go
index cbcb3f810..dfd13d4bb 100644
--- a/src/common/models/config.go
+++ b/src/common/models/config.go
@@ -45,12 +45,14 @@ type SQLite struct {
// PostGreSQL ...
type PostGreSQL struct {
- Host string `json:"host"`
- Port int `json:"port"`
- Username string `json:"username"`
- Password string `json:"password,omitempty"`
- Database string `json:"database"`
- SSLMode string `json:"sslmode"`
+ Host string `json:"host"`
+ Port int `json:"port"`
+ Username string `json:"username"`
+ Password string `json:"password,omitempty"`
+ Database string `json:"database"`
+ SSLMode string `json:"sslmode"`
+ MaxIdleConns int `json:"max_idle_conns"`
+ MaxOpenConns int `json:"max_open_conns"`
}
// Email ...
@@ -70,7 +72,7 @@ type HTTPAuthProxy struct {
Endpoint string `json:"endpoint"`
TokenReviewEndpoint string `json:"tokenreivew_endpoint"`
VerifyCert bool `json:"verify_cert"`
- AlwaysOnBoard bool `json:"always_onboard"`
+ SkipSearch bool `json:"skip_search"`
}
// OIDCSetting wraps the settings for OIDC auth endpoint
@@ -84,6 +86,12 @@ type OIDCSetting struct {
Scope []string `json:"scope"`
}
+// QuotaSetting wraps the settings for Quota
+type QuotaSetting struct {
+ CountPerProject int64 `json:"count_per_project"`
+ StoragePerProject int64 `json:"storage_per_project"`
+}
+
// ConfigEntry ...
type ConfigEntry struct {
ID int64 `orm:"pk;auto;column(id)" json:"-"`
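The new `MaxIdleConns`/`MaxOpenConns` fields surface the standard `database/sql` pool knobs in configuration. A hypothetical sketch of how such values would typically be applied when opening the connection (Harbor's actual wiring lives in its dao layer; the driver import is assumed for illustration):

```go
package main

import (
	"database/sql"

	_ "github.com/lib/pq" // Postgres driver, assumed for illustration
)

func openPostgres(dsn string, maxIdle, maxOpen int) (*sql.DB, error) {
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		return nil, err
	}
	db.SetMaxIdleConns(maxIdle) // maps from max_idle_conns
	db.SetMaxOpenConns(maxOpen) // maps from max_open_conns
	return db, nil
}
```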
diff --git a/src/common/models/cve_whitelist.go b/src/common/models/cve_whitelist.go
new file mode 100644
index 000000000..90badb372
--- /dev/null
+++ b/src/common/models/cve_whitelist.go
@@ -0,0 +1,55 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package models
+
+import "time"
+
+// CVEWhitelist defines the data model for a CVE whitelist
+type CVEWhitelist struct {
+ ID int64 `orm:"pk;auto;column(id)" json:"id"`
+ ProjectID int64 `orm:"column(project_id)" json:"project_id"`
+ ExpiresAt *int64 `orm:"column(expires_at)" json:"expires_at,omitempty"`
+ Items []CVEWhitelistItem `orm:"-" json:"items"`
+ ItemsText string `orm:"column(items)" json:"-"`
+ CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
+ UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
+}
+
+// CVEWhitelistItem defines one item in the CVE whitelist
+type CVEWhitelistItem struct {
+ CVEID string `json:"cve_id"`
+}
+
+// TableName ...
+func (c *CVEWhitelist) TableName() string {
+ return "cve_whitelist"
+}
+
+// CVESet returns the set of CVE IDs of the items in the whitelist to help filter the vulnerability list
+func (c *CVEWhitelist) CVESet() map[string]struct{} {
+ r := map[string]struct{}{}
+ for _, it := range c.Items {
+ r[it.CVEID] = struct{}{}
+ }
+ return r
+}
+
+// IsExpired returns whether the whitelist is expired
+func (c *CVEWhitelist) IsExpired() bool {
+ if c.ExpiresAt == nil {
+ return false
+ }
+ return time.Now().Unix() >= *c.ExpiresAt
+}
diff --git a/src/common/models/cve_whitelist_test.go b/src/common/models/cve_whitelist_test.go
new file mode 100644
index 000000000..cb47e7021
--- /dev/null
+++ b/src/common/models/cve_whitelist_test.go
@@ -0,0 +1,72 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package models
+
+import (
+ "github.com/stretchr/testify/assert"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestCVEWhitelist_All(t *testing.T) {
+ future := int64(4411494000)
+ now := time.Now().Unix()
+ cases := []struct {
+ input CVEWhitelist
+ cveset map[string]struct{}
+ expired bool
+ }{
+ {
+ input: CVEWhitelist{
+ ID: 1,
+ ProjectID: 0,
+ Items: []CVEWhitelistItem{},
+ },
+ cveset: map[string]struct{}{},
+ expired: false,
+ },
+ {
+ input: CVEWhitelist{
+ ID: 1,
+ ProjectID: 0,
+ Items: []CVEWhitelistItem{},
+ ExpiresAt: &now,
+ },
+ cveset: map[string]struct{}{},
+ expired: true,
+ },
+ {
+ input: CVEWhitelist{
+ ID: 2,
+ ProjectID: 3,
+ Items: []CVEWhitelistItem{
+ {CVEID: "CVE-1999-0067"},
+ {CVEID: "CVE-2016-7654321"},
+ },
+ ExpiresAt: &future,
+ },
+ cveset: map[string]struct{}{
+ "CVE-1999-0067": {},
+ "CVE-2016-7654321": {},
+ },
+ expired: false,
+ },
+ }
+ for _, c := range cases {
+ assert.Equal(t, c.expired, c.input.IsExpired())
+ assert.True(t, reflect.DeepEqual(c.cveset, c.input.CVESet()))
+ }
+}
diff --git a/src/common/models/hook_notification.go b/src/common/models/hook_notification.go
new file mode 100755
index 000000000..60c667afd
--- /dev/null
+++ b/src/common/models/hook_notification.go
@@ -0,0 +1,111 @@
+package models
+
+import (
+ "encoding/json"
+ "time"
+)
+
+const (
+	// NotificationPolicyTable is the table name for notification policies
+	NotificationPolicyTable = "notification_policy"
+	// NotificationJobTable is the table name for notification jobs
+ NotificationJobTable = "notification_job"
+)
+
+// NotificationPolicy is the model for a notification policy.
+type NotificationPolicy struct {
+ ID int64 `orm:"pk;auto;column(id)" json:"id"`
+ Name string `orm:"column(name)" json:"name"`
+ Description string `orm:"column(description)" json:"description"`
+ ProjectID int64 `orm:"column(project_id)" json:"project_id"`
+ TargetsDB string `orm:"column(targets)" json:"-"`
+ Targets []EventTarget `orm:"-" json:"targets"`
+ EventTypesDB string `orm:"column(event_types)" json:"-"`
+ EventTypes []string `orm:"-" json:"event_types"`
+ Creator string `orm:"column(creator)" json:"creator"`
+ CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
+ UpdateTime time.Time `orm:"column(update_time);auto_now_add" json:"update_time"`
+ Enabled bool `orm:"column(enabled)" json:"enabled"`
+}
+
+// TableName sets the table name for ORM.
+func (w *NotificationPolicy) TableName() string {
+ return NotificationPolicyTable
+}
+
+// ConvertToDBModel converts the struct data in a notification policy to DB model data
+func (w *NotificationPolicy) ConvertToDBModel() error {
+ if len(w.Targets) != 0 {
+ targets, err := json.Marshal(w.Targets)
+ if err != nil {
+ return err
+ }
+ w.TargetsDB = string(targets)
+ }
+ if len(w.EventTypes) != 0 {
+ eventTypes, err := json.Marshal(w.EventTypes)
+ if err != nil {
+ return err
+ }
+ w.EventTypesDB = string(eventTypes)
+ }
+
+ return nil
+}
+
+// ConvertFromDBModel converts DB model data to struct data
+func (w *NotificationPolicy) ConvertFromDBModel() error {
+ targets := []EventTarget{}
+ if len(w.TargetsDB) != 0 {
+ err := json.Unmarshal([]byte(w.TargetsDB), &targets)
+ if err != nil {
+ return err
+ }
+ }
+ w.Targets = targets
+
+ types := []string{}
+ if len(w.EventTypesDB) != 0 {
+ err := json.Unmarshal([]byte(w.EventTypesDB), &types)
+ if err != nil {
+ return err
+ }
+ }
+ w.EventTypes = types
+
+ return nil
+}
+
+// NotificationJob is the model for a notification job
+type NotificationJob struct {
+ ID int64 `orm:"pk;auto;column(id)" json:"id"`
+ PolicyID int64 `orm:"column(policy_id)" json:"policy_id"`
+ EventType string `orm:"column(event_type)" json:"event_type"`
+ NotifyType string `orm:"column(notify_type)" json:"notify_type"`
+ Status string `orm:"column(status)" json:"status"`
+ JobDetail string `orm:"column(job_detail)" json:"job_detail"`
+ UUID string `orm:"column(job_uuid)" json:"-"`
+ CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
+ UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
+}
+
+// TableName sets the table name for ORM.
+func (w *NotificationJob) TableName() string {
+ return NotificationJobTable
+}
+
+// NotificationJobQuery holds query conditions for notification job
+type NotificationJobQuery struct {
+ PolicyID int64
+ Statuses []string
+ EventTypes []string
+ Pagination
+}
+
+// EventTarget defines the structure of a target that a notification is sent to
+type EventTarget struct {
+ Type string `json:"type"`
+ Address string `json:"address"`
+ AuthHeader string `json:"auth_header,omitempty"`
+ SkipCertVerify bool `json:"skip_cert_verify"`
+}
diff --git a/src/common/models/hook_notification_test.go b/src/common/models/hook_notification_test.go
new file mode 100644
index 000000000..31c18c8b6
--- /dev/null
+++ b/src/common/models/hook_notification_test.go
@@ -0,0 +1,114 @@
+package models
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNotificationPolicy_ConvertFromDBModel(t *testing.T) {
+ tests := []struct {
+ name string
+ policy *NotificationPolicy
+ want *NotificationPolicy
+ wantErr bool
+ }{
+ {
+ name: "ConvertFromDBModel want error 1",
+ policy: &NotificationPolicy{
+ TargetsDB: "[{{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\"}]",
+ },
+ wantErr: true,
+ },
+ {
+ name: "ConvertFromDBModel want error 2",
+ policy: &NotificationPolicy{
+ EventTypesDB: "[{\"pushImage\",\"pullImage\",\"deleteImage\"]",
+ },
+ wantErr: true,
+ },
+ {
+ name: "ConvertFromDBModel 1",
+ policy: &NotificationPolicy{
+ TargetsDB: "[{\"type\":\"http\",\"address\":\"http://10.173.32.58:9009\"}]",
+ EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\"]",
+ },
+ want: &NotificationPolicy{
+ Targets: []EventTarget{
+ {
+ Type: "http",
+ Address: "http://10.173.32.58:9009",
+ },
+ },
+ EventTypes: []string{"pushImage", "pullImage", "deleteImage"},
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.policy.ConvertFromDBModel()
+ if tt.wantErr {
+ require.NotNil(t, err, "wantErr: %s", err)
+ return
+ }
+ require.Nil(t, err)
+ assert.Equal(t, tt.want.Targets, tt.policy.Targets)
+ assert.Equal(t, tt.want.EventTypes, tt.policy.EventTypes)
+ })
+ }
+}
+
+func TestNotificationPolicy_ConvertToDBModel(t *testing.T) {
+ tests := []struct {
+ name string
+ policy *NotificationPolicy
+ want *NotificationPolicy
+ wantErr bool
+ }{
+ {
+ name: "ConvertToDBModel 1",
+ policy: &NotificationPolicy{
+ Targets: []EventTarget{
+ {
+ Type: "http",
+ Address: "http://127.0.0.1",
+ SkipCertVerify: false,
+ },
+ },
+ EventTypes: []string{"pushImage", "pullImage", "deleteImage"},
+ },
+ want: &NotificationPolicy{
+ TargetsDB: "[{\"type\":\"http\",\"address\":\"http://127.0.0.1\",\"skip_cert_verify\":false}]",
+ EventTypesDB: "[\"pushImage\",\"pullImage\",\"deleteImage\"]",
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.policy.ConvertToDBModel()
+ if tt.wantErr {
+ require.NotNil(t, err, "wantErr: %s", err)
+ return
+ }
+ require.Nil(t, err)
+ assert.Equal(t, tt.want.TargetsDB, tt.policy.TargetsDB)
+ assert.Equal(t, tt.want.EventTypesDB, tt.policy.EventTypesDB)
+ })
+ }
+}
+
+func TestNotificationJob_TableName(t *testing.T) {
+ job := &NotificationJob{}
+ got := job.TableName()
+ assert.Equal(t, NotificationJobTable, got)
+}
+
+func TestNotificationPolicy_TableName(t *testing.T) {
+ policy := &NotificationPolicy{}
+ got := policy.TableName()
+ assert.Equal(t, NotificationPolicyTable, got)
+
+}
diff --git a/src/common/models/pro_meta.go b/src/common/models/pro_meta.go
index 97427ac6a..d9952714c 100644
--- a/src/common/models/pro_meta.go
+++ b/src/common/models/pro_meta.go
@@ -20,16 +20,17 @@ import (
// keys of project metadata and severity values
const (
- ProMetaPublic = "public"
- ProMetaEnableContentTrust = "enable_content_trust"
- ProMetaPreventVul = "prevent_vul" // prevent vulnerable images from being pulled
- ProMetaSeverity = "severity"
- ProMetaAutoScan = "auto_scan"
- SeverityNone = "negligible"
- SeverityLow = "low"
- SeverityMedium = "medium"
- SeverityHigh = "high"
- SeverityCritical = "critical"
+ ProMetaPublic = "public"
+ ProMetaEnableContentTrust = "enable_content_trust"
+ ProMetaPreventVul = "prevent_vul" // prevent vulnerable images from being pulled
+ ProMetaSeverity = "severity"
+ ProMetaAutoScan = "auto_scan"
+ ProMetaReuseSysCVEWhitelist = "reuse_sys_cve_whitelist"
+ SeverityNone = "negligible"
+ SeverityLow = "low"
+ SeverityMedium = "medium"
+ SeverityHigh = "high"
+ SeverityCritical = "critical"
)
// ProjectMetadata holds the metadata of a project.
diff --git a/src/common/models/project.go b/src/common/models/project.go
index bebadcdd1..1b56284a3 100644
--- a/src/common/models/project.go
+++ b/src/common/models/project.go
@@ -17,10 +17,18 @@ package models
import (
"strings"
"time"
+
+ "github.com/goharbor/harbor/src/pkg/types"
)
-// ProjectTable is the table name for project
-const ProjectTable = "project"
+const (
+ // ProjectTable is the table name for project
+ ProjectTable = "project"
+ // ProjectPublic means project is public
+ ProjectPublic = "public"
+ // ProjectPrivate means project is private
+ ProjectPrivate = "private"
+)
// Project holds the details of a project.
type Project struct {
@@ -36,6 +44,7 @@ type Project struct {
RepoCount int64 `orm:"-" json:"repo_count"`
ChartCount uint64 `orm:"-" json:"chart_count"`
Metadata map[string]string `orm:"-" json:"metadata"`
+ CVEWhitelist CVEWhitelist `orm:"-" json:"cve_whitelist"`
}
// GetMetadata ...
@@ -83,6 +92,15 @@ func (p *Project) VulPrevented() bool {
return isTrue(prevent)
}
+// ReuseSysCVEWhitelist ...
+func (p *Project) ReuseSysCVEWhitelist() bool {
+ r, ok := p.GetMetadata(ProMetaReuseSysCVEWhitelist)
+ if !ok {
+ return true
+ }
+ return isTrue(r)
+}
+
// Severity ...
func (p *Project) Severity() string {
severity, exist := p.GetMetadata(ProMetaSeverity)
@@ -128,9 +146,9 @@ type ProjectQueryParam struct {
// MemberQuery filter by member's username and role
type MemberQuery struct {
- Name string // the username of member
- Role int // the role of the member has to the project
- GroupList []*UserGroup // the group list of current user
+ Name string // the username of member
+ Role int // the role of the member has to the project
+ GroupIDs []int // the group ID of current user belongs to
}
// Pagination ...
@@ -154,9 +172,13 @@ type BaseProjectCollection struct {
// ProjectRequest holds the information needed for creating a project via the API
type ProjectRequest struct {
- Name string `json:"project_name"`
- Public *int `json:"public"` // deprecated, reserved for project creation in replication
- Metadata map[string]string `json:"metadata"`
+ Name string `json:"project_name"`
+ Public *int `json:"public"` // deprecated, reserved for project creation in replication
+ Metadata map[string]string `json:"metadata"`
+ CVEWhitelist CVEWhitelist `json:"cve_whitelist"`
+
+ CountLimit *int64 `json:"count_limit,omitempty"`
+ StorageLimit *int64 `json:"storage_limit,omitempty"`
}
// ProjectQueryResult ...
@@ -169,3 +191,19 @@ type ProjectQueryResult struct {
func (p *Project) TableName() string {
return ProjectTable
}
+
+// ProjectSummary ...
+type ProjectSummary struct {
+ RepoCount int64 `json:"repo_count"`
+ ChartCount uint64 `json:"chart_count"`
+
+ ProjectAdminCount int64 `json:"project_admin_count"`
+ MasterCount int64 `json:"master_count"`
+ DeveloperCount int64 `json:"developer_count"`
+ GuestCount int64 `json:"guest_count"`
+
+ Quota struct {
+ Hard types.ResourceList `json:"hard"`
+ Used types.ResourceList `json:"used"`
+ } `json:"quota"`
+}
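`ReuseSysCVEWhitelist` deliberately defaults to true when the metadata key is absent, so a project falls back to the system-level whitelist unless it explicitly opts out. A small sketch of that behavior:

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/models"
)

func main() {
	p := &models.Project{}
	// Key absent: default to reusing the system whitelist.
	fmt.Println(p.ReuseSysCVEWhitelist()) // true

	p.Metadata = map[string]string{models.ProMetaReuseSysCVEWhitelist: "false"}
	fmt.Println(p.ReuseSysCVEWhitelist()) // false: the project opted out
}
```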
diff --git a/src/common/models/project_blob.go b/src/common/models/project_blob.go
new file mode 100644
index 000000000..119dadbc0
--- /dev/null
+++ b/src/common/models/project_blob.go
@@ -0,0 +1,32 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package models
+
+import (
+ "time"
+)
+
+// ProjectBlob holds the relationship between a project and a blob.
+type ProjectBlob struct {
+ ID int64 `orm:"pk;auto;column(id)" json:"id"`
+ ProjectID int64 `orm:"column(project_id)" json:"project_id"`
+ BlobID int64 `orm:"column(blob_id)" json:"blob_id"`
+ CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
+}
+
+// TableName ...
+func (*ProjectBlob) TableName() string {
+ return "project_blob"
+}
diff --git a/src/common/models/quota.go b/src/common/models/quota.go
new file mode 100644
index 000000000..e7d8ade6e
--- /dev/null
+++ b/src/common/models/quota.go
@@ -0,0 +1,85 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package models
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/goharbor/harbor/src/pkg/types"
+)
+
+// QuotaHard is a map for the quota hard limits
+type QuotaHard map[string]int64
+
+func (h QuotaHard) String() string {
+ bytes, _ := json.Marshal(h)
+ return string(bytes)
+}
+
+// Copy returns a copy of the quota hard limits
+func (h QuotaHard) Copy() QuotaHard {
+ hard := QuotaHard{}
+ for key, value := range h {
+ hard[key] = value
+ }
+
+ return hard
+}
+
+// Quota is the model for quota
+type Quota struct {
+ ID int64 `orm:"pk;auto;column(id)" json:"id"`
+	Reference    string    `orm:"column(reference)" json:"reference"` // The reference type for quota, e.g. project, user
+ ReferenceID string `orm:"column(reference_id)" json:"reference_id"`
+ Hard string `orm:"column(hard);type(jsonb)" json:"-"`
+ CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
+ UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
+}
+
+// TableName returns table name for orm
+func (q *Quota) TableName() string {
+ return "quota"
+}
+
+// GetHard returns quota hard
+func (q *Quota) GetHard() (QuotaHard, error) {
+ var hard QuotaHard
+ if err := json.Unmarshal([]byte(q.Hard), &hard); err != nil {
+ return nil, err
+ }
+
+ return hard, nil
+}
+
+// SetHard sets the new quota hard limits
+func (q *Quota) SetHard(hard QuotaHard) {
+ q.Hard = hard.String()
+}
+
+// QuotaQuery holds the query parameters for quota
+type QuotaQuery struct {
+ ID int64
+ Reference string
+ ReferenceID string
+ ReferenceIDs []string
+ Pagination
+ Sorting
+}
+
+// QuotaUpdateRequest the request for quota update
+type QuotaUpdateRequest struct {
+ Hard types.ResourceList `json:"hard"`
+}
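`Hard` is persisted as a jsonb string, with `SetHard`/`GetHard` doing the (de)serialization. A quick round-trip sketch (the "count"/"storage" key names are assumed to match the resource names used elsewhere in this change):

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/models"
)

func main() {
	q := &models.Quota{}
	q.SetHard(models.QuotaHard{"count": 10, "storage": 1 << 30})

	hard, err := q.GetHard() // parses the jsonb string back into a map
	if err != nil {
		panic(err)
	}
	fmt.Println(hard["count"], hard["storage"]) // 10 1073741824
}
```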
diff --git a/src/common/models/quota_usage.go b/src/common/models/quota_usage.go
new file mode 100644
index 000000000..c5c24eeb3
--- /dev/null
+++ b/src/common/models/quota_usage.go
@@ -0,0 +1,77 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package models
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// QuotaUsed is a map for the quota used
+type QuotaUsed map[string]int64
+
+func (u QuotaUsed) String() string {
+ bytes, _ := json.Marshal(u)
+ return string(bytes)
+}
+
+// Copy returns a copy of the quota used
+func (u QuotaUsed) Copy() QuotaUsed {
+ used := QuotaUsed{}
+ for key, value := range u {
+ used[key] = value
+ }
+
+ return used
+}
+
+// QuotaUsage is the model for quota usage
+type QuotaUsage struct {
+ ID int64 `orm:"pk;auto;column(id)" json:"id"`
+	Reference    string    `orm:"column(reference)" json:"reference"` // The reference type for quota usage, e.g. project, user
+ ReferenceID string `orm:"column(reference_id)" json:"reference_id"`
+ Used string `orm:"column(used);type(jsonb)" json:"-"`
+ CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
+ UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
+}
+
+// TableName returns table name for orm
+func (qu *QuotaUsage) TableName() string {
+ return "quota_usage"
+}
+
+// GetUsed returns quota used
+func (qu *QuotaUsage) GetUsed() (QuotaUsed, error) {
+ var used QuotaUsed
+ if err := json.Unmarshal([]byte(qu.Used), &used); err != nil {
+ return nil, err
+ }
+
+ return used, nil
+}
+
+// SetUsed sets the quota used
+func (qu *QuotaUsage) SetUsed(used QuotaUsed) {
+ qu.Used = used.String()
+}
+
+// QuotaUsageQuery holds the query parameters for quota usage
+type QuotaUsageQuery struct {
+ Reference string
+ ReferenceID string
+ ReferenceIDs []string
+ Pagination
+ Sorting
+}
diff --git a/src/common/models/repo.go b/src/common/models/repo.go
index 92a51d375..9993fbcc6 100644
--- a/src/common/models/repo.go
+++ b/src/common/models/repo.go
@@ -16,6 +16,9 @@ package models
import (
"time"
+
+ "github.com/goharbor/harbor/src/common/utils/notary/model"
+ "github.com/theupdateframework/notary/tuf/data"
)
// RepoTable is the table name for repository
@@ -47,3 +50,38 @@ type RepositoryQuery struct {
Pagination
Sorting
}
+
+// TagResp holds the information of one image tag
+type TagResp struct {
+ TagDetail
+ Signature *model.Target `json:"signature"`
+ ScanOverview *ImgScanOverview `json:"scan_overview,omitempty"`
+ Labels []*Label `json:"labels"`
+ PushTime time.Time `json:"push_time"`
+ PullTime time.Time `json:"pull_time"`
+}
+
+// TagDetail ...
+type TagDetail struct {
+ Digest string `json:"digest"`
+ Name string `json:"name"`
+ Size int64 `json:"size"`
+ Architecture string `json:"architecture"`
+ OS string `json:"os"`
+ OSVersion string `json:"os.version"`
+ DockerVersion string `json:"docker_version"`
+ Author string `json:"author"`
+ Created time.Time `json:"created"`
+ Config *TagCfg `json:"config"`
+}
+
+// TagCfg ...
+type TagCfg struct {
+ Labels map[string]string `json:"labels"`
+}
+
+// Signature ...
+type Signature struct {
+ Tag string `json:"tag"`
+ Hashes data.Hashes `json:"hashes"`
+}
diff --git a/src/common/models/robot.go b/src/common/models/robot.go
index b4bb119b2..2e64ca8d2 100644
--- a/src/common/models/robot.go
+++ b/src/common/models/robot.go
@@ -65,7 +65,6 @@ func (rq *RobotReq) Valid(v *validation.Validation) {
// RobotRep ...
type RobotRep struct {
- ID int64 `json:"id"`
Name string `json:"name"`
Token string `json:"token"`
}
diff --git a/src/common/models/scan_job.go b/src/common/models/scan_job.go
index 8a26fd741..75546223d 100644
--- a/src/common/models/scan_job.go
+++ b/src/common/models/scan_job.go
@@ -34,31 +34,6 @@ type ScanJob struct {
UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
}
-// Severity represents the severity of a image/component in terms of vulnerability.
-type Severity int64
-
-// Sevxxx is the list of severity of image after scanning.
-const (
- _ Severity = iota
- SevNone
- SevUnknown
- SevLow
- SevMedium
- SevHigh
-)
-
-// String is the output function for sererity variable
-func (sev Severity) String() string {
- name := []string{"negligible", "unknown", "low", "medium", "high"}
- i := int64(sev)
- switch {
- case i >= 1 && i <= int64(SevHigh):
- return name[i-1]
- default:
- return "unknown"
- }
-}
-
// TableName is required by beego orm to map ScanJob to table img_scan_job
func (s *ScanJob) TableName() string {
return ScanJobTable
@@ -101,17 +76,6 @@ type ImageScanReq struct {
Tag string `json:"tag"`
}
-// VulnerabilityItem is an item in the vulnerability result returned by vulnerability details API.
-type VulnerabilityItem struct {
- ID string `json:"id"`
- Severity Severity `json:"severity"`
- Pkg string `json:"package"`
- Version string `json:"version"`
- Description string `json:"description"`
- Link string `json:"link"`
- Fixed string `json:"fixedVersion,omitempty"`
-}
-
// ScanAllPolicy represents the JSON request object for the scan-all policy
type ScanAllPolicy struct {
Type string `json:"type"`
diff --git a/src/common/models/sev.go b/src/common/models/sev.go
new file mode 100644
index 000000000..3ccf89753
--- /dev/null
+++ b/src/common/models/sev.go
@@ -0,0 +1,26 @@
+package models
+
+// Severity represents the severity of an image/component in terms of vulnerability.
+type Severity int64
+
+// Sevxxx is the list of severity levels of an image after scanning.
+const (
+ _ Severity = iota
+ SevNone
+ SevUnknown
+ SevLow
+ SevMedium
+ SevHigh
+)
+
+// String returns the string representation of the severity
+func (sev Severity) String() string {
+ name := []string{"negligible", "unknown", "low", "medium", "high"}
+ i := int64(sev)
+ switch {
+ case i >= 1 && i <= int64(SevHigh):
+ return name[i-1]
+ default:
+ return "unknown"
+ }
+}
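The mapping is positional (index `i-1` into the name slice), and anything outside the `SevNone..SevHigh` range collapses to "unknown":

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/models"
)

func main() {
	fmt.Println(models.SevNone.String())      // "negligible"
	fmt.Println(models.SevMedium.String())    // "medium"
	fmt.Println(models.Severity(42).String()) // "unknown" for out-of-range values
}
```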
diff --git a/src/common/models/token.go b/src/common/models/token.go
index f5bbd797b..ac50fba42 100644
--- a/src/common/models/token.go
+++ b/src/common/models/token.go
@@ -16,9 +16,19 @@ package models
// Token represents the json returned by registry token service
type Token struct {
- Token string `json:"token"`
- ExpiresIn int `json:"expires_in"`
- IssuedAt string `json:"issued_at"`
+ Token string `json:"token"`
+ AccessToken string `json:"access_token"` // the token returned by azure container registry is called "access_token"
+ ExpiresIn int `json:"expires_in"`
+ IssuedAt string `json:"issued_at"`
+}
+
+// GetToken returns the content of the token
+func (t *Token) GetToken() string {
+ token := t.Token
+ if len(token) == 0 {
+ token = t.AccessToken
+ }
+ return token
}
// ResourceActions ...
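`GetToken` papers over the two response shapes: registries like Azure Container Registry populate only `access_token`, while the Docker registry token service populates `token`. A quick sketch of the precedence:

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/models"
)

func main() {
	t := &models.Token{AccessToken: "abc"}
	fmt.Println(t.GetToken()) // "abc": falls back to access_token

	t = &models.Token{Token: "xyz", AccessToken: "abc"}
	fmt.Println(t.GetToken()) // "xyz": Token wins when both are set
}
```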
diff --git a/src/common/models/user.go b/src/common/models/user.go
index 9b224bd80..77fac1a83 100644
--- a/src/common/models/user.go
+++ b/src/common/models/user.go
@@ -35,13 +35,13 @@ type User struct {
// to it.
Role int `orm:"-" json:"role_id"`
// RoleList []Role `json:"role_list"`
- HasAdminRole bool `orm:"column(sysadmin_flag)" json:"has_admin_role"`
- ResetUUID string `orm:"column(reset_uuid)" json:"reset_uuid"`
- Salt string `orm:"column(salt)" json:"-"`
- CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
- UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
- GroupList []*UserGroup `orm:"-" json:"-"`
- OIDCUserMeta *OIDCUser `orm:"-" json:"oidc_user_meta,omitempty"`
+ HasAdminRole bool `orm:"column(sysadmin_flag)" json:"has_admin_role"`
+ ResetUUID string `orm:"column(reset_uuid)" json:"reset_uuid"`
+ Salt string `orm:"column(salt)" json:"-"`
+ CreationTime time.Time `orm:"column(creation_time);auto_now_add" json:"creation_time"`
+ UpdateTime time.Time `orm:"column(update_time);auto_now" json:"update_time"`
+ GroupIDs []int `orm:"-" json:"-"`
+ OIDCUserMeta *OIDCUser `orm:"-" json:"oidc_user_meta,omitempty"`
}
// UserQuery ...
diff --git a/src/common/quota/driver/driver.go b/src/common/quota/driver/driver.go
new file mode 100644
index 000000000..fbd339e37
--- /dev/null
+++ b/src/common/quota/driver/driver.go
@@ -0,0 +1,59 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package driver
+
+import (
+ "sync"
+
+ "github.com/goharbor/harbor/src/pkg/types"
+)
+
+var (
+ driversMu sync.RWMutex
+ drivers = map[string]Driver{}
+)
+
+// RefObject is the type for a quota reference object
+type RefObject map[string]interface{}
+
+// Driver is the driver interface for quota
+type Driver interface {
+ // HardLimits returns default resource list
+ HardLimits() types.ResourceList
+ // Load returns quota ref object by key
+ Load(key string) (RefObject, error)
+	// Validate validates the hard limits
+ Validate(hardLimits types.ResourceList) error
+}
+
+// Register registers a quota driver
+func Register(name string, driver Driver) {
+ driversMu.Lock()
+ defer driversMu.Unlock()
+ if driver == nil {
+ panic("quota: Register driver is nil")
+ }
+
+ drivers[name] = driver
+}
+
+// Get returns the quota driver by name
+func Get(name string) (Driver, bool) {
+	driversMu.RLock()
+	defer driversMu.RUnlock()
+
+ driver, ok := drivers[name]
+ return driver, ok
+}
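The package acts as a small registry keyed by reference type, and drivers register themselves from an `init` function. A sketch with a hypothetical "user" driver (the name is illustrative; only the "project" driver exists in this change):

```go
package user

import (
	dr "github.com/goharbor/harbor/src/common/quota/driver"
	"github.com/goharbor/harbor/src/pkg/types"
)

type driver struct{}

func (driver) HardLimits() types.ResourceList {
	return types.ResourceList{types.ResourceCount: types.UNLIMITED}
}

func (driver) Load(key string) (dr.RefObject, error) {
	return dr.RefObject{"id": key}, nil
}

func (driver) Validate(hardLimits types.ResourceList) error { return nil }

func init() {
	dr.Register("user", driver{}) // looked up later via dr.Get("user")
}
```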
diff --git a/src/common/quota/driver/mocks/driver.go b/src/common/quota/driver/mocks/driver.go
new file mode 100644
index 000000000..8f8c1ac82
--- /dev/null
+++ b/src/common/quota/driver/mocks/driver.go
@@ -0,0 +1,65 @@
+// Code generated by mockery v1.0.0. DO NOT EDIT.
+
+package mocks
+
+import driver "github.com/goharbor/harbor/src/common/quota/driver"
+import mock "github.com/stretchr/testify/mock"
+import types "github.com/goharbor/harbor/src/pkg/types"
+
+// Driver is an autogenerated mock type for the Driver type
+type Driver struct {
+ mock.Mock
+}
+
+// HardLimits provides a mock function with given fields:
+func (_m *Driver) HardLimits() types.ResourceList {
+ ret := _m.Called()
+
+ var r0 types.ResourceList
+ if rf, ok := ret.Get(0).(func() types.ResourceList); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(types.ResourceList)
+ }
+ }
+
+ return r0
+}
+
+// Load provides a mock function with given fields: key
+func (_m *Driver) Load(key string) (driver.RefObject, error) {
+ ret := _m.Called(key)
+
+ var r0 driver.RefObject
+ if rf, ok := ret.Get(0).(func(string) driver.RefObject); ok {
+ r0 = rf(key)
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(driver.RefObject)
+ }
+ }
+
+ var r1 error
+ if rf, ok := ret.Get(1).(func(string) error); ok {
+ r1 = rf(key)
+ } else {
+ r1 = ret.Error(1)
+ }
+
+ return r0, r1
+}
+
+// Validate provides a mock function with given fields: resources
+func (_m *Driver) Validate(resources types.ResourceList) error {
+ ret := _m.Called(resources)
+
+ var r0 error
+ if rf, ok := ret.Get(0).(func(types.ResourceList) error); ok {
+ r0 = rf(resources)
+ } else {
+ r0 = ret.Error(0)
+ }
+
+ return r0
+}
diff --git a/src/common/quota/driver/project/driver.go b/src/common/quota/driver/project/driver.go
new file mode 100644
index 000000000..8fafded6c
--- /dev/null
+++ b/src/common/quota/driver/project/driver.go
@@ -0,0 +1,143 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package project
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ "github.com/goharbor/harbor/src/common"
+ "github.com/goharbor/harbor/src/common/config"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ dr "github.com/goharbor/harbor/src/common/quota/driver"
+ "github.com/goharbor/harbor/src/pkg/types"
+ "github.com/graph-gophers/dataloader"
+)
+
+func init() {
+ dr.Register("project", newDriver())
+}
+
+func getProjectsBatchFn(ctx context.Context, keys dataloader.Keys) []*dataloader.Result {
+ handleError := func(err error) []*dataloader.Result {
+ var results []*dataloader.Result
+ var result dataloader.Result
+ result.Error = err
+ results = append(results, &result)
+ return results
+ }
+
+ var projectIDs []int64
+ for _, key := range keys {
+ id, err := strconv.ParseInt(key.String(), 10, 64)
+ if err != nil {
+ return handleError(err)
+ }
+ projectIDs = append(projectIDs, id)
+ }
+
+ projects, err := dao.GetProjects(&models.ProjectQueryParam{})
+ if err != nil {
+ return handleError(err)
+ }
+
+ var projectsMap = make(map[int64]*models.Project, len(projectIDs))
+ for _, project := range projects {
+ projectsMap[project.ProjectID] = project
+ }
+
+ var results []*dataloader.Result
+ for _, projectID := range projectIDs {
+ project, ok := projectsMap[projectID]
+ if !ok {
+			return handleError(fmt.Errorf("project not found, project_id: %d", projectID))
+ }
+
+ result := dataloader.Result{
+ Data: project,
+ Error: nil,
+ }
+ results = append(results, &result)
+ }
+
+ return results
+}
+
+type driver struct {
+ cfg *config.CfgManager
+ loader *dataloader.Loader
+}
+
+func (d *driver) HardLimits() types.ResourceList {
+ return types.ResourceList{
+ types.ResourceCount: d.cfg.Get(common.CountPerProject).GetInt64(),
+ types.ResourceStorage: d.cfg.Get(common.StoragePerProject).GetInt64(),
+ }
+}
+
+func (d *driver) Load(key string) (dr.RefObject, error) {
+ thunk := d.loader.Load(context.TODO(), dataloader.StringKey(key))
+
+ result, err := thunk()
+ if err != nil {
+ return nil, err
+ }
+
+ project, ok := result.(*models.Project)
+ if !ok {
+ return nil, fmt.Errorf("bad result for project: %s", key)
+ }
+
+ return dr.RefObject{
+ "id": project.ProjectID,
+ "name": project.Name,
+ "owner_name": project.OwnerName,
+ }, nil
+}
+
+func (d *driver) Validate(hardLimits types.ResourceList) error {
+ resources := map[types.ResourceName]bool{
+ types.ResourceCount: true,
+ types.ResourceStorage: true,
+ }
+
+ for resource, value := range hardLimits {
+ if !resources[resource] {
+			return fmt.Errorf("resource %s not supported", resource)
+ }
+
+ if value <= 0 && value != types.UNLIMITED {
+ return fmt.Errorf("invalid value for resource %s", resource)
+ }
+ }
+
+ for resource := range resources {
+ if _, found := hardLimits[resource]; !found {
+ return fmt.Errorf("resource %s not found", resource)
+ }
+ }
+
+ return nil
+}
+
+func newDriver() dr.Driver {
+ cfg := config.NewDBCfgManager()
+
+ loader := dataloader.NewBatchedLoader(getProjectsBatchFn)
+
+ return &driver{cfg: cfg, loader: loader}
+}
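The dataloader batches and caches lookups: `Load` returns a thunk immediately, and a single call to the batch function resolves every key gathered in the same dispatch window, so repeated quota checks within one request don't each hit the database. A rough sketch of that flow, relying on the `getProjectsBatchFn` and imports of the file above:

```go
// Illustrative only; not part of this change.
func loadTwo() {
	loader := dataloader.NewBatchedLoader(getProjectsBatchFn)

	thunkA := loader.Load(context.TODO(), dataloader.StringKey("1"))
	thunkB := loader.Load(context.TODO(), dataloader.StringKey("2"))

	// Both keys resolve through a single call to the batch function.
	a, errA := thunkA()
	b, errB := thunkB()
	_, _, _, _ = a, b, errA, errB
}
```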
diff --git a/src/common/quota/driver/project/driver_test.go b/src/common/quota/driver/project/driver_test.go
new file mode 100644
index 000000000..992af0ae9
--- /dev/null
+++ b/src/common/quota/driver/project/driver_test.go
@@ -0,0 +1,77 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package project
+
+import (
+ "os"
+ "testing"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ dr "github.com/goharbor/harbor/src/common/quota/driver"
+ "github.com/goharbor/harbor/src/pkg/types"
+ "github.com/stretchr/testify/suite"
+)
+
+type DriverSuite struct {
+ suite.Suite
+}
+
+func (suite *DriverSuite) TestHardLimits() {
+ driver := newDriver()
+
+ suite.Equal(types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: -1}, driver.HardLimits())
+}
+
+func (suite *DriverSuite) TestLoad() {
+ driver := newDriver()
+
+ if ref, err := driver.Load("1"); suite.Nil(err) {
+ obj := dr.RefObject{
+ "id": int64(1),
+ "name": "library",
+ "owner_name": "",
+ }
+
+ suite.Equal(obj, ref)
+ }
+
+ if ref, err := driver.Load("100000"); suite.Error(err) {
+ suite.Empty(ref)
+ }
+
+ if ref, err := driver.Load("library"); suite.Error(err) {
+ suite.Empty(ref)
+ }
+}
+
+func (suite *DriverSuite) TestValidate() {
+ driver := newDriver()
+
+ suite.Nil(driver.Validate(types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 1024}))
+ suite.Error(driver.Validate(types.ResourceList{}))
+ suite.Error(driver.Validate(types.ResourceList{types.ResourceCount: 1}))
+ suite.Error(driver.Validate(types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 0}))
+ suite.Error(driver.Validate(types.ResourceList{types.ResourceCount: 1, types.ResourceName("foo"): 1}))
+}
+
+func TestMain(m *testing.M) {
+ dao.PrepareTestForPostgresSQL()
+
+ os.Exit(m.Run())
+}
+
+func TestRunDriverSuite(t *testing.T) {
+ suite.Run(t, new(DriverSuite))
+}
diff --git a/src/common/quota/errors.go b/src/common/quota/errors.go
new file mode 100644
index 000000000..c828734dd
--- /dev/null
+++ b/src/common/quota/errors.go
@@ -0,0 +1,111 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quota
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/goharbor/harbor/src/pkg/types"
+)
+
+// Errors contains all happened errors
+type Errors []error
+
+// GetErrors gets all errors that have occurred and returns a slice of errors (Error type)
+func (errs Errors) GetErrors() []error {
+ return errs
+}
+
+// Add adds an error to a given slice of errors
+func (errs Errors) Add(newErrors ...error) Errors {
+ for _, err := range newErrors {
+ if err == nil {
+ continue
+ }
+
+ if errors, ok := err.(Errors); ok {
+ errs = errs.Add(errors...)
+ } else {
+ ok = true
+ for _, e := range errs {
+ if err == e {
+ ok = false
+ }
+ }
+ if ok {
+ errs = append(errs, err)
+ }
+ }
+ }
+
+ return errs
+}
+
+// Error takes a slice of all errors that have occurred and returns it as a formatted string
+func (errs Errors) Error() string {
+ var errors = []string{}
+ for _, e := range errs {
+ errors = append(errors, e.Error())
+ }
+ return strings.Join(errors, "; ")
+}
+
+// ResourceOverflow ...
+type ResourceOverflow struct {
+ Resource types.ResourceName
+ HardLimit int64
+ CurrentUsed int64
+ NewUsed int64
+}
+
+func (e *ResourceOverflow) Error() string {
+ resource := e.Resource
+ var (
+ op string
+ delta int64
+ )
+
+ if e.NewUsed > e.CurrentUsed {
+ op = "add"
+ delta = e.NewUsed - e.CurrentUsed
+ } else {
+ op = "subtract"
+ delta = e.CurrentUsed - e.NewUsed
+ }
+
+ return fmt.Sprintf("%s %s of %s resource overflow the hard limit, current usage is %s and hard limit is %s",
+ op, resource.FormatValue(delta), resource,
+ resource.FormatValue(e.CurrentUsed), resource.FormatValue(e.HardLimit))
+}
+
+// NewResourceOverflowError ...
+func NewResourceOverflowError(resource types.ResourceName, hardLimit, currentUsed, newUsed int64) error {
+ return &ResourceOverflow{Resource: resource, HardLimit: hardLimit, CurrentUsed: currentUsed, NewUsed: newUsed}
+}
+
+// ResourceNotFound ...
+type ResourceNotFound struct {
+ Resource types.ResourceName
+}
+
+func (e *ResourceNotFound) Error() string {
+ return fmt.Sprintf("resource %s not found", e.Resource)
+}
+
+// NewResourceNotFoundError ...
+func NewResourceNotFoundError(resource types.ResourceName) error {
+ return &ResourceNotFound{Resource: resource}
+}
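`Errors` aggregates and de-duplicates: `Add` flattens nested `Errors` values, skips nils, and drops exact duplicates, while `Error` joins the messages. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/quota"
	"github.com/goharbor/harbor/src/pkg/types"
)

func main() {
	var errs quota.Errors
	errs = errs.Add(
		nil, // skipped
		quota.NewResourceNotFoundError(types.ResourceCount),
		quota.NewResourceOverflowError(types.ResourceStorage, 1000, 900, 1100),
	)
	fmt.Println(errs.Error()) // messages joined with "; "
}
```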
diff --git a/src/common/quota/manager.go b/src/common/quota/manager.go
new file mode 100644
index 000000000..a70199ed3
--- /dev/null
+++ b/src/common/quota/manager.go
@@ -0,0 +1,276 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quota
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/astaxie/beego/orm"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/quota/driver"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/pkg/types"
+)
+
+// Manager is the manager for quota
+type Manager struct {
+ driver driver.Driver
+ reference string
+ referenceID string
+}
+
+func (m *Manager) addQuota(o orm.Ormer, hardLimits types.ResourceList, now time.Time) (int64, error) {
+ quota := &models.Quota{
+ Reference: m.reference,
+ ReferenceID: m.referenceID,
+ Hard: hardLimits.String(),
+ CreationTime: now,
+ UpdateTime: now,
+ }
+
+ return o.Insert(quota)
+}
+
+func (m *Manager) addUsage(o orm.Ormer, used types.ResourceList, now time.Time, ids ...int64) (int64, error) {
+ usage := &models.QuotaUsage{
+ Reference: m.reference,
+ ReferenceID: m.referenceID,
+ Used: used.String(),
+ CreationTime: now,
+ UpdateTime: now,
+ }
+
+ if len(ids) > 0 {
+ usage.ID = ids[0]
+ }
+
+ return o.Insert(usage)
+}
+
+func (m *Manager) newQuota(o orm.Ormer, hardLimits types.ResourceList, usages ...types.ResourceList) (int64, error) {
+ now := time.Now()
+
+ id, err := m.addQuota(o, hardLimits, now)
+ if err != nil {
+ return 0, err
+ }
+
+ var used types.ResourceList
+ if len(usages) > 0 {
+ used = usages[0]
+ } else {
+ used = types.Zero(hardLimits)
+ }
+
+ if _, err := m.addUsage(o, used, now, id); err != nil {
+ return 0, err
+ }
+
+ return id, nil
+}
+
+func (m *Manager) getQuotaForUpdate(o orm.Ormer) (*models.Quota, error) {
+ quota := &models.Quota{Reference: m.reference, ReferenceID: m.referenceID}
+ if err := o.ReadForUpdate(quota, "reference", "reference_id"); err != nil {
+ if err == orm.ErrNoRows {
+ if _, err := m.newQuota(o, m.driver.HardLimits()); err != nil {
+ return nil, err
+ }
+
+ return m.getQuotaForUpdate(o)
+ }
+
+ return nil, err
+ }
+
+ return quota, nil
+}
+
+func (m *Manager) getUsageForUpdate(o orm.Ormer) (*models.QuotaUsage, error) {
+ usage := &models.QuotaUsage{Reference: m.reference, ReferenceID: m.referenceID}
+ if err := o.ReadForUpdate(usage, "reference", "reference_id"); err != nil {
+ return nil, err
+ }
+
+ return usage, nil
+}
+
+func (m *Manager) updateUsage(o orm.Ormer, resources types.ResourceList,
+ calculate func(types.ResourceList, types.ResourceList) types.ResourceList) error {
+
+ quota, err := m.getQuotaForUpdate(o)
+ if err != nil {
+ return err
+ }
+ hardLimits, err := types.NewResourceList(quota.Hard)
+ if err != nil {
+ return err
+ }
+
+ usage, err := m.getUsageForUpdate(o)
+ if err != nil {
+ return err
+ }
+ used, err := types.NewResourceList(usage.Used)
+ if err != nil {
+ return err
+ }
+
+ newUsed := calculate(used, resources)
+
+ // ensure that new used is never negative
+ if negativeUsed := types.IsNegative(newUsed); len(negativeUsed) > 0 {
+ return fmt.Errorf("quota usage is negative for resource(s): %s", prettyPrintResourceNames(negativeUsed))
+ }
+
+ if err := isSafe(hardLimits, used, newUsed); err != nil {
+ return err
+ }
+
+ usage.Used = newUsed.String()
+ usage.UpdateTime = time.Now()
+
+ _, err = o.Update(usage)
+ return err
+}
+
+// NewQuota creates a new quota for (reference, reference id)
+func (m *Manager) NewQuota(hardLimit types.ResourceList, usages ...types.ResourceList) (int64, error) {
+ var id int64
+ err := dao.WithTransaction(func(o orm.Ormer) (err error) {
+ id, err = m.newQuota(o, hardLimit, usages...)
+ return err
+ })
+
+ if err != nil {
+ return 0, err
+ }
+
+ return id, nil
+}
+
+// DeleteQuota deletes the quota
+func (m *Manager) DeleteQuota() error {
+ return dao.WithTransaction(func(o orm.Ormer) error {
+ quota := &models.Quota{Reference: m.reference, ReferenceID: m.referenceID}
+ if _, err := o.Delete(quota, "reference", "reference_id"); err != nil {
+ return err
+ }
+
+ usage := &models.QuotaUsage{Reference: m.reference, ReferenceID: m.referenceID}
+ if _, err := o.Delete(usage, "reference", "reference_id"); err != nil {
+ return err
+ }
+
+ return nil
+ })
+}
+
+// UpdateQuota updates the quota resource spec
+func (m *Manager) UpdateQuota(hardLimits types.ResourceList) error {
+ o := dao.GetOrmer()
+ if err := m.driver.Validate(hardLimits); err != nil {
+ return err
+ }
+
+ sql := `UPDATE quota SET hard = ? WHERE reference = ? AND reference_id = ?`
+ _, err := o.Raw(sql, hardLimits.String(), m.reference, m.referenceID).Exec()
+
+ return err
+}
+
+// EnsureQuota ensures the reference has a quota and usage:
+// if they do not exist, it creates them;
+// if they exist, it updates the usage.
+func (m *Manager) EnsureQuota(usages types.ResourceList) error {
+ query := &models.QuotaQuery{
+ Reference: m.reference,
+ ReferenceID: m.referenceID,
+ }
+ quotas, err := dao.ListQuotas(query)
+ if err != nil {
+ return err
+ }
+
+ // non-existent: create quota and usage
+ defaultHardLimit := m.driver.HardLimits()
+ if len(quotas) == 0 {
+ _, err := m.NewQuota(defaultHardLimit, usages)
+ if err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // existent
+ used := usages
+ quotaUsed, err := types.NewResourceList(quotas[0].Used)
+ if err != nil {
+ return err
+ }
+ if types.Equals(quotaUsed, used) {
+ return nil
+ }
+	return dao.WithTransaction(func(o orm.Ormer) error {
+ usage, err := m.getUsageForUpdate(o)
+ if err != nil {
+ return err
+ }
+ usage.Used = used.String()
+ usage.UpdateTime = time.Now()
+ _, err = o.Update(usage)
+ if err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+// AddResources adds resources to the usage
+func (m *Manager) AddResources(resources types.ResourceList) error {
+ return dao.WithTransaction(func(o orm.Ormer) error {
+ return m.updateUsage(o, resources, types.Add)
+ })
+}
+
+// SubtractResources subtracts resources from the usage
+func (m *Manager) SubtractResources(resources types.ResourceList) error {
+ return dao.WithTransaction(func(o orm.Ormer) error {
+ return m.updateUsage(o, resources, types.Subtract)
+ })
+}
+
+// NewManager returns a quota manager for the given reference and reference ID
+func NewManager(reference string, referenceID string) (*Manager, error) {
+ d, ok := driver.Get(reference)
+ if !ok {
+		return nil, fmt.Errorf("quota not supported for %s", reference)
+ }
+
+ if _, err := d.Load(referenceID); err != nil {
+ log.Warning(fmt.Sprintf("Load quota reference object (%s, %s) failed: %v", reference, referenceID, err))
+ return nil, err
+ }
+
+ return &Manager{
+ driver: d,
+ reference: reference,
+ referenceID: referenceID,
+ }, nil
+}
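End to end, callers obtain a manager per (reference, reference ID) pair and mutate usage through it; each update runs in a transaction and takes a row lock via `ReadForUpdate`, so concurrent writers serialize. A hedged usage sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/goharbor/harbor/src/common/quota"
	"github.com/goharbor/harbor/src/pkg/types"
)

func main() {
	mgr, err := quota.NewManager("project", "1")
	if err != nil {
		log.Fatal(err)
	}

	// Runs in its own transaction; the row lock taken by
	// getQuotaForUpdate/getUsageForUpdate serializes concurrent updates.
	if err := mgr.AddResources(types.ResourceList{types.ResourceStorage: 1 << 20}); err != nil {
		// On overflow this is a quota.Errors value wrapping *quota.ResourceOverflow.
		fmt.Println(err)
	}
}
```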
diff --git a/src/common/quota/manager_test.go b/src/common/quota/manager_test.go
new file mode 100644
index 000000000..344d06e47
--- /dev/null
+++ b/src/common/quota/manager_test.go
@@ -0,0 +1,342 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quota
+
+import (
+ "fmt"
+ "os"
+ "sync"
+ "testing"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/quota/driver"
+ "github.com/goharbor/harbor/src/common/quota/driver/mocks"
+ "github.com/goharbor/harbor/src/pkg/types"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/suite"
+)
+
+var (
+ hardLimits = types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: 1000}
+ reference = "mock"
+)
+
+func init() {
+ mockDriver := &mocks.Driver{}
+
+ mockHardLimitsFn := func() types.ResourceList {
+ return types.ResourceList{
+ types.ResourceCount: -1,
+ types.ResourceStorage: -1,
+ }
+ }
+
+ mockLoadFn := func(key string) driver.RefObject {
+ return driver.RefObject{"id": key}
+ }
+
+ mockDriver.On("HardLimits").Return(mockHardLimitsFn)
+ mockDriver.On("Load", mock.AnythingOfType("string")).Return(mockLoadFn, nil)
+ mockDriver.On("Validate", mock.AnythingOfType("types.ResourceList")).Return(nil)
+
+ driver.Register(reference, mockDriver)
+}
+
+func mustResourceList(s string) types.ResourceList {
+ resources, _ := types.NewResourceList(s)
+ return resources
+}
+
+type ManagerSuite struct {
+ suite.Suite
+}
+
+func (suite *ManagerSuite) SetupTest() {
+ _, ok := driver.Get(reference)
+ if !ok {
+ suite.Fail("driver not found for %s", reference)
+ }
+}
+
+func (suite *ManagerSuite) quotaManager(referenceIDs ...string) *Manager {
+ referenceID := "1"
+ if len(referenceIDs) > 0 {
+ referenceID = referenceIDs[0]
+ }
+
+ mgr, _ := NewManager(reference, referenceID)
+ return mgr
+}
+
+func (suite *ManagerSuite) TearDownTest() {
+ dao.ClearTable("quota")
+ dao.ClearTable("quota_usage")
+}
+
+func (suite *ManagerSuite) TestNewQuota() {
+ mgr := suite.quotaManager()
+
+ if id, err := mgr.NewQuota(hardLimits); suite.Nil(err) {
+ quota, _ := dao.GetQuota(id)
+ suite.Equal(hardLimits, mustResourceList(quota.Hard))
+ }
+
+ mgr = suite.quotaManager("2")
+ used := types.ResourceList{types.ResourceStorage: 100}
+ if id, err := mgr.NewQuota(hardLimits, used); suite.Nil(err) {
+ quota, _ := dao.GetQuota(id)
+ suite.Equal(hardLimits, mustResourceList(quota.Hard))
+
+ usage, _ := dao.GetQuotaUsage(id)
+ suite.Equal(used, mustResourceList(usage.Used))
+ }
+}
+
+func (suite *ManagerSuite) TestDeleteQuota() {
+ mgr := suite.quotaManager()
+
+ id, err := mgr.NewQuota(hardLimits)
+ if suite.Nil(err) {
+ quota, _ := dao.GetQuota(id)
+ suite.Equal(hardLimits, mustResourceList(quota.Hard))
+ }
+
+ if err := mgr.DeleteQuota(); suite.Nil(err) {
+ quota, _ := dao.GetQuota(id)
+ suite.Nil(quota)
+ }
+}
+
+func (suite *ManagerSuite) TestUpdateQuota() {
+ mgr := suite.quotaManager()
+
+ id, _ := mgr.NewQuota(hardLimits)
+ largeHardLimits := types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: 1000000}
+
+ if err := mgr.UpdateQuota(largeHardLimits); suite.Nil(err) {
+ quota, _ := dao.GetQuota(id)
+ suite.Equal(largeHardLimits, mustResourceList(quota.Hard))
+ }
+}
+
+func (suite *ManagerSuite) TestEnsureQuota() {
+ // non-existent
+ nonExistRefID := "3"
+ mgr := suite.quotaManager(nonExistRefID)
+ infinite := types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: -1}
+ usage := types.ResourceList{types.ResourceCount: 10, types.ResourceStorage: 10}
+ err := mgr.EnsureQuota(usage)
+ suite.Nil(err)
+ query := &models.QuotaQuery{
+ Reference: reference,
+ ReferenceID: nonExistRefID,
+ }
+ quotas, err := dao.ListQuotas(query)
+ suite.Nil(err)
+ suite.Equal(usage, mustResourceList(quotas[0].Used))
+ suite.Equal(infinite, mustResourceList(quotas[0].Hard))
+
+ // existent
+ existRefID := "4"
+ mgr = suite.quotaManager(existRefID)
+ used := types.ResourceList{types.ResourceCount: 11, types.ResourceStorage: 11}
+ if id, err := mgr.NewQuota(hardLimits, used); suite.Nil(err) {
+ quota, _ := dao.GetQuota(id)
+ suite.Equal(hardLimits, mustResourceList(quota.Hard))
+
+ usage, _ := dao.GetQuotaUsage(id)
+ suite.Equal(used, mustResourceList(usage.Used))
+ }
+
+ usage2 := types.ResourceList{types.ResourceCount: 12, types.ResourceStorage: 12}
+ err = mgr.EnsureQuota(usage2)
+ suite.Nil(err)
+ query2 := &models.QuotaQuery{
+ Reference: reference,
+ ReferenceID: existRefID,
+ }
+ quotas2, err := dao.ListQuotas(query2)
+ suite.Equal(usage2, mustResourceList(quotas2[0].Used))
+ suite.Equal(hardLimits, mustResourceList(quotas2[0].Hard))
+
+}
+
+func (suite *ManagerSuite) TestQuotaAutoCreation() {
+ for i := 0; i < 10; i++ {
+ mgr := suite.quotaManager(fmt.Sprintf("%d", i))
+ resource := types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 100}
+
+ suite.Nil(mgr.AddResources(resource))
+ }
+}
+
+func (suite *ManagerSuite) TestAddResources() {
+ mgr := suite.quotaManager()
+ id, _ := mgr.NewQuota(hardLimits)
+
+ resource := types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 100}
+
+ if suite.Nil(mgr.AddResources(resource)) {
+ usage, _ := dao.GetQuotaUsage(id)
+ suite.Equal(resource, mustResourceList(usage.Used))
+ }
+
+ if suite.Nil(mgr.AddResources(resource)) {
+ usage, _ := dao.GetQuotaUsage(id)
+ suite.Equal(types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 200}, mustResourceList(usage.Used))
+ }
+
+ if err := mgr.AddResources(types.ResourceList{types.ResourceStorage: 10000}); suite.Error(err) {
+ if errs, ok := err.(Errors); suite.True(ok) {
+ for _, err := range errs {
+ suite.IsType(&ResourceOverflow{}, err)
+ }
+ }
+ }
+}
+
+func (suite *ManagerSuite) TestSubtractResources() {
+ mgr := suite.quotaManager()
+ id, _ := mgr.NewQuota(hardLimits)
+
+ resource := types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 100}
+
+ if suite.Nil(mgr.AddResources(resource)) {
+ usage, _ := dao.GetQuotaUsage(id)
+ suite.Equal(resource, mustResourceList(usage.Used))
+ }
+
+ if suite.Nil(mgr.SubtractResources(resource)) {
+ usage, _ := dao.GetQuotaUsage(id)
+ suite.Equal(types.ResourceList{types.ResourceCount: 0, types.ResourceStorage: 0}, mustResourceList(usage.Used))
+ }
+}
+
+func (suite *ManagerSuite) TestRaceAddResources() {
+ mgr := suite.quotaManager()
+ mgr.NewQuota(hardLimits)
+
+ resources := types.ResourceList{
+ types.ResourceStorage: 100,
+ }
+
+ var wg sync.WaitGroup
+
+ results := make([]bool, 100)
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ results[i] = mgr.AddResources(resources) == nil
+ }(i)
+ }
+ wg.Wait()
+
+ var success int
+ for _, result := range results {
+ if result {
+ success++
+ }
+ }
+
+ suite.Equal(10, success)
+}
+
+func (suite *ManagerSuite) TestRaceSubtractResources() {
+ mgr := suite.quotaManager()
+ mgr.NewQuota(hardLimits, types.ResourceList{types.ResourceStorage: 1000})
+
+ resources := types.ResourceList{
+ types.ResourceStorage: 100,
+ }
+
+ var wg sync.WaitGroup
+
+ results := make([]bool, 100)
+ for i := 0; i < 100; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+
+ results[i] = mgr.SubtractResources(resources) == nil
+ }(i)
+ }
+ wg.Wait()
+
+ var success int
+ for _, result := range results {
+ if result {
+ success++
+ }
+ }
+
+ suite.Equal(10, success)
+}
+
+func TestMain(m *testing.M) {
+ dao.PrepareTestForPostgresSQL()
+
+ if result := m.Run(); result != 0 {
+ os.Exit(result)
+ }
+}
+
+func TestRunManagerSuite(t *testing.T) {
+ suite.Run(t, new(ManagerSuite))
+}
+
+func BenchmarkAddResources(b *testing.B) {
+ defer func() {
+ dao.ClearTable("quota")
+ dao.ClearTable("quota_usage")
+ }()
+
+ mgr, _ := NewManager(reference, "1")
+ mgr.NewQuota(types.ResourceList{types.ResourceStorage: int64(b.N)})
+
+ resource := types.ResourceList{
+ types.ResourceStorage: 1,
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ mgr.AddResources(resource)
+ }
+ b.StopTimer()
+}
+
+func BenchmarkAddResourcesParallel(b *testing.B) {
+ defer func() {
+ dao.ClearTable("quota")
+ dao.ClearTable("quota_usage")
+ }()
+
+ mgr, _ := NewManager(reference, "1")
+ mgr.NewQuota(types.ResourceList{})
+
+ resource := types.ResourceList{
+ types.ResourceStorage: 1,
+ }
+
+ b.ResetTimer()
+ b.RunParallel(func(b *testing.PB) {
+ for b.Next() {
+ mgr.AddResources(resource)
+ }
+ })
+ b.StopTimer()
+}
diff --git a/src/replication/adapter/chart_registry.go b/src/common/quota/quota.go
similarity index 57%
rename from src/replication/adapter/chart_registry.go
rename to src/common/quota/quota.go
index fef80e18e..4446d61eb 100644
--- a/src/replication/adapter/chart_registry.go
+++ b/src/common/quota/quota.go
@@ -12,19 +12,24 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package adapter
+package quota
import (
- "io"
+ "fmt"
- "github.com/goharbor/harbor/src/replication/model"
+ "github.com/goharbor/harbor/src/common/quota/driver"
+ "github.com/goharbor/harbor/src/pkg/types"
+
+ // project driver for quota
+ _ "github.com/goharbor/harbor/src/common/quota/driver/project"
)
-// ChartRegistry defines the capabilities that a chart registry should have
-type ChartRegistry interface {
- FetchCharts(filters []*model.Filter) ([]*model.Resource, error)
- ChartExist(name, version string) (bool, error)
- DownloadChart(name, version string) (io.ReadCloser, error)
- UploadChart(name, version string, chart io.Reader) error
- DeleteChart(name, version string) error
+// Validate validates the hard limits
+func Validate(reference string, hardLimits types.ResourceList) error {
+ d, ok := driver.Get(reference)
+ if !ok {
+ return fmt.Errorf("quota not support for %s", reference)
+ }
+
+ return d.Validate(hardLimits)
}
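Validate resolves a driver by reference name, and the blank import above exists purely for its registration side effect. A hedged sketch of the registry pattern this presumably follows (mirroring database/sql drivers; the names below are illustrative, not Harbor's actual driver package):

    package driver

    import "sync"

    // Driver validates hard limits for one reference type, e.g. "project".
    type Driver interface {
        Validate(hardLimits map[string]int64) error
    }

    var (
        mu      sync.RWMutex
        drivers = map[string]Driver{}
    )

    // Register is typically called from a driver package's init(),
    // which the blank import triggers.
    func Register(name string, d Driver) {
        mu.Lock()
        defer mu.Unlock()
        drivers[name] = d
    }

    // Get looks up a registered driver by reference name.
    func Get(name string) (Driver, bool) {
        mu.RLock()
        defer mu.RUnlock()
        d, ok := drivers[name]
        return d, ok
    }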
diff --git a/src/common/quota/quota_test.go b/src/common/quota/quota_test.go
new file mode 100644
index 000000000..cc089e86c
--- /dev/null
+++ b/src/common/quota/quota_test.go
@@ -0,0 +1,45 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quota
+
+import (
+ "testing"
+
+ _ "github.com/goharbor/harbor/src/common/quota/driver/project"
+ "github.com/goharbor/harbor/src/pkg/types"
+)
+
+func TestValidate(t *testing.T) {
+ type args struct {
+ reference string
+ hardLimits types.ResourceList
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {"valid", args{"project", types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 1}}, false},
+ {"invalid", args{"project", types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 0}}, true},
+ {"not support", args{"not support", types.ResourceList{types.ResourceCount: 1}}, true},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := Validate(tt.args.reference, tt.args.hardLimits); (err != nil) != tt.wantErr {
+ t.Errorf("Validate() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/src/common/quota/types.go b/src/common/quota/types.go
new file mode 100644
index 000000000..35a6f60cc
--- /dev/null
+++ b/src/common/quota/types.go
@@ -0,0 +1,32 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quota
+
+import (
+ "github.com/goharbor/harbor/src/pkg/types"
+)
+
+var (
+ // ResourceCount alias types.ResourceCount
+ ResourceCount = types.ResourceCount
+ // ResourceStorage alias types.ResourceStorage
+ ResourceStorage = types.ResourceStorage
+)
+
+// ResourceName alias types.ResourceName
+type ResourceName = types.ResourceName
+
+// ResourceList alias types.ResourceList
+type ResourceList = types.ResourceList
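These are type aliases (note the '='), not defined types: quota.ResourceList and types.ResourceList are the same type, so values cross the package boundary without conversion. A self-contained illustration of why that matters (toy types, not Harbor's):

    package main

    import "fmt"

    type ResourceList map[string]int64 // a defined type
    type AliasList = ResourceList      // an alias: the very same type
    type OtherList map[string]int64    // another defined type with the same shape

    func total(l ResourceList) (n int64) {
        for _, v := range l {
            n += v
        }
        return
    }

    func main() {
        fmt.Println(total(AliasList{"storage": 100}))             // OK: identical type
        fmt.Println(total(ResourceList(OtherList{"storage": 1}))) // a defined type needs conversion
    }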
diff --git a/src/common/quota/util.go b/src/common/quota/util.go
new file mode 100644
index 000000000..5f8687cc7
--- /dev/null
+++ b/src/common/quota/util.go
@@ -0,0 +1,57 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quota
+
+import (
+ "sort"
+ "strings"
+
+ "github.com/goharbor/harbor/src/pkg/types"
+)
+
+func isSafe(hardLimits types.ResourceList, currentUsed types.ResourceList, newUsed types.ResourceList) error {
+ var errs Errors
+
+ for resource, value := range newUsed {
+ hardLimit, found := hardLimits[resource]
+ if !found {
+ errs = errs.Add(NewResourceNotFoundError(resource))
+ continue
+ }
+
+ if hardLimit == types.UNLIMITED || value == currentUsed[resource] {
+ continue
+ }
+
+ if value > hardLimit {
+ errs = errs.Add(NewResourceOverflowError(resource, hardLimit, currentUsed[resource], value))
+ }
+ }
+
+ if len(errs) > 0 {
+ return errs
+ }
+
+ return nil
+}
+
+func prettyPrintResourceNames(a []types.ResourceName) string {
+ values := []string{}
+ for _, value := range a {
+ values = append(values, string(value))
+ }
+ sort.Strings(values)
+ return strings.Join(values, ",")
+}
diff --git a/src/common/quota/util_test.go b/src/common/quota/util_test.go
new file mode 100644
index 000000000..d0db1166a
--- /dev/null
+++ b/src/common/quota/util_test.go
@@ -0,0 +1,78 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quota
+
+import (
+ "testing"
+
+ "github.com/goharbor/harbor/src/pkg/types"
+)
+
+func Test_isSafe(t *testing.T) {
+ type args struct {
+ hardLimits types.ResourceList
+ currentUsed types.ResourceList
+ newUsed types.ResourceList
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ "unlimited",
+ args{
+ types.ResourceList{types.ResourceStorage: types.UNLIMITED},
+ types.ResourceList{types.ResourceStorage: 1000},
+ types.ResourceList{types.ResourceStorage: 1000},
+ },
+ false,
+ },
+ {
+ "ok",
+ args{
+ types.ResourceList{types.ResourceStorage: 100},
+ types.ResourceList{types.ResourceStorage: 10},
+ types.ResourceList{types.ResourceStorage: 1},
+ },
+ false,
+ },
+ {
+ "over the hard limit",
+ args{
+ types.ResourceList{types.ResourceStorage: 100},
+ types.ResourceList{types.ResourceStorage: 0},
+ types.ResourceList{types.ResourceStorage: 200},
+ },
+ true,
+ },
+ {
+ "hard limit not found",
+ args{
+ types.ResourceList{types.ResourceStorage: 100},
+ types.ResourceList{types.ResourceCount: 0},
+ types.ResourceList{types.ResourceCount: 1},
+ },
+ true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := isSafe(tt.args.hardLimits, tt.args.currentUsed, tt.args.newUsed); (err != nil) != tt.wantErr {
+ t.Errorf("isSafe() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/src/common/rbac/const.go b/src/common/rbac/const.go
old mode 100644
new mode 100755
index 226759d34..6cadbddef
--- a/src/common/rbac/const.go
+++ b/src/common/rbac/const.go
@@ -27,6 +27,8 @@ const (
ActionUpdate = Action("update")
ActionDelete = Action("delete")
ActionList = Action("list")
+
+ ActionOperate = Action("operate")
)
// const resource variables
@@ -46,6 +48,7 @@ const (
ResourceReplicationExecution = Resource("replication-execution")
ResourceReplicationTask = Resource("replication-task")
ResourceRepository = Resource("repository")
+ ResourceTagRetention = Resource("tag-retention")
ResourceRepositoryLabel = Resource("repository-label")
ResourceRepositoryTag = Resource("repository-tag")
ResourceRepositoryTagLabel = Resource("repository-tag-label")
@@ -53,5 +56,6 @@ const (
ResourceRepositoryTagScanJob = Resource("repository-tag-scan-job")
ResourceRepositoryTagVulnerability = Resource("repository-tag-vulnerability")
ResourceRobot = Resource("robot")
+ ResourceNotificationPolicy = Resource("notification-policy")
ResourceSelf = Resource("") // subresource for self
)
diff --git a/src/common/rbac/project/util.go b/src/common/rbac/project/util.go
index 2a7a6968d..3de3f5810 100644
--- a/src/common/rbac/project/util.go
+++ b/src/common/rbac/project/util.go
@@ -54,6 +54,7 @@ var (
{Resource: rbac.ResourceSelf, Action: rbac.ActionDelete},
{Resource: rbac.ResourceMember, Action: rbac.ActionCreate},
+ {Resource: rbac.ResourceMember, Action: rbac.ActionRead},
{Resource: rbac.ResourceMember, Action: rbac.ActionUpdate},
{Resource: rbac.ResourceMember, Action: rbac.ActionDelete},
{Resource: rbac.ResourceMember, Action: rbac.ActionList},
@@ -87,6 +88,13 @@ var (
{Resource: rbac.ResourceReplicationTask, Action: rbac.ActionUpdate},
{Resource: rbac.ResourceReplicationTask, Action: rbac.ActionDelete},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionCreate},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionRead},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionUpdate},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionDelete},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionList},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionOperate},
+
{Resource: rbac.ResourceLabel, Action: rbac.ActionCreate},
{Resource: rbac.ResourceLabel, Action: rbac.ActionRead},
{Resource: rbac.ResourceLabel, Action: rbac.ActionUpdate},
@@ -143,6 +151,12 @@ var (
{Resource: rbac.ResourceRobot, Action: rbac.ActionUpdate},
{Resource: rbac.ResourceRobot, Action: rbac.ActionDelete},
{Resource: rbac.ResourceRobot, Action: rbac.ActionList},
+
+ {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionCreate},
+ {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionUpdate},
+ {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionDelete},
+ {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList},
+ {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionRead},
}
)
diff --git a/src/common/rbac/project/visitor_role.go b/src/common/rbac/project/visitor_role.go
old mode 100644
new mode 100755
index 4287f97db..36202a602
--- a/src/common/rbac/project/visitor_role.go
+++ b/src/common/rbac/project/visitor_role.go
@@ -27,6 +27,7 @@ var (
{Resource: rbac.ResourceSelf, Action: rbac.ActionDelete},
{Resource: rbac.ResourceMember, Action: rbac.ActionCreate},
+ {Resource: rbac.ResourceMember, Action: rbac.ActionRead},
{Resource: rbac.ResourceMember, Action: rbac.ActionUpdate},
{Resource: rbac.ResourceMember, Action: rbac.ActionDelete},
{Resource: rbac.ResourceMember, Action: rbac.ActionList},
@@ -60,6 +61,13 @@ var (
{Resource: rbac.ResourceRepository, Action: rbac.ActionPull},
{Resource: rbac.ResourceRepository, Action: rbac.ActionPush},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionCreate},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionRead},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionUpdate},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionDelete},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionList},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionOperate},
+
{Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionCreate},
{Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionDelete},
{Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionList},
@@ -100,11 +108,18 @@ var (
{Resource: rbac.ResourceRobot, Action: rbac.ActionUpdate},
{Resource: rbac.ResourceRobot, Action: rbac.ActionDelete},
{Resource: rbac.ResourceRobot, Action: rbac.ActionList},
+
+ {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionCreate},
+ {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionUpdate},
+ {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionDelete},
+ {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList},
+ {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionRead},
},
"master": {
{Resource: rbac.ResourceSelf, Action: rbac.ActionRead},
+ {Resource: rbac.ResourceMember, Action: rbac.ActionRead},
{Resource: rbac.ResourceMember, Action: rbac.ActionList},
{Resource: rbac.ResourceMetadata, Action: rbac.ActionCreate},
@@ -131,6 +146,13 @@ var (
{Resource: rbac.ResourceRepository, Action: rbac.ActionPush},
{Resource: rbac.ResourceRepository, Action: rbac.ActionPull},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionCreate},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionRead},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionUpdate},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionDelete},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionList},
+ {Resource: rbac.ResourceTagRetention, Action: rbac.ActionOperate},
+
{Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionCreate},
{Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionDelete},
{Resource: rbac.ResourceRepositoryLabel, Action: rbac.ActionList},
@@ -167,11 +189,14 @@ var (
{Resource: rbac.ResourceRobot, Action: rbac.ActionRead},
{Resource: rbac.ResourceRobot, Action: rbac.ActionList},
+
+ {Resource: rbac.ResourceNotificationPolicy, Action: rbac.ActionList},
},
"developer": {
{Resource: rbac.ResourceSelf, Action: rbac.ActionRead},
+ {Resource: rbac.ResourceMember, Action: rbac.ActionRead},
{Resource: rbac.ResourceMember, Action: rbac.ActionList},
{Resource: rbac.ResourceLog, Action: rbac.ActionList},
@@ -221,6 +246,7 @@ var (
"guest": {
{Resource: rbac.ResourceSelf, Action: rbac.ActionRead},
+ {Resource: rbac.ResourceMember, Action: rbac.ActionRead},
{Resource: rbac.ResourceMember, Action: rbac.ActionList},
{Resource: rbac.ResourceLog, Action: rbac.ActionList},
diff --git a/src/common/security/local/context.go b/src/common/security/local/context.go
index 655fe34b1..907521e2f 100644
--- a/src/common/security/local/context.go
+++ b/src/common/security/local/context.go
@@ -17,7 +17,6 @@ package local
import (
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
- "github.com/goharbor/harbor/src/common/dao/group"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/rbac"
"github.com/goharbor/harbor/src/common/rbac/project"
@@ -128,10 +127,24 @@ func (s *SecurityContext) GetProjectRoles(projectIDOrName interface{}) []int {
roles = append(roles, common.RoleGuest)
}
}
- if len(roles) != 0 {
- return roles
+ return mergeRoles(roles, s.GetRolesByGroup(projectIDOrName))
+}
+
+func mergeRoles(rolesA, rolesB []int) []int {
+ type void struct{}
+ var roles []int
+ var placeHolder void
+ roleSet := make(map[int]void)
+ for _, r := range rolesA {
+ roleSet[r] = placeHolder
}
- return s.GetRolesByGroup(projectIDOrName)
+ for _, r := range rolesB {
+ roleSet[r] = placeHolder
+ }
+ for r := range roleSet {
+ roles = append(roles, r)
+ }
+ return roles
}
// GetRolesByGroup - Get the group role of current user to the project
@@ -140,12 +153,11 @@ func (s *SecurityContext) GetRolesByGroup(projectIDOrName interface{}) []int {
user := s.user
project, err := s.pm.Get(projectIDOrName)
// No user, group or project info
- if err != nil || project == nil || user == nil || len(user.GroupList) == 0 {
+ if err != nil || project == nil || user == nil || len(user.GroupIDs) == 0 {
return roles
}
- // Get role by LDAP group
- groupDNConditions := group.GetGroupDNQueryCondition(user.GroupList)
- roles, err = dao.GetRolesByLDAPGroup(project.ProjectID, groupDNConditions)
+ // Get role by Group ID
+ roles, err = dao.GetRolesByGroupID(project.ProjectID, user.GroupIDs)
if err != nil {
return nil
}
@@ -157,8 +169,8 @@ func (s *SecurityContext) GetMyProjects() ([]*models.Project, error) {
result, err := s.pm.List(
&models.ProjectQueryParam{
Member: &models.MemberQuery{
- Name: s.GetUsername(),
- GroupList: s.user.GroupList,
+ Name: s.GetUsername(),
+ GroupIDs: s.user.GroupIDs,
},
})
if err != nil {
diff --git a/src/common/security/local/context_test.go b/src/common/security/local/context_test.go
index 955c041cc..ffbb51885 100644
--- a/src/common/security/local/context_test.go
+++ b/src/common/security/local/context_test.go
@@ -20,6 +20,7 @@ import (
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/dao/group"
"github.com/goharbor/harbor/src/common/dao/project"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/rbac"
@@ -253,9 +254,16 @@ func TestHasPushPullPermWithGroup(t *testing.T) {
if err != nil {
t.Errorf("Error occurred when GetUser: %v", err)
}
- developer.GroupList = []*models.UserGroup{
- {GroupName: "test_group", GroupType: 1, LdapGroupDN: "cn=harbor_user,dc=example,dc=com"},
+
+ userGroups, err := group.QueryUserGroup(models.UserGroup{GroupType: common.LDAPGroupType, LdapGroupDN: "cn=harbor_user,dc=example,dc=com"})
+ if err != nil {
+ t.Errorf("Failed to query user group %v", err)
}
+ if len(userGroups) < 1 {
+ t.Errorf("Failed to retrieve user group")
+ }
+
+ developer.GroupIDs = []int{userGroups[0].ID}
resource := rbac.NewProjectNamespace(project.Name).Resource(rbac.ResourceRepository)
@@ -332,9 +340,15 @@ func TestSecurityContext_GetRolesByGroup(t *testing.T) {
if err != nil {
t.Errorf("Error occurred when GetUser: %v", err)
}
- developer.GroupList = []*models.UserGroup{
- {GroupName: "test_group", GroupType: 1, LdapGroupDN: "cn=harbor_user,dc=example,dc=com"},
+ userGroups, err := group.QueryUserGroup(models.UserGroup{GroupType: common.LDAPGroupType, LdapGroupDN: "cn=harbor_user,dc=example,dc=com"})
+ if err != nil {
+ t.Errorf("Failed to query user group %v", err)
}
+ if len(userGroups) < 1 {
+ t.Errorf("Failed to retrieve user group")
+ }
+
+ developer.GroupIDs = []int{userGroups[0].ID}
type fields struct {
user *models.User
pm promgr.ProjectManager
@@ -394,3 +408,27 @@ func TestSecurityContext_GetMyProjects(t *testing.T) {
})
}
}
+
+func Test_mergeRoles(t *testing.T) {
+ type args struct {
+ rolesA []int
+ rolesB []int
+ }
+ tests := []struct {
+ name string
+ args args
+ want []int
+ }{
+ {"normal", args{[]int{3, 4}, []int{1, 2, 3, 4}}, []int{1, 2, 3, 4}},
+ {"empty", args{[]int{}, []int{}}, []int{}},
+ {"left empty", args{[]int{}, []int{1, 2, 3, 4}}, []int{1, 2, 3, 4}},
+ {"right empty", args{[]int{1, 2, 3, 4}, []int{}}, []int{1, 2, 3, 4}},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := mergeRoles(tt.args.rolesA, tt.args.rolesB); !test.CheckSetsEqual(got, tt.want) {
+ t.Errorf("mergeRoles() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/src/common/utils/clair/utils.go b/src/common/utils/clair/utils.go
index 2eb986e75..48ba3711e 100644
--- a/src/common/utils/clair/utils.go
+++ b/src/common/utils/clair/utils.go
@@ -15,10 +15,7 @@
package clair
import (
- "fmt"
- "github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
- "github.com/goharbor/harbor/src/common/utils/log"
"strings"
)
@@ -41,26 +38,6 @@ func ParseClairSev(clairSev string) models.Severity {
}
}
-// UpdateScanOverview qeuries the vulnerability based on the layerName and update the record in img_scan_overview table based on digest.
-func UpdateScanOverview(digest, layerName string, clairEndpoint string, l ...*log.Logger) error {
- var logger *log.Logger
- if len(l) > 1 {
- return fmt.Errorf("More than one logger specified")
- } else if len(l) == 1 {
- logger = l[0]
- } else {
- logger = log.DefaultLogger()
- }
- client := NewClient(clairEndpoint, logger)
- res, err := client.GetResult(layerName)
- if err != nil {
- logger.Errorf("Failed to get result from Clair, error: %v", err)
- return err
- }
- compOverview, sev := transformVuln(res)
- return dao.UpdateImgScanOverview(digest, layerName, sev, compOverview)
-}
-
func transformVuln(clairVuln *models.ClairLayerEnvelope) (*models.ComponentsOverview, models.Severity) {
vulnMap := make(map[models.Severity]int)
features := clairVuln.Layer.Features
diff --git a/src/common/utils/ldap/ldap.go b/src/common/utils/ldap/ldap.go
index e7c453376..512af7618 100644
--- a/src/common/utils/ldap/ldap.go
+++ b/src/common/utils/ldap/ldap.go
@@ -220,6 +220,27 @@ func (session *Session) SearchUser(username string) ([]models.LdapUser, error) {
}
u.GroupDNList = groupDNList
}
+
+ log.Debugf("Searching for nested groups")
+ nestedGroupDNList := []string{}
+ nestedGroupFilter := createNestedGroupFilter(ldapEntry.DN)
+ result, err := session.SearchLdap(nestedGroupFilter)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, groupEntry := range result.Entries {
+ if !contains(u.GroupDNList, groupEntry.DN) {
+ nestedGroupDNList = append(nestedGroupDNList, strings.TrimSpace(groupEntry.DN))
+ log.Debugf("Found group %v", groupEntry.DN)
+ } else {
+ log.Debugf("%v is already in GroupDNList", groupEntry.DN)
+ }
+ }
+
+ u.GroupDNList = append(u.GroupDNList, nestedGroupDNList...)
+ log.Debugf("Done searching for nested groups")
+
u.DN = ldapEntry.DN
ldapUsers = append(ldapUsers, u)
@@ -330,13 +351,13 @@ func (session *Session) createUserFilter(username string) string {
filterTag = goldap.EscapeFilter(username)
}
- ldapFilter := session.ldapConfig.LdapFilter
+ ldapFilter := normalizeFilter(session.ldapConfig.LdapFilter)
ldapUID := session.ldapConfig.LdapUID
if ldapFilter == "" {
ldapFilter = "(" + ldapUID + "=" + filterTag + ")"
} else {
- ldapFilter = "(&" + ldapFilter + "(" + ldapUID + "=" + filterTag + "))"
+ ldapFilter = "(&(" + ldapFilter + ")(" + ldapUID + "=" + filterTag + "))"
}
log.Debug("ldap filter :", ldapFilter)
@@ -404,6 +425,7 @@ func createGroupSearchFilter(oldFilter, groupName, groupNameAttribute string) st
filter := ""
groupName = goldap.EscapeFilter(groupName)
groupNameAttribute = goldap.EscapeFilter(groupNameAttribute)
+ oldFilter = normalizeFilter(oldFilter)
if len(oldFilter) == 0 {
if len(groupName) == 0 {
filter = groupNameAttribute + "=*"
@@ -419,3 +441,26 @@ func createGroupSearchFilter(oldFilter, groupName, groupNameAttribute string) st
}
return filter
}
+
+func createNestedGroupFilter(userDN string) string {
+ filter := ""
+ filter = "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:=" + userDN + "))"
+ return filter
+}
+
+func contains(s []string, e string) bool {
+ for _, a := range s {
+ if a == e {
+ return true
+ }
+ }
+ return false
+}
+
+// normalizeFilter removes the outermost '(' and ')' from an ldap filter
+func normalizeFilter(filter string) string {
+ norFilter := strings.TrimSpace(filter)
+ norFilter = strings.TrimPrefix(norFilter, "(")
+ norFilter = strings.TrimSuffix(norFilter, ")")
+ return norFilter
+}
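Two notes on the helpers above. The OID in createNestedGroupFilter, 1.2.840.113556.1.4.1941, is Active Directory's LDAP_MATCHING_RULE_IN_CHAIN, which matches group membership transitively and is what surfaces nested groups in SearchUser. And normalizeFilter exists so that a configured filter composes identically whether or not the admin wrapped it in parentheses; previously an unwrapped filter produced an invalid composite. A small sketch of the composition (illustrative names):

    package main

    import (
        "fmt"
        "strings"
    )

    func normalize(filter string) string {
        f := strings.TrimSpace(filter)
        f = strings.TrimPrefix(f, "(")
        return strings.TrimSuffix(f, ")")
    }

    func main() {
        for _, configured := range []string{"(objectclass=user)", "objectclass=user"} {
            // Both forms now yield (&(objectclass=user)(uid=jane))
            fmt.Println("(&(" + normalize(configured) + ")(uid=jane))")
        }
    }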
diff --git a/src/common/utils/ldap/ldap_test.go b/src/common/utils/ldap/ldap_test.go
index ed80fd17a..e7b3344a6 100644
--- a/src/common/utils/ldap/ldap_test.go
+++ b/src/common/utils/ldap/ldap_test.go
@@ -369,3 +369,25 @@ func TestSession_SearchGroupByDN(t *testing.T) {
})
}
}
+
+func TestNormalizeFilter(t *testing.T) {
+ type args struct {
+ filter string
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ }{
+ {"normal test", args{"(objectclass=user)"}, "objectclass=user"},
+ {"with space", args{" (objectclass=user) "}, "objectclass=user"},
+ {"nothing", args{"objectclass=user"}, "objectclass=user"},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := normalizeFilter(tt.args.filter); got != tt.want {
+ t.Errorf("normalizeFilter() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/src/common/utils/log/logger.go b/src/common/utils/log/logger.go
index 882f8a716..5c8b6b376 100644
--- a/src/common/utils/log/logger.go
+++ b/src/common/utils/log/logger.go
@@ -278,7 +278,7 @@ func line(callDepth int) string {
line = 0
}
l := strings.SplitN(file, srcSeparator, 2)
- if len(l) > 0 {
+ if len(l) > 1 {
file = l[1]
}
return fmt.Sprintf("[%s:%d]:", file, line)
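The one-character guard change closes a panic: strings.SplitN with limit 2 returns a single-element slice when the separator never occurs, so l[1] blew up for caller files outside the source tree. A quick repro, assuming a separator like "/src/":

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        l := strings.SplitN("/opt/tools/main.go", "/src/", 2)
        fmt.Println(len(l)) // 1 — indexing l[1] would panic here

        l = strings.SplitN("/go/src/app/main.go", "/src/", 2)
        fmt.Println(l[1]) // "app/main.go" — the len(l) > 1 guard permits this path
    }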
diff --git a/src/common/utils/notary/helper.go b/src/common/utils/notary/helper.go
index 76bd2ac0f..db80a9450 100644
--- a/src/common/utils/notary/helper.go
+++ b/src/common/utils/notary/helper.go
@@ -22,6 +22,8 @@ import (
"path"
"strings"
+ "github.com/goharbor/harbor/src/common/utils/notary/model"
+
"github.com/docker/distribution/registry/auth/token"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/common/utils/registry"
@@ -41,14 +43,6 @@ var (
mockRetriever notary.PassRetriever
)
-// Target represents the json object of a target of a docker image in notary.
-// The struct will be used when repository is know so it won'g contain the name of a repository.
-type Target struct {
- Tag string `json:"tag"`
- Hashes data.Hashes `json:"hashes"`
- // TODO: update fields as needed.
-}
-
func init() {
mockRetriever = func(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error) {
passphrase = "hardcode"
@@ -60,7 +54,7 @@ func init() {
}
// GetInternalTargets wraps GetTargets to read config values for getting full-qualified repo from internal notary instance.
-func GetInternalTargets(notaryEndpoint string, username string, repo string) ([]Target, error) {
+func GetInternalTargets(notaryEndpoint string, username string, repo string) ([]model.Target, error) {
ext, err := config.ExtEndpoint()
if err != nil {
log.Errorf("Error while reading external endpoint: %v", err)
@@ -74,8 +68,8 @@ func GetInternalTargets(notaryEndpoint string, username string, repo string) ([]
// GetTargets is a help function called by API to fetch signature information of a given repository.
// Per docker's convention the repository should contain the information of endpoint, i.e. it should look
// like "192.168.0.1/library/ubuntu", instead of "library/ubuntu" (fqRepo for fully-qualified repo)
-func GetTargets(notaryEndpoint string, username string, fqRepo string) ([]Target, error) {
- res := []Target{}
+func GetTargets(notaryEndpoint string, username string, fqRepo string) ([]model.Target, error) {
+ res := []model.Target{}
t, err := tokenutil.MakeToken(username, tokenutil.Notary,
[]*token.ResourceActions{
{
@@ -109,13 +103,16 @@ func GetTargets(notaryEndpoint string, username string, fqRepo string) ([]Target
log.Warningf("Failed to clear cached root.json: %s, error: %v, when repo is removed from notary the signature status maybe incorrect", rootJSON, rmErr)
}
for _, t := range targets {
- res = append(res, Target{t.Name, t.Hashes})
+ res = append(res, model.Target{
+ Tag: t.Name,
+ Hashes: t.Hashes,
+ })
}
return res, nil
}
// DigestFromTarget get a target and return the value of digest, in accordance to Docker-Content-Digest
-func DigestFromTarget(t Target) (string, error) {
+func DigestFromTarget(t model.Target) (string, error) {
sha, ok := t.Hashes["sha256"]
if !ok {
return "", fmt.Errorf("no valid hash, expecting sha256")
diff --git a/src/common/utils/notary/helper_test.go b/src/common/utils/notary/helper_test.go
index d3c11e63b..a0c2a1f34 100644
--- a/src/common/utils/notary/helper_test.go
+++ b/src/common/utils/notary/helper_test.go
@@ -17,6 +17,8 @@ import (
"encoding/json"
"fmt"
+ "github.com/goharbor/harbor/src/common/utils/notary/model"
+
notarytest "github.com/goharbor/harbor/src/common/utils/notary/test"
"github.com/goharbor/harbor/src/common/utils/test"
"github.com/goharbor/harbor/src/core/config"
@@ -81,17 +83,19 @@ func TestGetDigestFromTarget(t *testing.T) {
}
}`
- var t1 Target
+ var t1 model.Target
err := json.Unmarshal([]byte(str), &t1)
if err != nil {
panic(err)
}
hash2 := make(map[string][]byte)
- t2 := Target{"2.0", hash2}
+ t2 := model.Target{
+ Tag: "2.0",
+ Hashes: hash2,
+ }
d1, err1 := DigestFromTarget(t1)
assert.Nil(t, err1, "Unexpected error: %v", err1)
assert.Equal(t, "sha256:1359608115b94599e5641638bac5aef1ddfaa79bb96057ebf41ebc8d33acf8a7", d1, "digest mismatch")
_, err2 := DigestFromTarget(t2)
assert.NotNil(t, err2, "")
-
}
diff --git a/src/common/utils/notary/model/model.go b/src/common/utils/notary/model/model.go
new file mode 100644
index 000000000..ef83ef60c
--- /dev/null
+++ b/src/common/utils/notary/model/model.go
@@ -0,0 +1,25 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import "github.com/theupdateframework/notary/tuf/data"
+
+// Target represents the json object of a target of a docker image in notary.
+// The struct will be used when the repository is known, so it won't contain the name of a repository.
+type Target struct {
+ Tag string `json:"tag"`
+ Hashes data.Hashes `json:"hashes"`
+ // TODO: update fields as needed.
+}
diff --git a/src/common/utils/oidc/helper.go b/src/common/utils/oidc/helper.go
index 32fee3c29..30f14e209 100644
--- a/src/common/utils/oidc/helper.go
+++ b/src/common/utils/oidc/helper.go
@@ -35,20 +35,14 @@ const googleEndpoint = "https://accounts.google.com"
type providerHelper struct {
sync.Mutex
- ep endpoint
- instance atomic.Value
- setting atomic.Value
-}
-
-type endpoint struct {
- url string
- VerifyCert bool
+ instance atomic.Value
+ setting atomic.Value
+ creationTime time.Time
}
func (p *providerHelper) get() (*gooidc.Provider, error) {
if p.instance.Load() != nil {
- s := p.setting.Load().(models.OIDCSetting)
- if s.Endpoint != p.ep.url || s.VerifyCert != p.ep.VerifyCert { // relevant settings have changed, need to re-create provider.
+ if time.Now().Sub(p.creationTime) > 3*time.Second {
if err := p.create(); err != nil {
return nil, err
}
@@ -57,7 +51,7 @@ func (p *providerHelper) get() (*gooidc.Provider, error) {
p.Lock()
defer p.Unlock()
if p.instance.Load() == nil {
- if err := p.reload(); err != nil {
+ if err := p.reloadSetting(); err != nil {
return nil, err
}
if err := p.create(); err != nil {
@@ -65,7 +59,7 @@ func (p *providerHelper) get() (*gooidc.Provider, error) {
}
go func() {
for {
- if err := p.reload(); err != nil {
+ if err := p.reloadSetting(); err != nil {
log.Warningf("Failed to refresh configuration, error: %v", err)
}
time.Sleep(3 * time.Second)
@@ -73,10 +67,11 @@ func (p *providerHelper) get() (*gooidc.Provider, error) {
}()
}
}
+
return p.instance.Load().(*gooidc.Provider), nil
}
-func (p *providerHelper) reload() error {
+func (p *providerHelper) reloadSetting() error {
conf, err := config.OIDCSetting()
if err != nil {
return fmt.Errorf("failed to load OIDC setting: %v", err)
@@ -96,10 +91,7 @@ func (p *providerHelper) create() error {
return fmt.Errorf("failed to create OIDC provider, error: %v", err)
}
p.instance.Store(provider)
- p.ep = endpoint{
- url: s.Endpoint,
- VerifyCert: s.VerifyCert,
- }
+ p.creationTime = time.Now()
return nil
}
@@ -214,3 +206,19 @@ func RefreshToken(ctx context.Context, token *Token) (*Token, error) {
}
return &Token{Token: *t, IDToken: it}, nil
}
+
+// Conn wraps connection info of an OIDC endpoint
+type Conn struct {
+ URL string `json:"url"`
+ VerifyCert bool `json:"verify_cert"`
+}
+
+// TestEndpoint tests whether the endpoint is a valid OIDC endpoint.
+// A nil return value indicates that the test succeeded.
+func TestEndpoint(conn Conn) error {
+
+ // gooidc will try to call the discovery api when creating the provider and that's all we need to check
+ ctx := clientCtx(context.Background(), conn.VerifyCert)
+ _, err := gooidc.NewProvider(ctx, conn.URL)
+ return err
+}
diff --git a/src/common/utils/oidc/helper_test.go b/src/common/utils/oidc/helper_test.go
index e1e71a8b9..d706836b8 100644
--- a/src/common/utils/oidc/helper_test.go
+++ b/src/common/utils/oidc/helper_test.go
@@ -49,21 +49,20 @@ func TestMain(m *testing.M) {
func TestHelperLoadConf(t *testing.T) {
testP := &providerHelper{}
assert.Nil(t, testP.setting.Load())
- err := testP.reload()
+ err := testP.reloadSetting()
assert.Nil(t, err)
assert.Equal(t, "test", testP.setting.Load().(models.OIDCSetting).Name)
- assert.Equal(t, endpoint{}, testP.ep)
}
func TestHelperCreate(t *testing.T) {
testP := &providerHelper{}
- err := testP.reload()
+ err := testP.reloadSetting()
assert.Nil(t, err)
assert.Nil(t, testP.instance.Load())
err = testP.create()
assert.Nil(t, err)
- assert.EqualValues(t, "https://accounts.google.com", testP.ep.url)
assert.NotNil(t, testP.instance.Load())
+ assert.True(t, time.Now().Sub(testP.creationTime) < 2*time.Second)
}
func TestHelperGet(t *testing.T) {
@@ -98,3 +97,16 @@ func TestAuthCodeURL(t *testing.T) {
assert.Equal(t, "offline", q.Get("access_type"))
assert.False(t, strings.Contains(q.Get("scope"), "offline_access"))
}
+
+func TestTestEndpoint(t *testing.T) {
+ c1 := Conn{
+ URL: googleEndpoint,
+ VerifyCert: true,
+ }
+ c2 := Conn{
+ URL: "https://www.baidu.com",
+ VerifyCert: false,
+ }
+ assert.Nil(t, TestEndpoint(c1))
+ assert.NotNil(t, TestEndpoint(c2))
+}
diff --git a/src/common/utils/passports.go b/src/common/utils/passports.go
new file mode 100644
index 000000000..bce88be9e
--- /dev/null
+++ b/src/common/utils/passports.go
@@ -0,0 +1,128 @@
+package utils
+
+import (
+ "context"
+ "sync"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+)
+
+// PassportsPool holds a fixed number of passports that can be applied for and revoked. It is
+// used to control the concurrency of tasks: the pool size determines the max concurrency. Before
+// starting a goroutine to perform a task, callers must apply for a passport, and once the task
+// finishes, the passport must be revoked.
+type PassportsPool interface {
+ // Apply applies a passport from the pool.
+ Apply() bool
+ // Revoke revokes a passport to the pool
+ Revoke() bool
+}
+
+type passportsPool struct {
+ passports chan struct{}
+ stopped <-chan struct{}
+}
+
+// NewPassportsPool creates a passports pool with given size
+func NewPassportsPool(size int, stopped <-chan struct{}) PassportsPool {
+ return &passportsPool{
+ passports: make(chan struct{}, size),
+ stopped: stopped,
+ }
+}
+
+// Apply applies for a passport from the pool. A return value of 'true' means the passport
+// was acquired successfully. If no passport is available in the pool, 'Apply' waits for one.
+// If the pool is invalidated via the 'stopped' channel, 'false' is returned, meaning no more
+// passports will be dispatched.
+func (p *passportsPool) Apply() bool {
+ select {
+ case p.passports <- struct{}{}:
+ return true
+ case <-p.stopped:
+ return false
+ }
+}
+
+// Revoke returns a passport to the pool. A return value of 'true' means the passport was
+// revoked successfully; otherwise 'Revoke' waits. If the pool is invalidated via the
+// 'stopped' channel, 'false' is returned.
+func (p *passportsPool) Revoke() bool {
+ select {
+ case <-p.passports:
+ return true
+ case <-p.stopped:
+ return false
+ }
+}
+
+// LimitedConcurrentRunner runs tasks while limiting the max concurrency.
+type LimitedConcurrentRunner interface {
+ // AddTask adds a task to run
+ AddTask(task func() error)
+ // Wait waits for all tasks to finish
+ Wait()
+ // Cancel cancels all tasks; tasks that have already started will continue to run
+ Cancel()
+ // IsCancelled checks whether the context has been cancelled. This happens when a task
+ // encountered a critical error.
+ IsCancelled() bool
+}
+
+type limitedConcurrentRunner struct {
+ wg *sync.WaitGroup
+ ctx context.Context
+ cancel context.CancelFunc
+ passportsPool PassportsPool
+}
+
+// NewLimitedConcurrentRunner creates a runner
+func NewLimitedConcurrentRunner(limit int) LimitedConcurrentRunner {
+ ctx, cancel := context.WithCancel(context.Background())
+ return &limitedConcurrentRunner{
+ wg: new(sync.WaitGroup),
+ ctx: ctx,
+ cancel: cancel,
+ passportsPool: NewPassportsPool(limit, ctx.Done()),
+ }
+}
+
+// AddTask adds a task to run
+func (r *limitedConcurrentRunner) AddTask(task func() error) {
+ r.wg.Add(1)
+ go func() {
+ defer func() {
+ r.wg.Done()
+ }()
+
+ // A false return means no passport was acquired and no valid passport will be dispatched
+ // any more, e.g. because a critical error occurred and all tasks should be cancelled.
+ if ok := r.passportsPool.Apply(); !ok {
+ return
+ }
+ defer func() {
+ r.passportsPool.Revoke()
+ }()
+
+ err := task()
+ if err != nil {
+ log.Errorf("%v", err)
+ r.cancel()
+ }
+ }()
+}
+
+// Wait waits for all tasks to finish
+func (r *limitedConcurrentRunner) Wait() {
+ r.wg.Wait()
+}
+
+// Cancel cancels all tasks; tasks that have already started will continue to run
+func (r *limitedConcurrentRunner) Cancel() {
+ r.cancel()
+}
+
+// IsCancelled checks whether the context has been cancelled. This happens when a task encountered a critical error.
+func (r *limitedConcurrentRunner) IsCancelled() bool {
+ return r.ctx.Err() != nil
+}
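A hedged usage sketch of the runner defined above: concurrency is capped by the passport pool, and the first task error cancels the pool so queued tasks return without running (the loop bound and the failing task are invented for illustration):

    package main

    import (
        "fmt"

        "github.com/goharbor/harbor/src/common/utils"
    )

    func main() {
        runner := utils.NewLimitedConcurrentRunner(5) // at most 5 tasks in flight
        for i := 0; i < 20; i++ {
            i := i // capture the per-iteration value
            runner.AddTask(func() error {
                if i == 7 {
                    return fmt.Errorf("task %d failed", i) // logged, then cancels the runner
                }
                return nil
            })
        }
        runner.Wait()
        fmt.Println("cancelled:", runner.IsCancelled()) // true once any task errored
    }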
diff --git a/src/common/utils/redis/helper.go b/src/common/utils/redis/helper.go
new file mode 100644
index 000000000..5a137acdd
--- /dev/null
+++ b/src/common/utils/redis/helper.go
@@ -0,0 +1,232 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package redis
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/garyburd/redigo/redis"
+ "github.com/goharbor/harbor/src/common/utils"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/config"
+)
+
+var (
+ // ErrUnLock ...
+ ErrUnLock = errors.New("error to release the redis lock")
+)
+
+const (
+ unlockScript = `
+if redis.call("get",KEYS[1]) == ARGV[1] then
+ return redis.call("del",KEYS[1])
+else
+ return 0
+end
+`
+)
+
+// Mutex ...
+type Mutex struct {
+ Conn redis.Conn
+ key string
+ value string
+ opts Options
+}
+
+// New ...
+func New(conn redis.Conn, key, value string) *Mutex {
+ o := *DefaultOptions()
+ if value == "" {
+ value = utils.GenerateRandomString()
+ }
+ return &Mutex{conn, key, value, o}
+}
+
+// Require retries acquiring the lock until it succeeds or the retry limit is reached
+func (rm *Mutex) Require() (bool, error) {
+ var isRequired bool
+ var err error
+
+ for i := 0; i < rm.opts.maxRetry; i++ {
+ isRequired, err = rm.require()
+ if isRequired {
+ break
+ }
+ if err != nil || !isRequired {
+ time.Sleep(rm.opts.retryDelay)
+ }
+ }
+
+ return isRequired, err
+}
+
+// require gets the redis lock; for details, refer to https://redis.io/topics/distlock
+func (rm *Mutex) require() (bool, error) {
+ reply, err := redis.String(rm.Conn.Do("SET", rm.key, rm.value, "NX", "PX", int(rm.opts.expiry/time.Millisecond)))
+ if err != nil {
+ return false, err
+ }
+ return reply == "OK", nil
+}
+
+// Free releases the lock; for details, refer to https://redis.io/topics/distlock
+func (rm *Mutex) Free() (bool, error) {
+ script := redis.NewScript(1, unlockScript)
+ resp, err := redis.Int(script.Do(rm.Conn, rm.key, rm.value))
+ if err != nil {
+ return false, err
+ }
+ if resp == 0 {
+ return false, ErrUnLock
+ }
+ return true, nil
+}
+
+// Options ...
+type Options struct {
+ retryDelay time.Duration
+ expiry time.Duration
+ maxRetry int
+}
+
+var (
+ opt *Options
+ optOnce sync.Once
+
+ defaultDelay = int64(1) // 1 second
+ defaultMaxRetry = 600
+ defaultExpire = int64(2 * time.Hour / time.Second) // 2 hours
+)
+
+// DefaultOptions ...
+func DefaultOptions() *Options {
+ optOnce.Do(func() {
+ retryDelay, err := strconv.ParseInt(os.Getenv("REDIS_LOCK_RETRY_DELAY"), 10, 64)
+ if err != nil || retryDelay < 0 {
+ retryDelay = defaultDelay
+ }
+
+ maxRetry, err := strconv.Atoi(os.Getenv("REDIS_LOCK_MAX_RETRY"))
+ if err != nil || maxRetry < 0 {
+ maxRetry = defaultMaxRetry
+ }
+
+ expire, err := strconv.ParseInt(os.Getenv("REDIS_LOCK_EXPIRE"), 10, 64)
+ if err != nil || expire < 0 {
+ expire = defaultExpire
+ }
+
+ opt = &Options{
+ retryDelay: time.Duration(retryDelay) * time.Second,
+ expiry: time.Duration(expire) * time.Second,
+ maxRetry: maxRetry,
+ }
+ })
+
+ return opt
+}
+
+var (
+ pool *redis.Pool
+ poolOnce sync.Once
+
+ poolMaxIdle = 200
+ poolMaxActive = 1000
+ poolIdleTimeout int64 = 180
+)
+
+// DefaultPool return default redis pool
+func DefaultPool() *redis.Pool {
+ poolOnce.Do(func() {
+ maxIdle, err := strconv.Atoi(os.Getenv("REDIS_POOL_MAX_IDLE"))
+ if err != nil || maxIdle < 0 {
+ maxIdle = poolMaxIdle
+ }
+
+ maxActive, err := strconv.Atoi(os.Getenv("REDIS_POOL_MAX_ACTIVE"))
+ if err != nil || maxActive < 0 {
+ maxActive = poolMaxActive
+ }
+
+ idleTimeout, err := strconv.ParseInt(os.Getenv("REDIS_POOL_IDLE_TIMEOUT"), 10, 64)
+ if err != nil || idleTimeout < 0 {
+ idleTimeout = poolIdleTimeout
+ }
+
+ pool = &redis.Pool{
+ Dial: func() (redis.Conn, error) {
+ url := config.GetRedisOfRegURL()
+ if url == "" {
+ url = "redis://localhost:6379/1"
+ }
+
+ return redis.DialURL(url)
+ },
+ TestOnBorrow: func(c redis.Conn, t time.Time) error {
+ _, err := c.Do("PING")
+ return err
+ },
+ MaxIdle: maxIdle,
+ MaxActive: maxActive,
+ IdleTimeout: time.Duration(idleTimeout) * time.Second,
+ Wait: true,
+ }
+ })
+
+ return pool
+}
+
+// RequireLock acquires and returns a lock for the given key
+func RequireLock(key string, conns ...redis.Conn) (*Mutex, error) {
+ var conn redis.Conn
+ if len(conns) > 0 {
+ conn = conns[0]
+ } else {
+ conn = DefaultPool().Get()
+ }
+
+ m := New(conn, key, utils.GenerateRandomString())
+ ok, err := m.Require()
+ if err != nil {
+ return nil, fmt.Errorf("require redis lock failed: %v", err)
+ }
+
+ if !ok {
+ return nil, fmt.Errorf("unable to require lock for %s", key)
+ }
+
+ return m, nil
+}
+
+// FreeLock releases the lock and closes its redis connection
+func FreeLock(m *Mutex) error {
+ if _, err := m.Free(); err != nil {
+ log.Warningf("failed to free lock %s, error: %v", m.key, err)
+ return err
+ }
+
+ if err := m.Conn.Close(); err != nil {
+ log.Warningf("failed to close the redis con for lock %s, error: %v", m.key, err)
+ return err
+ }
+
+ return nil
+}
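Free goes through the Lua unlockScript rather than a bare DEL because the lock may expire and be re-acquired by another client between a GET and a DEL; the value comparison and the deletion must run atomically, and the random value identifies the holder (the single-instance pattern from the distlock page referenced above). A standalone sketch of that compare-and-delete step with redigo (illustrative package, mirrors the script defined earlier):

    package redisx

    import "github.com/garyburd/redigo/redis"

    // safeUnlock deletes the key only while it still holds our value; it
    // returns true when the lock was actually released by this holder.
    func safeUnlock(conn redis.Conn, key, value string) (bool, error) {
        script := redis.NewScript(1, `
    if redis.call("get", KEYS[1]) == ARGV[1] then
        return redis.call("del", KEYS[1])
    else
        return 0
    end`)
        n, err := redis.Int(script.Do(conn, key, value))
        return n == 1, err
    }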
diff --git a/src/common/utils/redis/helper_test.go b/src/common/utils/redis/helper_test.go
new file mode 100644
index 000000000..71572bc01
--- /dev/null
+++ b/src/common/utils/redis/helper_test.go
@@ -0,0 +1,102 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package redis
+
+import (
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/garyburd/redigo/redis"
+ "github.com/goharbor/harbor/src/common/utils"
+ "github.com/stretchr/testify/assert"
+)
+
+const testingRedisHost = "REDIS_HOST"
+
+func init() {
+ os.Setenv("REDIS_LOCK_MAX_RETRY", "5")
+}
+
+func TestRedisLock(t *testing.T) {
+ con, err := redis.Dial("tcp", fmt.Sprintf("%s:%d", getRedisHost(), 6379))
+ assert.Nil(t, err)
+ defer con.Close()
+
+ rm := New(con, "test-redis-lock", "test-value")
+
+ successLock, err := rm.Require()
+ assert.Nil(t, err)
+ assert.True(t, successLock)
+
+ time.Sleep(2 * time.Second)
+ _, err = rm.Require()
+ assert.NotNil(t, err)
+
+ successUnLock, err := rm.Free()
+ assert.Nil(t, err)
+ assert.True(t, successUnLock)
+
+}
+
+func TestRequireLock(t *testing.T) {
+ assert := assert.New(t)
+
+ conn, err := redis.Dial("tcp", fmt.Sprintf("%s:%d", getRedisHost(), 6379))
+ assert.Nil(err)
+ defer conn.Close()
+
+ if l, err := RequireLock(utils.GenerateRandomString(), conn); assert.Nil(err) {
+ l.Free()
+ }
+
+ if l, err := RequireLock(utils.GenerateRandomString()); assert.Nil(err) {
+ FreeLock(l)
+ }
+
+ key := utils.GenerateRandomString()
+ if l, err := RequireLock(key); assert.Nil(err) {
+ defer FreeLock(l)
+
+ _, err = RequireLock(key)
+ assert.Error(err)
+ }
+}
+
+func TestFreeLock(t *testing.T) {
+ assert := assert.New(t)
+
+ if l, err := RequireLock(utils.GenerateRandomString()); assert.Nil(err) {
+ assert.Nil(FreeLock(l))
+ }
+
+ conn, err := redis.Dial("tcp", fmt.Sprintf("%s:%d", getRedisHost(), 6379))
+ assert.Nil(err)
+
+ if l, err := RequireLock(utils.GenerateRandomString(), conn); assert.Nil(err) {
+ conn.Close()
+ assert.Error(FreeLock(l))
+ }
+}
+
+func getRedisHost() string {
+ redisHost := os.Getenv(testingRedisHost)
+ if redisHost == "" {
+ redisHost = "127.0.0.1" // for local test
+ }
+
+ return redisHost
+}
diff --git a/src/common/utils/registry/auth/tokenauthorizer.go b/src/common/utils/registry/auth/tokenauthorizer.go
index cac7c1c4a..1f1c95569 100644
--- a/src/common/utils/registry/auth/tokenauthorizer.go
+++ b/src/common/utils/registry/auth/tokenauthorizer.go
@@ -15,6 +15,7 @@
package auth
import (
+ "errors"
"fmt"
"net/http"
"net/url"
@@ -111,7 +112,12 @@ func (t *tokenAuthorizer) Modify(req *http.Request) error {
}
}
- req.Header.Add(http.CanonicalHeaderKey("Authorization"), fmt.Sprintf("Bearer %s", token.Token))
+ tk := token.GetToken()
+ if len(tk) == 0 {
+ return errors.New("empty token content")
+ }
+
+ req.Header.Add(http.CanonicalHeaderKey("Authorization"), fmt.Sprintf("Bearer %s", tk))
return nil
}
diff --git a/src/common/utils/registry/auth/util.go b/src/common/utils/registry/auth/util.go
index ad86229d8..c3e8e217e 100644
--- a/src/common/utils/registry/auth/util.go
+++ b/src/common/utils/registry/auth/util.go
@@ -30,7 +30,7 @@ const (
service = "harbor-registry"
)
-// GetToken requests a token against the endpoint using credetial provided
+// GetToken requests a token against the endpoint using credential provided
func GetToken(endpoint string, insecure bool, credential Credential,
scopes []*token.ResourceActions) (*models.Token, error) {
client := &http.Client{
diff --git a/src/common/utils/registry/registry.go b/src/common/utils/registry/registry.go
index c835d4892..563c25ed2 100644
--- a/src/common/utils/registry/registry.go
+++ b/src/common/utils/registry/registry.go
@@ -22,8 +22,6 @@ import (
"net/url"
"strings"
- // "time"
-
commonhttp "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/utils"
)
@@ -130,9 +128,18 @@ func (r *Registry) Catalog() ([]string, error) {
return repos, nil
}
-// Ping ...
+// Ping checks the registry endpoint with a HEAD request
func (r *Registry) Ping() error {
- req, err := http.NewRequest(http.MethodHead, buildPingURL(r.Endpoint.String()), nil)
+ return r.ping(http.MethodHead)
+}
+
+// PingGet checks the registry endpoint with a GET request
+func (r *Registry) PingGet() error {
+ return r.ping(http.MethodGet)
+}
+
+func (r *Registry) ping(method string) error {
+ req, err := http.NewRequest(method, buildPingURL(r.Endpoint.String()), nil)
if err != nil {
return err
}
diff --git a/src/common/utils/registry/repository.go b/src/common/utils/registry/repository.go
index 87f06dc43..7a4a1c6c7 100644
--- a/src/common/utils/registry/repository.go
+++ b/src/common/utils/registry/repository.go
@@ -25,11 +25,9 @@ import (
"sort"
"strconv"
"strings"
- // "time"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
-
commonhttp "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/utils"
)
@@ -211,7 +209,7 @@ func (r *Repository) PushManifest(reference, mediaType string, payload []byte) (
defer resp.Body.Close()
- if resp.StatusCode == http.StatusCreated {
+ if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusOK {
digest = resp.Header.Get(http.CanonicalHeaderKey("Docker-Content-Digest"))
return
}
@@ -407,6 +405,7 @@ func (r *Repository) monolithicBlobUpload(location, digest string, size int64, d
if err != nil {
return err
}
+ req.ContentLength = size
resp, err := r.client.Do(req)
if err != nil {
diff --git a/src/common/utils/test/test.go b/src/common/utils/test/test.go
index 28046e5db..ad8048296 100644
--- a/src/common/utils/test/test.go
+++ b/src/common/utils/test/test.go
@@ -22,10 +22,11 @@ import (
"strings"
"fmt"
- "github.com/goharbor/harbor/src/common"
- "github.com/gorilla/mux"
"os"
"sort"
+
+ "github.com/goharbor/harbor/src/common"
+ "github.com/gorilla/mux"
)
// RequestHandlerMapping is a mapping between request and its handler
@@ -120,7 +121,7 @@ func GetUnitTestConfig() map[string]interface{} {
common.LDAPGroupBaseDN: "dc=example,dc=com",
common.LDAPGroupAttributeName: "cn",
common.LDAPGroupSearchScope: 2,
- common.LdapGroupAdminDn: "cn=harbor_users,ou=groups,dc=example,dc=com",
+ common.LDAPGroupAdminDn: "cn=harbor_users,ou=groups,dc=example,dc=com",
common.WithNotary: "false",
common.WithChartMuseum: "false",
common.SelfRegistration: "true",
@@ -141,3 +142,33 @@ func TraceCfgMap(cfgs map[string]interface{}) {
fmt.Printf("%v=%v\n", k, cfgs[k])
}
}
+
+// CheckSetsEqual checks whether two int sets are equal
+func CheckSetsEqual(setA, setB []int) bool {
+ if len(setA) != len(setB) {
+ return false
+ }
+ type void struct{}
+ var exist void
+ setAll := make(map[int]void)
+ for _, r := range setA {
+ setAll[r] = exist
+ }
+ for _, r := range setB {
+ if _, ok := setAll[r]; !ok {
+ return false
+ }
+ }
+
+ setAll = make(map[int]void)
+ for _, r := range setB {
+ setAll[r] = exist
+ }
+ for _, r := range setA {
+ if _, ok := setAll[r]; !ok {
+ return false
+ }
+ }
+ return true
+}
diff --git a/src/common/utils/utils.go b/src/common/utils/utils.go
index cea54d342..24a12258d 100644
--- a/src/common/utils/utils.go
+++ b/src/common/utils/utils.go
@@ -230,7 +230,14 @@ func GetStrValueOfAnyType(value interface{}) string {
}
strVal = string(b)
} else {
- strVal = fmt.Sprintf("%v", value)
+ switch val := value.(type) {
+ case float64:
+ strVal = strconv.FormatFloat(val, 'f', -1, 64)
+ case float32:
+ strVal = strconv.FormatFloat(float64(val), 'f', -1, 32)
+ default:
+ strVal = fmt.Sprintf("%v", value)
+ }
}
return strVal
}
@@ -255,3 +262,8 @@ func IsContainIllegalChar(s string, illegalChar []string) bool {
}
return false
}
+
+// IsDigest checks whether the reference is a sha256 digest: "sha256:" followed by 64 hex characters.
+func IsDigest(ref string) bool {
+ return strings.HasPrefix(ref, "sha256:") && len(ref) == 71
+}
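The explicit float cases in GetStrValueOfAnyType exist because fmt's %v verb formats floats via %g, which switches to scientific notation for large magnitudes; strconv.FormatFloat with the 'f' format keeps the plain decimal form that callers expect. A quick demonstration:

    package main

    import (
        "fmt"
        "strconv"
    )

    func main() {
        v := 1048576.1
        fmt.Printf("%v\n", v)                            // 1.0485761e+06
        fmt.Println(strconv.FormatFloat(v, 'f', -1, 64)) // 1048576.1
    }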
diff --git a/src/common/utils/utils_test.go b/src/common/utils/utils_test.go
index 66c4bca0f..437f16152 100644
--- a/src/common/utils/utils_test.go
+++ b/src/common/utils/utils_test.go
@@ -381,3 +381,37 @@ func TestTrimLower(t *testing.T) {
})
}
}
+
+func TestGetStrValueOfAnyType(t *testing.T) {
+ type args struct {
+ value interface{}
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ }{
+ {"float", args{float32(1048576.1)}, "1048576.1"},
+ {"float", args{float64(1048576.12)}, "1048576.12"},
+ {"float", args{1048576.000}, "1048576"},
+ {"int", args{1048576}, "1048576"},
+ {"int", args{9223372036854775807}, "9223372036854775807"},
+ {"string", args{"hello world"}, "hello world"},
+ {"bool", args{true}, "true"},
+ {"bool", args{false}, "false"},
+ {"map", args{map[string]interface{}{"key1": "value1"}}, "{\"key1\":\"value1\"}"},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := GetStrValueOfAnyType(tt.args.value); got != tt.want {
+ t.Errorf("GetStrValueOfAnyType() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestIsDigest(t *testing.T) {
+ assert := assert.New(t)
+ assert.False(IsDigest("latest"))
+ assert.True(IsDigest("sha256:1359608115b94599e5641638bac5aef1ddfaa79bb96057ebf41ebc8d33acf8a7"))
+}
diff --git a/src/core/api/api_test.go b/src/core/api/api_test.go
index 8b9d3bfaf..f8e1ccdd0 100644
--- a/src/core/api/api_test.go
+++ b/src/core/api/api_test.go
@@ -207,6 +207,17 @@ func TestMain(m *testing.M) {
if err := prepare(); err != nil {
panic(err)
}
+ dao.ExecuteBatchSQL([]string{
+ "insert into user_group (group_name, group_type, ldap_group_dn) values ('test_group_01_api', 1, 'cn=harbor_users,ou=sample,ou=vmware,dc=harbor,dc=com')",
+ "insert into user_group (group_name, group_type, ldap_group_dn) values ('vsphere.local\\administrators', 2, '')",
+ })
+
+ defer dao.ExecuteBatchSQL([]string{
+ "delete from harbor_label",
+ "delete from robot",
+ "delete from user_group",
+ "delete from project_member",
+ })
ret := m.Run()
clean()
diff --git a/src/core/api/base.go b/src/core/api/base.go
index bea127d0b..7b4b4bade 100644
--- a/src/core/api/base.go
+++ b/src/core/api/base.go
@@ -15,9 +15,15 @@
package api
import (
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/goharbor/harbor/src/pkg/retention"
+ "github.com/goharbor/harbor/src/pkg/scheduler"
+
"net/http"
- "errors"
"github.com/ghodss/yaml"
"github.com/goharbor/harbor/src/common/api"
"github.com/goharbor/harbor/src/common/security"
@@ -25,12 +31,24 @@ import (
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/filter"
"github.com/goharbor/harbor/src/core/promgr"
+ "github.com/goharbor/harbor/src/pkg/project"
+ "github.com/goharbor/harbor/src/pkg/repository"
)
const (
yamlFileContentType = "application/x-yaml"
)
+// the managers/controllers used globally
+var (
+ projectMgr project.Manager
+ repositoryMgr repository.Manager
+ retentionScheduler scheduler.Scheduler
+ retentionMgr retention.Manager
+ retentionLauncher retention.Launcher
+ retentionController retention.APIController
+)
+
// BaseController ...
type BaseController struct {
api.BaseAPI
@@ -41,13 +59,6 @@ type BaseController struct {
ProjectMgr promgr.ProjectManager
}
-const (
- // ReplicationJobType ...
- ReplicationJobType = "replication"
- // ScanJobType ...
- ScanJobType = "scan"
-)
-
// Prepare inits security context and project manager from request
// context
func (b *BaseController) Prepare() {
@@ -85,12 +96,50 @@ func (b *BaseController) WriteYamlData(object interface{}) {
w := b.Ctx.ResponseWriter
w.Header().Set("Content-Type", yamlFileContentType)
w.WriteHeader(http.StatusOK)
- w.Write(yData)
+ _, _ = w.Write(yData)
}
// Init related objects/configurations for the API controllers
func Init() error {
registerHealthCheckers()
+
+ // init chart controller
+ if err := initChartController(); err != nil {
+ return err
+ }
+
+ // init project manager
+ initProjectManager()
+
+ // init repository manager
+ initRepositoryManager()
+
+ initRetentionScheduler()
+
+ retentionMgr = retention.NewManager()
+
+ retentionLauncher = retention.NewLauncher(projectMgr, repositoryMgr, retentionMgr)
+
+ retentionController = retention.NewAPIController(retentionMgr, projectMgr, repositoryMgr, retentionScheduler, retentionLauncher)
+
+ callbackFun := func(p interface{}) error {
+ str, ok := p.(string)
+ if !ok {
+ return fmt.Errorf("the type of param %v isn't string", p)
+ }
+ param := &retention.TriggerParam{}
+ if err := json.Unmarshal([]byte(str), param); err != nil {
+ return fmt.Errorf("failed to unmarshal the param: %v", err)
+ }
+ _, err := retentionController.TriggerRetentionExec(param.PolicyID, param.Trigger, false)
+ return err
+ }
+ err := scheduler.Register(retention.SchedulerCallback, callbackFun)
+
+ return err
+}
+
+func initChartController() error {
// If chart repository is not enabled then directly return
if !config.WithChartMuseum() {
return nil
@@ -102,6 +151,17 @@ func Init() error {
}
chartController = chartCtl
-
return nil
}
+
+func initProjectManager() {
+ projectMgr = project.New()
+}
+
+func initRepositoryManager() {
+ repositoryMgr = repository.New(projectMgr, chartController)
+}
+
+func initRetentionScheduler() {
+ retentionScheduler = scheduler.GlobalScheduler
+}
diff --git a/src/core/api/chart_repository.go b/src/core/api/chart_repository.go
old mode 100644
new mode 100755
index e44f65174..dd9be934f
--- a/src/core/api/chart_repository.go
+++ b/src/core/api/chart_repository.go
@@ -12,13 +12,14 @@ import (
"net/url"
"strings"
- "github.com/goharbor/harbor/src/common"
- "github.com/goharbor/harbor/src/core/label"
-
"github.com/goharbor/harbor/src/chartserver"
+ "github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/rbac"
hlog "github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/label"
+
+ "github.com/goharbor/harbor/src/core/middlewares"
rep_event "github.com/goharbor/harbor/src/replication/event"
"github.com/goharbor/harbor/src/replication/model"
)
@@ -46,6 +47,11 @@ const (
// chartController is a singleton instance
var chartController *chartserver.Controller
+// GetChartController returns the chart controller
+func GetChartController() *chartserver.Controller {
+ return chartController
+}
+
// ChartRepositoryAPI provides related API handlers for the chart repository APIs
type ChartRepositoryAPI struct {
// The base controller to provide common utilities
@@ -526,7 +532,7 @@ func initializeChartController() (*chartserver.Controller, error) {
return nil, errors.New("Endpoint URL of chart storage server is malformed")
}
- controller, err := chartserver.NewController(url)
+ controller, err := chartserver.NewController(url, middlewares.New(middlewares.ChartMiddlewares).Create())
if err != nil {
return nil, errors.New("Failed to initialize chart API controller")
}
diff --git a/src/core/api/email_test.go b/src/core/api/email_test.go
index c38fbbb29..7fff60776 100644
--- a/src/core/api/email_test.go
+++ b/src/core/api/email_test.go
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// +build !darwin
+
package api
import (
diff --git a/src/core/api/harborapi_test.go b/src/core/api/harborapi_test.go
index a0ca5c8e4..b6ed840b2 100644
--- a/src/core/api/harborapi_test.go
+++ b/src/core/api/harborapi_test.go
@@ -35,12 +35,14 @@ import (
testutils "github.com/goharbor/harbor/src/common/utils/test"
api_models "github.com/goharbor/harbor/src/core/api/models"
apimodels "github.com/goharbor/harbor/src/core/api/models"
+ quota "github.com/goharbor/harbor/src/core/api/quota"
_ "github.com/goharbor/harbor/src/core/auth/db"
_ "github.com/goharbor/harbor/src/core/auth/ldap"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/filter"
+ "github.com/goharbor/harbor/src/pkg/notification"
"github.com/goharbor/harbor/src/replication/model"
- "github.com/goharbor/harbor/tests/apitests/apilib"
+ "github.com/goharbor/harbor/src/testing/apitests/apilib"
)
const (
@@ -103,6 +105,7 @@ func init() {
beego.Router("/api/users/:id/permissions", &UserAPI{}, "get:ListUserPermissions")
beego.Router("/api/users/:id/sysadmin", &UserAPI{}, "put:ToggleUserAdminRole")
beego.Router("/api/projects/:id([0-9]+)/logs", &ProjectAPI{}, "get:Logs")
+ beego.Router("/api/projects/:id([0-9]+)/summary", &ProjectAPI{}, "get:Summary")
beego.Router("/api/projects/:id([0-9]+)/_deletable", &ProjectAPI{}, "get:Deletable")
beego.Router("/api/projects/:id([0-9]+)/metadatas/?:name", &MetadataAPI{}, "get:Get")
beego.Router("/api/projects/:id([0-9]+)/metadatas/", &MetadataAPI{}, "post:Post")
@@ -144,6 +147,8 @@ func init() {
beego.Router("/api/system/gc/:id([0-9]+)/log", &GCAPI{}, "get:GetLog")
beego.Router("/api/system/gc/schedule", &GCAPI{}, "get:Get;put:Put;post:Post")
beego.Router("/api/system/scanAll/schedule", &ScanAllAPI{}, "get:Get;put:Put;post:Post")
+ beego.Router("/api/system/CVEWhitelist", &SysCVEWhitelistAPI{}, "get:Get;put:Put")
+ beego.Router("/api/system/oidc/ping", &OIDCAPI{}, "post:Ping")
beego.Router("/api/projects/:pid([0-9]+)/robots/", &RobotAPI{}, "post:Post;get:List")
beego.Router("/api/projects/:pid([0-9]+)/robots/:id([0-9]+)", &RobotAPI{}, "get:Get;put:Put;delete:Delete")
@@ -157,6 +162,22 @@ func init() {
beego.Router("/api/replication/policies", &ReplicationPolicyAPI{}, "get:List;post:Create")
beego.Router("/api/replication/policies/:id([0-9]+)", &ReplicationPolicyAPI{}, "get:Get;put:Update;delete:Delete")
+ beego.Router("/api/retentions/metadatas", &RetentionAPI{}, "get:GetMetadatas")
+ beego.Router("/api/retentions/:id", &RetentionAPI{}, "get:GetRetention")
+ beego.Router("/api/retentions", &RetentionAPI{}, "post:CreateRetention")
+ beego.Router("/api/retentions/:id", &RetentionAPI{}, "put:UpdateRetention")
+ beego.Router("/api/retentions/:id/executions", &RetentionAPI{}, "post:TriggerRetentionExec")
+ beego.Router("/api/retentions/:id/executions/:eid", &RetentionAPI{}, "patch:OperateRetentionExec")
+ beego.Router("/api/retentions/:id/executions", &RetentionAPI{}, "get:ListRetentionExecs")
+ beego.Router("/api/retentions/:id/executions/:eid/tasks", &RetentionAPI{}, "get:ListRetentionExecTasks")
+ beego.Router("/api/retentions/:id/executions/:eid/tasks/:tid", &RetentionAPI{}, "get:GetRetentionExecTaskLog")
+
+ beego.Router("/api/projects/:pid([0-9]+)/webhook/policies", &NotificationPolicyAPI{}, "get:List;post:Post")
+ beego.Router("/api/projects/:pid([0-9]+)/webhook/policies/:id([0-9]+)", &NotificationPolicyAPI{})
+ beego.Router("/api/projects/:pid([0-9]+)/webhook/policies/test", &NotificationPolicyAPI{}, "post:Test")
+ beego.Router("/api/projects/:pid([0-9]+)/webhook/lasttrigger", &NotificationPolicyAPI{}, "get:ListGroupByEventType")
+ beego.Router("/api/projects/:pid([0-9]+)/webhook/jobs/", &NotificationJobAPI{}, "get:List")
+
// Charts are controlled under projects
chartRepositoryAPIType := &ChartRepositoryAPI{}
beego.Router("/api/chartrepo/health", chartRepositoryAPIType, "get:GetHealthStatus")
@@ -178,16 +199,30 @@ func init() {
beego.Router("/api/chartrepo/:repo/charts/:name/:version/labels", chartLabelAPIType, "get:GetLabels;post:MarkLabel")
beego.Router("/api/chartrepo/:repo/charts/:name/:version/labels/:id([0-9]+)", chartLabelAPIType, "delete:RemoveLabel")
+ quotaAPIType := &QuotaAPI{}
+ beego.Router("/api/quotas", quotaAPIType, "get:List")
+ beego.Router("/api/quotas/:id([0-9]+)", quotaAPIType, "get:Get;put:Put")
+
+ beego.Router("/api/internal/switchquota", &InternalAPI{}, "put:SwitchQuota")
+ beego.Router("/api/internal/syncquota", &InternalAPI{}, "post:SyncQuota")
+
// syncRegistry
if err := SyncRegistry(config.GlobalProjectMgr); err != nil {
log.Fatalf("failed to sync repositories from registry: %v", err)
}
+ if err := quota.Sync(config.GlobalProjectMgr, false); err != nil {
+ log.Fatalf("failed to sync quota from backend: %v", err)
+ }
+
// Init user Info
admin = &usrInfo{adminName, adminPwd}
unknownUsr = &usrInfo{"unknown", "unknown"}
testUser = &usrInfo{TestUserName, TestUserPwd}
+ // Init notification related check map
+ notification.Init()
+
// Init mock jobservice
mockServer := test.NewJobServiceServer()
defer mockServer.Close()
@@ -452,6 +487,23 @@ func (a testapi) ProjectDeletable(prjUsr usrInfo, projectID int64) (int, bool, e
return code, deletable.Deletable, nil
}
+// ProjectSummary returns summary for the project
+func (a testapi) ProjectSummary(prjUsr usrInfo, projectID string) (int, apilib.ProjectSummary, error) {
+ _sling := sling.New().Get(a.basePath)
+
+ // create api path
+ path := "api/projects/" + projectID + "/summary"
+ _sling = _sling.Path(path)
+
+ var successPayload apilib.ProjectSummary
+
+ httpStatusCode, body, err := request(_sling, jsonAcceptHeader, prjUsr)
+ if err == nil && httpStatusCode == 200 {
+ err = json.Unmarshal(body, &successPayload)
+ }
+ return httpStatusCode, successPayload, err
+}
+
// -------------------------Member Test---------------------------------------//
// Return relevant role members of projectID
@@ -554,7 +606,7 @@ func (a testapi) GetRepos(authInfo usrInfo, projectID, keyword string) (
return code, nil, nil
}
-func (a testapi) GetTag(authInfo usrInfo, repository string, tag string) (int, *tagResp, error) {
+func (a testapi) GetTag(authInfo usrInfo, repository string, tag string) (int, *models.TagResp, error) {
_sling := sling.New().Get(a.basePath).Path(fmt.Sprintf("/api/repositories/%s/tags/%s", repository, tag))
code, data, err := request(_sling, jsonAcceptHeader, authInfo)
if err != nil {
@@ -566,7 +618,7 @@ func (a testapi) GetTag(authInfo usrInfo, repository string, tag string) (int, *
return code, nil, nil
}
- result := tagResp{}
+ result := models.TagResp{}
if err := json.Unmarshal(data, &result); err != nil {
return 0, nil, err
}
@@ -590,7 +642,7 @@ func (a testapi) GetReposTags(authInfo usrInfo, repoName string) (int, interface
return httpStatusCode, body, nil
}
- result := []tagResp{}
+ result := []models.TagResp{}
if err := json.Unmarshal(body, &result); err != nil {
return 0, nil, err
}
@@ -1211,3 +1263,55 @@ func (a testapi) RegistryUpdate(authInfo usrInfo, registryID int64, req *apimode
return code, nil
}
+
+// QuotasGet returns quotas
+func (a testapi) QuotasGet(query *apilib.QuotaQuery, authInfo ...usrInfo) (int, []apilib.Quota, error) {
+ _sling := sling.New().Get(a.basePath).
+ Path("api/quotas").
+ QueryStruct(query)
+
+ var successPayload []apilib.Quota
+
+ var httpStatusCode int
+ var err error
+ var body []byte
+ if len(authInfo) > 0 {
+ httpStatusCode, body, err = request(_sling, jsonAcceptHeader, authInfo[0])
+ } else {
+ httpStatusCode, body, err = request(_sling, jsonAcceptHeader)
+ }
+
+ if err == nil && httpStatusCode == 200 {
+ err = json.Unmarshal(body, &successPayload)
+ } else {
+ log.Println(string(body))
+ }
+
+ return httpStatusCode, successPayload, err
+}
+
+// Return specific quota
+func (a testapi) QuotasGetByID(authInfo usrInfo, quotaID string) (int, apilib.Quota, error) {
+ _sling := sling.New().Get(a.basePath)
+
+ // create api path
+ path := "api/quotas/" + quotaID
+ _sling = _sling.Path(path)
+
+ var successPayload apilib.Quota
+
+ httpStatusCode, body, err := request(_sling, jsonAcceptHeader, authInfo)
+ if err == nil && httpStatusCode == 200 {
+ err = json.Unmarshal(body, &successPayload)
+ }
+ return httpStatusCode, successPayload, err
+}
+
+// Update spec for the quota
+func (a testapi) QuotasPut(authInfo usrInfo, quotaID string, req models.QuotaUpdateRequest) (int, error) {
+ path := "/api/quotas/" + quotaID
+ _sling := sling.New().Put(a.basePath).Path(path).BodyJSON(req)
+
+ httpStatusCode, _, err := request(_sling, jsonAcceptHeader, authInfo)
+ return httpStatusCode, err
+}
diff --git a/src/core/api/health.go b/src/core/api/health.go
index 1a43ab68e..0d4ef2cac 100644
--- a/src/core/api/health.go
+++ b/src/core/api/health.go
@@ -34,8 +34,9 @@ import (
)
var (
- timeout = 60 * time.Second
- healthCheckerRegistry = map[string]health.Checker{}
+ timeout = 60 * time.Second
+ // HealthCheckerRegistry ...
+ HealthCheckerRegistry = map[string]health.Checker{}
)
type overallHealthStatus struct {
@@ -67,11 +68,11 @@ type HealthAPI struct {
func (h *HealthAPI) CheckHealth() {
var isHealthy healthy = true
components := []*componentHealthStatus{}
- c := make(chan *componentHealthStatus, len(healthCheckerRegistry))
- for name, checker := range healthCheckerRegistry {
+ c := make(chan *componentHealthStatus, len(HealthCheckerRegistry))
+ for name, checker := range HealthCheckerRegistry {
go check(name, checker, timeout, c)
}
- for i := 0; i < len(healthCheckerRegistry); i++ {
+ for i := 0; i < len(HealthCheckerRegistry); i++ {
componentStatus := <-c
if len(componentStatus.Error) != 0 {
isHealthy = false
@@ -212,10 +213,10 @@ func jobserviceHealthChecker() health.Checker {
}
func registryHealthChecker() health.Checker {
- url := getRegistryURL() + "/v2"
+ url := getRegistryURL() + "/"
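+ // probe the root endpoint instead of "/v2": the root is served without auth,
+ // so a healthy registry answers 200 rather than 401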
timeout := 60 * time.Second
period := 10 * time.Second
- checker := HTTPStatusCodeHealthChecker(http.MethodGet, url, nil, timeout, http.StatusUnauthorized)
+ checker := HTTPStatusCodeHealthChecker(http.MethodGet, url, nil, timeout, http.StatusOK)
return PeriodicHealthChecker(checker, period)
}
@@ -290,21 +291,21 @@ func redisHealthChecker() health.Checker {
}
func registerHealthCheckers() {
- healthCheckerRegistry["core"] = coreHealthChecker()
- healthCheckerRegistry["portal"] = portalHealthChecker()
- healthCheckerRegistry["jobservice"] = jobserviceHealthChecker()
- healthCheckerRegistry["registry"] = registryHealthChecker()
- healthCheckerRegistry["registryctl"] = registryCtlHealthChecker()
- healthCheckerRegistry["database"] = databaseHealthChecker()
- healthCheckerRegistry["redis"] = redisHealthChecker()
+ HealthCheckerRegistry["core"] = coreHealthChecker()
+ HealthCheckerRegistry["portal"] = portalHealthChecker()
+ HealthCheckerRegistry["jobservice"] = jobserviceHealthChecker()
+ HealthCheckerRegistry["registry"] = registryHealthChecker()
+ HealthCheckerRegistry["registryctl"] = registryCtlHealthChecker()
+ HealthCheckerRegistry["database"] = databaseHealthChecker()
+ HealthCheckerRegistry["redis"] = redisHealthChecker()
if config.WithChartMuseum() {
- healthCheckerRegistry["chartmuseum"] = chartmuseumHealthChecker()
+ HealthCheckerRegistry["chartmuseum"] = chartmuseumHealthChecker()
}
if config.WithClair() {
- healthCheckerRegistry["clair"] = clairHealthChecker()
+ HealthCheckerRegistry["clair"] = clairHealthChecker()
}
if config.WithNotary() {
- healthCheckerRegistry["notary"] = notaryHealthChecker()
+ HealthCheckerRegistry["notary"] = notaryHealthChecker()
}
}
diff --git a/src/core/api/health_test.go b/src/core/api/health_test.go
index 8426a74b1..c98d021b5 100644
--- a/src/core/api/health_test.go
+++ b/src/core/api/health_test.go
@@ -92,9 +92,9 @@ func fakeHealthChecker(healthy bool) health.Checker {
}
func TestCheckHealth(t *testing.T) {
// component01: healthy, component02: healthy => status: healthy
- healthCheckerRegistry = map[string]health.Checker{}
- healthCheckerRegistry["component01"] = fakeHealthChecker(true)
- healthCheckerRegistry["component02"] = fakeHealthChecker(true)
+ HealthCheckerRegistry = map[string]health.Checker{}
+ HealthCheckerRegistry["component01"] = fakeHealthChecker(true)
+ HealthCheckerRegistry["component02"] = fakeHealthChecker(true)
status := map[string]interface{}{}
err := handleAndParse(&testingRequest{
method: http.MethodGet,
@@ -104,9 +104,9 @@ func TestCheckHealth(t *testing.T) {
assert.Equal(t, "healthy", status["status"].(string))
// component01: healthy, component02: unhealthy => status: unhealthy
- healthCheckerRegistry = map[string]health.Checker{}
- healthCheckerRegistry["component01"] = fakeHealthChecker(true)
- healthCheckerRegistry["component02"] = fakeHealthChecker(false)
+ HealthCheckerRegistry = map[string]health.Checker{}
+ HealthCheckerRegistry["component01"] = fakeHealthChecker(true)
+ HealthCheckerRegistry["component02"] = fakeHealthChecker(false)
status = map[string]interface{}{}
err = handleAndParse(&testingRequest{
method: http.MethodGet,
@@ -128,7 +128,7 @@ func TestDatabaseHealthChecker(t *testing.T) {
}
func TestRegisterHealthCheckers(t *testing.T) {
- healthCheckerRegistry = map[string]health.Checker{}
+ HealthCheckerRegistry = map[string]health.Checker{}
registerHealthCheckers()
- assert.NotNil(t, healthCheckerRegistry["core"])
+ assert.NotNil(t, HealthCheckerRegistry["core"])
}
diff --git a/src/core/api/internal.go b/src/core/api/internal.go
index 71f1f317e..06e6c45a2 100644
--- a/src/core/api/internal.go
+++ b/src/core/api/internal.go
@@ -15,12 +15,21 @@
package api
import (
- "errors"
-
+ "fmt"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
+ common_quota "github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils/log"
+
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/jobservice/logger"
+ "github.com/pkg/errors"
+ "strconv"
+
+ quota "github.com/goharbor/harbor/src/core/api/quota"
+
+ comcfg "github.com/goharbor/harbor/src/common/config"
)
// InternalAPI handles request of harbor admin...
@@ -69,3 +78,103 @@ func (ia *InternalAPI) RenameAdmin() {
log.Debugf("The super user has been renamed to: %s", newName)
ia.DestroySession()
}
+
+// QuotaSwitcher ...
+type QuotaSwitcher struct {
+ Enabled bool
+}
+
+// SwitchQuota ...
+func (ia *InternalAPI) SwitchQuota() {
+ var req QuotaSwitcher
+ if err := ia.DecodeJSONReq(&req); err != nil {
+ ia.SendBadRequestError(err)
+ return
+ }
+ // when per-project quota goes from disabled to enabled, the quota usage needs to be recalculated based on the DB records.
+ if !config.QuotaPerProjectEnable() && req.Enabled {
+ if err := ia.ensureQuota(); err != nil {
+ ia.SendInternalServerError(err)
+ return
+ }
+ }
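+ // persist the new switch value once the handler returns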
+ defer func() {
+ config.GetCfgManager().Set(common.QuotaPerProjectEnable, req.Enabled)
+ config.GetCfgManager().Save()
+ }()
+ return
+}
+
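+// ensureQuota recalculates the storage size and artifact/chart count of every
+// project and persists them as the projects' current quota usage, so that
+// enabling per-project quota starts from accurate numbers.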
+func (ia *InternalAPI) ensureQuota() error {
+ projects, err := dao.GetProjects(nil)
+ if err != nil {
+ return err
+ }
+ for _, project := range projects {
+ pSize, err := dao.CountSizeOfProject(project.ProjectID)
+ if err != nil {
+ logger.Warningf("error happen on counting size of project:%d , error:%v, just skip it.", project.ProjectID, err)
+ continue
+ }
+ afQuery := &models.ArtifactQuery{
+ PID: project.ProjectID,
+ }
+ afs, err := dao.ListArtifacts(afQuery)
+ if err != nil {
+ logger.Warningf("error happen on counting number of project:%d , error:%v, just skip it.", project.ProjectID, err)
+ continue
+ }
+ pCount := int64(len(afs))
+
+ // it needs to append the chart count
+ if config.WithChartMuseum() {
+ count, err := chartController.GetCountOfCharts([]string{project.Name})
+ if err != nil {
+ err = errors.Wrap(err, fmt.Sprintf("get chart count of project %d failed", project.ProjectID))
+ logger.Error(err)
+ continue
+ }
+ pCount = pCount + int64(count)
+ }
+
+ quotaMgr, err := common_quota.NewManager("project", strconv.FormatInt(project.ProjectID, 10))
+ if err != nil {
+ logger.Errorf("Error occurred when to new quota manager %v, just skip it.", err)
+ continue
+ }
+ used := common_quota.ResourceList{
+ common_quota.ResourceStorage: pSize,
+ common_quota.ResourceCount: pCount,
+ }
+ if err := quotaMgr.EnsureQuota(used); err != nil {
+ logger.Errorf("cannot ensure quota for the project: %d, err: %v, just skip it.", project.ProjectID, err)
+ continue
+ }
+ }
+ return nil
+}
+
+// SyncQuota ...
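+// It switches the system to read-only, recalculates quota usage in the
+// background, and restores the previous read-only setting when finished.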
+func (ia *InternalAPI) SyncQuota() {
+ cur := config.ReadOnly()
+ cfgMgr := comcfg.NewDBCfgManager()
+ if !cur {
+ cfgMgr.Set(common.ReadOnly, true)
+ }
+ // run asynchronously so the API call does not time out
+ go func() {
+ defer func() {
+ if !cur {
+ cfgMgr.Set(common.ReadOnly, false)
+ }
+ }()
+ log.Info("start to sync quota(API), the system will be set to ReadOnly and back it normal once it done.")
+ err := quota.Sync(ia.ProjectMgr, false)
+ if err != nil {
+ log.Errorf("fail to sync quota(API), but with error: %v, please try to do it again.", err)
+ return
+ }
+ log.Info("success to sync quota(API).")
+ }()
+ return
+}
diff --git a/src/core/api/internal_test.go b/src/core/api/internal_test.go
new file mode 100644
index 000000000..02903a98b
--- /dev/null
+++ b/src/core/api/internal_test.go
@@ -0,0 +1,89 @@
+// Copyright 2018 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+ "net/http"
+ "testing"
+)
+
+// cannot verify the real scenario here
+func TestSwitchQuota(t *testing.T) {
+ cases := []*codeCheckingCase{
+ // 401
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: "/api/internal/switchquota",
+ },
+ code: http.StatusUnauthorized,
+ },
+ // 200
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: "/api/internal/switchquota",
+ credential: sysAdmin,
+ bodyJSON: &QuotaSwitcher{
+ Enabled: true,
+ },
+ },
+ code: http.StatusOK,
+ },
+ // 403
+ {
+ request: &testingRequest{
+ url: "/api/internal/switchquota",
+ method: http.MethodPut,
+ credential: nonSysAdmin,
+ },
+ code: http.StatusForbidden,
+ },
+ }
+ runCodeCheckingCases(t, cases...)
+}
+
+// cannot verify the real scenario here
+func TestSyncQuota(t *testing.T) {
+ cases := []*codeCheckingCase{
+ // 401
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/internal/syncquota",
+ },
+ code: http.StatusUnauthorized,
+ },
+ // 200
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/internal/syncquota",
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ // 403
+ {
+ request: &testingRequest{
+ url: "/api/internal/syncquota",
+ method: http.MethodPost,
+ credential: nonSysAdmin,
+ },
+ code: http.StatusForbidden,
+ },
+ }
+ runCodeCheckingCases(t, cases...)
+}
diff --git a/src/core/api/notification_job.go b/src/core/api/notification_job.go
new file mode 100755
index 000000000..775c9fc9f
--- /dev/null
+++ b/src/core/api/notification_job.go
@@ -0,0 +1,108 @@
+package api
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/rbac"
+ "github.com/goharbor/harbor/src/pkg/notification"
+)
+
+// NotificationJobAPI ...
+type NotificationJobAPI struct {
+ BaseController
+ project *models.Project
+}
+
+// Prepare ...
+func (w *NotificationJobAPI) Prepare() {
+ w.BaseController.Prepare()
+ if !w.SecurityCtx.IsAuthenticated() {
+ w.SendUnAuthorizedError(errors.New("UnAuthorized"))
+ return
+ }
+
+ pid, err := w.GetInt64FromPath(":pid")
+ if err != nil {
+ w.SendBadRequestError(fmt.Errorf("failed to get project ID: %v", err))
+ return
+ }
+ if pid <= 0 {
+ w.SendBadRequestError(fmt.Errorf("invalid project ID: %d", pid))
+ return
+ }
+
+ project, err := w.ProjectMgr.Get(pid)
+ if err != nil {
+ w.SendInternalServerError(fmt.Errorf("failed to get project %d: %v", pid, err))
+ return
+ }
+ if project == nil {
+ w.SendNotFoundError(fmt.Errorf("project %d not found", pid))
+ return
+ }
+ w.project = project
+}
+
+// List ...
+func (w *NotificationJobAPI) List() {
+ if !w.validateRBAC(rbac.ActionList, w.project.ProjectID) {
+ return
+ }
+
+ policyID, err := w.GetInt64("policy_id")
+ if err != nil || policyID <= 0 {
+ w.SendBadRequestError(fmt.Errorf("invalid policy_id: %s", w.GetString("policy_id")))
+ return
+ }
+
+ policy, err := notification.PolicyMgr.Get(policyID)
+ if err != nil {
+ w.SendInternalServerError(fmt.Errorf("failed to get policy %d: %v", policyID, err))
+ return
+ }
+ if policy == nil {
+ w.SendBadRequestError(fmt.Errorf("policy %d not found", policyID))
+ return
+ }
+
+ query := &models.NotificationJobQuery{
+ PolicyID: policyID,
+ }
+
+ query.Statuses = w.GetStrings("status")
+
+ query.Page, query.Size, err = w.GetPaginationParams()
+ if err != nil {
+ w.SendBadRequestError(err)
+ return
+ }
+
+ total, jobs, err := notification.JobMgr.List(query)
+ if err != nil {
+ w.SendInternalServerError(fmt.Errorf("failed to list notification jobs: %v", err))
+ return
+ }
+ w.SetPaginationHeader(total, query.Page, query.Size)
+ w.WriteJSONData(jobs)
+}
+
+func (w *NotificationJobAPI) validateRBAC(action rbac.Action, projectID int64) bool {
+ if w.SecurityCtx.IsSysAdmin() {
+ return true
+ }
+
+ project, err := w.ProjectMgr.Get(projectID)
+ if err != nil {
+ w.ParseAndHandleError(fmt.Sprintf("failed to get project %d", projectID), err)
+ return false
+ }
+
+ resource := rbac.NewProjectNamespace(project.ProjectID).Resource(rbac.ResourceNotificationPolicy)
+ if !w.SecurityCtx.Can(action, resource) {
+ w.SendForbiddenError(errors.New(w.SecurityCtx.GetUsername()))
+ return false
+ }
+ return true
+}
diff --git a/src/core/api/notification_job_test.go b/src/core/api/notification_job_test.go
new file mode 100644
index 000000000..d6a9ac099
--- /dev/null
+++ b/src/core/api/notification_job_test.go
@@ -0,0 +1,107 @@
+package api
+
+import (
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/pkg/notification"
+ "github.com/goharbor/harbor/src/pkg/notification/model"
+)
+
+type fakedNotificationJobMgr struct {
+}
+
+func (f *fakedNotificationJobMgr) Create(job *models.NotificationJob) (int64, error) {
+ return 1, nil
+}
+
+func (f *fakedNotificationJobMgr) List(...*models.NotificationJobQuery) (int64, []*models.NotificationJob, error) {
+ return 0, nil, nil
+}
+
+func (f *fakedNotificationJobMgr) Update(job *models.NotificationJob, props ...string) error {
+ return nil
+}
+
+func (f *fakedNotificationJobMgr) ListJobsGroupByEventType(policyID int64) ([]*models.NotificationJob, error) {
+ return []*models.NotificationJob{
+ {
+ EventType: model.EventTypePullImage,
+ CreationTime: time.Now(),
+ },
+ {
+ EventType: model.EventTypeDeleteImage,
+ CreationTime: time.Now(),
+ },
+ }, nil
+}
+
+func TestNotificationJobAPI_List(t *testing.T) {
+ policyMgr := notification.PolicyMgr
+ jobMgr := notification.JobMgr
+ defer func() {
+ notification.PolicyMgr = policyMgr
+ notification.JobMgr = jobMgr
+ }()
+ notification.PolicyMgr = &fakedNotificationPlyMgr{}
+ notification.JobMgr = &fakedNotificationJobMgr{}
+
+ cases := []*codeCheckingCase{
+ // 401
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/jobs?policy_id=1",
+ },
+ code: http.StatusUnauthorized,
+ },
+ // 403
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/jobs?policy_id=1",
+ credential: nonSysAdmin,
+ },
+ code: http.StatusForbidden,
+ },
+ // 400 policyID invalid
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/jobs?policy_id=0",
+ credential: sysAdmin,
+ },
+ code: http.StatusBadRequest,
+ },
+ // 400 policyID not found
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/jobs?policy_id=123",
+ credential: sysAdmin,
+ },
+ code: http.StatusBadRequest,
+ },
+ // 404 project not found
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/123/webhook/jobs?policy_id=1",
+ credential: sysAdmin,
+ },
+ code: http.StatusNotFound,
+ },
+ // 200
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/jobs?policy_id=1",
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ }
+ runCodeCheckingCases(t, cases...)
+}
diff --git a/src/core/api/notification_policy.go b/src/core/api/notification_policy.go
new file mode 100755
index 000000000..c7acdbea2
--- /dev/null
+++ b/src/core/api/notification_policy.go
@@ -0,0 +1,384 @@
+package api
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/rbac"
+ "github.com/goharbor/harbor/src/common/utils"
+ "github.com/goharbor/harbor/src/pkg/notification"
+)
+
+// NotificationPolicyAPI ...
+type NotificationPolicyAPI struct {
+ BaseController
+ project *models.Project
+}
+
+// notificationPolicyForUI defines the structure of notification policy info display in UI
+type notificationPolicyForUI struct {
+ EventType string `json:"event_type"`
+ Enabled bool `json:"enabled"`
+ CreationTime *time.Time `json:"creation_time"`
+ LastTriggerTime *time.Time `json:"last_trigger_time,omitempty"`
+}
+
+// Prepare ...
+func (w *NotificationPolicyAPI) Prepare() {
+ w.BaseController.Prepare()
+ if !w.SecurityCtx.IsAuthenticated() {
+ w.SendUnAuthorizedError(errors.New("UnAuthorized"))
+ return
+ }
+
+ pid, err := w.GetInt64FromPath(":pid")
+ if err != nil {
+ w.SendBadRequestError(fmt.Errorf("failed to get project ID: %v", err))
+ return
+ }
+ if pid <= 0 {
+ w.SendBadRequestError(fmt.Errorf("invalid project ID: %d", pid))
+ return
+ }
+
+ project, err := w.ProjectMgr.Get(pid)
+ if err != nil {
+ w.SendInternalServerError(fmt.Errorf("failed to get project %d: %v", pid, err))
+ return
+ }
+ if project == nil {
+ w.SendNotFoundError(fmt.Errorf("project %d not found", pid))
+ return
+ }
+ w.project = project
+}
+
+// Get ...
+func (w *NotificationPolicyAPI) Get() {
+ if !w.validateRBAC(rbac.ActionRead, w.project.ProjectID) {
+ return
+ }
+
+ id, err := w.GetIDFromURL()
+ if err != nil {
+ w.SendBadRequestError(err)
+ return
+ }
+
+ policy, err := notification.PolicyMgr.Get(id)
+ if err != nil {
+ w.SendInternalServerError(fmt.Errorf("failed to get the notification policy %d: %v", id, err))
+ return
+ }
+ if policy == nil {
+ w.SendNotFoundError(fmt.Errorf("notification policy %d not found", id))
+ return
+ }
+
+ if w.project.ProjectID != policy.ProjectID {
+ w.SendBadRequestError(fmt.Errorf("notification policy %d with projectID %d not belong to project %d in URL", id, policy.ProjectID, w.project.ProjectID))
+ return
+ }
+
+ w.WriteJSONData(policy)
+}
+
+// Post ...
+func (w *NotificationPolicyAPI) Post() {
+ if !w.validateRBAC(rbac.ActionCreate, w.project.ProjectID) {
+ return
+ }
+
+ policy := &models.NotificationPolicy{}
+ isValid, err := w.DecodeJSONReqAndValidate(policy)
+ if !isValid {
+ w.SendBadRequestError(err)
+ return
+ }
+
+ if !w.validateTargets(policy) {
+ return
+ }
+
+ if !w.validateEventTypes(policy) {
+ return
+ }
+
+ if policy.ID != 0 {
+ w.SendBadRequestError(fmt.Errorf("cannot accept policy creating request with ID: %d", policy.ID))
+ return
+ }
+
+ policy.Creator = w.SecurityCtx.GetUsername()
+ policy.ProjectID = w.project.ProjectID
+
+ id, err := notification.PolicyMgr.Create(policy)
+ if err != nil {
+ w.SendInternalServerError(fmt.Errorf("failed to create the notification policy: %v", err))
+ return
+ }
+ w.Redirect(http.StatusCreated, strconv.FormatInt(id, 10))
+}
+
+// Put ...
+func (w *NotificationPolicyAPI) Put() {
+ if !w.validateRBAC(rbac.ActionUpdate, w.project.ProjectID) {
+ return
+ }
+
+ id, err := w.GetIDFromURL()
+ if id < 0 || err != nil {
+ w.SendBadRequestError(errors.New("invalid notification policy ID"))
+ return
+ }
+
+ oriPolicy, err := notification.PolicyMgr.Get(id)
+ if err != nil {
+ w.SendInternalServerError(fmt.Errorf("failed to get the notification policy %d: %v", id, err))
+ return
+ }
+ if oriPolicy == nil {
+ w.SendNotFoundError(fmt.Errorf("notification policy %d not found", id))
+ return
+ }
+
+ policy := &models.NotificationPolicy{}
+ isValid, err := w.DecodeJSONReqAndValidate(policy)
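+ // wire up the retention components: the launcher and the API controller
+ // depend on the managers and the scheduler initialized above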
+ if !isValid {
+ w.SendBadRequestError(err)
+ return
+ }
+
+ if !w.validateTargets(policy) {
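+ // the callback is invoked when a retention schedule fires: it decodes the
+ // trigger parameters and kicks off a retention execution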
+ return
+ }
+
+ if !w.validateEventTypes(policy) {
+ return
+ }
+
+ if w.project.ProjectID != oriPolicy.ProjectID {
+ w.SendBadRequestError(fmt.Errorf("notification policy %d with projectID %d not belong to project %d in URL", id, oriPolicy.ProjectID, w.project.ProjectID))
+ return
+ }
+
+ policy.ID = id
+ policy.ProjectID = w.project.ProjectID
+
+ if err = notification.PolicyMgr.Update(policy); err != nil {
+ w.SendInternalServerError(fmt.Errorf("failed to update the notification policy: %v", err))
+ return
+ }
+}
+
+// List ...
+func (w *NotificationPolicyAPI) List() {
+ projectID := w.project.ProjectID
+ if !w.validateRBAC(rbac.ActionList, projectID) {
+ return
+ }
+
+ res, err := notification.PolicyMgr.List(projectID)
+ if err != nil {
+ w.SendInternalServerError(fmt.Errorf("failed to list notification policies by projectID %d: %v", projectID, err))
+ return
+ }
+
+ policies := []*models.NotificationPolicy{}
+ if res != nil {
+ for _, policy := range res {
+ policies = append(policies, policy)
+ }
+ }
+
+ w.WriteJSONData(policies)
+}
+
+// ListGroupByEventType lists notification policy trigger info grouped by event type for UI,
+// displays event type, status(enabled/disabled), create time, last trigger time
+func (w *NotificationPolicyAPI) ListGroupByEventType() {
+ projectID := w.project.ProjectID
+ if !w.validateRBAC(rbac.ActionList, projectID) {
+ return
+ }
+
+ res, err := notification.PolicyMgr.List(projectID)
+ if err != nil {
+ w.SendInternalServerError(fmt.Errorf("failed to list notification policies by projectID %d: %v", projectID, err))
+ return
+ }
+
+ policies, err := constructPolicyWithTriggerTime(res)
+ if err != nil {
+ w.SendInternalServerError(fmt.Errorf("failed to list the notification policy trigger information: %v", err))
+ return
+ }
+ w.WriteJSONData(policies)
+}
+
+// Delete ...
+func (w *NotificationPolicyAPI) Delete() {
+ projectID := w.project.ProjectID
+ if !w.validateRBAC(rbac.ActionDelete, projectID) {
+ return
+ }
+
+ id, err := w.GetIDFromURL()
+ if id < 0 || err != nil {
+ w.SendBadRequestError(errors.New("invalid notification policy ID"))
+ return
+ }
+
+ policy, err := notification.PolicyMgr.Get(id)
+ if err != nil {
+ w.SendInternalServerError(fmt.Errorf("failed to get the notification policy %d: %v", id, err))
+ return
+ }
+ if policy == nil {
+ w.SendNotFoundError(fmt.Errorf("notification policy %d not found", id))
+ return
+ }
+
+ if projectID != policy.ProjectID {
+ w.SendBadRequestError(fmt.Errorf("notification policy %d with projectID %d not belong to project %d in URL", id, policy.ProjectID, projectID))
+ return
+ }
+
+ if err = notification.PolicyMgr.Delete(id); err != nil {
+ w.SendInternalServerError(fmt.Errorf("failed to delete notification policy %d: %v", id, err))
+ return
+ }
+}
+
+// Test ...
+func (w *NotificationPolicyAPI) Test() {
+ projectID := w.project.ProjectID
+ if !w.validateRBAC(rbac.ActionCreate, projectID) {
+ return
+ }
+
+ policy := &models.NotificationPolicy{}
+ isValid, err := w.DecodeJSONReqAndValidate(policy)
+ if !isValid {
+ w.SendBadRequestError(err)
+ return
+ }
+
+ if !w.validateTargets(policy) {
+ return
+ }
+
+ if err := notification.PolicyMgr.Test(policy); err != nil {
+ w.SendBadRequestError(fmt.Errorf("notification policy %s test failed: %v", policy.Name, err))
+ return
+ }
+}
+
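+// validateRBAC checks whether the current user may perform the given action on
+// the project's notification-policy resource; system admins always may.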
+func (w *NotificationPolicyAPI) validateRBAC(action rbac.Action, projectID int64) bool {
+ if w.SecurityCtx.IsSysAdmin() {
+ return true
+ }
+
+ project, err := w.ProjectMgr.Get(projectID)
+ if err != nil {
+ w.ParseAndHandleError(fmt.Sprintf("failed to get project %d", projectID), err)
+ return false
+ }
+
+ resource := rbac.NewProjectNamespace(project.ProjectID).Resource(rbac.ResourceNotificationPolicy)
+ if !w.SecurityCtx.Can(action, resource) {
+ w.SendForbiddenError(errors.New(w.SecurityCtx.GetUsername()))
+ return false
+ }
+ return true
+}
+
+func (w *NotificationPolicyAPI) validateTargets(policy *models.NotificationPolicy) bool {
+ if len(policy.Targets) == 0 {
+ w.SendBadRequestError(fmt.Errorf("empty notification target with policy %s", policy.Name))
+ return false
+ }
+
+ for _, target := range policy.Targets {
+ url, err := utils.ParseEndpoint(target.Address)
+ if err != nil {
+ w.SendBadRequestError(err)
+ return false
+ }
+ // Prevent SSRF security issue #3755
+ target.Address = url.Scheme + "://" + url.Host + url.Path
+
+ _, ok := notification.SupportedNotifyTypes[target.Type]
+ if !ok {
+ w.SendBadRequestError(fmt.Errorf("unsupport target type %s with policy %s", target.Type, policy.Name))
+ return false
+ }
+ }
+
+ return true
+}
+
+func (w *NotificationPolicyAPI) validateEventTypes(policy *models.NotificationPolicy) bool {
+ if len(policy.EventTypes) == 0 {
+ w.SendBadRequestError(errors.New("empty event type"))
+ return false
+ }
+
+ for _, eventType := range policy.EventTypes {
+ _, ok := notification.SupportedEventTypes[eventType]
+ if !ok {
+ w.SendBadRequestError(fmt.Errorf("unsupport event type %s", eventType))
+ return false
+ }
+ }
+
+ return true
+}
+
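+// getLastTriggerTimeGroupByEventType returns the creation time of the latest
+// job for the given event type, or the zero time if the policy has none.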
+func getLastTriggerTimeGroupByEventType(eventType string, policyID int64) (time.Time, error) {
+ jobs, err := notification.JobMgr.ListJobsGroupByEventType(policyID)
+ if err != nil {
+ return time.Time{}, err
+ }
+
+ for _, job := range jobs {
+ if eventType == job.EventType {
+ return job.CreationTime, nil
+ }
+ }
+ return time.Time{}, nil
+}
+
+// constructPolicyWithTriggerTime construct notification policy information displayed in UI
+// including event type, enabled, creation time, last trigger time
+func constructPolicyWithTriggerTime(policies []*models.NotificationPolicy) ([]*notificationPolicyForUI, error) {
+ res := []*notificationPolicyForUI{}
+ if policies != nil {
+ for _, policy := range policies {
+ for _, t := range policy.EventTypes {
+ ply := &notificationPolicyForUI{
+ EventType: t,
+ Enabled: policy.Enabled,
+ CreationTime: &policy.CreationTime,
+ }
+
+ ltTime, err := getLastTriggerTimeGroupByEventType(t, policy.ID)
+ if err != nil {
+ return nil, err
+ }
+ if !ltTime.IsZero() {
+ ply.LastTriggerTime = &ltTime
+ }
+ res = append(res, ply)
+ }
+ }
+ }
+ return res, nil
+}
diff --git a/src/core/api/notification_policy_test.go b/src/core/api/notification_policy_test.go
new file mode 100644
index 000000000..a63f6b72e
--- /dev/null
+++ b/src/core/api/notification_policy_test.go
@@ -0,0 +1,637 @@
+package api
+
+import (
+ "net/http"
+ "testing"
+
+ "github.com/pkg/errors"
+
+ "github.com/goharbor/harbor/src/pkg/notification/model"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/pkg/notification"
+)
+
+type fakedNotificationPlyMgr struct {
+}
+
+func (f *fakedNotificationPlyMgr) Create(*models.NotificationPolicy) (int64, error) {
+ return 0, nil
+}
+
+func (f *fakedNotificationPlyMgr) List(id int64) ([]*models.NotificationPolicy, error) {
+ return []*models.NotificationPolicy{
+ {
+ ID: 1,
+ EventTypes: []string{
+ model.EventTypePullImage,
+ model.EventTypePushImage,
+ },
+ },
+ }, nil
+}
+
+func (f *fakedNotificationPlyMgr) Get(id int64) (*models.NotificationPolicy, error) {
+ switch id {
+ case 1:
+ return &models.NotificationPolicy{ID: 1, ProjectID: 1}, nil
+ case 2:
+ return &models.NotificationPolicy{ID: 2, ProjectID: 222}, nil
+ case 3:
+ return nil, errors.New("")
+ default:
+ return nil, nil
+ }
+}
+
+func (f *fakedNotificationPlyMgr) GetByNameAndProjectID(string, int64) (*models.NotificationPolicy, error) {
+ return nil, nil
+}
+
+func (f *fakedNotificationPlyMgr) Update(*models.NotificationPolicy) error {
+ return nil
+}
+
+func (f *fakedNotificationPlyMgr) Delete(int64) error {
+ return nil
+}
+
+func (f *fakedNotificationPlyMgr) Test(*models.NotificationPolicy) error {
+ return nil
+}
+
+func (f *fakedNotificationPlyMgr) GetRelatedPolices(int64, string) ([]*models.NotificationPolicy, error) {
+ return nil, nil
+}
+
+func TestNotificationPolicyAPI_List(t *testing.T) {
+ policyCtl := notification.PolicyMgr
+ defer func() {
+ notification.PolicyMgr = policyCtl
+ }()
+
+ notification.PolicyMgr = &fakedNotificationPlyMgr{}
+
+ cases := []*codeCheckingCase{
+ // 401
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/policies",
+ },
+ code: http.StatusUnauthorized,
+ },
+ // 403
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/policies",
+ credential: nonSysAdmin,
+ },
+ code: http.StatusForbidden,
+ },
+ // 404
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/123/webhook/policies",
+ credential: sysAdmin,
+ },
+ code: http.StatusNotFound,
+ },
+ // 200
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/policies",
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ }
+ runCodeCheckingCases(t, cases...)
+
+}
+
+func TestNotificationPolicyAPI_Post(t *testing.T) {
+ policyCtl := notification.PolicyMgr
+ defer func() {
+ notification.PolicyMgr = policyCtl
+ }()
+
+ notification.PolicyMgr = &fakedNotificationPlyMgr{}
+
+ cases := []*codeCheckingCase{
+ // 401
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/webhook/policies",
+ },
+ code: http.StatusUnauthorized,
+ },
+ // 403
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/webhook/policies",
+ credential: nonSysAdmin,
+ },
+ code: http.StatusForbidden,
+ },
+ // 400 invalid json body
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/webhook/policies",
+ credential: sysAdmin,
+ bodyJSON: "invalid json body",
+ },
+ code: http.StatusBadRequest,
+ },
+ // 400 empty targets
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/webhook/policies",
+ credential: sysAdmin,
+ bodyJSON: &models.NotificationPolicy{
+ Targets: []models.EventTarget{},
+ }},
+ code: http.StatusBadRequest,
+ },
+ // 400 invalid event target address
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/webhook/policies",
+ credential: sysAdmin,
+ bodyJSON: &models.NotificationPolicy{
+ EventTypes: []string{"pullImage", "pushImage", "deleteImage"},
+ Targets: []models.EventTarget{
+ {
+ Address: "tcp://127.0.0.1:8080",
+ },
+ },
+ }},
+ code: http.StatusBadRequest,
+ },
+ // 400 invalid event target type
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/webhook/policies",
+ credential: sysAdmin,
+ bodyJSON: &models.NotificationPolicy{
+ EventTypes: []string{"pullImage", "pushImage", "deleteImage"},
+ Targets: []models.EventTarget{
+ {
+ Type: "smn",
+ Address: "http://127.0.0.1:8080",
+ },
+ },
+ }},
+ code: http.StatusBadRequest,
+ },
+ // 400 invalid event type
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/webhook/policies",
+ credential: sysAdmin,
+ bodyJSON: &models.NotificationPolicy{
+ EventTypes: []string{"invalidType"},
+ Targets: []models.EventTarget{
+ {
+ Address: "tcp://127.0.0.1:8080",
+ },
+ },
+ }},
+ code: http.StatusBadRequest,
+ },
+ // 400 policy ID != 0
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/webhook/policies",
+ credential: sysAdmin,
+ bodyJSON: &models.NotificationPolicy{
+ ID: 111,
+ EventTypes: []string{"pullImage", "pushImage", "deleteImage"},
+ Targets: []models.EventTarget{
+ {
+ Type: "http",
+ Address: "http://10.173.32.58:9009",
+ AuthHeader: "xxxxxxxxx",
+ SkipCertVerify: true,
+ },
+ },
+ },
+ },
+ code: http.StatusBadRequest,
+ },
+ // 201
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/webhook/policies",
+ credential: sysAdmin,
+ bodyJSON: &models.NotificationPolicy{
+ EventTypes: []string{"pullImage", "pushImage", "deleteImage"},
+ Targets: []models.EventTarget{
+ {
+ Type: "http",
+ Address: "http://10.173.32.58:9009",
+ AuthHeader: "xxxxxxxxx",
+ SkipCertVerify: true,
+ },
+ },
+ },
+ },
+ code: http.StatusCreated,
+ },
+ }
+ runCodeCheckingCases(t, cases...)
+}
+
+func TestNotificationPolicyAPI_Get(t *testing.T) {
+ policyCtl := notification.PolicyMgr
+ defer func() {
+ notification.PolicyMgr = policyCtl
+ }()
+
+ notification.PolicyMgr = &fakedNotificationPlyMgr{}
+
+ cases := []*codeCheckingCase{
+ // 401
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/policies/111",
+ },
+ code: http.StatusUnauthorized,
+ },
+ // 403
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/policies/111",
+ credential: nonSysAdmin,
+ },
+ code: http.StatusForbidden,
+ },
+ // 404
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/policies/1234",
+ credential: sysAdmin,
+ },
+ code: http.StatusNotFound,
+ },
+ // 400 projectID not match with projectID in URL
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/policies/2",
+ credential: sysAdmin,
+ },
+ code: http.StatusBadRequest,
+ },
+ // 500
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/policies/3",
+ credential: sysAdmin,
+ },
+ code: http.StatusInternalServerError,
+ },
+ // 200
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/policies/1",
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ }
+ runCodeCheckingCases(t, cases...)
+}
+
+func TestNotificationPolicyAPI_Put(t *testing.T) {
+ policyCtl := notification.PolicyMgr
+ defer func() {
+ notification.PolicyMgr = policyCtl
+ }()
+
+ notification.PolicyMgr = &fakedNotificationPlyMgr{}
+
+ cases := []*codeCheckingCase{
+ // 401
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: "/api/projects/1/webhook/policies/111",
+ },
+ code: http.StatusUnauthorized,
+ },
+ // 403
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: "/api/projects/1/webhook/policies/111",
+ credential: nonSysAdmin,
+ },
+ code: http.StatusForbidden,
+ },
+ // 404
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: "/api/projects/1/webhook/policies/1234",
+ credential: sysAdmin,
+ },
+ code: http.StatusNotFound,
+ },
+ // 400 invalid json body
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: "/api/projects/1/webhook/policies/1",
+ credential: sysAdmin,
+ bodyJSON: "invalidJSONBody",
+ },
+ code: http.StatusBadRequest,
+ },
+ // 400 empty targets
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: "/api/projects/1/webhook/policies/1",
+ credential: sysAdmin,
+ bodyJSON: &models.NotificationPolicy{
+ EventTypes: []string{"pullImage", "pushImage", "deleteImage"},
+ Targets: []models.EventTarget{},
+ }},
+ code: http.StatusBadRequest,
+ },
+ // 400 invalid event target address
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: "/api/projects/1/webhook/policies/1",
+ credential: sysAdmin,
+ bodyJSON: &models.NotificationPolicy{
+ EventTypes: []string{"pullImage", "pushImage", "deleteImage"},
+ Targets: []models.EventTarget{
+ {
+ Address: "tcp://127.0.0.1:8080",
+ },
+ },
+ }},
+ code: http.StatusBadRequest,
+ },
+ // 400 invalid event target type
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: "/api/projects/1/webhook/policies/1",
+ credential: sysAdmin,
+ bodyJSON: &models.NotificationPolicy{
+ EventTypes: []string{"pullImage", "pushImage", "deleteImage"},
+ Targets: []models.EventTarget{
+ {
+ Type: "smn",
+ Address: "http://127.0.0.1:8080",
+ },
+ },
+ }},
+ code: http.StatusBadRequest,
+ },
+ // 400 invalid event type
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: "/api/projects/1/webhook/policies/1",
+ credential: sysAdmin,
+ bodyJSON: &models.NotificationPolicy{
+ EventTypes: []string{"invalidType"},
+ Targets: []models.EventTarget{
+ {
+ Address: "tcp://127.0.0.1:8080",
+ },
+ },
+ }},
+ code: http.StatusBadRequest,
+ },
+ // 200
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: "/api/projects/1/webhook/policies/1",
+ credential: sysAdmin,
+ bodyJSON: &models.NotificationPolicy{
+ Name: "imagePolicyTest",
+ EventTypes: []string{"pullImage", "pushImage", "deleteImage"},
+ Targets: []models.EventTarget{
+ {
+ Type: "http",
+ Address: "http://10.173.32.58:9009",
+ AuthHeader: "xxxxxxxxx",
+ SkipCertVerify: true,
+ },
+ },
+ },
+ },
+ code: http.StatusOK,
+ },
+ }
+ runCodeCheckingCases(t, cases...)
+}
+
+func TestNotificationPolicyAPI_Test(t *testing.T) {
+ policyCtl := notification.PolicyMgr
+ defer func() {
+ notification.PolicyMgr = policyCtl
+ }()
+
+ notification.PolicyMgr = &fakedNotificationPlyMgr{}
+ cases := []*codeCheckingCase{
+ // 401
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/webhook/policies/test",
+ },
+ code: http.StatusUnauthorized,
+ },
+ // 403
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/webhook/policies/test",
+ credential: nonSysAdmin,
+ },
+ code: http.StatusForbidden,
+ },
+ // 404
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/123/webhook/policies/test",
+ credential: sysAdmin,
+ },
+ code: http.StatusNotFound,
+ },
+ // 400 invalid json body
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/webhook/policies/test",
+ credential: sysAdmin,
+ bodyJSON: 1234125,
+ },
+ code: http.StatusBadRequest,
+ },
+ // 200
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/webhook/policies/test",
+ credential: sysAdmin,
+ bodyJSON: &models.NotificationPolicy{
+ Targets: []models.EventTarget{
+ {
+ Type: "http",
+ Address: "http://10.173.32.58:9009",
+ AuthHeader: "xxxxxxxxx",
+ SkipCertVerify: true,
+ },
+ },
+ },
+ },
+ code: http.StatusOK,
+ },
+ }
+ runCodeCheckingCases(t, cases...)
+}
+
+func TestNotificationPolicyAPI_ListGroupByEventType(t *testing.T) {
+ policyCtl := notification.PolicyMgr
+ jobMgr := notification.JobMgr
+ defer func() {
+ notification.PolicyMgr = policyCtl
+ notification.JobMgr = jobMgr
+ }()
+
+ notification.PolicyMgr = &fakedNotificationPlyMgr{}
+ notification.JobMgr = &fakedNotificationJobMgr{}
+
+ cases := []*codeCheckingCase{
+ // 401
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/lasttrigger",
+ },
+ code: http.StatusUnauthorized,
+ },
+ // 403
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/lasttrigger",
+ credential: nonSysAdmin,
+ },
+ code: http.StatusForbidden,
+ },
+ // 404
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/123/webhook/lasttrigger",
+ credential: sysAdmin,
+ },
+ code: http.StatusNotFound,
+ },
+ // 200
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/projects/1/webhook/lasttrigger",
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ }
+ runCodeCheckingCases(t, cases...)
+}
+
+func TestNotificationPolicyAPI_Delete(t *testing.T) {
+ policyCtl := notification.PolicyMgr
+ defer func() {
+ notification.PolicyMgr = policyCtl
+ }()
+
+ notification.PolicyMgr = &fakedNotificationPlyMgr{}
+
+ cases := []*codeCheckingCase{
+ // 401
+ {
+ request: &testingRequest{
+ method: http.MethodDelete,
+ url: "/api/projects/1/webhook/policies/111",
+ },
+ code: http.StatusUnauthorized,
+ },
+ // 403
+ {
+ request: &testingRequest{
+ method: http.MethodDelete,
+ url: "/api/projects/1/webhook/policies/111",
+ credential: nonSysAdmin,
+ },
+ code: http.StatusForbidden,
+ },
+ // 404
+ {
+ request: &testingRequest{
+ method: http.MethodDelete,
+ url: "/api/projects/1/webhook/policies/1234",
+ credential: sysAdmin,
+ },
+ code: http.StatusNotFound,
+ },
+ // 400 projectID not match
+ {
+ request: &testingRequest{
+ method: http.MethodDelete,
+ url: "/api/projects/1/webhook/policies/2",
+ credential: sysAdmin,
+ },
+ code: http.StatusBadRequest,
+ },
+ // 500 failed to get policy
+ {
+ request: &testingRequest{
+ method: http.MethodDelete,
+ url: "/api/projects/1/webhook/policies/3",
+ credential: sysAdmin,
+ },
+ code: http.StatusInternalServerError,
+ },
+ // 200
+ {
+ request: &testingRequest{
+ method: http.MethodDelete,
+ url: "/api/projects/1/webhook/policies/1",
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ }
+ runCodeCheckingCases(t, cases...)
+}
diff --git a/src/core/api/oidc.go b/src/core/api/oidc.go
new file mode 100644
index 000000000..ed4688cf8
--- /dev/null
+++ b/src/core/api/oidc.go
@@ -0,0 +1,56 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+ "errors"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/common/utils/oidc"
+)
+
+// OIDCAPI handles the requests to /api/system/oidc/xxx
+type OIDCAPI struct {
+ BaseController
+}
+
+// Prepare validates the request initially
+func (oa *OIDCAPI) Prepare() {
+ oa.BaseController.Prepare()
+ if !oa.SecurityCtx.IsAuthenticated() {
+ oa.SendUnAuthorizedError(errors.New("unauthorized"))
+ return
+ }
+ if !oa.SecurityCtx.IsSysAdmin() {
+ msg := "only system admin has permission to access this API"
+ log.Error(msg)
+ oa.SendForbiddenError(errors.New(msg))
+ return
+ }
+}
+
+// Ping handles the request to test the connection to an OIDC endpoint
+func (oa *OIDCAPI) Ping() {
+ var c oidc.Conn
+ if err := oa.DecodeJSONReq(&c); err != nil {
+ log.Error("Failed to decode JSON request.")
+ oa.SendBadRequestError(err)
+ return
+ }
+ if err := oidc.TestEndpoint(c); err != nil {
+ log.Errorf("Failed to verify connection: %+v, err: %v", c, err)
+ oa.SendBadRequestError(err)
+ return
+ }
+}
diff --git a/src/core/api/oidc_test.go b/src/core/api/oidc_test.go
new file mode 100644
index 000000000..ec9ada990
--- /dev/null
+++ b/src/core/api/oidc_test.go
@@ -0,0 +1,69 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+ "github.com/goharbor/harbor/src/common/utils/oidc"
+ "net/http"
+ "testing"
+)
+
+func TestOIDCAPI_Ping(t *testing.T) {
+ url := "/api/system/oidc/ping"
+ cases := []*codeCheckingCase{
+ { // 401
+ request: &testingRequest{
+ method: http.MethodPost,
+ bodyJSON: oidc.Conn{},
+ url: url,
+ },
+ code: http.StatusUnauthorized,
+ },
+ { // 403
+ request: &testingRequest{
+ method: http.MethodPost,
+ bodyJSON: oidc.Conn{},
+ url: url,
+ credential: nonSysAdmin,
+ },
+ code: http.StatusForbidden,
+ },
+ { // 400
+ request: &testingRequest{
+ method: http.MethodPost,
+ bodyJSON: oidc.Conn{
+ URL: "https://www.baidu.com",
+ VerifyCert: true,
+ },
+ url: url,
+ credential: sysAdmin,
+ },
+ code: http.StatusBadRequest,
+ },
+ { // 200
+ request: &testingRequest{
+ method: http.MethodPost,
+ bodyJSON: oidc.Conn{
+ URL: "https://accounts.google.com",
+ VerifyCert: true,
+ },
+ url: url,
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ }
+ runCodeCheckingCases(t, cases...)
+}
diff --git a/src/core/api/project.go b/src/core/api/project.go
index aaf0f2e02..1c98242ca 100644
--- a/src/core/api/project.go
+++ b/src/core/api/project.go
@@ -18,19 +18,22 @@ import (
"fmt"
"net/http"
"regexp"
+ "strconv"
+ "sync"
+ "time"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/dao/project"
"github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/rbac"
"github.com/goharbor/harbor/src/common/utils"
errutil "github.com/goharbor/harbor/src/common/utils/error"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/config"
-
- "errors"
- "strconv"
- "time"
+ "github.com/goharbor/harbor/src/pkg/types"
+ "github.com/pkg/errors"
)
type deletableResp struct {
@@ -128,6 +131,7 @@ func (p *ProjectAPI) Post() {
p.SendBadRequestError(err)
return
}
+
err = validateProjectReq(pro)
if err != nil {
log.Errorf("Invalid project request, error: %v", err)
@@ -135,6 +139,28 @@ func (p *ProjectAPI) Post() {
return
}
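+ // when per-project quota is enabled, resolve the hard limits for the new project before it is created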
+ var hardLimits types.ResourceList
+ if config.QuotaPerProjectEnable() {
+ setting, err := config.QuotaSetting()
+ if err != nil {
+ log.Errorf("failed to get quota setting: %v", err)
+ p.SendInternalServerError(fmt.Errorf("failed to get quota setting: %v", err))
+ return
+ }
+
+ if !p.SecurityCtx.IsSysAdmin() {
+ pro.CountLimit = &setting.CountPerProject
+ pro.StorageLimit = &setting.StoragePerProject
+ }
+
+ hardLimits, err = projectQuotaHardLimits(pro, setting)
+ if err != nil {
+ log.Errorf("Invalid project request, error: %v", err)
+ p.SendBadRequestError(fmt.Errorf("invalid request: %v", err))
+ return
+ }
+ }
+
exist, err := p.ProjectMgr.Exists(pro.Name)
if err != nil {
p.ParseAndHandleError(fmt.Sprintf("failed to check the existence of project %s",
@@ -158,6 +184,7 @@ func (p *ProjectAPI) Post() {
if _, ok := pro.Metadata[models.ProMetaPublic]; !ok {
pro.Metadata[models.ProMetaPublic] = strconv.FormatBool(false)
}
+ // populate the project owner
owner := p.SecurityCtx.GetUsername()
// set the owner as the system admin when the API being called by replication
@@ -188,6 +215,18 @@ func (p *ProjectAPI) Post() {
return
}
+ if config.QuotaPerProjectEnable() {
+ quotaMgr, err := quota.NewManager("project", strconv.FormatInt(projectID, 10))
+ if err != nil {
+ p.SendInternalServerError(fmt.Errorf("failed to get quota manager: %v", err))
+ return
+ }
+ if _, err := quotaMgr.NewQuota(hardLimits); err != nil {
+ p.SendInternalServerError(fmt.Errorf("failed to create quota for project: %v", err))
+ return
+ }
+ }
+
go func() {
if err = dao.AddAccessLog(
models.AccessLog{
@@ -231,7 +270,10 @@ func (p *ProjectAPI) Get() {
return
}
- p.populateProperties(p.project)
+ err := p.populateProperties(p.project)
+ if err != nil {
+ log.Errorf("populate project properties failed with : %+v", err)
+ }
p.Data["json"] = p.project
p.ServeJSON()
@@ -259,6 +301,16 @@ func (p *ProjectAPI) Delete() {
return
}
+ quotaMgr, err := quota.NewManager("project", strconv.FormatInt(p.project.ProjectID, 10))
+ if err != nil {
+ p.SendInternalServerError(fmt.Errorf("failed to get quota manager: %v", err))
+ return
+ }
+ if err := quotaMgr.DeleteQuota(); err != nil {
+ p.SendInternalServerError(fmt.Errorf("failed to delete quota for project: %v", err))
+ return
+ }
+
go func() {
if err := dao.AddAccessLog(models.AccessLog{
Username: p.SecurityCtx.GetUsername(),
@@ -401,15 +453,17 @@ func (p *ProjectAPI) List() {
}
for _, project := range result.Projects {
- p.populateProperties(project)
+ err = p.populateProperties(project)
+ if err != nil {
+ log.Errorf("populate project properties failed %v", err)
+ }
}
-
p.SetPaginationHeader(result.Total, page, size)
p.Data["json"] = result.Projects
p.ServeJSON()
}
-func (p *ProjectAPI) populateProperties(project *models.Project) {
+func (p *ProjectAPI) populateProperties(project *models.Project) error {
if p.SecurityCtx.IsAuthenticated() {
roles := p.SecurityCtx.GetProjectRoles(project.ProjectID)
if len(roles) != 0 {
@@ -426,9 +480,8 @@ func (p *ProjectAPI) populateProperties(project *models.Project) {
ProjectIDs: []int64{project.ProjectID},
})
if err != nil {
- log.Errorf("failed to get total of repositories of project %d: %v", project.ProjectID, err)
- p.SendInternalServerError(errors.New(""))
- return
+ err = errors.Wrap(err, fmt.Sprintf("get repo count of project %d failed", project.ProjectID))
+ return err
}
project.RepoCount = total
@@ -437,13 +490,13 @@ func (p *ProjectAPI) populateProperties(project *models.Project) {
if config.WithChartMuseum() {
count, err := chartController.GetCountOfCharts([]string{project.Name})
if err != nil {
- log.Errorf("Failed to get total of charts under project %s: %v", project.Name, err)
- p.SendInternalServerError(errors.New(""))
- return
+ err = errors.Wrap(err, fmt.Sprintf("get chart count of project %d failed", project.ProjectID))
+ return err
}
project.ChartCount = count
}
+ return nil
}
// Put ...
@@ -460,7 +513,8 @@ func (p *ProjectAPI) Put() {
if err := p.ProjectMgr.Update(p.project.ProjectID,
&models.Project{
- Metadata: req.Metadata,
+ Metadata: req.Metadata,
+ CVEWhitelist: req.CVEWhitelist,
}); err != nil {
p.ParseAndHandleError(fmt.Sprintf("failed to update project %d",
p.project.ProjectID), err)
@@ -530,6 +584,37 @@ func (p *ProjectAPI) Logs() {
p.ServeJSON()
}
+// Summary returns the summary of the project
+func (p *ProjectAPI) Summary() {
+ if !p.requireAccess(rbac.ActionRead) {
+ return
+ }
+
+ if err := p.populateProperties(p.project); err != nil {
+ log.Warningf("populate project properties failed with: %+v", err)
+ }
+
+ summary := &models.ProjectSummary{
+ RepoCount: p.project.RepoCount,
+ ChartCount: p.project.ChartCount,
+ }
+
+ var wg sync.WaitGroup
+ for _, fn := range []func(int64, *models.ProjectSummary){getProjectQuotaSummary, getProjectMemberSummary} {
+ fn := fn
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ fn(p.project.ProjectID, summary)
+ }()
+ }
+ wg.Wait()
+
+ p.Data["json"] = summary
+ p.ServeJSON()
+}
+
// TODO move this to package models
func validateProjectReq(req *models.ProjectRequest) error {
pn := req.Name
@@ -550,3 +635,76 @@ func validateProjectReq(req *models.ProjectRequest) error {
req.Metadata = metas
return nil
}
+
+func projectQuotaHardLimits(req *models.ProjectRequest, setting *models.QuotaSetting) (types.ResourceList, error) {
+ hardLimits := types.ResourceList{}
+ if req.CountLimit != nil {
+ hardLimits[types.ResourceCount] = *req.CountLimit
+ } else {
+ hardLimits[types.ResourceCount] = setting.CountPerProject
+ }
+
+ if req.StorageLimit != nil {
+ hardLimits[types.ResourceStorage] = *req.StorageLimit
+ } else {
+ hardLimits[types.ResourceStorage] = setting.StoragePerProject
+ }
+
+ if err := quota.Validate("project", hardLimits); err != nil {
+ return nil, err
+ }
+
+ return hardLimits, nil
+}
+
+func getProjectQuotaSummary(projectID int64, summary *models.ProjectSummary) {
+ if !config.QuotaPerProjectEnable() {
+ log.Debug("Quota per project disabled")
+ return
+ }
+
+ quotas, err := dao.ListQuotas(&models.QuotaQuery{Reference: "project", ReferenceID: strconv.FormatInt(projectID, 10)})
+ if err != nil {
+ log.Debugf("failed to get quota for project: %d", projectID)
+ return
+ }
+
+ if len(quotas) == 0 {
+ log.Debugf("quota not found for project: %d", projectID)
+ return
+ }
+
+ quota := quotas[0]
+
+ summary.Quota.Hard, _ = types.NewResourceList(quota.Hard)
+ summary.Quota.Used, _ = types.NewResourceList(quota.Used)
+}
+
+func getProjectMemberSummary(projectID int64, summary *models.ProjectSummary) {
+ var wg sync.WaitGroup
+
+ for _, e := range []struct {
+ role int
+ count *int64
+ }{
+ {common.RoleProjectAdmin, &summary.ProjectAdminCount},
+ {common.RoleMaster, &summary.MasterCount},
+ {common.RoleDeveloper, &summary.DeveloperCount},
+ {common.RoleGuest, &summary.GuestCount},
+ } {
+ wg.Add(1)
+ go func(role int, count *int64) {
+ defer wg.Done()
+
+ total, err := project.GetTotalOfProjectMembers(projectID, role)
+ if err != nil {
+ log.Debugf("failed to get total of project members of role %d", role)
+ return
+ }
+
+ *count = total
+ }(e.role, e.count)
+ }
+
+ wg.Wait()
+}
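
The defaulting in projectQuotaHardLimits above is the crux of the new creation flow: limits supplied in the request win, anything missing falls back to the per-project setting, and non-admins always get the settings. A self-contained sketch of that resolution using plain maps (the -1 values mirror the "unlimited" defaults seen in the tests):

package main

import "fmt"

// hardLimitsFor is a stand-in for projectQuotaHardLimits: request values
// override the configured per-project defaults, field by field.
func hardLimitsFor(countReq, storageReq *int64, countDefault, storageDefault int64) map[string]int64 {
	limits := map[string]int64{"count": countDefault, "storage": storageDefault}
	if countReq != nil {
		limits["count"] = *countReq
	}
	if storageReq != nil {
		limits["storage"] = *storageReq
	}
	return limits
}

func main() {
	count := int64(100)
	// Only the count is supplied by the request; storage falls back to the
	// per-project default (-1 meaning unlimited).
	fmt.Println(hardLimitsFor(&count, nil, -1, -1)) // map[count:100 storage:-1]
}
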
diff --git a/src/core/api/project_test.go b/src/core/api/project_test.go
index 8f42ed35d..2c2d3d8fe 100644
--- a/src/core/api/project_test.go
+++ b/src/core/api/project_test.go
@@ -22,7 +22,7 @@ import (
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
- "github.com/goharbor/harbor/tests/apitests/apilib"
+ "github.com/goharbor/harbor/src/testing/apitests/apilib"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -30,6 +30,42 @@ import (
var addProject *apilib.ProjectReq
var addPID int
+func addProjectByName(apiTest *testapi, projectName string) (int32, error) {
+ req := apilib.ProjectReq{ProjectName: projectName}
+ code, err := apiTest.ProjectsPost(*admin, req)
+ if err != nil {
+ return 0, err
+ }
+ if code != http.StatusCreated {
+ return 0, fmt.Errorf("created failed")
+ }
+
+ code, projects, err := apiTest.ProjectsGet(&apilib.ProjectQuery{Name: projectName}, *admin)
+ if err != nil {
+ return 0, err
+ }
+ if code != http.StatusOK {
+ return 0, fmt.Errorf("get failed")
+ }
+
+ if len(projects) == 0 {
+ return 0, fmt.Errorf("oops")
+ }
+
+ return projects[0].ProjectId, nil
+}
+
+func deleteProjectByIDs(apiTest *testapi, projectIDs ...int32) error {
+ for _, projectID := range projectIDs {
+ _, err := apiTest.ProjectsDelete(*admin, fmt.Sprintf("%d", projectID))
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
func InitAddPro() {
addProject = &apilib.ProjectReq{ProjectName: "add_project", Metadata: map[string]string{models.ProMetaPublic: "true"}}
}
@@ -90,6 +126,31 @@ func TestAddProject(t *testing.T) {
assert.Equal(int(400), result, "case 4 : response code = 400 : Project name is illegal in length ")
}
+ // case 5: response code = 201 : expect project creation with quota success.
+ fmt.Println("case 5 : response code = 201 : expect project creation with quota success ")
+
+ var countLimit, storageLimit int64
+ countLimit, storageLimit = 100, 10
+ result, err = apiTest.ProjectsPost(*admin, apilib.ProjectReq{ProjectName: "with_quota", CountLimit: &countLimit, StorageLimit: &storageLimit})
+ if err != nil {
+ t.Error("Error while creat project", err.Error())
+ t.Log(err)
+ } else {
+ assert.Equal(int(201), result, "case 5 : response code = 201 : expect project creation with quota success ")
+ }
+
+ // case 6: response code = 400 : bad quota value, create project fail
+ fmt.Println("case 6: response code = 400 : bad quota value, create project fail")
+
+ countLimit, storageLimit = 100, -2
+ result, err = apiTest.ProjectsPost(*admin, apilib.ProjectReq{ProjectName: "with_quota", CountLimit: &countLimit, StorageLimit: &storageLimit})
+ if err != nil {
+ t.Error("Error while creat project", err.Error())
+ t.Log(err)
+ } else {
+ assert.Equal(int(400), result, "case 6: response code = 400 : bad quota value, create project fail")
+ }
+
fmt.Printf("\n")
}
@@ -111,7 +172,7 @@ func TestListProjects(t *testing.T) {
}()
// ----------------------------case 1 : Response Code=200----------------------------//
- fmt.Println("case 1: respose code:200")
+ fmt.Println("case 1: response code:200")
httpStatusCode, result, err := apiTest.ProjectsGet(
&apilib.ProjectQuery{
Name: addProject.ProjectName,
@@ -202,7 +263,7 @@ func TestProGetByID(t *testing.T) {
}()
// ----------------------------case 1 : Response Code=200----------------------------//
- fmt.Println("case 1: respose code:200")
+ fmt.Println("case 1: response code:200")
httpStatusCode, result, err := apiTest.ProjectsGetByPID(projectID)
if err != nil {
t.Error("Error while search project by proID", err.Error())
@@ -230,17 +291,17 @@ func TestDeleteProject(t *testing.T) {
t.Error("Error while delete project", err.Error())
t.Log(err)
} else {
- assert.Equal(int(401), httpStatusCode, "Case 1: Project creation status should be 401")
+ assert.Equal(int(401), httpStatusCode, "Case 1: Project deletion status should be 401")
}
// --------------------------case 2: Response Code=200---------------------------------//
- fmt.Println("case2: respose code:200")
+ fmt.Println("case2: response code:200")
httpStatusCode, err = apiTest.ProjectsDelete(*admin, projectID)
if err != nil {
t.Error("Error while delete project", err.Error())
t.Log(err)
} else {
- assert.Equal(int(200), httpStatusCode, "Case 2: Project creation status should be 200")
+ assert.Equal(int(200), httpStatusCode, "Case 2: Project deletion status should be 200")
}
// --------------------------case 3: Response Code=404,Project does not exist---------------------------------//
@@ -251,7 +312,7 @@ func TestDeleteProject(t *testing.T) {
t.Error("Error while delete project", err.Error())
t.Log(err)
} else {
- assert.Equal(int(404), httpStatusCode, "Case 3: Project creation status should be 404")
+ assert.Equal(int(404), httpStatusCode, "Case 3: Project deletion status should be 404")
}
// --------------------------case 4: Response Code=400,Invalid project id.---------------------------------//
@@ -262,7 +323,7 @@ func TestDeleteProject(t *testing.T) {
t.Error("Error while delete project", err.Error())
t.Log(err)
} else {
- assert.Equal(int(400), httpStatusCode, "Case 4: Project creation status should be 400")
+ assert.Equal(int(400), httpStatusCode, "Case 4: Project deletion status should be 400")
}
fmt.Printf("\n")
@@ -274,7 +335,7 @@ func TestProHead(t *testing.T) {
apiTest := newHarborAPI()
// ----------------------------case 1 : Response Code=200----------------------------//
- fmt.Println("case 1: respose code:200")
+ fmt.Println("case 1: response code:200")
httpStatusCode, err := apiTest.ProjectsHead(*admin, "library")
if err != nil {
t.Error("Error while search project by proName", err.Error())
@@ -284,7 +345,7 @@ func TestProHead(t *testing.T) {
}
// ----------------------------case 2 : Response Code=404:Project name does not exist.----------------------------//
- fmt.Println("case 2: respose code:404,Project name does not exist.")
+ fmt.Println("case 2: response code:404,Project name does not exist.")
httpStatusCode, err = apiTest.ProjectsHead(*admin, "libra")
if err != nil {
t.Error("Error while search project by proName", err.Error())
@@ -308,22 +369,22 @@ func TestPut(t *testing.T) {
},
}
- fmt.Println("case 1: respose code:200")
+ fmt.Println("case 1: response code:200")
code, err := apiTest.ProjectsPut(*admin, "1", project)
require.Nil(t, err)
assert.Equal(int(200), code)
- fmt.Println("case 2: respose code:401, User need to log in first.")
+ fmt.Println("case 2: response code:401, User need to log in first.")
code, err = apiTest.ProjectsPut(*unknownUsr, "1", project)
require.Nil(t, err)
assert.Equal(int(401), code)
- fmt.Println("case 3: respose code:400, Invalid project id")
+ fmt.Println("case 3: response code:400, Invalid project id")
code, err = apiTest.ProjectsPut(*admin, "cc", project)
require.Nil(t, err)
assert.Equal(int(400), code)
- fmt.Println("case 4: respose code:404, Not found the project")
+ fmt.Println("case 4: response code:404, Not found the project")
code, err = apiTest.ProjectsPut(*admin, "1234", project)
require.Nil(t, err)
assert.Equal(int(404), code)
@@ -346,7 +407,7 @@ func TestProjectLogsFilter(t *testing.T) {
}
// -------------------case1: Response Code=200------------------------------//
- fmt.Println("case 1: respose code:200")
+ fmt.Println("case 1: response code:200")
projectID := "1"
httpStatusCode, _, err := apiTest.ProjectLogs(*admin, projectID, query)
if err != nil {
@@ -356,7 +417,7 @@ func TestProjectLogsFilter(t *testing.T) {
assert.Equal(int(200), httpStatusCode, "httpStatusCode should be 200")
}
// -------------------case2: Response Code=401:User need to log in first.------------------------------//
- fmt.Println("case 2: respose code:401:User need to log in first.")
+ fmt.Println("case 2: response code:401:User need to log in first.")
projectID = "1"
httpStatusCode, _, err = apiTest.ProjectLogs(*unknownUsr, projectID, query)
if err != nil {
@@ -366,7 +427,7 @@ func TestProjectLogsFilter(t *testing.T) {
assert.Equal(int(401), httpStatusCode, "httpStatusCode should be 401")
}
// -------------------case3: Response Code=404:Project does not exist.-------------------------//
- fmt.Println("case 3: respose code:404:Illegal format of provided ID value.")
+ fmt.Println("case 3: response code:404:Illegal format of provided ID value.")
projectID = "11111"
httpStatusCode, _, err = apiTest.ProjectLogs(*admin, projectID, query)
if err != nil {
@@ -423,3 +484,30 @@ func TestDeletable(t *testing.T) {
assert.Equal(t, http.StatusOK, code)
assert.False(t, del)
}
+
+func TestProjectSummary(t *testing.T) {
+ fmt.Println("\nTest for Project Summary API")
+ assert := assert.New(t)
+
+ apiTest := newHarborAPI()
+
+ projectID, err := addProjectByName(apiTest, "project-summary")
+ assert.Nil(err)
+ defer func() {
+ deleteProjectByIDs(apiTest, projectID)
+ }()
+
+ // ----------------------------case 1 : Response Code=200----------------------------//
+ fmt.Println("case 1: response code:200")
+ httpStatusCode, summary, err := apiTest.ProjectSummary(*admin, fmt.Sprintf("%d", projectID))
+ if err != nil {
+ t.Error("Error while search project by proName", err.Error())
+ t.Log(err)
+ } else {
+ assert.Equal(int(200), httpStatusCode, "httpStatusCode should be 200")
+ assert.Equal(int64(1), summary.ProjectAdminCount)
+ assert.Equal(map[string]int64{"count": -1, "storage": -1}, summary.Quota.Hard)
+ }
+
+ fmt.Printf("\n")
+}
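
The handler and the assertions in TestProjectSummary above imply the rough shape of the summary payload. A sketch of models.ProjectSummary as it is used here — the real definition lives outside this diff, so the field types and JSON tags are guesses:

// Approximate shape of models.ProjectSummary, inferred from Summary(),
// getProjectQuotaSummary and getProjectMemberSummary above.
type ProjectSummary struct {
	RepoCount         int64 `json:"repo_count"`
	ChartCount        int64 `json:"chart_count"`
	ProjectAdminCount int64 `json:"project_admin_count"`
	MasterCount       int64 `json:"master_count"`
	DeveloperCount    int64 `json:"developer_count"`
	GuestCount        int64 `json:"guest_count"`

	Quota struct {
		Hard map[string]int64 `json:"hard"` // e.g. {"count": -1, "storage": -1}
		Used map[string]int64 `json:"used"`
	} `json:"quota"`
}
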
diff --git a/src/core/api/projectmember.go b/src/core/api/projectmember.go
index 0ede80dd4..5495ac26a 100644
--- a/src/core/api/projectmember.go
+++ b/src/core/api/projectmember.go
@@ -23,11 +23,13 @@ import (
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/dao/group"
"github.com/goharbor/harbor/src/common/dao/project"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/rbac"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/auth"
+ "github.com/goharbor/harbor/src/core/config"
)
// ProjectMemberAPI handles request to /api/projects/{}/members/{}
@@ -37,6 +39,7 @@ type ProjectMemberAPI struct {
entityID int
entityType string
project *models.Project
+ groupType int
}
// ErrDuplicateProjectMember ...
@@ -84,6 +87,15 @@ func (pma *ProjectMemberAPI) Prepare() {
return
}
pma.id = int(pmid)
+ authMode, err := config.AuthMode()
+ if err != nil {
+ pma.SendInternalServerError(fmt.Errorf("failed to get authentication mode"))
+ }
+ if authMode == common.LDAPAuth {
+ pma.groupType = common.LDAPGroupType
+ } else if authMode == common.HTTPAuth {
+ pma.groupType = common.HTTPGroupType
+ }
}
func (pma *ProjectMemberAPI) requireAccess(action rbac.Action) bool {
@@ -131,7 +143,7 @@ func (pma *ProjectMemberAPI) Get() {
return
}
if len(memberList) == 0 {
- pma.SendNotFoundError(fmt.Errorf("The project member does not exit, pmid:%v", pma.id))
+ pma.SendNotFoundError(fmt.Errorf("The project member does not exist, pmid:%v", pma.id))
return
}
@@ -161,10 +173,10 @@ func (pma *ProjectMemberAPI) Post() {
pma.SendBadRequestError(fmt.Errorf("Failed to add project member, error: %v", err))
return
} else if err == auth.ErrDuplicateLDAPGroup {
- pma.SendConflictError(fmt.Errorf("Failed to add project member, already exist LDAP group or project member, groupDN:%v", request.MemberGroup.LdapGroupDN))
+ pma.SendConflictError(fmt.Errorf("Failed to add project member, already exist group or project member, groupDN:%v", request.MemberGroup.LdapGroupDN))
return
} else if err == ErrDuplicateProjectMember {
- pma.SendConflictError(fmt.Errorf("Failed to add project member, already exist LDAP group or project member, groupMemberID:%v", request.MemberGroup.ID))
+ pma.SendConflictError(fmt.Errorf("Failed to add project member, already exist group or project member, groupMemberID:%v", request.MemberGroup.ID))
return
} else if err == ErrInvalidRole {
pma.SendBadRequestError(fmt.Errorf("Invalid role ID, role ID %v", request.Role))
@@ -220,12 +232,13 @@ func AddProjectMember(projectID int64, request models.MemberReq) (int, error) {
var member models.Member
member.ProjectID = projectID
member.Role = request.Role
+ member.EntityType = common.GroupMember
+
if request.MemberUser.UserID > 0 {
member.EntityID = request.MemberUser.UserID
member.EntityType = common.UserMember
} else if request.MemberGroup.ID > 0 {
member.EntityID = request.MemberGroup.ID
- member.EntityType = common.GroupMember
} else if len(request.MemberUser.Username) > 0 {
var userID int
member.EntityType = common.UserMember
@@ -243,14 +256,28 @@ func AddProjectMember(projectID int64, request models.MemberReq) (int, error) {
}
member.EntityID = userID
} else if len(request.MemberGroup.LdapGroupDN) > 0 {
-
+ request.MemberGroup.GroupType = common.LDAPGroupType
// If groupname provided, use the provided groupname to name this group
groupID, err := auth.SearchAndOnBoardGroup(request.MemberGroup.LdapGroupDN, request.MemberGroup.GroupName)
if err != nil {
return 0, err
}
member.EntityID = groupID
- member.EntityType = common.GroupMember
+ } else if len(request.MemberGroup.GroupName) > 0 && request.MemberGroup.GroupType == common.HTTPGroupType {
+ ugs, err := group.QueryUserGroup(models.UserGroup{GroupName: request.MemberGroup.GroupName, GroupType: common.HTTPGroupType})
+ if err != nil {
+ return 0, err
+ }
+ if len(ugs) == 0 {
+ groupID, err := auth.SearchAndOnBoardGroup(request.MemberGroup.GroupName, "")
+ if err != nil {
+ return 0, err
+ }
+ member.EntityID = groupID
+ } else {
+ member.EntityID = ugs[0].ID
+ }
+
}
if member.EntityID <= 0 {
return 0, fmt.Errorf("Can not get valid member entity, request: %+v", request)
diff --git a/src/core/api/projectmember_test.go b/src/core/api/projectmember_test.go
index 6cbef32ea..88e47851f 100644
--- a/src/core/api/projectmember_test.go
+++ b/src/core/api/projectmember_test.go
@@ -20,6 +20,7 @@ import (
"testing"
"github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/dao/group"
"github.com/goharbor/harbor/src/common/dao/project"
"github.com/goharbor/harbor/src/common/models"
)
@@ -52,6 +53,15 @@ func TestProjectMemberAPI_Get(t *testing.T) {
},
code: http.StatusBadRequest,
},
+ // 200
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: fmt.Sprintf("/api/projects/1/members/%d", projAdminPMID),
+ credential: admin,
+ },
+ code: http.StatusOK,
+ },
// 404
{
request: &testingRequest{
@@ -85,6 +95,21 @@ func TestProjectMemberAPI_Post(t *testing.T) {
t.Errorf("Error occurred when create user: %v", err)
}
+ ugList, err := group.QueryUserGroup(models.UserGroup{GroupType: 1, LdapGroupDN: "cn=harbor_users,ou=sample,ou=vmware,dc=harbor,dc=com"})
+ if err != nil {
+ t.Errorf("Failed to query the user group")
+ }
+ if len(ugList) <= 0 {
+ t.Errorf("Failed to query the user group")
+ }
+ httpUgList, err := group.QueryUserGroup(models.UserGroup{GroupType: 2, GroupName: "vsphere.local\\administrators"})
+ if err != nil {
+ t.Errorf("Failed to query the user group")
+ }
+ if len(httpUgList) <= 0 {
+ t.Errorf("Failed to query the user group")
+ }
+
cases := []*codeCheckingCase{
// 401
{
@@ -158,6 +183,66 @@ func TestProjectMemberAPI_Post(t *testing.T) {
},
code: http.StatusOK,
},
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/members",
+ credential: admin,
+ bodyJSON: &models.MemberReq{
+ Role: 1,
+ MemberGroup: models.UserGroup{
+ GroupType: 1,
+ LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com",
+ },
+ },
+ },
+ code: http.StatusBadRequest,
+ },
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/members",
+ credential: admin,
+ bodyJSON: &models.MemberReq{
+ Role: 1,
+ MemberGroup: models.UserGroup{
+ GroupType: 2,
+ ID: httpUgList[0].ID,
+ },
+ },
+ },
+ code: http.StatusCreated,
+ },
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/members",
+ credential: admin,
+ bodyJSON: &models.MemberReq{
+ Role: 1,
+ MemberGroup: models.UserGroup{
+ GroupType: 1,
+ ID: ugList[0].ID,
+ },
+ },
+ },
+ code: http.StatusCreated,
+ },
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/projects/1/members",
+ credential: admin,
+ bodyJSON: &models.MemberReq{
+ Role: 1,
+ MemberGroup: models.UserGroup{
+ GroupType: 2,
+ GroupName: "vsphere.local/users",
+ },
+ },
+ },
+ code: http.StatusBadRequest,
+ },
}
runCodeCheckingCases(t, cases...)
}
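
For illustration, the new HTTP-group branch of AddProjectMember can be exercised with a request like the one below; the JSON field names are my reading of models.MemberReq and models.UserGroup, and the host and credentials are placeholders:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// role_id 1 = common.RoleProjectAdmin; group_type 2 = common.HTTPGroupType.
	// A group name already onboarded (as "vsphere.local\administrators" is in
	// the test fixtures) is resolved via QueryUserGroup; an unknown name goes
	// through SearchAndOnBoardGroup and may come back as 400.
	body := []byte(`{"role_id": 1, "member_group": {"group_type": 2, "group_name": "vsphere.local\\administrators"}}`)

	req, _ := http.NewRequest(http.MethodPost, "https://harbor.local/api/projects/1/members", bytes.NewReader(body))
	req.SetBasicAuth("admin", "Harbor12345")
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode) // 201 on success
}
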
diff --git a/src/core/api/quota.go b/src/core/api/quota.go
new file mode 100644
index 000000000..eb55a6df3
--- /dev/null
+++ b/src/core/api/quota.go
@@ -0,0 +1,155 @@
+// Copyright 2018 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+ "fmt"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/quota"
+ "github.com/pkg/errors"
+)
+
+// QuotaAPI handles request to /api/quotas/
+type QuotaAPI struct {
+ BaseController
+ quota *models.Quota
+}
+
+// Prepare validates the URL and the user
+func (qa *QuotaAPI) Prepare() {
+ qa.BaseController.Prepare()
+
+ if !qa.SecurityCtx.IsAuthenticated() {
+ qa.SendUnAuthorizedError(errors.New("Unauthorized"))
+ return
+ }
+
+ if !qa.SecurityCtx.IsSysAdmin() {
+ qa.SendForbiddenError(errors.New(qa.SecurityCtx.GetUsername()))
+ return
+ }
+
+ if len(qa.GetStringFromPath(":id")) != 0 {
+ id, err := qa.GetInt64FromPath(":id")
+ if err != nil || id <= 0 {
+ text := "invalid quota ID: "
+ if err != nil {
+ text += err.Error()
+ } else {
+ text += fmt.Sprintf("%d", id)
+ }
+ qa.SendBadRequestError(errors.New(text))
+ return
+ }
+
+ quota, err := dao.GetQuota(id)
+ if err != nil {
+ qa.SendInternalServerError(fmt.Errorf("failed to get quota %d, error: %v", id, err))
+ return
+ }
+
+ if quota == nil {
+ qa.SendNotFoundError(fmt.Errorf("quota %d not found", id))
+ return
+ }
+
+ qa.quota = quota
+ }
+}
+
+// Get returns quota by id
+func (qa *QuotaAPI) Get() {
+ query := &models.QuotaQuery{
+ ID: qa.quota.ID,
+ }
+
+ quotas, err := dao.ListQuotas(query)
+ if err != nil {
+ qa.SendInternalServerError(fmt.Errorf("failed to get quota %d, error: %v", qa.quota.ID, err))
+ return
+ }
+
+ if len(quotas) == 0 {
+ qa.SendNotFoundError(fmt.Errorf("quota %d not found", qa.quota.ID))
+ return
+ }
+
+ qa.Data["json"] = quotas[0]
+ qa.ServeJSON()
+}
+
+// Put updates the quota
+func (qa *QuotaAPI) Put() {
+ var req *models.QuotaUpdateRequest
+ if err := qa.DecodeJSONReq(&req); err != nil {
+ qa.SendBadRequestError(err)
+ return
+ }
+
+ if err := quota.Validate(qa.quota.Reference, req.Hard); err != nil {
+ qa.SendBadRequestError(err)
+ return
+ }
+
+ mgr, err := quota.NewManager(qa.quota.Reference, qa.quota.ReferenceID)
+ if err != nil {
+ qa.SendInternalServerError(fmt.Errorf("failed to create quota manager, error: %v", err))
+ return
+ }
+
+ if err := mgr.UpdateQuota(req.Hard); err != nil {
+ qa.SendInternalServerError(fmt.Errorf("failed to update hard limits of the quota, error: %v", err))
+ return
+ }
+}
+
+// List returns quotas by query
+func (qa *QuotaAPI) List() {
+ page, size, err := qa.GetPaginationParams()
+ if err != nil {
+ qa.SendBadRequestError(err)
+ return
+ }
+
+ query := &models.QuotaQuery{
+ Reference: qa.GetString("reference"),
+ ReferenceID: qa.GetString("reference_id"),
+ Pagination: models.Pagination{
+ Page: page,
+ Size: size,
+ },
+ Sorting: models.Sorting{
+ Sort: qa.GetString("sort"),
+ },
+ }
+
+ total, err := dao.GetTotalOfQuotas(query)
+ if err != nil {
+ qa.SendInternalServerError(fmt.Errorf("failed to query database for total of quotas, error: %v", err))
+ return
+ }
+
+ quotas, err := dao.ListQuotas(query)
+ if err != nil {
+ qa.SendInternalServerError(fmt.Errorf("failed to query database for quotas, error: %v", err))
+ return
+ }
+
+ qa.SetPaginationHeader(total, page, size)
+ qa.Data["json"] = quotas
+ qa.ServeJSON()
+}
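
The query parameters read by QuotaAPI.List() — reference, reference_id, sort, plus the standard pagination pair — translate into a straightforward paged client. A hedged sketch (host, credentials, and the sort value are assumptions; X-Total-Count is what SetPaginationHeader emits elsewhere in this API layer):

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	params := url.Values{}
	params.Set("reference", "project")
	params.Set("sort", "-creation_time") // assumed sortable column
	params.Set("page", "1")
	params.Set("page_size", "20")

	req, _ := http.NewRequest(http.MethodGet, "https://harbor.local/api/quotas?"+params.Encode(), nil)
	req.SetBasicAuth("admin", "Harbor12345")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The total lets the caller decide whether another page is needed.
	fmt.Println("status:", resp.StatusCode, "total:", resp.Header.Get("X-Total-Count"))
}
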
diff --git a/src/core/api/quota/chart/chart.go b/src/core/api/quota/chart/chart.go
new file mode 100644
index 000000000..f3ebc1f11
--- /dev/null
+++ b/src/core/api/quota/chart/chart.go
@@ -0,0 +1,226 @@
+// Copyright 2018 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chart
+
+import (
+ "fmt"
+ "github.com/goharbor/harbor/src/chartserver"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ common_quota "github.com/goharbor/harbor/src/common/quota"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/api"
+ quota "github.com/goharbor/harbor/src/core/api/quota"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/promgr"
+ "github.com/pkg/errors"
+ "net/url"
+ "strings"
+ "sync"
+)
+
+// Migrator ...
+type Migrator struct {
+ pm promgr.ProjectManager
+}
+
+// NewChartMigrator returns a new Migrator for chart data.
+func NewChartMigrator(pm promgr.ProjectManager) quota.QuotaMigrator {
+ migrator := Migrator{
+ pm: pm,
+ }
+ return &migrator
+}
+
+var (
+ controller *chartserver.Controller
+ controllerErr error
+ controllerOnce sync.Once
+)
+
+// Ping ...
+func (rm *Migrator) Ping() error {
+ return api.HealthCheckerRegistry["chartmuseum"].Check()
+}
+
+// Dump ...
+// Depends on the DB to enumerate projects, as chartmuseum cannot list all namespaces.
+func (rm *Migrator) Dump() ([]quota.ProjectInfo, error) {
+ var (
+ projects []quota.ProjectInfo
+ wg sync.WaitGroup
+ err error
+ )
+
+ all, err := dao.GetProjects(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ wg.Add(len(all))
+ errChan := make(chan error, 1)
+ infoChan := make(chan interface{})
+ done := make(chan bool, 1)
+
+ go func() {
+ defer func() {
+ done <- true
+ }()
+
+ for {
+ select {
+ case result := <-infoChan:
+ if result == nil {
+ return
+ }
+ project, ok := result.(quota.ProjectInfo)
+ if ok {
+ projects = append(projects, project)
+ }
+
+ case e := <-errChan:
+ if err == nil {
+ err = errors.Wrap(e, "quota sync error on getting info of project")
+ } else {
+ err = errors.Wrap(e, err.Error())
+ }
+ }
+ }
+ }()
+
+ for _, project := range all {
+ go func(project *models.Project) {
+ defer wg.Done()
+
+ var repos []quota.RepoData
+ ctr, err := chartController()
+ if err != nil {
+ errChan <- err
+ return
+ }
+
+ chartInfo, err := ctr.ListCharts(project.Name)
+ if err != nil {
+ errChan <- err
+ return
+ }
+
+ // repo
+ for _, chart := range chartInfo {
+ var afs []*models.Artifact
+ chartVersions, err := ctr.GetChart(project.Name, chart.Name)
+ if err != nil {
+ errChan <- err
+ continue
+ }
+ for _, chart := range chartVersions {
+ af := &models.Artifact{
+ PID: project.ProjectID,
+ Repo: chart.Name,
+ Tag: chart.Version,
+ Digest: chart.Digest,
+ Kind: "Chart",
+ }
+ afs = append(afs, af)
+ }
+ repoData := quota.RepoData{
+ Name: project.Name,
+ Afs: afs,
+ }
+ repos = append(repos, repoData)
+ }
+
+ projectInfo := quota.ProjectInfo{
+ Name: project.Name,
+ Repos: repos,
+ }
+
+ infoChan <- projectInfo
+ }(project)
+ }
+
+ wg.Wait()
+ close(infoChan)
+
+ <-done
+
+ if err != nil {
+ return nil, err
+ }
+
+ return projects, nil
+}
+
+// Usage ...
+// Chart usage only covers the count; storage size is not tracked for charts.
+func (rm *Migrator) Usage(projects []quota.ProjectInfo) ([]quota.ProjectUsage, error) {
+ var pros []quota.ProjectUsage
+ for _, project := range projects {
+ var count int64
+ // usage count
+ for _, repo := range project.Repos {
+ count = count + int64(len(repo.Afs))
+ }
+ proUsage := quota.ProjectUsage{
+ Project: project.Name,
+ Used: common_quota.ResourceList{
+ common_quota.ResourceCount: count,
+ common_quota.ResourceStorage: 0,
+ },
+ }
+ pros = append(pros, proUsage)
+ }
+ return pros, nil
+
+}
+
+// Persist ...
+// Chart data is not persisted into the DB.
+func (rm *Migrator) Persist(projects []quota.ProjectInfo) error {
+ return nil
+}
+
+func chartController() (*chartserver.Controller, error) {
+ controllerOnce.Do(func() {
+ addr, err := config.GetChartMuseumEndpoint()
+ if err != nil {
+ controllerErr = fmt.Errorf("failed to get the endpoint URL of chart storage server: %s", err.Error())
+ return
+ }
+
+ addr = strings.TrimSuffix(addr, "/")
+ url, err := url.Parse(addr)
+ if err != nil {
+ controllerErr = errors.New("endpoint URL of chart storage server is malformed")
+ return
+ }
+
+ ctr, err := chartserver.NewController(url)
+ if err != nil {
+ controllerErr = errors.New("failed to initialize chart API controller")
+ }
+
+ controller = ctr
+
+ log.Debugf("Chart storage server is set to %s", url.String())
+ log.Info("API controller for chart repository server is successfully initialized")
+ })
+
+ return controller, controllerErr
+}
+
+func init() {
+ quota.Register("chart", NewChartMigrator)
+}
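
Dump() above fans a goroutine out per project, funnels results and errors into a single collector, and folds every failure into one returned error; the registry migrator below uses the identical structure. A stripped-down sketch of the pattern with plain strings instead of project info:

package main

import (
	"fmt"
	"sync"
)

// fanOutCollect runs work on every input concurrently and gathers results on
// one collector goroutine, mirroring the Dump() structure above.
func fanOutCollect(inputs []string, work func(string) (string, error)) ([]string, error) {
	var (
		results  []string
		firstErr error
		wg       sync.WaitGroup
	)
	resultChan := make(chan string)
	errChan := make(chan error, 1)
	done := make(chan bool, 1)

	go func() {
		defer func() { done <- true }()
		for {
			select {
			case r, ok := <-resultChan:
				if !ok {
					// drain any error still buffered before exiting
					select {
					case e := <-errChan:
						if firstErr == nil {
							firstErr = e
						}
					default:
					}
					return
				}
				results = append(results, r)
			case e := <-errChan:
				if firstErr == nil {
					firstErr = e
				}
			}
		}
	}()

	wg.Add(len(inputs))
	for _, in := range inputs {
		go func(in string) {
			defer wg.Done()
			r, err := work(in)
			if err != nil {
				errChan <- err
				return
			}
			resultChan <- r
		}(in)
	}

	wg.Wait()
	close(resultChan)
	<-done // wait for the collector to observe the close

	return results, firstErr
}

func main() {
	out, err := fanOutCollect([]string{"a", "b", "c"}, func(s string) (string, error) {
		return s + "!", nil
	})
	fmt.Println(out, err) // [a! b! c!] <nil> (order may vary)
}
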
diff --git a/src/core/api/quota/migrator.go b/src/core/api/quota/migrator.go
new file mode 100644
index 000000000..bfd2fc164
--- /dev/null
+++ b/src/core/api/quota/migrator.go
@@ -0,0 +1,173 @@
+// Copyright 2018 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/quota"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/promgr"
+ "github.com/goharbor/harbor/src/pkg/types"
+ "strconv"
+)
+
+// QuotaMigrator ...
+type QuotaMigrator interface {
+ // Ping validates the backend service and waits for it to be ready.
+ Ping() error
+
+ // Dump exports all data from the backend services (registry, chartmuseum).
+ Dump() ([]ProjectInfo, error)
+
+ // Usage computes the quota usage of all the projects
+ Usage([]ProjectInfo) ([]ProjectUsage, error)
+
+ // Persist records the data into the DB: the artifact, artifact_blob and blob tables.
+ Persist([]ProjectInfo) error
+}
+
+// ProjectInfo ...
+type ProjectInfo struct {
+ Name string
+ Repos []RepoData
+}
+
+// RepoData ...
+type RepoData struct {
+ Name string
+ Afs []*models.Artifact
+ Afnbs []*models.ArtifactAndBlob
+ Blobs []*models.Blob
+}
+
+// ProjectUsage ...
+type ProjectUsage struct {
+ Project string
+ Used quota.ResourceList
+}
+
+// Instance ...
+type Instance func(promgr.ProjectManager) QuotaMigrator
+
+var adapters = make(map[string]Instance)
+
+// Register ...
+func Register(name string, adapter Instance) {
+ if adapter == nil {
+ panic("quota: Register adapter is nil")
+ }
+ if _, ok := adapters[name]; ok {
+ panic("quota: Register called twice for adapter " + name)
+ }
+ adapters[name] = adapter
+}
+
+// Sync ...
+func Sync(pm promgr.ProjectManager, populate bool) error {
+ totalUsage := make(map[string][]ProjectUsage)
+ for name, instanceFunc := range adapters {
+ if !config.WithChartMuseum() {
+ if name == "chart" {
+ continue
+ }
+ }
+ adapter := instanceFunc(pm)
+ if err := adapter.Ping(); err != nil {
+ return err
+ }
+ data, err := adapter.Dump()
+ if err != nil {
+ return err
+ }
+ usage, err := adapter.Usage(data)
+ if err != nil {
+ return err
+ }
+ totalUsage[name] = usage
+ if populate {
+ if err := adapter.Persist(data); err != nil {
+ return err
+ }
+ }
+ }
+ merged := mergeUsage(totalUsage)
+ if err := ensureQuota(merged); err != nil {
+ return err
+ }
+ return nil
+}
+
+// mergeUsage merges the usage of adapters
+func mergeUsage(total map[string][]ProjectUsage) []ProjectUsage {
+ if !config.WithChartMuseum() {
+ return total["registry"]
+ }
+ regUsgs := total["registry"]
+ chartUsgs := total["chart"]
+
+ var mergedUsage []ProjectUsage
+ temp := make(map[string]quota.ResourceList)
+
+ for _, regUsg := range regUsgs {
+ _, exist := temp[regUsg.Project]
+ if !exist {
+ temp[regUsg.Project] = regUsg.Used
+ mergedUsage = append(mergedUsage, ProjectUsage{
+ Project: regUsg.Project,
+ Used: regUsg.Used,
+ })
+ }
+ }
+ for _, chartUsg := range chartUsgs {
+ var usedTemp quota.ResourceList
+ _, exist := temp[chartUsg.Project]
+ if !exist {
+ usedTemp = chartUsg.Used
+ } else {
+ usedTemp = types.Add(temp[chartUsg.Project], chartUsg.Used)
+ }
+ temp[chartUsg.Project] = usedTemp
+ mergedUsage = append(mergedUsage, ProjectUsage{
+ Project: chartUsg.Project,
+ Used: usedTemp,
+ })
+ }
+ return mergedUsage
+}
+
+// ensureQuota updates the quota and quota usage in the data base.
+func ensureQuota(usages []ProjectUsage) error {
+ var pid int64
+ for _, usage := range usages {
+ project, err := dao.GetProjectByName(usage.Project)
+ if err != nil {
+ log.Error(err)
+ return err
+ }
+ pid = project.ProjectID
+ quotaMgr, err := quota.NewManager("project", strconv.FormatInt(pid, 10))
+ if err != nil {
+ log.Errorf("Error occurred when to new quota manager %v", err)
+ return err
+ }
+ if err := quotaMgr.EnsureQuota(usage.Used); err != nil {
+ log.Errorf("cannot ensure quota for the project: %d, err: %v", pid, err)
+ return err
+ }
+ }
+ return nil
+}
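
Register and Sync form an init-time plugin registry — the same shape database/sql uses for drivers: each backend registers a factory in its init(), and importing the package (even blank-imported) is enough to enable it. A condensed sketch of the mechanism, independent of the Harbor types:

package main

import "fmt"

// migrator is a trimmed stand-in for QuotaMigrator.
type migrator interface {
	Ping() error
	Usage() (int64, error)
}

var adapters = map[string]func() migrator{}

// register panics on a nil or duplicate adapter, like Register above.
func register(name string, factory func() migrator) {
	if factory == nil {
		panic("quota: register factory is nil")
	}
	if _, ok := adapters[name]; ok {
		panic("quota: register called twice for " + name)
	}
	adapters[name] = factory
}

type fake struct{}

func (fake) Ping() error           { return nil }
func (fake) Usage() (int64, error) { return 42, nil }

func init() {
	// In Harbor each backend package does this in its own init(), so the
	// import side effect wires the adapter up.
	register("registry", func() migrator { return fake{} })
}

func main() {
	for name, factory := range adapters {
		m := factory()
		if err := m.Ping(); err != nil {
			fmt.Println(name, "unreachable:", err)
			continue
		}
		used, _ := m.Usage()
		fmt.Println(name, "usage:", used)
	}
}
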
diff --git a/src/core/api/quota/registry/registry.go b/src/core/api/quota/registry/registry.go
new file mode 100644
index 000000000..18a7f87c0
--- /dev/null
+++ b/src/core/api/quota/registry/registry.go
@@ -0,0 +1,433 @@
+// Copyright 2018 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package registry
+
+import (
+ "github.com/docker/distribution/manifest/schema1"
+ "github.com/docker/distribution/manifest/schema2"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ common_quota "github.com/goharbor/harbor/src/common/quota"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/common/utils/registry"
+ "github.com/goharbor/harbor/src/core/api"
+ quota "github.com/goharbor/harbor/src/core/api/quota"
+ "github.com/goharbor/harbor/src/core/promgr"
+ coreutils "github.com/goharbor/harbor/src/core/utils"
+ "github.com/pkg/errors"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Migrator ...
+type Migrator struct {
+ pm promgr.ProjectManager
+}
+
+// NewRegistryMigrator returns a new Migrator.
+func NewRegistryMigrator(pm promgr.ProjectManager) quota.QuotaMigrator {
+ migrator := Migrator{
+ pm: pm,
+ }
+ return &migrator
+}
+
+// Ping ...
+func (rm *Migrator) Ping() error {
+ return api.HealthCheckerRegistry["registry"].Check()
+}
+
+// Dump ...
+func (rm *Migrator) Dump() ([]quota.ProjectInfo, error) {
+ var (
+ projects []quota.ProjectInfo
+ wg sync.WaitGroup
+ err error
+ )
+
+ reposInRegistry, err := api.Catalog()
+ if err != nil {
+ return nil, err
+ }
+
+ // repoMap : map[project_name : []repo list]
+ repoMap := make(map[string][]string)
+ for _, item := range reposInRegistry {
+ projectName := strings.Split(item, "/")[0]
+ pro, err := rm.pm.Get(projectName)
+ if err != nil {
+ log.Errorf("failed to get project %s: %v", projectName, err)
+ continue
+ }
+ _, exist := repoMap[pro.Name]
+ if !exist {
+ repoMap[pro.Name] = []string{item}
+ } else {
+ repos := repoMap[pro.Name]
+ repos = append(repos, item)
+ repoMap[pro.Name] = repos
+ }
+ }
+
+ wg.Add(len(repoMap))
+ errChan := make(chan error, 1)
+ infoChan := make(chan interface{})
+ done := make(chan bool, 1)
+
+ go func() {
+ defer func() {
+ done <- true
+ }()
+
+ for {
+ select {
+ case result := <-infoChan:
+ if result == nil {
+ return
+ }
+ project, ok := result.(quota.ProjectInfo)
+ if ok {
+ projects = append(projects, project)
+ }
+
+ case e := <-errChan:
+ if err == nil {
+ err = errors.Wrap(e, "quota sync error on getting info of project")
+ } else {
+ err = errors.Wrap(e, err.Error())
+ }
+ }
+ }
+ }()
+
+ for project, repos := range repoMap {
+ go func(project string, repos []string) {
+ defer wg.Done()
+ info, err := infoOfProject(project, repos)
+ if err != nil {
+ errChan <- err
+ return
+ }
+ infoChan <- info
+ }(project, repos)
+ }
+
+ wg.Wait()
+ close(infoChan)
+
+ // wait for all project info to be collected
+ <-done
+
+ if err != nil {
+ return nil, err
+ }
+
+ return projects, nil
+}
+
+// Usage ...
+// registry needs to merge the shared blobs of different repositories.
+func (rm *Migrator) Usage(projects []quota.ProjectInfo) ([]quota.ProjectUsage, error) {
+ var pros []quota.ProjectUsage
+
+ for _, project := range projects {
+ var size, count int64
+ var blobs = make(map[string]int64)
+
+ // usage count
+ for _, repo := range project.Repos {
+ count = count + int64(len(repo.Afs))
+ // Because there are shared blobs between repositories, the duplicates must be removed.
+ for _, blob := range repo.Blobs {
+ _, exist := blobs[blob.Digest]
+ if !exist {
+ blobs[blob.Digest] = blob.Size
+ }
+ }
+ }
+ // size
+ for _, item := range blobs {
+ size = size + item
+ }
+
+ proUsage := quota.ProjectUsage{
+ Project: project.Name,
+ Used: common_quota.ResourceList{
+ common_quota.ResourceCount: count,
+ common_quota.ResourceStorage: size,
+ },
+ }
+ pros = append(pros, proUsage)
+ }
+
+ return pros, nil
+}
+
+// Persist ...
+func (rm *Migrator) Persist(projects []quota.ProjectInfo) error {
+ for _, project := range projects {
+ for _, repo := range project.Repos {
+ if err := persistAf(repo.Afs); err != nil {
+ return err
+ }
+ if err := persistAfnbs(repo.Afnbs); err != nil {
+ return err
+ }
+ if err := persistBlob(repo.Blobs); err != nil {
+ return err
+ }
+ }
+ }
+ if err := persistPB(projects); err != nil {
+ return err
+ }
+ return nil
+}
+
+func persistAf(afs []*models.Artifact) error {
+ if len(afs) != 0 {
+ for _, af := range afs {
+ _, err := dao.AddArtifact(af)
+ if err != nil {
+ if err == dao.ErrDupRows {
+ continue
+ }
+ log.Error(err)
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func persistAfnbs(afnbs []*models.ArtifactAndBlob) error {
+ if len(afnbs) != 0 {
+ for _, afnb := range afnbs {
+ _, err := dao.AddArtifactNBlob(afnb)
+ if err != nil {
+ if err == dao.ErrDupRows {
+ continue
+ }
+ log.Error(err)
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func persistBlob(blobs []*models.Blob) error {
+ if len(blobs) != 0 {
+ for _, blob := range blobs {
+ _, err := dao.AddBlob(blob)
+ if err != nil {
+ if err == dao.ErrDupRows {
+ continue
+ }
+ log.Error(err)
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func persistPB(projects []quota.ProjectInfo) error {
+ for _, project := range projects {
+ var blobs = make(map[string]int64)
+ var blobsOfPro []*models.Blob
+ for _, repo := range project.Repos {
+ for _, blob := range repo.Blobs {
+ _, exist := blobs[blob.Digest]
+ if exist {
+ continue
+ }
+ blobs[blob.Digest] = blob.Size
+ blobInDB, err := dao.GetBlob(blob.Digest)
+ if err != nil {
+ log.Error(err)
+ return err
+ }
+ if blobInDB != nil {
+ blobsOfPro = append(blobsOfPro, blobInDB)
+ }
+ }
+ }
+ pro, err := dao.GetProjectByName(project.Name)
+ if err != nil {
+ log.Error(err)
+ return err
+ }
+ _, err = dao.AddBlobsToProject(pro.ProjectID, blobsOfPro...)
+ if err != nil {
+ log.Error(err)
+ return err
+ }
+ }
+ return nil
+}
+
+func infoOfProject(project string, repoList []string) (quota.ProjectInfo, error) {
+ var (
+ repos []quota.RepoData
+ wg sync.WaitGroup
+ err error
+ )
+ wg.Add(len(repoList))
+
+ errChan := make(chan error, 1)
+ infoChan := make(chan interface{})
+ done := make(chan bool, 1)
+
+ pro, err := dao.GetProjectByName(project)
+ if err != nil {
+ log.Error(err)
+ return quota.ProjectInfo{}, err
+ }
+
+ go func() {
+ defer func() {
+ done <- true
+ }()
+
+ for {
+ select {
+ case result := <-infoChan:
+ if result == nil {
+ return
+ }
+ repoData, ok := result.(quota.RepoData)
+ if ok {
+ repos = append(repos, repoData)
+ }
+
+ case e := <-errChan:
+ if err == nil {
+ err = errors.Wrap(e, "quota sync error on getting info of repo")
+ } else {
+ err = errors.Wrap(e, err.Error())
+ }
+ }
+ }
+ }()
+
+ for _, repo := range repoList {
+ go func(pid int64, repo string) {
+ defer func() {
+ wg.Done()
+ }()
+ info, err := infoOfRepo(pid, repo)
+ if err != nil {
+ errChan <- err
+ return
+ }
+ infoChan <- info
+ }(pro.ProjectID, repo)
+ }
+
+ wg.Wait()
+ close(infoChan)
+
+ <-done
+
+ if err != nil {
+ return quota.ProjectInfo{}, err
+ }
+
+ return quota.ProjectInfo{
+ Name: project,
+ Repos: repos,
+ }, nil
+}
+
+func infoOfRepo(pid int64, repo string) (quota.RepoData, error) {
+ repoClient, err := coreutils.NewRepositoryClientForUI("harbor-core", repo)
+ if err != nil {
+ return quota.RepoData{}, err
+ }
+ tags, err := repoClient.ListTag()
+ if err != nil {
+ return quota.RepoData{}, err
+ }
+ var afnbs []*models.ArtifactAndBlob
+ var afs []*models.Artifact
+ var blobs []*models.Blob
+
+ for _, tag := range tags {
+ _, mediaType, payload, err := repoClient.PullManifest(tag, []string{
+ schema1.MediaTypeManifest,
+ schema1.MediaTypeSignedManifest,
+ schema2.MediaTypeManifest,
+ })
+ if err != nil {
+ log.Error(err)
+ return quota.RepoData{}, err
+ }
+ manifest, desc, err := registry.UnMarshal(mediaType, payload)
+ if err != nil {
+ log.Error(err)
+ return quota.RepoData{}, err
+ }
+ // self
+ afnb := &models.ArtifactAndBlob{
+ DigestAF: desc.Digest.String(),
+ DigestBlob: desc.Digest.String(),
+ }
+ afnbs = append(afnbs, afnb)
+ // add manifest as a blob.
+ blob := &models.Blob{
+ Digest: desc.Digest.String(),
+ ContentType: desc.MediaType,
+ Size: desc.Size,
+ CreationTime: time.Now(),
+ }
+ blobs = append(blobs, blob)
+ for _, layer := range manifest.References() {
+ afnb := &models.ArtifactAndBlob{
+ DigestAF: desc.Digest.String(),
+ DigestBlob: layer.Digest.String(),
+ }
+ afnbs = append(afnbs, afnb)
+ blob := &models.Blob{
+ Digest: layer.Digest.String(),
+ ContentType: layer.MediaType,
+ Size: layer.Size,
+ CreationTime: time.Now(),
+ }
+ blobs = append(blobs, blob)
+ }
+ af := &models.Artifact{
+ PID: pid,
+ Repo: strings.Split(repo, "/")[1],
+ Tag: tag,
+ Digest: desc.Digest.String(),
+ Kind: "Docker-Image",
+ CreationTime: time.Now(),
+ }
+ afs = append(afs, af)
+ }
+ return quota.RepoData{
+ Name: repo,
+ Afs: afs,
+ Afnbs: afnbs,
+ Blobs: blobs,
+ }, nil
+}
+
+func init() {
+ quota.Register("registry", NewRegistryMigrator)
+}
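
The storage half of Usage() hinges on deduplicating layers shared across a project's repositories — each digest is billed once no matter how many repositories reference it. A tiny worked example of that accounting:

package main

import "fmt"

type blob struct {
	digest string
	size   int64
}

// projectStorage sums blob sizes with each digest counted once, the same
// de-duplication Usage() performs over repo.Blobs above.
func projectStorage(repos map[string][]blob) int64 {
	seen := map[string]int64{}
	for _, blobs := range repos {
		for _, b := range blobs {
			if _, ok := seen[b.digest]; !ok {
				seen[b.digest] = b.size
			}
		}
	}
	var total int64
	for _, size := range seen {
		total += size
	}
	return total
}

func main() {
	repos := map[string][]blob{
		// Both repositories share the base layer sha256:aaa; it is billed once.
		"app":  {{digest: "sha256:aaa", size: 100}, {digest: "sha256:bbb", size: 20}},
		"tool": {{digest: "sha256:aaa", size: 100}, {digest: "sha256:ccc", size: 5}},
	}
	fmt.Println(projectStorage(repos)) // 125
}
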
diff --git a/src/core/api/quota_test.go b/src/core/api/quota_test.go
new file mode 100644
index 000000000..ddda51457
--- /dev/null
+++ b/src/core/api/quota_test.go
@@ -0,0 +1,133 @@
+// Copyright 2018 Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/quota"
+ "github.com/goharbor/harbor/src/common/quota/driver"
+ "github.com/goharbor/harbor/src/common/quota/driver/mocks"
+ "github.com/goharbor/harbor/src/pkg/types"
+ "github.com/goharbor/harbor/src/testing/apitests/apilib"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/mock"
+)
+
+var (
+ reference = "mock"
+ hardLimits = types.ResourceList{types.ResourceCount: -1, types.ResourceStorage: -1}
+)
+
+func init() {
+ mockDriver := &mocks.Driver{}
+
+ mockHardLimitsFn := func() types.ResourceList {
+ return hardLimits
+ }
+
+ mockLoadFn := func(key string) driver.RefObject {
+ return driver.RefObject{"id": key}
+ }
+
+ mockValidateFn := func(hardLimits types.ResourceList) error {
+ if len(hardLimits) == 0 {
+ return fmt.Errorf("no resources found")
+ }
+
+ return nil
+ }
+
+ mockDriver.On("HardLimits").Return(mockHardLimitsFn)
+ mockDriver.On("Load", mock.AnythingOfType("string")).Return(mockLoadFn, nil)
+ mockDriver.On("Validate", mock.AnythingOfType("types.ResourceList")).Return(mockValidateFn)
+
+ driver.Register(reference, mockDriver)
+}
+
+func TestQuotaAPIList(t *testing.T) {
+ assert := assert.New(t)
+ apiTest := newHarborAPI()
+
+ count := 10
+ for i := 0; i < count; i++ {
+ mgr, err := quota.NewManager(reference, fmt.Sprintf("%d", i))
+ assert.Nil(err)
+
+ _, err = mgr.NewQuota(hardLimits)
+ assert.Nil(err)
+ }
+
+ code, quotas, err := apiTest.QuotasGet(&apilib.QuotaQuery{Reference: reference}, *admin)
+ assert.Nil(err)
+ assert.Equal(int(200), code)
+ assert.Len(quotas, count, fmt.Sprintf("quotas len should be %d", count))
+
+ code, quotas, err = apiTest.QuotasGet(&apilib.QuotaQuery{Reference: reference, PageSize: 1}, *admin)
+ assert.Nil(err)
+ assert.Equal(int(200), code)
+ assert.Len(quotas, 1)
+}
+
+func TestQuotaAPIGet(t *testing.T) {
+ assert := assert.New(t)
+ apiTest := newHarborAPI()
+
+ mgr, err := quota.NewManager(reference, "quota-get")
+ assert.Nil(err)
+
+ quotaID, err := mgr.NewQuota(hardLimits)
+ assert.Nil(err)
+
+ code, quota, err := apiTest.QuotasGetByID(*admin, fmt.Sprintf("%d", quotaID))
+ assert.Nil(err)
+ assert.Equal(int(200), code)
+ assert.Equal(map[string]int64{"storage": -1, "count": -1}, quota.Hard)
+
+ code, _, err = apiTest.QuotasGetByID(*admin, "100")
+ assert.Nil(err)
+ assert.Equal(int(404), code)
+}
+
+func TestQuotaPut(t *testing.T) {
+ assert := assert.New(t)
+ apiTest := newHarborAPI()
+
+ mgr, err := quota.NewManager(reference, "quota-put")
+ assert.Nil(err)
+
+ quotaID, err := mgr.NewQuota(hardLimits)
+ assert.Nil(err)
+
+ code, quota, err := apiTest.QuotasGetByID(*admin, fmt.Sprintf("%d", quotaID))
+ assert.Nil(err)
+ assert.Equal(int(200), code)
+ assert.Equal(map[string]int64{"count": -1, "storage": -1}, quota.Hard)
+
+ code, err = apiTest.QuotasPut(*admin, fmt.Sprintf("%d", quotaID), models.QuotaUpdateRequest{})
+ assert.Nil(err, err)
+ assert.Equal(int(400), code)
+
+ code, err = apiTest.QuotasPut(*admin, fmt.Sprintf("%d", quotaID), models.QuotaUpdateRequest{Hard: types.ResourceList{types.ResourceCount: 100, types.ResourceStorage: 100}})
+ assert.Nil(err)
+ assert.Equal(int(200), code)
+
+ code, quota, err = apiTest.QuotasGetByID(*admin, fmt.Sprintf("%d", quotaID))
+ assert.Nil(err)
+ assert.Equal(int(200), code)
+ assert.Equal(map[string]int64{"count": 100, "storage": 100}, quota.Hard)
+}
diff --git a/src/core/api/reg_gc_test.go b/src/core/api/reg_gc_test.go
index a8e13891c..f9a81601f 100644
--- a/src/core/api/reg_gc_test.go
+++ b/src/core/api/reg_gc_test.go
@@ -3,7 +3,7 @@ package api
import (
"testing"
- "github.com/goharbor/harbor/tests/apitests/apilib"
+ "github.com/goharbor/harbor/src/testing/apitests/apilib"
"github.com/stretchr/testify/assert"
)
diff --git a/src/core/api/registry.go b/src/core/api/registry.go
index 452d50e72..3885e11a0 100644
--- a/src/core/api/registry.go
+++ b/src/core/api/registry.go
@@ -49,6 +49,7 @@ func (t *RegistryAPI) Ping() {
ID *int64 `json:"id"`
Type *string `json:"type"`
URL *string `json:"url"`
+ Region *string `json:"region"`
CredentialType *string `json:"credential_type"`
AccessKey *string `json:"access_key"`
AccessSecret *string `json:"access_secret"`
diff --git a/src/core/api/repository.go b/src/core/api/repository.go
old mode 100644
new mode 100755
index 12659b48c..46c6976f9
--- a/src/core/api/repository.go
+++ b/src/core/api/repository.go
@@ -16,6 +16,7 @@ package api
import (
"encoding/json"
+ "errors"
"fmt"
"io/ioutil"
"net/http"
@@ -24,7 +25,6 @@ import (
"strings"
"time"
- "errors"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
"github.com/goharbor/harbor/src/common"
@@ -33,12 +33,14 @@ import (
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/rbac"
"github.com/goharbor/harbor/src/common/utils"
- "github.com/goharbor/harbor/src/common/utils/clair"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/common/utils/notary"
+ notarymodel "github.com/goharbor/harbor/src/common/utils/notary/model"
"github.com/goharbor/harbor/src/common/utils/registry"
"github.com/goharbor/harbor/src/core/config"
+ notifierEvt "github.com/goharbor/harbor/src/core/notifier/event"
coreutils "github.com/goharbor/harbor/src/core/utils"
+ "github.com/goharbor/harbor/src/pkg/scan"
"github.com/goharbor/harbor/src/replication"
"github.com/goharbor/harbor/src/replication/event"
"github.com/goharbor/harbor/src/replication/model"
@@ -78,30 +80,6 @@ func (r reposSorter) Less(i, j int) bool {
return r[i].Index < r[j].Index
}
-type tagDetail struct {
- Digest string `json:"digest"`
- Name string `json:"name"`
- Size int64 `json:"size"`
- Architecture string `json:"architecture"`
- OS string `json:"os"`
- OSVersion string `json:"os.version"`
- DockerVersion string `json:"docker_version"`
- Author string `json:"author"`
- Created time.Time `json:"created"`
- Config *cfg `json:"config"`
-}
-
-type cfg struct {
- Labels map[string]string `json:"labels"`
-}
-
-type tagResp struct {
- tagDetail
- Signature *notary.Target `json:"signature"`
- ScanOverview *models.ImgScanOverview `json:"scan_overview,omitempty"`
- Labels []*models.Label `json:"labels"`
-}
-
type manifestResp struct {
Manifest interface{} `json:"manifest"`
Config interface{} `json:"config,omitempty" `
@@ -261,7 +239,7 @@ func (ra *RepositoryAPI) Delete() {
return
}
- rc, err := coreutils.NewRepositoryClientForUI(ra.SecurityCtx.GetUsername(), repoName)
+ rc, err := coreutils.NewRepositoryClientForLocal(ra.SecurityCtx.GetUsername(), repoName)
if err != nil {
log.Errorf("error occurred while initializing repository client for %s: %v", repoName, err)
ra.SendInternalServerError(errors.New("internal error"))
@@ -331,7 +309,7 @@ func (ra *RepositoryAPI) Delete() {
go func(tag string) {
e := &event.Event{
- Type: event.EventTypeImagePush,
+ Type: event.EventTypeImageDelete,
Resource: &model.Resource{
Type: model.ResourceTypeImage,
Metadata: &model.ResourceMetadata{
@@ -362,6 +340,24 @@ func (ra *RepositoryAPI) Delete() {
}(t)
}
+ // build and publish image delete event
+ evt := &notifierEvt.Event{}
+ imgDelMetadata := &notifierEvt.ImageDelMetaData{
+ Project: project,
+ Tags: tags,
+ RepoName: repoName,
+ OccurAt: time.Now(),
+ Operator: ra.SecurityCtx.GetUsername(),
+ }
+ if err := evt.Build(imgDelMetadata); err != nil {
+ // do not return when building event metadata failed
+ log.Errorf("failed to build image delete event metadata: %v", err)
+ }
+ if err := evt.Publish(); err != nil {
+ // do not return when publishing event failed
+ log.Errorf("failed to publish image delete event: %v", err)
+ }
+
exist, err := repositoryExist(repoName, rc)
if err != nil {
log.Errorf("failed to check the existence of repository %s: %v", repoName, err)
@@ -588,7 +584,12 @@ func (ra *RepositoryAPI) GetTags() {
}
labeledTags := map[string]struct{}{}
for _, rl := range rls {
- labeledTags[strings.Split(rl.ResourceName, ":")[1]] = struct{}{}
+ strs := strings.SplitN(rl.ResourceName, ":", 2)
+ // the "rls" may contain images which don't belong to the repository
+ if strs[0] != repoName {
+ continue
+ }
+ labeledTags[strs[1]] = struct{}{}
}
ts := []string{}
for _, tag := range tags {
@@ -599,32 +600,52 @@ func (ra *RepositoryAPI) GetTags() {
tags = ts
}
+ detail, err := ra.GetBool("detail", true)
+ if !detail && err == nil {
+ ra.Data["json"] = simpleTags(tags)
+ ra.ServeJSON()
+ return
+ }
+
ra.Data["json"] = assembleTagsInParallel(client, repoName, tags,
ra.SecurityCtx.GetUsername())
ra.ServeJSON()
}
+func simpleTags(tags []string) []*models.TagResp {
+ var tagsResp []*models.TagResp
+ for _, tag := range tags {
+ tagsResp = append(tagsResp, &models.TagResp{
+ TagDetail: models.TagDetail{
+ Name: tag,
+ },
+ })
+ }
+
+ return tagsResp
+}
+
// get config, signature and scan overview and assemble them into one
// struct for each tag in tags
func assembleTagsInParallel(client *registry.Repository, repository string,
- tags []string, username string) []*tagResp {
+ tags []string, username string) []*models.TagResp {
var err error
- signatures := map[string][]notary.Target{}
+ signatures := map[string][]notarymodel.Target{}
if config.WithNotary() {
signatures, err = getSignatures(username, repository)
if err != nil {
- signatures = map[string][]notary.Target{}
+ signatures = map[string][]notarymodel.Target{}
log.Errorf("failed to get signatures of %s: %v", repository, err)
}
}
- c := make(chan *tagResp)
+ c := make(chan *models.TagResp)
for _, tag := range tags {
go assembleTag(c, client, repository, tag, config.WithClair(),
config.WithNotary(), signatures)
}
- result := []*tagResp{}
- var item *tagResp
+ result := []*models.TagResp{}
+ var item *models.TagResp
for i := 0; i < len(tags); i++ {
item = <-c
if item == nil {
@@ -635,10 +656,10 @@ func assembleTagsInParallel(client *registry.Repository, repository string,
return result
}
-func assembleTag(c chan *tagResp, client *registry.Repository,
+func assembleTag(c chan *models.TagResp, client *registry.Repository,
repository, tag string, clairEnabled, notaryEnabled bool,
- signatures map[string][]notary.Target) {
- item := &tagResp{}
+ signatures map[string][]notarymodel.Target) {
+ item := &models.TagResp{}
// labels
image := fmt.Sprintf("%s:%s", repository, tag)
labels, err := dao.GetLabelsOfResource(common.ResourceTypeImage, image)
@@ -654,7 +675,7 @@ func assembleTag(c chan *tagResp, client *registry.Repository,
log.Errorf("failed to get v2 manifest of %s:%s: %v", repository, tag, err)
}
if tagDetail != nil {
- item.tagDetail = *tagDetail
+ item.TagDetail = *tagDetail
}
// scan overview
@@ -672,24 +693,41 @@ func assembleTag(c chan *tagResp, client *registry.Repository,
}
}
}
+
+ // pull/push time
+ artifact, err := dao.GetArtifact(repository, tag)
+ if err != nil {
+ log.Errorf("failed to get artifact %s:%s: %v", repository, tag, err)
+ } else {
+ if artifact == nil {
+ log.Warningf("artifact %s:%s not found", repository, tag)
+ } else {
+ item.PullTime = artifact.PullTime
+ item.PushTime = artifact.PushTime
+ }
+ }
+
c <- item
}
// getTagDetail returns the detail information for v2 manifest image
// The information contains architecture, os, author, size, etc.
-func getTagDetail(client *registry.Repository, tag string) (*tagDetail, error) {
- detail := &tagDetail{
+func getTagDetail(client *registry.Repository, tag string) (*models.TagDetail, error) {
+ detail := &models.TagDetail{
Name: tag,
}
- digest, _, payload, err := client.PullManifest(tag, []string{schema2.MediaTypeManifest})
+ digest, mediaType, payload, err := client.PullManifest(tag, []string{schema2.MediaTypeManifest})
if err != nil {
return detail, err
}
detail.Digest = digest
- manifest := &schema2.DeserializedManifest{}
- if err = manifest.UnmarshalJSON(payload); err != nil {
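+ // some registries return schema 1 manifests with a generic
+ // "application/json" content type; map it to the schema 1 media type
+ // so the right parser is selected below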
+ if strings.Contains(mediaType, "application/json") {
+ mediaType = schema1.MediaTypeManifest
+ }
+ manifest, _, err := registry.UnMarshal(mediaType, payload)
+ if err != nil {
return detail, err
}
@@ -699,7 +737,21 @@ func getTagDetail(client *registry.Repository, tag string) (*tagDetail, error) {
detail.Size += ref.Size
}
- _, reader, err := client.PullBlob(manifest.Target().Digest.String())
+ // if the media type of the manifest isn't v2, don't parse the image config
+ // and return directly
+ // the consequence is that some detail information (os, arch, ...) of old
+ // images cannot be retrieved
+ if mediaType != schema2.MediaTypeManifest {
+ log.Debugf("the media type of the manifest is %s, not v2, skip", mediaType)
+ return detail, nil
+ }
+ v2Manifest, ok := manifest.(*schema2.DeserializedManifest)
+ if !ok {
+ log.Debug("the manifest cannot be convert to DeserializedManifest, skip")
+ return detail, nil
+ }
+
+ _, reader, err := client.PullBlob(v2Manifest.Target().Digest.String())
if err != nil {
return detail, err
}
@@ -718,7 +770,7 @@ func getTagDetail(client *registry.Repository, tag string) (*tagDetail, error) {
return detail, nil
}
-func populateAuthor(detail *tagDetail) {
+func populateAuthor(detail *models.TagDetail) {
// has author info already
if len(detail.Author) > 0 {
return
@@ -1018,34 +1070,22 @@ func (ra *RepositoryAPI) VulnerabilityDetails() {
ra.SendForbiddenError(errors.New(ra.SecurityCtx.GetUsername()))
return
}
- res := []*models.VulnerabilityItem{}
- overview, err := dao.GetImgScanOverview(digest)
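+ // fetch the vulnerability list for the digest via the scan package
+ // instead of querying Clair directly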
+ res, err := scan.VulnListByDigest(digest)
if err != nil {
- ra.SendInternalServerError(fmt.Errorf("failed to get the scan overview, error: %v", err))
- return
- }
- if overview != nil && len(overview.DetailsKey) > 0 {
- clairClient := clair.NewClient(config.ClairEndpoint(), nil)
- log.Debugf("The key for getting details: %s", overview.DetailsKey)
- details, err := clairClient.GetResult(overview.DetailsKey)
- if err != nil {
- ra.SendInternalServerError(fmt.Errorf("Failed to get scan details from Clair, error: %v", err))
- return
- }
- res = transformVulnerabilities(details)
+ log.Errorf("Failed to get vulnerability list for image: %s:%s", repository, tag)
}
ra.Data["json"] = res
ra.ServeJSON()
}
-func getSignatures(username, repository string) (map[string][]notary.Target, error) {
+func getSignatures(username, repository string) (map[string][]notarymodel.Target, error) {
targets, err := notary.GetInternalTargets(config.InternalNotaryEndpoint(),
username, repository)
if err != nil {
return nil, err
}
- signatures := map[string][]notary.Target{}
+ signatures := map[string][]notarymodel.Target{}
for _, tgt := range targets {
digest, err := notary.DigestFromTarget(tgt)
if err != nil {
diff --git a/src/core/api/repository_label_test.go b/src/core/api/repository_label_test.go
index 02797bd12..3ab7e13f3 100644
--- a/src/core/api/repository_label_test.go
+++ b/src/core/api/repository_label_test.go
@@ -28,7 +28,7 @@ import (
var (
resourceLabelAPIBasePath = "/api/repositories"
- repository = "library/hello-world"
+ repo = "library/hello-world"
tag = "latest"
proLibraryLabelID int64
)
@@ -63,7 +63,7 @@ func TestAddToImage(t *testing.T) {
{
request: &testingRequest{
url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath,
- repository, tag),
+ repo, tag),
method: http.MethodPost,
},
code: http.StatusUnauthorized,
@@ -72,13 +72,13 @@ func TestAddToImage(t *testing.T) {
{
request: &testingRequest{
url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath,
- repository, tag),
+ repo, tag),
method: http.MethodPost,
credential: projGuest,
},
code: http.StatusForbidden,
},
- // 404 repository doesn't exist
+ // 404 repo doesn't exist
{
request: &testingRequest{
url: fmt.Sprintf("%s/library/non-exist-repo/tags/%s/labels", resourceLabelAPIBasePath, tag),
@@ -90,7 +90,7 @@ func TestAddToImage(t *testing.T) {
// 404 image doesn't exist
{
request: &testingRequest{
- url: fmt.Sprintf("%s/%s/tags/non-exist-tag/labels", resourceLabelAPIBasePath, repository),
+ url: fmt.Sprintf("%s/%s/tags/non-exist-tag/labels", resourceLabelAPIBasePath, repo),
method: http.MethodPost,
credential: projDeveloper,
},
@@ -99,7 +99,7 @@ func TestAddToImage(t *testing.T) {
// 400
{
request: &testingRequest{
- url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, repository, tag),
+ url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, repo, tag),
method: http.MethodPost,
credential: projDeveloper,
},
@@ -109,7 +109,7 @@ func TestAddToImage(t *testing.T) {
{
request: &testingRequest{
url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath,
- repository, tag),
+ repo, tag),
method: http.MethodPost,
credential: projDeveloper,
bodyJSON: struct {
@@ -124,7 +124,7 @@ func TestAddToImage(t *testing.T) {
{
request: &testingRequest{
url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath,
- repository, tag),
+ repo, tag),
method: http.MethodPost,
credential: projDeveloper,
bodyJSON: struct {
@@ -139,7 +139,7 @@ func TestAddToImage(t *testing.T) {
{
request: &testingRequest{
url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath,
- repository, tag),
+ repo, tag),
method: http.MethodPost,
credential: projDeveloper,
bodyJSON: struct {
@@ -154,7 +154,7 @@ func TestAddToImage(t *testing.T) {
{
request: &testingRequest{
url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath,
- repository, tag),
+ repo, tag),
method: http.MethodPost,
credential: projDeveloper,
bodyJSON: struct {
@@ -172,7 +172,7 @@ func TestAddToImage(t *testing.T) {
func TestGetOfImage(t *testing.T) {
labels := []*models.Label{}
err := handleAndParse(&testingRequest{
- url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, repository, tag),
+ url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath, repo, tag),
method: http.MethodGet,
credential: projDeveloper,
}, &labels)
@@ -185,7 +185,7 @@ func TestRemoveFromImage(t *testing.T) {
runCodeCheckingCases(t, &codeCheckingCase{
request: &testingRequest{
url: fmt.Sprintf("%s/%s/tags/%s/labels/%d", resourceLabelAPIBasePath,
- repository, tag, proLibraryLabelID),
+ repo, tag, proLibraryLabelID),
method: http.MethodDelete,
credential: projDeveloper,
},
@@ -195,7 +195,7 @@ func TestRemoveFromImage(t *testing.T) {
labels := []*models.Label{}
err := handleAndParse(&testingRequest{
url: fmt.Sprintf("%s/%s/tags/%s/labels", resourceLabelAPIBasePath,
- repository, tag),
+ repo, tag),
method: http.MethodGet,
credential: projDeveloper,
}, &labels)
@@ -206,7 +206,7 @@ func TestRemoveFromImage(t *testing.T) {
func TestAddToRepository(t *testing.T) {
runCodeCheckingCases(t, &codeCheckingCase{
request: &testingRequest{
- url: fmt.Sprintf("%s/%s/labels", resourceLabelAPIBasePath, repository),
+ url: fmt.Sprintf("%s/%s/labels", resourceLabelAPIBasePath, repo),
method: http.MethodPost,
bodyJSON: struct {
ID int64
@@ -222,7 +222,7 @@ func TestAddToRepository(t *testing.T) {
func TestGetOfRepository(t *testing.T) {
labels := []*models.Label{}
err := handleAndParse(&testingRequest{
- url: fmt.Sprintf("%s/%s/labels", resourceLabelAPIBasePath, repository),
+ url: fmt.Sprintf("%s/%s/labels", resourceLabelAPIBasePath, repo),
method: http.MethodGet,
credential: projDeveloper,
}, &labels)
@@ -235,7 +235,7 @@ func TestRemoveFromRepository(t *testing.T) {
runCodeCheckingCases(t, &codeCheckingCase{
request: &testingRequest{
url: fmt.Sprintf("%s/%s/labels/%d", resourceLabelAPIBasePath,
- repository, proLibraryLabelID),
+ repo, proLibraryLabelID),
method: http.MethodDelete,
credential: projDeveloper,
},
@@ -244,7 +244,7 @@ func TestRemoveFromRepository(t *testing.T) {
labels := []*models.Label{}
err := handleAndParse(&testingRequest{
- url: fmt.Sprintf("%s/%s/labels", resourceLabelAPIBasePath, repository),
+ url: fmt.Sprintf("%s/%s/labels", resourceLabelAPIBasePath, repo),
method: http.MethodGet,
credential: projDeveloper,
}, &labels)
diff --git a/src/core/api/repository_test.go b/src/core/api/repository_test.go
index 34649d245..7aa17a0b2 100644
--- a/src/core/api/repository_test.go
+++ b/src/core/api/repository_test.go
@@ -21,7 +21,7 @@ import (
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/dao/project"
"github.com/goharbor/harbor/src/common/models"
- "github.com/goharbor/harbor/tests/apitests/apilib"
+ "github.com/goharbor/harbor/src/testing/apitests/apilib"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -96,7 +96,7 @@ func TestGetReposTags(t *testing.T) {
t.Errorf("failed to get tags of repository %s: %v", repository, err)
} else {
assert.Equal(int(200), code, "httpStatusCode should be 200")
- if tg, ok := tags.([]tagResp); ok {
+ if tg, ok := tags.([]models.TagResp); ok {
assert.Equal(1, len(tg), fmt.Sprintf("there should be only one tag, but now %v", tg))
assert.Equal(tg[0].Name, "latest", "the tag should be latest")
} else {
@@ -207,19 +207,19 @@ func TestGetReposTop(t *testing.T) {
func TestPopulateAuthor(t *testing.T) {
author := "author"
- detail := &tagDetail{
+ detail := &models.TagDetail{
Author: author,
}
populateAuthor(detail)
assert.Equal(t, author, detail.Author)
- detail = &tagDetail{}
+ detail = &models.TagDetail{}
populateAuthor(detail)
assert.Equal(t, "", detail.Author)
maintainer := "maintainer"
- detail = &tagDetail{
- Config: &cfg{
+ detail = &models.TagDetail{
+ Config: &models.TagCfg{
Labels: map[string]string{
"Maintainer": maintainer,
},
diff --git a/src/core/api/retention.go b/src/core/api/retention.go
new file mode 100644
index 000000000..07bc013b2
--- /dev/null
+++ b/src/core/api/retention.go
@@ -0,0 +1,437 @@
+package api
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "strconv"
+
+ "github.com/goharbor/harbor/src/common/rbac"
+ "github.com/goharbor/harbor/src/core/filter"
+ "github.com/goharbor/harbor/src/core/promgr"
+ "github.com/goharbor/harbor/src/pkg/retention"
+ "github.com/goharbor/harbor/src/pkg/retention/policy"
+ "github.com/goharbor/harbor/src/pkg/retention/q"
+)
+
+// RetentionAPI ...
+type RetentionAPI struct {
+ BaseController
+ pm promgr.ProjectManager
+}
+
+// Prepare validates the user
+func (r *RetentionAPI) Prepare() {
+ r.BaseController.Prepare()
+ if !r.SecurityCtx.IsAuthenticated() {
+ r.SendUnAuthorizedError(errors.New("UnAuthorized"))
+ return
+ }
+ pm, e := filter.GetProjectManager(r.Ctx.Request)
+ if e != nil {
+ r.SendInternalServerError(e)
+ return
+ }
+ r.pm = pm
+
+}
+
+// GetMetadatas Get Metadatas
+func (r *RetentionAPI) GetMetadatas() {
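+ // the supported rule templates and selectors are fixed, so the
+ // metadata is served as a canned JSON document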
+ data := `
+{
+ "templates": [
+ {
+ "rule_template": "latestPushedK",
+ "display_text": "the most recently pushed # images",
+ "action": "retain",
+ "params": [
+ {
+ "type": "int",
+ "unit": "COUNT",
+ "required": true
+ }
+ ]
+ },
+ {
+ "rule_template": "latestPulledN",
+ "display_text": "the most recently pulled # images",
+ "action": "retain",
+ "params": [
+ {
+ "type": "int",
+ "unit": "COUNT",
+ "required": true
+ }
+ ]
+ },
+ {
+ "rule_template": "nDaysSinceLastPull",
+ "display_text": "pulled within the last # days",
+ "action": "retain",
+ "params": [
+ {
+ "type": "int",
+ "unit": "DAYS",
+ "required": true
+ }
+ ]
+ },
+ {
+ "rule_template": "nDaysSinceLastPush",
+ "display_text": "pushed within the last # days",
+ "action": "retain",
+ "params": [
+ {
+ "type": "int",
+ "unit": "DAYS",
+ "required": true
+ }
+ ]
+ },
+ {
+ "rule_template": "nothing",
+ "display_text": "none",
+ "action": "retain",
+ "params": []
+ },
+ {
+ "rule_template": "always",
+ "display_text": "always",
+ "action": "retain",
+ "params": []
+ }
+ ],
+ "scope_selectors": [
+ {
+ "display_text": "Repositories",
+ "kind": "doublestar",
+ "decorations": [
+ "repoMatches",
+ "repoExcludes"
+ ]
+ }
+ ],
+ "tag_selectors": [
+ {
+ "display_text": "Labels",
+ "kind": "label",
+ "decorations": [
+ "withLabels",
+ "withoutLabels"
+ ]
+ },
+ {
+ "display_text": "Tags",
+ "kind": "doublestar",
+ "decorations": [
+ "matches",
+ "excludes"
+ ]
+ }
+ ]
+}
+`
+ w := r.Ctx.ResponseWriter
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(data))
+}
+
+// GetRetention Get Retention
+func (r *RetentionAPI) GetRetention() {
+ id, err := r.GetIDFromURL()
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ p, err := retentionController.GetRetention(id)
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ if !r.requireAccess(p, rbac.ActionRead) {
+ return
+ }
+ r.WriteJSONData(p)
+}
+
+// CreateRetention Create Retention
+func (r *RetentionAPI) CreateRetention() {
+ p := &policy.Metadata{}
+ isValid, err := r.DecodeJSONReqAndValidate(p)
+ if !isValid {
+ r.SendBadRequestError(err)
+ return
+ }
+ if err = r.checkRuleConflict(p); err != nil {
+ r.SendConflictError(err)
+ return
+ }
+ if !r.requireAccess(p, rbac.ActionCreate) {
+ return
+ }
+ switch p.Scope.Level {
+ case policy.ScopeLevelProject:
+ if p.Scope.Reference <= 0 {
+ r.SendBadRequestError(fmt.Errorf("invalid Project id %d", p.Scope.Reference))
+ return
+ }
+
+ proj, err := r.pm.Get(p.Scope.Reference)
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ if proj == nil {
+ r.SendBadRequestError(fmt.Errorf("invalid Project id %d", p.Scope.Reference))
+ return
+ }
+ default:
+ r.SendBadRequestError(fmt.Errorf("scope %s is not support", p.Scope.Level))
+ return
+ }
+ id, err := retentionController.CreateRetention(p)
+ if err != nil {
+ r.SendInternalServerError(err)
+ return
+ }
+ if err := r.pm.GetMetadataManager().Add(p.Scope.Reference,
+ map[string]string{"retention_id": strconv.FormatInt(id, 10)}); err != nil {
+ r.SendInternalServerError(err)
+ return
+ }
+ r.Redirect(http.StatusCreated, strconv.FormatInt(id, 10))
+}
+
+// UpdateRetention Update Retention
+func (r *RetentionAPI) UpdateRetention() {
+ id, err := r.GetIDFromURL()
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ p := &policy.Metadata{}
+ isValid, err := r.DecodeJSONReqAndValidate(p)
+ if !isValid {
+ r.SendBadRequestError(err)
+ return
+ }
+ p.ID = id
+ if err = r.checkRuleConflict(p); err != nil {
+ r.SendConflictError(err)
+ return
+ }
+ if !r.requireAccess(p, rbac.ActionUpdate) {
+ return
+ }
+ if err = retentionController.UpdateRetention(p); err != nil {
+ r.SendInternalServerError(err)
+ return
+ }
+}
+
+func (r *RetentionAPI) checkRuleConflict(p *policy.Metadata) error {
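+ // two rules are considered duplicates when they are identical except
+ // for their IDs, so each rule is marshaled with its ID zeroed out and
+ // the result is used as a map key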
+ temp := make(map[string]int)
+ for n, rule := range p.Rules {
+ tid := rule.ID
+ rule.ID = 0
+ bs, _ := json.Marshal(rule)
+ if old, exists := temp[string(bs)]; exists {
+ return fmt.Errorf("rule %d is conflict with rule %d", n, old)
+ }
+ temp[string(bs)] = n
+ rule.ID = tid
+ }
+ return nil
+}
+
+// TriggerRetentionExec Trigger Retention Execution
+func (r *RetentionAPI) TriggerRetentionExec() {
+ id, err := r.GetIDFromURL()
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ d := &struct {
+ DryRun bool `json:"dry_run"`
+ }{
+ DryRun: false,
+ }
+ isValid, err := r.DecodeJSONReqAndValidate(d)
+ if !isValid {
+ r.SendBadRequestError(err)
+ return
+ }
+ p, err := retentionController.GetRetention(id)
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ if !r.requireAccess(p, rbac.ActionUpdate) {
+ return
+ }
+ eid, err := retentionController.TriggerRetentionExec(id, retention.ExecutionTriggerManual, d.DryRun)
+ if err != nil {
+ r.SendInternalServerError(err)
+ return
+ }
+ r.Redirect(http.StatusCreated, strconv.FormatInt(eid, 10))
+}
+
+// OperateRetentionExec Operate Retention Execution
+func (r *RetentionAPI) OperateRetentionExec() {
+ id, err := r.GetIDFromURL()
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ eid, err := r.GetInt64FromPath(":eid")
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ a := &struct {
+ Action string `json:"action" valid:"Required"`
+ }{}
+ isValid, err := r.DecodeJSONReqAndValidate(a)
+ if !isValid {
+ r.SendBadRequestError(err)
+ return
+ }
+ p, err := retentionController.GetRetention(id)
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ if !r.requireAccess(p, rbac.ActionUpdate) {
+ return
+ }
+ if err = retentionController.OperateRetentionExec(eid, a.Action); err != nil {
+ r.SendInternalServerError(err)
+ return
+ }
+}
+
+// ListRetentionExecs List Retention Execution
+func (r *RetentionAPI) ListRetentionExecs() {
+ id, err := r.GetIDFromURL()
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ page, size, err := r.GetPaginationParams()
+ if err != nil {
+ r.SendInternalServerError(err)
+ return
+ }
+ query := &q.Query{
+ PageNumber: page,
+ PageSize: size,
+ }
+ p, err := retentionController.GetRetention(id)
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ if !r.requireAccess(p, rbac.ActionList) {
+ return
+ }
+ execs, err := retentionController.ListRetentionExecs(id, query)
+ if err != nil {
+ r.SendInternalServerError(err)
+ return
+ }
+ total, err := retentionController.GetTotalOfRetentionExecs(id)
+ if err != nil {
+ r.SendInternalServerError(err)
+ return
+ }
+ r.SetPaginationHeader(total, query.PageNumber, query.PageSize)
+ r.WriteJSONData(execs)
+}
+
+// ListRetentionExecTasks List Retention Execution Tasks
+func (r *RetentionAPI) ListRetentionExecTasks() {
+ id, err := r.GetIDFromURL()
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ eid, err := r.GetInt64FromPath(":eid")
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ page, size, err := r.GetPaginationParams()
+ if err != nil {
+ r.SendInternalServerError(err)
+ return
+ }
+ query := &q.Query{
+ PageNumber: page,
+ PageSize: size,
+ }
+ p, err := retentionController.GetRetention(id)
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ if !r.requireAccess(p, rbac.ActionList) {
+ return
+ }
+ his, err := retentionController.ListRetentionExecTasks(eid, query)
+ if err != nil {
+ r.SendInternalServerError(err)
+ return
+ }
+ total, err := retentionController.GetTotalOfRetentionExecTasks(eid)
+ if err != nil {
+ r.SendInternalServerError(err)
+ return
+ }
+ r.SetPaginationHeader(total, query.PageNumber, query.PageSize)
+ r.WriteJSONData(his)
+}
+
+// GetRetentionExecTaskLog Get Retention Execution Task log
+func (r *RetentionAPI) GetRetentionExecTaskLog() {
+ tid, err := r.GetInt64FromPath(":tid")
+ if err != nil {
+ r.SendBadRequestError(err)
+ return
+ }
+ log, err := retentionController.GetRetentionExecTaskLog(tid)
+ if err != nil {
+ r.SendInternalServerError(err)
+ return
+ }
+ w := r.Ctx.ResponseWriter
+ w.Header().Set("Content-Type", "text/plain")
+ w.WriteHeader(http.StatusOK)
+ w.Write(log)
+}
+
+func (r *RetentionAPI) requireAccess(p *policy.Metadata, action rbac.Action, subresources ...rbac.Resource) bool {
+ var hasPermission bool
+
+ switch p.Scope.Level {
+ case "project":
+ if len(subresources) == 0 {
+ subresources = append(subresources, rbac.ResourceTagRetention)
+ }
+ resource := rbac.NewProjectNamespace(p.Scope.Reference).Resource(subresources...)
+ hasPermission = r.SecurityCtx.Can(action, resource)
+ default:
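+ // non-project scopes are restricted to system admins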
+ hasPermission = r.SecurityCtx.IsSysAdmin()
+ }
+
+ if !hasPermission {
+ if !r.SecurityCtx.IsAuthenticated() {
+ r.SendUnAuthorizedError(errors.New("UnAuthorized"))
+ } else {
+ r.SendForbiddenError(errors.New(r.SecurityCtx.GetUsername()))
+ }
+ return false
+ }
+
+ return true
+}
diff --git a/src/core/api/retention_test.go b/src/core/api/retention_test.go
new file mode 100644
index 000000000..0fe47f628
--- /dev/null
+++ b/src/core/api/retention_test.go
@@ -0,0 +1,456 @@
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/goharbor/harbor/src/pkg/retention/dao"
+ "github.com/goharbor/harbor/src/pkg/retention/dao/models"
+ "github.com/goharbor/harbor/src/pkg/retention/policy"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/stretchr/testify/require"
+ "net/http"
+ "testing"
+ "time"
+)
+
+func TestGetMetadatas(t *testing.T) {
+ cases := []*codeCheckingCase{
+ // 200
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: "/api/retentions/metadatas",
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ }
+
+ runCodeCheckingCases(t, cases...)
+}
+
+func TestCreatePolicy(t *testing.T) {
+ p1 := &policy.Metadata{
+ Algorithm: "or",
+ Rules: []rule.Metadata{
+ {
+ ID: 1,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: ".+",
+ },
+ },
+ },
+ },
+ },
+ Trigger: &policy.Trigger{
+ Kind: "Schedule",
+ Settings: map[string]interface{}{
+ "cron": "* 22 11 * * *",
+ },
+ },
+ Scope: &policy.Scope{
+ Level: "project",
+ Reference: 1,
+ },
+ }
+
+ cases := []*codeCheckingCase{
+ // 401
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/retentions",
+ },
+ code: http.StatusUnauthorized,
+ },
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/retentions",
+ bodyJSON: p1,
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/retentions",
+ bodyJSON: &policy.Metadata{
+ Algorithm: "NODEF",
+ Rules: []rule.Metadata{
+ {
+ ID: 1,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: ".+",
+ },
+ },
+ },
+ },
+ },
+ Trigger: &policy.Trigger{
+ Kind: "Schedule",
+ Settings: map[string]interface{}{},
+ },
+ Scope: &policy.Scope{
+ Level: "project",
+ Reference: 1,
+ },
+ },
+ credential: sysAdmin,
+ },
+ code: http.StatusBadRequest,
+ },
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/retentions",
+ bodyJSON: &policy.Metadata{
+ Algorithm: "or",
+ Rules: []rule.Metadata{
+ {
+ ID: 1,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: ".+",
+ },
+ },
+ },
+ },
+ {
+ ID: 2,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: ".+",
+ },
+ },
+ },
+ },
+ },
+ Trigger: &policy.Trigger{
+ Kind: "Schedule",
+ Settings: map[string]interface{}{
+ "cron": "* 22 11 * * *",
+ },
+ },
+ Scope: &policy.Scope{
+ Level: "project",
+ Reference: 1,
+ },
+ },
+ credential: sysAdmin,
+ },
+ code: http.StatusConflict,
+ },
+ }
+
+ runCodeCheckingCases(t, cases...)
+}
+
+func TestPolicy(t *testing.T) {
+ p := &policy.Metadata{
+ Algorithm: "or",
+ Rules: []rule.Metadata{
+ {
+ ID: 1,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: ".+",
+ },
+ },
+ },
+ },
+ },
+ Trigger: &policy.Trigger{
+ Kind: "Schedule",
+ Settings: map[string]interface{}{
+ "cron": "* 22 11 * * *",
+ },
+ },
+ Scope: &policy.Scope{
+ Level: "project",
+ Reference: 1,
+ },
+ }
+ p1 := &models.RetentionPolicy{
+ ScopeLevel: p.Scope.Level,
+ TriggerKind: p.Trigger.Kind,
+ CreateTime: time.Now(),
+ UpdateTime: time.Now(),
+ }
+ data, _ := json.Marshal(p)
+ p1.Data = string(data)
+
+ id, err := dao.CreatePolicy(p1)
+ require.Nil(t, err)
+ require.True(t, id > 0)
+
+ cases := []*codeCheckingCase{
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: fmt.Sprintf("/api/retentions/%d", id),
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: fmt.Sprintf("/api/retentions/%d", id),
+ bodyJSON: &policy.Metadata{
+ Algorithm: "or",
+ Rules: []rule.Metadata{
+ {
+ ID: 1,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "b.+",
+ },
+ },
+ },
+ },
+ },
+ Trigger: &policy.Trigger{
+ Kind: "Schedule",
+ Settings: map[string]interface{}{
+ "cron": "* 22 11 * * *",
+ },
+ },
+ Scope: &policy.Scope{
+ Level: "project",
+ Reference: 1,
+ },
+ },
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: fmt.Sprintf("/api/retentions/%d", id),
+ bodyJSON: &policy.Metadata{
+ Algorithm: "or",
+ Rules: []rule.Metadata{
+ {
+ ID: 1,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "b.+",
+ },
+ },
+ },
+ },
+ {
+ ID: 2,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "b.+",
+ },
+ },
+ },
+ },
+ },
+ Trigger: &policy.Trigger{
+ Kind: "Schedule",
+ Settings: map[string]interface{}{
+ "cron": "* 22 11 * * *",
+ },
+ },
+ Scope: &policy.Scope{
+ Level: "project",
+ Reference: 1,
+ },
+ },
+ credential: sysAdmin,
+ },
+ code: http.StatusConflict,
+ },
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: fmt.Sprintf("/api/retentions/%d/executions", id),
+ bodyJSON: &struct {
+ DryRun bool `json:"dry_run"`
+ }{
+ DryRun: false,
+ },
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: fmt.Sprintf("/api/retentions/%d/executions", id),
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ }
+
+ runCodeCheckingCases(t, cases...)
+}
diff --git a/src/core/api/robot.go b/src/core/api/robot.go
index cc1b880e3..be49983a4 100644
--- a/src/core/api/robot.go
+++ b/src/core/api/robot.go
@@ -158,7 +158,6 @@ func (r *RobotAPI) Post() {
}
robotRep := models.RobotRep{
- ID: id,
Name: robot.Name,
Token: rawTk,
}
diff --git a/src/core/api/scan_all_test.go b/src/core/api/scan_all_test.go
index 4378fb900..347b39182 100644
--- a/src/core/api/scan_all_test.go
+++ b/src/core/api/scan_all_test.go
@@ -3,7 +3,7 @@ package api
import (
"testing"
- "github.com/goharbor/harbor/tests/apitests/apilib"
+ "github.com/goharbor/harbor/src/testing/apitests/apilib"
"github.com/stretchr/testify/assert"
)
diff --git a/src/core/api/statistic_test.go b/src/core/api/statistic_test.go
index 4ef968575..5c9be4229 100644
--- a/src/core/api/statistic_test.go
+++ b/src/core/api/statistic_test.go
@@ -18,7 +18,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
- // "github.com/goharbor/harbor/tests/apitests/apilib"
+ // "github.com/goharbor/harbor/src/testing/apitests/apilib"
)
func TestStatisticGet(t *testing.T) {
diff --git a/src/core/api/sys_cve_whitelist.go b/src/core/api/sys_cve_whitelist.go
new file mode 100644
index 000000000..921abb83f
--- /dev/null
+++ b/src/core/api/sys_cve_whitelist.go
@@ -0,0 +1,81 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+ "errors"
+ "fmt"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/pkg/scan/whitelist"
+ "net/http"
+)
+
+// SysCVEWhitelistAPI Handles the requests to manage system level CVE whitelist
+type SysCVEWhitelistAPI struct {
+ BaseController
+ manager whitelist.Manager
+}
+
+// Prepare validates the request initially
+func (sca *SysCVEWhitelistAPI) Prepare() {
+ sca.BaseController.Prepare()
+ if !sca.SecurityCtx.IsAuthenticated() {
+ sca.SendUnAuthorizedError(errors.New("Unauthorized"))
+ return
+ }
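+ // GET is open to any authenticated user; mutating requests require
+ // the system admin role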
+ if !sca.SecurityCtx.IsSysAdmin() && sca.Ctx.Request.Method != http.MethodGet {
+ msg := fmt.Sprintf("only system admin has permission issue %s request to this API", sca.Ctx.Request.Method)
+ log.Errorf(msg)
+ sca.SendForbiddenError(errors.New(msg))
+ return
+ }
+ sca.manager = whitelist.NewDefaultManager()
+}
+
+// Get handles the GET request to retrieve the system level CVE whitelist
+func (sca *SysCVEWhitelistAPI) Get() {
+ l, err := sca.manager.GetSys()
+ if err != nil {
+ sca.SendInternalServerError(err)
+ return
+ }
+ sca.WriteJSONData(l)
+}
+
+// Put handles the PUT request to update the system level CVE whitelist
+func (sca *SysCVEWhitelistAPI) Put() {
+ var l models.CVEWhitelist
+ if err := sca.DecodeJSONReq(&l); err != nil {
+ log.Errorf("Failed to decode JSON array from request")
+ sca.SendBadRequestError(err)
+ return
+ }
+ if l.ProjectID != 0 {
+ msg := fmt.Sprintf("Non-zero project ID for system CVE whitelist: %d.", l.ProjectID)
+ log.Error(msg)
+ sca.SendBadRequestError(errors.New(msg))
+ return
+ }
+ if err := sca.manager.SetSys(l); err != nil {
+ if whitelist.IsInvalidErr(err) {
+ log.Errorf("Invalid CVE whitelist: %v", err)
+ sca.SendBadRequestError(err)
+ return
+ }
+ sca.SendInternalServerError(err)
+ return
+ }
+}
diff --git a/src/core/api/sys_cve_whitelist_test.go b/src/core/api/sys_cve_whitelist_test.go
new file mode 100644
index 000000000..d484b79f2
--- /dev/null
+++ b/src/core/api/sys_cve_whitelist_test.go
@@ -0,0 +1,126 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+package api
+
+import (
+ "github.com/goharbor/harbor/src/common/models"
+ "net/http"
+ "testing"
+)
+
+func TestSysCVEWhitelistAPIGet(t *testing.T) {
+ url := "/api/system/CVEWhitelist"
+ cases := []*codeCheckingCase{
+ // 401
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: url,
+ },
+ code: http.StatusUnauthorized,
+ },
+ // 200
+ {
+ request: &testingRequest{
+ method: http.MethodGet,
+ url: url,
+ credential: nonSysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ }
+ runCodeCheckingCases(t, cases...)
+}
+
+func TestSysCVEWhitelistAPIPut(t *testing.T) {
+ url := "/api/system/CVEWhitelist"
+ s := int64(1573254000)
+ cases := []*codeCheckingCase{
+ // 401
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: url,
+ },
+ code: http.StatusUnauthorized,
+ },
+ // 403
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: url,
+ credential: nonSysAdmin,
+ },
+ code: http.StatusForbidden,
+ },
+ // 400
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: url,
+ bodyJSON: []string{"CVE-1234-1234"},
+ credential: sysAdmin,
+ },
+ code: http.StatusBadRequest,
+ },
+ // 400
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: url,
+ bodyJSON: models.CVEWhitelist{
+ ExpiresAt: &s,
+ Items: []models.CVEWhitelistItem{
+ {CVEID: "CVE-2019-12310"},
+ },
+ ProjectID: 2,
+ },
+ credential: sysAdmin,
+ },
+ code: http.StatusBadRequest,
+ },
+ // 400
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: url,
+ bodyJSON: models.CVEWhitelist{
+ ExpiresAt: &s,
+ Items: []models.CVEWhitelistItem{
+ {CVEID: "CVE-2019-12310"},
+ {CVEID: "CVE-2019-12310"},
+ },
+ },
+ credential: sysAdmin,
+ },
+ code: http.StatusBadRequest,
+ },
+ // 200
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: url,
+ bodyJSON: models.CVEWhitelist{
+ ExpiresAt: &s,
+ Items: []models.CVEWhitelistItem{
+ {CVEID: "CVE-2019-12310"},
+ },
+ },
+ credential: sysAdmin,
+ },
+ code: http.StatusOK,
+ },
+ }
+ runCodeCheckingCases(t, cases...)
+}
diff --git a/src/core/api/systeminfo.go b/src/core/api/systeminfo.go
index 140a688df..a0929d545 100644
--- a/src/core/api/systeminfo.go
+++ b/src/core/api/systeminfo.go
@@ -16,13 +16,13 @@ package api
import (
"errors"
+ "fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"sync"
- "fmt"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
clairdao "github.com/goharbor/harbor/src/common/dao/clair"
@@ -106,6 +106,7 @@ type GeneralInfo struct {
RegistryStorageProviderName string `json:"registry_storage_provider_name"`
ReadOnly bool `json:"read_only"`
WithChartMuseum bool `json:"with_chartmuseum"`
+ NotificationEnable bool `json:"notification_enable"`
}
// GetVolumeInfo gets specific volume storage info.
@@ -188,6 +189,7 @@ func (sia *SystemInfoAPI) GetGeneralInfo() {
RegistryStorageProviderName: utils.SafeCastString(cfg[common.RegistryStorageProviderName]),
ReadOnly: config.ReadOnly(),
WithChartMuseum: config.WithChartMuseum(),
+ NotificationEnable: utils.SafeCastBool(cfg[common.NotificationEnable]),
}
if info.WithClair {
info.ClairVulnStatus = getClairVulnStatus()
diff --git a/src/core/api/user_test.go b/src/core/api/user_test.go
index 75d324322..0c2bbc519 100644
--- a/src/core/api/user_test.go
+++ b/src/core/api/user_test.go
@@ -23,7 +23,7 @@ import (
"github.com/goharbor/harbor/src/common/api"
"github.com/goharbor/harbor/src/common/models"
- "github.com/goharbor/harbor/tests/apitests/apilib"
+ "github.com/goharbor/harbor/src/testing/apitests/apilib"
"github.com/stretchr/testify/assert"
"github.com/astaxie/beego"
diff --git a/src/core/api/usergroup.go b/src/core/api/usergroup.go
index 317ab4362..3bfd2d34e 100644
--- a/src/core/api/usergroup.go
+++ b/src/core/api/usergroup.go
@@ -27,12 +27,14 @@ import (
"github.com/goharbor/harbor/src/common/utils/ldap"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/auth"
+ "github.com/goharbor/harbor/src/core/config"
)
// UserGroupAPI ...
type UserGroupAPI struct {
BaseController
- id int
+ id int
+ groupType int
}
const (
@@ -61,6 +63,15 @@ func (uga *UserGroupAPI) Prepare() {
uga.SendForbiddenError(errors.New(uga.SecurityCtx.GetUsername()))
return
}
+ authMode, err := config.AuthMode()
+ if err != nil {
+ uga.SendInternalServerError(errors.New("failed to get authentication mode"))
+ return
+ }
+ if authMode == common.LDAPAuth {
+ uga.groupType = common.LDAPGroupType
+ } else if authMode == common.HTTPAuth {
+ uga.groupType = common.HTTPGroupType
+ }
}
// Get ...
@@ -69,7 +80,7 @@ func (uga *UserGroupAPI) Get() {
uga.Data["json"] = make([]models.UserGroup, 0)
if ID == 0 {
// user group id not set, return all user group
- query := models.UserGroup{GroupType: common.LdapGroupType} // Current query LDAP group only
+ query := models.UserGroup{GroupType: uga.groupType}
userGroupList, err := group.QueryUserGroup(query)
if err != nil {
uga.SendInternalServerError(fmt.Errorf("failed to query database for user group list, error: %v", err))
@@ -103,41 +114,50 @@ func (uga *UserGroupAPI) Post() {
}
userGroup.ID = 0
- userGroup.GroupType = common.LdapGroupType
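+ // default the group type to the one matching the current
+ // authentication mode when it is not specified in the request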
+ if userGroup.GroupType == 0 {
+ userGroup.GroupType = uga.groupType
+ }
userGroup.LdapGroupDN = strings.TrimSpace(userGroup.LdapGroupDN)
userGroup.GroupName = strings.TrimSpace(userGroup.GroupName)
if len(userGroup.GroupName) == 0 {
uga.SendBadRequestError(errors.New(userNameEmptyMsg))
return
}
- query := models.UserGroup{GroupType: userGroup.GroupType, LdapGroupDN: userGroup.LdapGroupDN}
- result, err := group.QueryUserGroup(query)
- if err != nil {
- uga.SendInternalServerError(fmt.Errorf("error occurred in add user group, error: %v", err))
- return
- }
- if len(result) > 0 {
- uga.SendConflictError(errors.New("error occurred in add user group, duplicate user group exist"))
- return
- }
- // User can not add ldap group when the ldap server is offline
- ldapGroup, err := auth.SearchGroup(userGroup.LdapGroupDN)
- if err == ldap.ErrNotFound || ldapGroup == nil {
- uga.SendBadRequestError(fmt.Errorf("LDAP Group DN is not found: DN:%v", userGroup.LdapGroupDN))
- return
- }
- if err == ldap.ErrDNSyntax {
- uga.SendBadRequestError(fmt.Errorf("invalid DN syntax. DN: %v", userGroup.LdapGroupDN))
- return
- }
- if err != nil {
- uga.SendInternalServerError(fmt.Errorf("Error occurred in search user group. error: %v", err))
- return
+
+ if userGroup.GroupType == common.LDAPGroupType {
+ query := models.UserGroup{GroupType: userGroup.GroupType, LdapGroupDN: userGroup.LdapGroupDN}
+ result, err := group.QueryUserGroup(query)
+ if err != nil {
+ uga.SendInternalServerError(fmt.Errorf("error occurred in add user group, error: %v", err))
+ return
+ }
+ if len(result) > 0 {
+ uga.SendConflictError(errors.New("error occurred in add user group, duplicate user group exist"))
+ return
+ }
+ // User can not add ldap group when the ldap server is offline
+ ldapGroup, err := auth.SearchGroup(userGroup.LdapGroupDN)
+ if err == ldap.ErrNotFound || ldapGroup == nil {
+ uga.SendBadRequestError(fmt.Errorf("LDAP Group DN is not found: DN:%v", userGroup.LdapGroupDN))
+ return
+ }
+ if err == ldap.ErrDNSyntax {
+ uga.SendBadRequestError(fmt.Errorf("invalid DN syntax. DN: %v", userGroup.LdapGroupDN))
+ return
+ }
+ if err != nil {
+ uga.SendInternalServerError(fmt.Errorf("error occurred in search user group. error: %v", err))
+ return
+ }
}
groupID, err := group.AddUserGroup(userGroup)
if err != nil {
- uga.SendInternalServerError(fmt.Errorf("Error occurred in add user group, error: %v", err))
+ if err == group.ErrGroupNameDup {
+ uga.SendConflictError(fmt.Errorf("duplicated user group name %s", userGroup.GroupName))
+ return
+ }
+ uga.SendInternalServerError(fmt.Errorf("error occurred in add user group, error: %v", err))
return
}
uga.Redirect(http.StatusCreated, strconv.FormatInt(int64(groupID), 10))
@@ -150,13 +170,17 @@ func (uga *UserGroupAPI) Put() {
uga.SendBadRequestError(err)
return
}
+ if userGroup.GroupType == common.HTTPGroupType {
+ uga.SendBadRequestError(errors.New("HTTP group is not allowed to update"))
+ return
+ }
ID := uga.id
userGroup.GroupName = strings.TrimSpace(userGroup.GroupName)
if len(userGroup.GroupName) == 0 {
uga.SendBadRequestError(errors.New(userNameEmptyMsg))
return
}
- userGroup.GroupType = common.LdapGroupType
+ userGroup.GroupType = common.LDAPGroupType
log.Debugf("Updated user group %v", userGroup)
err := group.UpdateUserGroupName(ID, userGroup.GroupName)
if err != nil {
diff --git a/src/core/api/usergroup_test.go b/src/core/api/usergroup_test.go
index ebeeefb4d..dad91080e 100644
--- a/src/core/api/usergroup_test.go
+++ b/src/core/api/usergroup_test.go
@@ -35,7 +35,7 @@ func TestUserGroupAPI_GetAndDelete(t *testing.T) {
groupID, err := group.AddUserGroup(models.UserGroup{
GroupName: "harbor_users",
LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com",
- GroupType: common.LdapGroupType,
+ GroupType: common.LDAPGroupType,
})
if err != nil {
@@ -88,7 +88,7 @@ func TestUserGroupAPI_Post(t *testing.T) {
groupID, err := group.AddUserGroup(models.UserGroup{
GroupName: "harbor_group",
LdapGroupDN: "cn=harbor_group,ou=groups,dc=example,dc=com",
- GroupType: common.LdapGroupType,
+ GroupType: common.LDAPGroupType,
})
if err != nil {
t.Errorf("Error occurred when AddUserGroup: %v", err)
@@ -104,7 +104,32 @@ func TestUserGroupAPI_Post(t *testing.T) {
bodyJSON: &models.UserGroup{
GroupName: "harbor_group",
LdapGroupDN: "cn=harbor_group,ou=groups,dc=example,dc=com",
- GroupType: common.LdapGroupType,
+ GroupType: common.LDAPGroupType,
+ },
+ credential: admin,
+ },
+ code: http.StatusConflict,
+ },
+ // 201
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/usergroups",
+ bodyJSON: &models.UserGroup{
+ GroupName: "vsphere.local\\guest",
+ GroupType: common.HTTPGroupType,
+ },
+ credential: admin,
+ },
+ code: http.StatusCreated,
+ },
+ {
+ request: &testingRequest{
+ method: http.MethodPost,
+ url: "/api/usergroups",
+ bodyJSON: &models.UserGroup{
+ GroupName: "vsphere.local\\guest",
+ GroupType: common.HTTPGroupType,
},
credential: admin,
},
@@ -118,7 +143,7 @@ func TestUserGroupAPI_Put(t *testing.T) {
groupID, err := group.AddUserGroup(models.UserGroup{
GroupName: "harbor_group",
LdapGroupDN: "cn=harbor_groups,ou=groups,dc=example,dc=com",
- GroupType: common.LdapGroupType,
+ GroupType: common.LDAPGroupType,
})
defer group.DeleteUserGroup(groupID)
@@ -149,6 +174,19 @@ func TestUserGroupAPI_Put(t *testing.T) {
},
code: http.StatusOK,
},
+ // 400
+ {
+ request: &testingRequest{
+ method: http.MethodPut,
+ url: fmt.Sprintf("/api/usergroups/%d", groupID),
+ bodyJSON: &models.UserGroup{
+ GroupName: "my_group",
+ GroupType: common.HTTPGroupType,
+ },
+ credential: admin,
+ },
+ code: http.StatusBadRequest,
+ },
}
runCodeCheckingCases(t, cases...)
}
diff --git a/src/core/api/utils.go b/src/core/api/utils.go
index ae7f30f7a..4fd20d383 100644
--- a/src/core/api/utils.go
+++ b/src/core/api/utils.go
@@ -24,7 +24,6 @@ import (
commonhttp "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
- "github.com/goharbor/harbor/src/common/utils/clair"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/common/utils/registry"
"github.com/goharbor/harbor/src/common/utils/registry/auth"
@@ -39,7 +38,7 @@ func SyncRegistry(pm promgr.ProjectManager) error {
log.Infof("Start syncing repositories from registry to DB... ")
- reposInRegistry, err := catalog()
+ reposInRegistry, err := Catalog()
if err != nil {
log.Error(err)
return err
@@ -106,7 +105,8 @@ func SyncRegistry(pm promgr.ProjectManager) error {
return nil
}
-func catalog() ([]string, error) {
+// Catalog ...
+func Catalog() ([]string, error) {
repositories := []string{}
rc, err := initRegistryClient()
@@ -279,35 +279,3 @@ func repositoryExist(name string, client *registry.Repository) (bool, error) {
}
return len(tags) != 0, nil
}
-
-// transformVulnerabilities transforms the returned value of Clair API to a list of VulnerabilityItem
-func transformVulnerabilities(layerWithVuln *models.ClairLayerEnvelope) []*models.VulnerabilityItem {
- res := []*models.VulnerabilityItem{}
- l := layerWithVuln.Layer
- if l == nil {
- return res
- }
- features := l.Features
- if features == nil {
- return res
- }
- for _, f := range features {
- vulnerabilities := f.Vulnerabilities
- if vulnerabilities == nil {
- continue
- }
- for _, v := range vulnerabilities {
- vItem := &models.VulnerabilityItem{
- ID: v.Name,
- Pkg: f.Name,
- Version: f.Version,
- Severity: clair.ParseClairSev(v.Severity),
- Fixed: v.FixedBy,
- Link: v.Link,
- Description: v.Description,
- }
- res = append(res, vItem)
- }
- }
- return res
-}
diff --git a/src/core/auth/authproxy/auth.go b/src/core/auth/authproxy/auth.go
index e388cbad6..1efe42f3e 100644
--- a/src/core/auth/authproxy/auth.go
+++ b/src/core/auth/authproxy/auth.go
@@ -16,18 +16,25 @@ package authproxy
import (
"crypto/tls"
+ "encoding/json"
+ "errors"
"fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/dao/group"
+
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/auth"
"github.com/goharbor/harbor/src/core/config"
- "io/ioutil"
- "net/http"
- "strings"
- "sync"
- "time"
+ "github.com/goharbor/harbor/src/pkg/authproxy"
+ k8s_api_v1beta1 "k8s.io/api/authentication/v1beta1"
)
const refreshDuration = 2 * time.Second
@@ -45,11 +52,16 @@ var insecureTransport = &http.Transport{
type Auth struct {
auth.DefaultAuthenticateHelper
sync.Mutex
- Endpoint string
- SkipCertVerify bool
- AlwaysOnboard bool
- settingTimeStamp time.Time
- client *http.Client
+ Endpoint string
+ TokenReviewEndpoint string
+ SkipCertVerify bool
+ SkipSearch bool
+ settingTimeStamp time.Time
+ client *http.Client
+}
+
+type session struct {
+ SessionID string `json:"session_id,omitempty"`
}
// Authenticate issues http POST request to Endpoint if it returns 200 the authentication is considered success.
@@ -72,7 +84,39 @@ func (a *Auth) Authenticate(m models.AuthModel) (*models.User, error) {
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
- return &models.User{Username: m.Principal}, nil
+ user := &models.User{Username: m.Principal}
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ log.Warningf("Failed to read response body, error: %v", err)
+ return nil, auth.ErrAuth{}
+ }
+ s := session{}
+ err = json.Unmarshal(data, &s)
+ if err != nil {
+ log.Errorf("failed to read session %v", err)
+ }
+
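+ // the session ID returned by the auth proxy is verified via the
+ // Kubernetes TokenReview API to obtain the user's group membership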
+ reviewResponse, err := a.tokenReview(s.SessionID)
+ if err != nil {
+ return nil, err
+ }
+ if reviewResponse == nil {
+ return nil, auth.ErrAuth{}
+ }
+
+ // Attach user group ID information
+ ugList := reviewResponse.Status.User.Groups
+ log.Debugf("user groups %+v", ugList)
+ if len(ugList) > 0 {
+ groupIDList, err := group.GetGroupIDByGroupName(ugList, common.HTTPGroupType)
+ if err != nil {
+ return nil, err
+ }
+ log.Debugf("current user's group ID list is %+v", groupIDList)
+ user.GroupIDs = groupIDList
+ }
+ return user, nil
+
} else if resp.StatusCode == http.StatusUnauthorized {
return nil, auth.ErrAuth{}
} else {
@@ -81,10 +125,19 @@ func (a *Auth) Authenticate(m models.AuthModel) (*models.User, error) {
log.Warningf("Failed to read response body, error: %v", err)
}
return nil, fmt.Errorf("failed to authenticate, status code: %d, text: %s", resp.StatusCode, string(data))
+
}
}
+func (a *Auth) tokenReview(sessionID string) (*k8s_api_v1beta1.TokenReview, error) {
+ httpAuthProxySetting, err := config.HTTPAuthProxySetting()
+ if err != nil {
+ return nil, err
+ }
+ return authproxy.TokenReview(sessionID, httpAuthProxySetting)
+}
+
// OnBoardUser delegates to dao pkg to insert/update data in DB.
func (a *Auth) OnBoardUser(u *models.User) error {
return dao.OnBoardUser(u)
@@ -102,14 +155,14 @@ func (a *Auth) PostAuthenticate(u *models.User) error {
}
// SearchUser returns nil as authproxy does not have such capability.
-// When AlwaysOnboard is set it always return the default model.
+// When SkipSearch is set, it always returns the default model.
func (a *Auth) SearchUser(username string) (*models.User, error) {
err := a.ensure()
if err != nil {
log.Warningf("Failed to refresh configuration for HTTP Auth Proxy Authenticator, error: %v, the default settings will be used", err)
}
var u *models.User
- if a.AlwaysOnboard {
+ if a.SkipSearch {
u = &models.User{Username: username}
if err := a.fillInModel(u); err != nil {
return nil, err
@@ -118,6 +171,37 @@ func (a *Auth) SearchUser(username string) (*models.User, error) {
return u, nil
}
+// SearchGroup searches whether the group exists in the authentication provider; for HTTP auth, if SkipSearch is true, the group is assumed to exist in the authentication provider.
+func (a *Auth) SearchGroup(groupKey string) (*models.UserGroup, error) {
+ err := a.ensure()
+ if err != nil {
+ log.Warningf("Failed to refresh configuration for HTTP Auth Proxy Authenticator, error: %v, the default settings will be used", err)
+ }
+ var ug *models.UserGroup
+ if a.SkipSearch {
+ ug = &models.UserGroup{
+ GroupName: groupKey,
+ GroupType: common.HTTPGroupType,
+ }
+ return ug, nil
+ }
+ return nil, nil
+}
+
+// OnBoardGroup creates a user group entity in Harbor DB; altGroupName is not used.
+func (a *Auth) OnBoardGroup(u *models.UserGroup, altGroupName string) error {
+ // a group name must be provided to on-board the user group
+ if len(u.GroupName) == 0 {
+ return errors.New("should provide a group name")
+ }
+ u.GroupType = common.HTTPGroupType
+ err := group.OnBoardUserGroup(u)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
func (a *Auth) fillInModel(u *models.User) error {
if strings.TrimSpace(u.Username) == "" {
return fmt.Errorf("username cannot be empty")
@@ -127,8 +211,6 @@ func (a *Auth) fillInModel(u *models.User) error {
u.Comment = userEntryComment
if strings.Contains(u.Username, "@") {
u.Email = u.Username
- } else {
- u.Email = fmt.Sprintf("%s@placeholder.com", u.Username)
}
return nil
}
@@ -145,8 +227,9 @@ func (a *Auth) ensure() error {
return err
}
a.Endpoint = setting.Endpoint
+ a.TokenReviewEndpoint = setting.TokenReviewEndpoint
a.SkipCertVerify = !setting.VerifyCert
- a.AlwaysOnboard = setting.AlwaysOnBoard
+ a.SkipSearch = setting.SkipSearch
}
if a.SkipCertVerify {
a.client.Transport = insecureTransport
diff --git a/src/core/auth/authproxy/auth_test.go b/src/core/auth/authproxy/auth_test.go
index 0e45b7388..b1fb4ab22 100644
--- a/src/core/auth/authproxy/auth_test.go
+++ b/src/core/auth/authproxy/auth_test.go
@@ -15,18 +15,20 @@
package authproxy
import (
+ "net/http/httptest"
+ "os"
+ "testing"
+ "time"
+
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/dao/group"
"github.com/goharbor/harbor/src/common/models"
cut "github.com/goharbor/harbor/src/common/utils/test"
"github.com/goharbor/harbor/src/core/auth"
"github.com/goharbor/harbor/src/core/auth/authproxy/test"
"github.com/goharbor/harbor/src/core/config"
"github.com/stretchr/testify/assert"
- "net/http/httptest"
- "os"
- "testing"
- "time"
)
var mockSvr *httptest.Server
@@ -41,16 +43,25 @@ func TestMain(m *testing.M) {
}
mockSvr = test.NewMockServer(map[string]string{"jt": "pp", "Admin@vsphere.local": "Admin!23"})
defer mockSvr.Close()
+ defer dao.ExecuteBatchSQL([]string{"delete from user_group where group_name='OnBoardTest'"})
a = &Auth{
- Endpoint: mockSvr.URL + "/test/login",
- SkipCertVerify: true,
+ Endpoint: mockSvr.URL + "/test/login",
+ TokenReviewEndpoint: mockSvr.URL + "/test/tokenreview",
+ SkipCertVerify: true,
// So it won't require mocking the cfgManager
settingTimeStamp: time.Now(),
}
+ cfgMap := cut.GetUnitTestConfig()
conf := map[string]interface{}{
- common.HTTPAuthProxyEndpoint: "dummy",
- common.HTTPAuthProxyTokenReviewEndpoint: "dummy",
- common.HTTPAuthProxyVerifyCert: "false",
+ common.HTTPAuthProxyEndpoint: a.Endpoint,
+ common.HTTPAuthProxyTokenReviewEndpoint: a.TokenReviewEndpoint,
+ common.HTTPAuthProxyVerifyCert: !a.SkipCertVerify,
+ common.PostGreSQLSSLMode: cfgMap[common.PostGreSQLSSLMode],
+ common.PostGreSQLUsername: cfgMap[common.PostGreSQLUsername],
+ common.PostGreSQLPort: cfgMap[common.PostGreSQLPort],
+ common.PostGreSQLHOST: cfgMap[common.PostGreSQLHOST],
+ common.PostGreSQLPassword: cfgMap[common.PostGreSQLPassword],
+ common.PostGreSQLDatabase: cfgMap[common.PostGreSQLDatabase],
}
config.InitWithSettings(conf)
@@ -64,6 +75,10 @@ func TestMain(m *testing.M) {
}
func TestAuth_Authenticate(t *testing.T) {
+ groupIDs, err := group.GetGroupIDByGroupName([]string{"vsphere.local\\users", "vsphere.local\\administrators"}, common.HTTPGroupType)
+ if err != nil {
+ t.Fatal("Failed to get groupIDs")
+ }
t.Log("auth endpoint: ", a.Endpoint)
type output struct {
user models.User
@@ -80,6 +95,7 @@ func TestAuth_Authenticate(t *testing.T) {
expect: output{
user: models.User{
Username: "jt",
+ GroupIDs: groupIDs,
},
err: nil,
},
@@ -92,6 +108,7 @@ func TestAuth_Authenticate(t *testing.T) {
expect: output{
user: models.User{
Username: "Admin@vsphere.local",
+ GroupIDs: groupIDs,
// Email: "Admin@placeholder.com",
// Password: pwd,
// Comment: fmt.Sprintf(cmtTmpl, path.Join(mockSvr.URL, "/test/login")),
@@ -137,7 +154,7 @@ func TestAuth_PostAuthenticate(t *testing.T) {
},
expect: models.User{
Username: "jt",
- Email: "jt@placeholder.com",
+ Email: "",
Realname: "jt",
Password: pwd,
Comment: userEntryComment,
@@ -165,3 +182,19 @@ func TestAuth_PostAuthenticate(t *testing.T) {
}
}
+
+func TestAuth_OnBoardGroup(t *testing.T) {
+ input := &models.UserGroup{
+ GroupName: "OnBoardTest",
+ GroupType: common.HTTPGroupType,
+ }
+ a.OnBoardGroup(input, "")
+
+ assert.True(t, input.ID > 0, "The OnBoardGroup should have a valid group ID")
+
+ emptyGroup := &models.UserGroup{}
+ err := a.OnBoardGroup(emptyGroup, "")
+ if err == nil {
+ t.Fatal("Empty user group should failed to OnBoard")
+ }
+}
diff --git a/src/core/auth/authproxy/test/server.go b/src/core/auth/authproxy/test/server.go
index b11ec17aa..6fead0196 100644
--- a/src/core/auth/authproxy/test/server.go
+++ b/src/core/auth/authproxy/test/server.go
@@ -41,9 +41,20 @@ func (ah *authHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
}
}
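+// reviewTokenHandler mocks the Kubernetes TokenReview endpoint; it always
+// reports the token as authenticated and returns a fixed set of groups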
+type reviewTokenHandler struct {
+}
+
+func (rth *reviewTokenHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if req.Method != http.MethodPost {
+ http.Error(rw, "", http.StatusMethodNotAllowed)
+ return
+ }
+ rw.Write([]byte(`{"apiVersion": "authentication.k8s.io/v1beta1", "kind": "TokenReview", "status": {"authenticated": true, "user": {"username": "administrator@vsphere.local", "groups": ["vsphere.local\\users", "vsphere.local\\administrators", "vsphere.local\\caadmins", "vsphere.local\\systemconfiguration.bashshelladministrators", "vsphere.local\\systemconfiguration.administrators", "vsphere.local\\licenseservice.administrators", "vsphere.local\\everyone"], "extra": {"method": ["basic"]}}}}`))
+}
+
// NewMockServer creates the mock server for testing
func NewMockServer(creds map[string]string) *httptest.Server {
mux := http.NewServeMux()
mux.Handle("/test/login", &authHandler{m: creds})
+ mux.Handle("/test/tokenreview", &reviewTokenHandler{})
return httptest.NewTLSServer(mux)
}
diff --git a/src/core/auth/ldap/ldap.go b/src/core/auth/ldap/ldap.go
index 4d8b63fdf..c5fd86d29 100644
--- a/src/core/auth/ldap/ldap.go
+++ b/src/core/auth/ldap/ldap.go
@@ -20,11 +20,11 @@ import (
"strings"
"github.com/goharbor/harbor/src/common"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/dao/group"
"github.com/goharbor/harbor/src/common/utils"
goldap "gopkg.in/ldap.v2"
- "github.com/goharbor/harbor/src/common/dao"
- "github.com/goharbor/harbor/src/common/dao/group"
"github.com/goharbor/harbor/src/common/models"
ldapUtils "github.com/goharbor/harbor/src/common/utils/ldap"
"github.com/goharbor/harbor/src/common/utils/log"
@@ -79,7 +79,7 @@ func (l *Auth) Authenticate(m models.AuthModel) (*models.User, error) {
u.Username = ldapUsers[0].Username
u.Email = strings.TrimSpace(ldapUsers[0].Email)
u.Realname = ldapUsers[0].Realname
- userGroups := make([]*models.UserGroup, 0)
+ ugIDs := []int{}
dn := ldapUsers[0].DN
if err = ldapSession.Bind(dn, m.Password); err != nil {
@@ -95,6 +95,7 @@ func (l *Auth) Authenticate(m models.AuthModel) (*models.User, error) {
for _, groupDN := range ldapUsers[0].GroupDNList {
groupDN = utils.TrimLower(groupDN)
+ // Grant the admin role if the user belongs to the LDAP admin group
if len(groupAdminDN) > 0 && groupAdminDN == groupDN {
u.HasAdminRole = true
}
@@ -103,16 +104,16 @@ func (l *Auth) Authenticate(m models.AuthModel) (*models.User, error) {
GroupType: 1,
LdapGroupDN: groupDN,
}
- userGroupList, err := group.QueryUserGroup(userGroupQuery)
+ userGroups, err := group.QueryUserGroup(userGroupQuery)
if err != nil {
continue
}
- if len(userGroupList) == 0 {
+ if len(userGroups) == 0 {
continue
}
- userGroups = append(userGroups, userGroupList[0])
+ ugIDs = append(ugIDs, userGroups[0].ID)
}
- u.GroupList = userGroups
+ u.GroupIDs = ugIDs
return &u, nil
}
@@ -123,8 +124,6 @@ func (l *Auth) OnBoardUser(u *models.User) error {
if u.Email == "" {
if strings.Contains(u.Username, "@") {
u.Email = u.Username
- } else {
- u.Email = u.Username + "@placeholder.com"
}
}
u.Password = "12345678AbC" // Password is not kept in local db
@@ -204,7 +203,7 @@ func (l *Auth) OnBoardGroup(u *models.UserGroup, altGroupName string) error {
if len(altGroupName) > 0 {
u.GroupName = altGroupName
}
- u.GroupType = common.LdapGroupType
+ u.GroupType = common.LDAPGroupType
// Check duplicate LDAP DN in usergroup, if usergroup exist, return error
userGroupList, err := group.QueryUserGroup(models.UserGroup{LdapGroupDN: u.LdapGroupDN})
if err != nil {
@@ -213,7 +212,7 @@ func (l *Auth) OnBoardGroup(u *models.UserGroup, altGroupName string) error {
if len(userGroupList) > 0 {
return auth.ErrDuplicateLDAPGroup
}
- return group.OnBoardUserGroup(u, "LdapGroupDN", "GroupType")
+ return group.OnBoardUserGroup(u)
}
// PostAuthenticate -- If user exist in harbor DB, sync email address, if not exist, call OnBoardUser
diff --git a/src/core/auth/ldap/ldap_test.go b/src/core/auth/ldap/ldap_test.go
index 5eb852fbe..9002bd8bf 100644
--- a/src/core/auth/ldap/ldap_test.go
+++ b/src/core/auth/ldap/ldap_test.go
@@ -55,7 +55,7 @@ var ldapTestConfig = map[string]interface{}{
common.LDAPGroupBaseDN: "dc=example,dc=com",
common.LDAPGroupAttributeName: "cn",
common.LDAPGroupSearchScope: 2,
- common.LdapGroupAdminDn: "cn=harbor_users,ou=groups,dc=example,dc=com",
+ common.LDAPGroupAdminDn: "cn=harbor_users,ou=groups,dc=example,dc=com",
}
func TestMain(m *testing.M) {
@@ -92,8 +92,8 @@ func TestMain(m *testing.M) {
"delete from user_group",
"delete from project_member",
}
- dao.PrepareTestData(clearSqls, initSqls)
-
+ dao.ExecuteBatchSQL(initSqls)
+ defer dao.ExecuteBatchSQL(clearSqls)
retCode := m.Run()
os.Exit(retCode)
}
@@ -224,7 +224,7 @@ func TestOnBoardUser_02(t *testing.T) {
t.Errorf("Failed to onboard user")
}
- assert.Equal(t, "sample02@placeholder.com", user.Email)
+ assert.Equal(t, "", user.Email)
dao.CleanUser(int64(user.UserID))
}
@@ -405,6 +405,7 @@ func TestAddProjectMemberWithLdapGroup(t *testing.T) {
ProjectID: currentProject.ProjectID,
MemberGroup: models.UserGroup{
LdapGroupDN: "cn=harbor_users,ou=groups,dc=example,dc=com",
+ GroupType: 1,
},
Role: models.PROJECTADMIN,
}
diff --git a/src/core/auth/uaa/uaa.go b/src/core/auth/uaa/uaa.go
index b4889302c..8ca250fc1 100644
--- a/src/core/auth/uaa/uaa.go
+++ b/src/core/auth/uaa/uaa.go
@@ -77,9 +77,8 @@ func fillEmailRealName(user *models.User) {
if len(user.Realname) == 0 {
user.Realname = user.Username
}
- if len(user.Email) == 0 {
- // TODO: handle the case when user.Username itself is an email address.
- user.Email = user.Username + "@uaa.placeholder"
+ if len(user.Email) == 0 && strings.Contains(user.Username, "@") {
+ user.Email = user.Username
}
}
diff --git a/src/core/auth/uaa/uaa_test.go b/src/core/auth/uaa/uaa_test.go
index 7b0ff9ea9..a62bd7d7d 100644
--- a/src/core/auth/uaa/uaa_test.go
+++ b/src/core/auth/uaa/uaa_test.go
@@ -110,7 +110,7 @@ func TestOnBoardUser(t *testing.T) {
user, _ := dao.GetUser(models.User{Username: "test"})
assert.Equal("test", user.Realname)
assert.Equal("test", user.Username)
- assert.Equal("test@uaa.placeholder", user.Email)
+ assert.Equal("", user.Email)
err3 := dao.ClearTable(models.UserTable)
assert.Nil(err3)
}
@@ -128,7 +128,7 @@ func TestPostAuthenticate(t *testing.T) {
}
assert.Nil(err)
user, _ := dao.GetUser(models.User{Username: "test"})
- assert.Equal("test@uaa.placeholder", user.Email)
+ assert.Equal("", user.Email)
um2.Email = "newEmail@new.com"
um2.Realname = "newName"
err2 := auth.PostAuthenticate(um2)
@@ -145,7 +145,7 @@ func TestPostAuthenticate(t *testing.T) {
assert.Nil(err3)
user3, _ := dao.GetUser(models.User{Username: "test"})
assert.Equal(user3.UserID, um3.UserID)
- assert.Equal("test@uaa.placeholder", user3.Email)
+ assert.Equal("", user3.Email)
assert.Equal("test", user3.Realname)
err4 := dao.ClearTable(models.UserTable)
assert.Nil(err4)
diff --git a/src/core/config/config.go b/src/core/config/config.go
old mode 100644
new mode 100755
index 31ed8cadc..b3808745d
--- a/src/core/config/config.go
+++ b/src/core/config/config.go
@@ -224,7 +224,7 @@ func LDAPGroupConf() (*models.LdapGroupConf, error) {
LdapGroupFilter: cfgMgr.Get(common.LDAPGroupSearchFilter).GetString(),
LdapGroupNameAttribute: cfgMgr.Get(common.LDAPGroupAttributeName).GetString(),
LdapGroupSearchScope: cfgMgr.Get(common.LDAPGroupSearchScope).GetInt(),
- LdapGroupAdminDN: cfgMgr.Get(common.LdapGroupAdminDn).GetString(),
+ LdapGroupAdminDN: cfgMgr.Get(common.LDAPGroupAdminDn).GetString(),
LdapGroupMembershipAttribute: cfgMgr.Get(common.LDAPGroupMembershipAttribute).GetString(),
}, nil
}
@@ -280,7 +280,11 @@ func InternalJobServiceURL() string {
// InternalCoreURL returns the local harbor core url
func InternalCoreURL() string {
return strings.TrimSuffix(cfgMgr.Get(common.CoreURL).GetString(), "/")
+}
+// LocalCoreURL returns the local harbor core url
+func LocalCoreURL() string {
+ return cfgMgr.Get(common.CoreLocalURL).GetString()
}
// InternalTokenServiceEndpoint returns token service endpoint for internal communication between Harbor containers
@@ -327,12 +331,14 @@ func Database() (*models.Database, error) {
database := &models.Database{}
database.Type = cfgMgr.Get(common.DatabaseType).GetString()
postgresql := &models.PostGreSQL{
- Host: cfgMgr.Get(common.PostGreSQLHOST).GetString(),
- Port: cfgMgr.Get(common.PostGreSQLPort).GetInt(),
- Username: cfgMgr.Get(common.PostGreSQLUsername).GetString(),
- Password: cfgMgr.Get(common.PostGreSQLPassword).GetString(),
- Database: cfgMgr.Get(common.PostGreSQLDatabase).GetString(),
- SSLMode: cfgMgr.Get(common.PostGreSQLSSLMode).GetString(),
+ Host: cfgMgr.Get(common.PostGreSQLHOST).GetString(),
+ Port: cfgMgr.Get(common.PostGreSQLPort).GetInt(),
+ Username: cfgMgr.Get(common.PostGreSQLUsername).GetString(),
+ Password: cfgMgr.Get(common.PostGreSQLPassword).GetString(),
+ Database: cfgMgr.Get(common.PostGreSQLDatabase).GetString(),
+ SSLMode: cfgMgr.Get(common.PostGreSQLSSLMode).GetString(),
+ MaxIdleConns: cfgMgr.Get(common.PostGreSQLMaxIdleConns).GetInt(),
+ MaxOpenConns: cfgMgr.Get(common.PostGreSQLMaxOpenConns).GetInt(),
}
database.PostGreSQL = postgresql
@@ -482,7 +488,7 @@ func HTTPAuthProxySetting() (*models.HTTPAuthProxy, error) {
Endpoint: cfgMgr.Get(common.HTTPAuthProxyEndpoint).GetString(),
TokenReviewEndpoint: cfgMgr.Get(common.HTTPAuthProxyTokenReviewEndpoint).GetString(),
VerifyCert: cfgMgr.Get(common.HTTPAuthProxyVerifyCert).GetBool(),
- AlwaysOnBoard: cfgMgr.Get(common.HTTPAuthProxyAlwaysOnboard).GetBool(),
+ SkipSearch: cfgMgr.Get(common.HTTPAuthProxySkipSearch).GetBool(),
}, nil
}
@@ -510,3 +516,24 @@ func OIDCSetting() (*models.OIDCSetting, error) {
Scope: scope,
}, nil
}
+
+// NotificationEnable returns a bool indicating whether notification is enabled in Harbor
+func NotificationEnable() bool {
+ return cfgMgr.Get(common.NotificationEnable).GetBool()
+}
+
+// QuotaPerProjectEnable returns a bool indicating whether per-project quota is enabled in Harbor
+func QuotaPerProjectEnable() bool {
+ return cfgMgr.Get(common.QuotaPerProjectEnable).GetBool()
+}
+
+// QuotaSetting returns the setting of quota.
+func QuotaSetting() (*models.QuotaSetting, error) {
+ if err := cfgMgr.Load(); err != nil {
+ return nil, err
+ }
+ return &models.QuotaSetting{
+ CountPerProject: cfgMgr.Get(common.CountPerProject).GetInt64(),
+ StoragePerProject: cfgMgr.Get(common.StoragePerProject).GetInt64(),
+ }, nil
+}
diff --git a/src/core/config/config_test.go b/src/core/config/config_test.go
index 89561778d..ae31c04bc 100644
--- a/src/core/config/config_test.go
+++ b/src/core/config/config_test.go
@@ -21,6 +21,7 @@ import (
"testing"
"fmt"
+
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
@@ -207,6 +208,10 @@ func TestConfig(t *testing.T) {
assert.Equal("http://myjob:8888", InternalJobServiceURL())
assert.Equal("http://myui:8888/service/token", InternalTokenServiceEndpoint())
+ localCoreURL := LocalCoreURL()
+ assert.Equal("http://127.0.0.1:8080", localCoreURL)
+
+ assert.True(NotificationEnable())
}
func currPath() string {
@@ -228,17 +233,17 @@ func TestConfigureValue_GetMap(t *testing.T) {
func TestHTTPAuthProxySetting(t *testing.T) {
m := map[string]interface{}{
- common.HTTPAuthProxyAlwaysOnboard: "true",
- common.HTTPAuthProxyVerifyCert: "true",
- common.HTTPAuthProxyEndpoint: "https://auth.proxy/suffix",
+ common.HTTPAuthProxySkipSearch: "true",
+ common.HTTPAuthProxyVerifyCert: "true",
+ common.HTTPAuthProxyEndpoint: "https://auth.proxy/suffix",
}
InitWithSettings(m)
v, e := HTTPAuthProxySetting()
assert.Nil(t, e)
assert.Equal(t, *v, models.HTTPAuthProxy{
- Endpoint: "https://auth.proxy/suffix",
- AlwaysOnBoard: true,
- VerifyCert: true,
+ Endpoint: "https://auth.proxy/suffix",
+ SkipSearch: true,
+ VerifyCert: true,
})
}
diff --git a/src/core/controllers/controllers_test.go b/src/core/controllers/controllers_test.go
index 1381a26d3..f38517ebc 100644
--- a/src/core/controllers/controllers_test.go
+++ b/src/core/controllers/controllers_test.go
@@ -32,7 +32,7 @@ import (
"github.com/goharbor/harbor/src/common/models"
utilstest "github.com/goharbor/harbor/src/common/utils/test"
"github.com/goharbor/harbor/src/core/config"
- "github.com/goharbor/harbor/src/core/proxy"
+ "github.com/goharbor/harbor/src/core/middlewares"
"github.com/stretchr/testify/assert"
)
@@ -102,8 +102,9 @@ func TestRedirectForOIDC(t *testing.T) {
// TestMain is a sample to run an endpoint test
func TestAll(t *testing.T) {
config.InitWithSettings(utilstest.GetUnitTestConfig())
- proxy.Init()
assert := assert.New(t)
+ err := middlewares.Init()
+ assert.Nil(err)
r, _ := http.NewRequest("POST", "/c/login", nil)
w := httptest.NewRecorder()
diff --git a/src/core/controllers/oidc.go b/src/core/controllers/oidc.go
index 1479b8e5a..903b99954 100644
--- a/src/core/controllers/oidc.go
+++ b/src/core/controllers/oidc.go
@@ -17,6 +17,9 @@ package controllers
import (
"encoding/json"
"fmt"
+ "net/http"
+ "strings"
+
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/models"
@@ -26,8 +29,6 @@ import (
"github.com/goharbor/harbor/src/core/api"
"github.com/goharbor/harbor/src/core/config"
"github.com/pkg/errors"
- "net/http"
- "strings"
)
const tokenKey = "oidc_token"
@@ -189,9 +190,6 @@ func (oc *OIDCController) Onboard() {
}
email := d.Email
- if email == "" {
- email = utils.GenerateRandomString() + "@placeholder.com"
- }
user := models.User{
Username: username,
Realname: d.Username,
diff --git a/src/core/controllers/proxy.go b/src/core/controllers/proxy.go
index 1ddaf9ca7..a8fe916ba 100644
--- a/src/core/controllers/proxy.go
+++ b/src/core/controllers/proxy.go
@@ -2,7 +2,7 @@ package controllers
import (
"github.com/astaxie/beego"
- "github.com/goharbor/harbor/src/core/proxy"
+ "github.com/goharbor/harbor/src/core/middlewares"
)
// RegistryProxy is the endpoint on UI for a reverse proxy pointing to registry
@@ -14,7 +14,7 @@ type RegistryProxy struct {
func (p *RegistryProxy) Handle() {
req := p.Ctx.Request
rw := p.Ctx.ResponseWriter
- proxy.Handle(rw, req)
+ middlewares.Handle(rw, req)
}
// Render ...
diff --git a/src/core/filter/security.go b/src/core/filter/security.go
index 54d5d4523..34f7310a5 100644
--- a/src/core/filter/security.go
+++ b/src/core/filter/security.go
@@ -41,15 +41,7 @@ import (
"github.com/goharbor/harbor/src/core/promgr/pmsdriver/admiral"
"strings"
- "encoding/json"
- k8s_api_v1beta1 "k8s.io/api/authentication/v1beta1"
- "k8s.io/client-go/kubernetes/scheme"
- "k8s.io/client-go/rest"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/runtime/serializer"
+ "github.com/goharbor/harbor/src/pkg/authproxy"
)
// ContextValueKey for content value
@@ -229,8 +221,10 @@ type oidcCliReqCtxModifier struct{}
func (oc *oidcCliReqCtxModifier) Modify(ctx *beegoctx.Context) bool {
path := ctx.Request.URL.Path
- if path != "/service/token" && !strings.HasPrefix(path, "/chartrepo/") {
- log.Debug("OIDC CLI modifer only handles request by docker CLI or helm CLI")
+ if path != "/service/token" &&
+ !strings.HasPrefix(path, "/chartrepo/") &&
+ !strings.HasPrefix(path, "/api/chartrepo/") {
+ log.Debug("OIDC CLI modifier only handles request by docker CLI or helm CLI")
return false
}
if ctx.Request.Context().Value(AuthModeKey).(string) != common.OIDCAuth {
@@ -319,60 +313,17 @@ func (ap *authProxyReqCtxModifier) Modify(ctx *beegoctx.Context) bool {
log.Errorf("User name %s doesn't meet the auth proxy name pattern", proxyUserName)
return false
}
-
httpAuthProxyConf, err := config.HTTPAuthProxySetting()
if err != nil {
log.Errorf("fail to get auth proxy settings, %v", err)
return false
}
-
- // Init auth client with the auth proxy endpoint.
- authClientCfg := &rest.Config{
- Host: httpAuthProxyConf.TokenReviewEndpoint,
- ContentConfig: rest.ContentConfig{
- GroupVersion: &schema.GroupVersion{},
- NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: scheme.Codecs},
- },
- BearerToken: proxyPwd,
- TLSClientConfig: rest.TLSClientConfig{
- Insecure: !httpAuthProxyConf.VerifyCert,
- },
- }
- authClient, err := rest.RESTClientFor(authClientCfg)
+ tokenReviewResponse, err := authproxy.TokenReview(proxyPwd, httpAuthProxyConf)
if err != nil {
- log.Errorf("fail to create auth client, %v", err)
+ log.Errorf("fail to review token, %v", err)
return false
}
- // Do auth with the token.
- tokenReviewRequest := &k8s_api_v1beta1.TokenReview{
- TypeMeta: metav1.TypeMeta{
- Kind: "TokenReview",
- APIVersion: "authentication.k8s.io/v1beta1",
- },
- Spec: k8s_api_v1beta1.TokenReviewSpec{
- Token: proxyPwd,
- },
- }
- res := authClient.Post().Body(tokenReviewRequest).Do()
- err = res.Error()
- if err != nil {
- log.Errorf("fail to POST auth request, %v", err)
- return false
- }
- resRaw, err := res.Raw()
- if err != nil {
- log.Errorf("fail to get raw data of token review, %v", err)
- return false
- }
-
- // Parse the auth response, check the user name and authenticated status.
- tokenReviewResponse := &k8s_api_v1beta1.TokenReview{}
- err = json.Unmarshal(resRaw, &tokenReviewResponse)
- if err != nil {
- log.Errorf("fail to decode token review, %v", err)
- return false
- }
if !tokenReviewResponse.Status.Authenticated {
log.Errorf("fail to auth user: %s", rawUserName)
return false
diff --git a/src/core/filter/security_test.go b/src/core/filter/security_test.go
index 17307efab..a74d2fa12 100644
--- a/src/core/filter/security_test.go
+++ b/src/core/filter/security_test.go
@@ -16,8 +16,6 @@ package filter
import (
"context"
- "github.com/goharbor/harbor/src/common/utils/oidc"
- "github.com/stretchr/testify/require"
"log"
"net/http"
"net/http/httptest"
@@ -27,6 +25,9 @@ import (
"testing"
"time"
+ "github.com/goharbor/harbor/src/common/utils/oidc"
+ "github.com/stretchr/testify/require"
+
"github.com/astaxie/beego"
beegoctx "github.com/astaxie/beego/context"
"github.com/astaxie/beego/session"
@@ -241,7 +242,7 @@ func TestAuthProxyReqCtxModifier(t *testing.T) {
defer server.Close()
c := map[string]interface{}{
- common.HTTPAuthProxyAlwaysOnboard: "true",
+ common.HTTPAuthProxySkipSearch: "true",
common.HTTPAuthProxyVerifyCert: "false",
common.HTTPAuthProxyEndpoint: "https://auth.proxy/suffix",
common.HTTPAuthProxyTokenReviewEndpoint: server.URL,
@@ -253,7 +254,7 @@ func TestAuthProxyReqCtxModifier(t *testing.T) {
assert.Nil(t, e)
assert.Equal(t, *v, models.HTTPAuthProxy{
Endpoint: "https://auth.proxy/suffix",
- AlwaysOnBoard: true,
+ SkipSearch: true,
VerifyCert: false,
TokenReviewEndpoint: server.URL,
})
diff --git a/src/core/main.go b/src/core/main.go
old mode 100644
new mode 100755
index 2c68141dd..412ab25d5
--- a/src/core/main.go
+++ b/src/core/main.go
@@ -17,15 +17,12 @@ package main
import (
"encoding/gob"
"fmt"
- "os"
- "os/signal"
- "strconv"
- "syscall"
-
"github.com/astaxie/beego"
_ "github.com/astaxie/beego/session/redis"
"github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/job"
"github.com/goharbor/harbor/src/common/models"
+ common_quota "github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/api"
@@ -33,10 +30,23 @@ import (
_ "github.com/goharbor/harbor/src/core/auth/db"
_ "github.com/goharbor/harbor/src/core/auth/ldap"
_ "github.com/goharbor/harbor/src/core/auth/uaa"
+ "os"
+ "os/signal"
+ "strconv"
+ "syscall"
+
+ quota "github.com/goharbor/harbor/src/core/api/quota"
+ _ "github.com/goharbor/harbor/src/core/api/quota/chart"
+ _ "github.com/goharbor/harbor/src/core/api/quota/registry"
+
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/filter"
- "github.com/goharbor/harbor/src/core/proxy"
+ "github.com/goharbor/harbor/src/core/middlewares"
+ _ "github.com/goharbor/harbor/src/core/notifier/topic"
"github.com/goharbor/harbor/src/core/service/token"
+ "github.com/goharbor/harbor/src/pkg/notification"
+ "github.com/goharbor/harbor/src/pkg/scheduler"
+ "github.com/goharbor/harbor/src/pkg/types"
"github.com/goharbor/harbor/src/replication"
)
@@ -70,6 +80,64 @@ func updateInitPassword(userID int, password string) error {
return nil
}
+// quotaSync syncs quota usage data when the quota migration left it missing or incomplete
+func quotaSync() error {
+ usages, err := dao.ListQuotaUsages()
+ if err != nil {
+ log.Errorf("list quota usage error, %v", err)
+ return err
+ }
+ projects, err := dao.GetProjects(nil)
+ if err != nil {
+ log.Errorf("list project error, %v", err)
+ return err
+ }
+
+ // The condition handles these two cases:
+ // 1. len(projects) > 1 && len(usages) == 1: existing projects have no usage records yet, as the 'library' project always has a usage record in the DB.
+ // 2. The migration failed while inserting usage records into the DB, and only some of them were inserted successfully.
+ if len(projects) != len(usages) {
+ log.Info("Start to sync quota data .....")
+ if err := quota.Sync(config.GlobalProjectMgr, true); err != nil {
+ log.Errorf("Fail to sync quota data, %v", err)
+ return err
+ }
+ log.Info("Success to sync quota data .....")
+ return nil
+ }
+
+ // Only has one project without usage
+ zero := common_quota.ResourceList{
+ common_quota.ResourceCount: 0,
+ common_quota.ResourceStorage: 0,
+ }
+ if len(projects) == 1 && len(usages) == 1 {
+ totalRepo, err := dao.GetTotalOfRepositories()
+ if err != nil {
+ log.Error(err)
+ return err
+ }
+ if totalRepo == 0 {
+ return nil
+ }
+ refID, err := strconv.ParseInt(usages[0].ReferenceID, 10, 64)
+ if err != nil {
+ log.Error(err)
+ return err
+ }
+ usedRes, err := types.NewResourceList(usages[0].Used)
+ if err != nil {
+ log.Error(err)
+ return err
+ }
+ if types.Equals(usedRes, zero) && refID == projects[0].ProjectID {
+ log.Info("Start to sync quota data .....")
+ if err := quota.Sync(config.GlobalProjectMgr, true); err != nil {
+ log.Errorf("Fail to sync quota data, %v", err)
+ return err
+ }
+ log.Info("Success to sync quota data .....")
+ }
+ }
+ return nil
+}
+
func gracefulShutdown(closing chan struct{}) {
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
@@ -106,6 +174,11 @@ func main() {
log.Fatalf("failed to load config: %v", err)
}
+ // init the jobservice client
+ job.Init()
+ // init the scheduler
+ scheduler.Init()
+
password, err := config.InitialAdminPassword()
if err != nil {
log.Fatalf("failed to get admin's initia password: %v", err)
@@ -135,6 +208,9 @@ func main() {
log.Fatalf("failed to init for replication: %v", err)
}
+ log.Info("initializing notification...")
+ notification.Init()
+
filter.Init()
beego.InsertFilter("/*", beego.BeforeRouter, filter.SecurityFilter)
beego.InsertFilter("/*", beego.BeforeRouter, filter.ReadonlyFilter)
@@ -158,7 +234,13 @@ func main() {
}
log.Info("Init proxy")
- proxy.Init()
- // go proxy.StartProxy()
+ if err := middlewares.Init(); err != nil {
+ log.Fatalf("init proxy error, %v", err)
+ }
+
+ if err := quotaSync(); err != nil {
+ log.Fatalf("quota migration error, %v", err)
+ }
+
beego.Run()
}
diff --git a/src/core/middlewares/chain.go b/src/core/middlewares/chain.go
new file mode 100644
index 000000000..822dc0c63
--- /dev/null
+++ b/src/core/middlewares/chain.go
@@ -0,0 +1,75 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middlewares
+
+import (
+ "net/http"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/middlewares/chart"
+ "github.com/goharbor/harbor/src/core/middlewares/contenttrust"
+ "github.com/goharbor/harbor/src/core/middlewares/countquota"
+ "github.com/goharbor/harbor/src/core/middlewares/listrepo"
+ "github.com/goharbor/harbor/src/core/middlewares/multiplmanifest"
+ "github.com/goharbor/harbor/src/core/middlewares/readonly"
+ "github.com/goharbor/harbor/src/core/middlewares/sizequota"
+ "github.com/goharbor/harbor/src/core/middlewares/url"
+ "github.com/goharbor/harbor/src/core/middlewares/vulnerable"
+ "github.com/justinas/alice"
+)
+
+// DefaultCreator ...
+type DefaultCreator struct {
+ middlewares []string
+}
+
+// New ...
+func New(middlewares []string) *DefaultCreator {
+ return &DefaultCreator{
+ middlewares: middlewares,
+ }
+}
+
+// Create creates a middleware chain ...
+func (b *DefaultCreator) Create() *alice.Chain {
+ chain := alice.New()
+ for _, mName := range b.middlewares {
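+ // Capture the loop variable so each appended constructor closes over its own middleware name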
+ middlewareName := mName
+ chain = chain.Append(func(next http.Handler) http.Handler {
+ constructor := b.getMiddleware(middlewareName)
+ if constructor == nil {
+ log.Errorf("cannot init middleware %s", middlewareName)
+ return nil
+ }
+ return constructor(next)
+ })
+ }
+ return &chain
+}
+
+func (b *DefaultCreator) getMiddleware(mName string) alice.Constructor {
+ middlewares := map[string]alice.Constructor{
+ CHART: func(next http.Handler) http.Handler { return chart.New(next) },
+ READONLY: func(next http.Handler) http.Handler { return readonly.New(next) },
+ URL: func(next http.Handler) http.Handler { return url.New(next) },
+ MUITIPLEMANIFEST: func(next http.Handler) http.Handler { return multiplmanifest.New(next) },
+ LISTREPO: func(next http.Handler) http.Handler { return listrepo.New(next) },
+ CONTENTTRUST: func(next http.Handler) http.Handler { return contenttrust.New(next) },
+ VULNERABLE: func(next http.Handler) http.Handler { return vulnerable.New(next) },
+ SIZEQUOTA: func(next http.Handler) http.Handler { return sizequota.New(next) },
+ COUNTQUOTA: func(next http.Handler) http.Handler { return countquota.New(next) },
+ }
+ return middlewares[mName]
+}
diff --git a/src/core/middlewares/chart/builder.go b/src/core/middlewares/chart/builder.go
new file mode 100644
index 000000000..ba54cd2de
--- /dev/null
+++ b/src/core/middlewares/chart/builder.go
@@ -0,0 +1,134 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chart
+
+import (
+ "fmt"
+ "net/http"
+ "regexp"
+ "strconv"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/middlewares/interceptor"
+ "github.com/goharbor/harbor/src/core/middlewares/interceptor/quota"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "github.com/goharbor/harbor/src/pkg/types"
+)
+
+var (
+ deleteChartVersionRe = regexp.MustCompile(`^/api/chartrepo/(?P<namespace>\w+)/charts/(?P<name>\w+)/(?P<version>[\w\d\.]+)/?$`)
+ createChartVersionRe = regexp.MustCompile(`^/api/chartrepo/(?P<namespace>\w+)/charts/?$`)
+)
+
+var (
+ defaultBuilders = []interceptor.Builder{
+ &chartVersionDeletionBuilder{},
+ &chartVersionCreationBuilder{},
+ }
+)
+
+type chartVersionDeletionBuilder struct{}
+
+func (*chartVersionDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
+ if req.Method != http.MethodDelete {
+ return nil, nil
+ }
+
+ matches := deleteChartVersionRe.FindStringSubmatch(req.URL.String())
+ if len(matches) <= 1 {
+ return nil, nil
+ }
+
+ namespace, chartName, version := matches[1], matches[2], matches[3]
+
+ project, err := dao.GetProjectByName(namespace)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get project %s, error: %v", namespace, err)
+ }
+ if project == nil {
+ return nil, fmt.Errorf("project %s not found", namespace)
+ }
+
+ info := &util.ChartVersionInfo{
+ ProjectID: project.ProjectID,
+ Namespace: namespace,
+ ChartName: chartName,
+ Version: version,
+ }
+
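+ // Deleting a chart version releases one count resource back to the project quota once the delete succeeds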
+ opts := []quota.Option{
+ quota.EnforceResources(config.QuotaPerProjectEnable()),
+ quota.WithManager("project", strconv.FormatInt(project.ProjectID, 10)),
+ quota.WithAction(quota.SubtractAction),
+ quota.StatusCode(http.StatusOK),
+ quota.MutexKeys(info.MutexKey()),
+ quota.Resources(types.ResourceList{types.ResourceCount: 1}),
+ }
+
+ return quota.New(opts...), nil
+}
+
+type chartVersionCreationBuilder struct{}
+
+func (*chartVersionCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
+ if req.Method != http.MethodPost {
+ return nil, nil
+ }
+
+ matches := createChartVersionRe.FindStringSubmatch(req.URL.String())
+ if len(matches) <= 1 {
+ return nil, nil
+ }
+
+ namespace := matches[1]
+
+ project, err := dao.GetProjectByName(namespace)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get project %s, error: %v", namespace, err)
+ }
+ if project == nil {
+ return nil, fmt.Errorf("project %s not found", namespace)
+ }
+
+ info, ok := util.ChartVersionInfoFromContext(req.Context())
+ if !ok {
+ chart, err := parseChart(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse chart from body, error: %v", err)
+ }
+ chartName, version := chart.Metadata.Name, chart.Metadata.Version
+
+ info = &util.ChartVersionInfo{
+ ProjectID: project.ProjectID,
+ Namespace: namespace,
+ ChartName: chartName,
+ Version: version,
+ }
+ // Chart version info will be used by computeResourcesForChartVersionCreation
+ *req = *req.WithContext(util.NewChartVersionInfoContext(req.Context(), info))
+ }
+
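+ // The count to add is computed lazily via OnResources: nothing is charged if the chart version already exists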
+ opts := []quota.Option{
+ quota.EnforceResources(config.QuotaPerProjectEnable()),
+ quota.WithManager("project", strconv.FormatInt(project.ProjectID, 10)),
+ quota.WithAction(quota.AddAction),
+ quota.StatusCode(http.StatusCreated),
+ quota.MutexKeys(info.MutexKey()),
+ quota.OnResources(computeResourcesForChartVersionCreation),
+ }
+
+ return quota.New(opts...), nil
+}
diff --git a/src/core/middlewares/chart/handler.go b/src/core/middlewares/chart/handler.go
new file mode 100644
index 000000000..dd1fa583b
--- /dev/null
+++ b/src/core/middlewares/chart/handler.go
@@ -0,0 +1,83 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chart
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/middlewares/interceptor"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+)
+
+type chartHandler struct {
+ builders []interceptor.Builder
+ next http.Handler
+}
+
+// New ...
+func New(next http.Handler, builders ...interceptor.Builder) http.Handler {
+ if len(builders) == 0 {
+ builders = defaultBuilders
+ }
+
+ return &chartHandler{
+ builders: builders,
+ next: next,
+ }
+}
+
+// ServeHTTP serves the chart request, delegating to the first matched interceptor
+func (h *chartHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ interceptor, err := h.getInterceptor(req)
+ if err != nil {
+ http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when handling request in chart count quota handler: %v", err)),
+ http.StatusInternalServerError)
+ return
+ }
+
+ if interceptor == nil {
+ h.next.ServeHTTP(rw, req)
+ return
+ }
+
+ if err := interceptor.HandleRequest(req); err != nil {
+ log.Warningf("Error occurred when to handle request in count quota handler: %v", err)
+ http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in chart count quota handler: %v", err)),
+ http.StatusInternalServerError)
+ return
+ }
+
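+ // Wrap the writer so the interceptor can read the status code written by the next handler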
+ w := util.NewCustomResponseWriter(rw)
+ h.next.ServeHTTP(w, req)
+
+ interceptor.HandleResponse(w, req)
+}
+
+func (h *chartHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) {
+ for _, builder := range h.builders {
+ interceptor, err := builder.Build(req)
+ if err != nil {
+ return nil, err
+ }
+
+ if interceptor != nil {
+ return interceptor, nil
+ }
+ }
+
+ return nil, nil
+}
diff --git a/src/core/middlewares/chart/handler_test.go b/src/core/middlewares/chart/handler_test.go
new file mode 100644
index 000000000..aedf1218e
--- /dev/null
+++ b/src/core/middlewares/chart/handler_test.go
@@ -0,0 +1,137 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chart
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "testing"
+
+ "github.com/goharbor/harbor/src/chartserver"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "github.com/goharbor/harbor/src/pkg/types"
+ htesting "github.com/goharbor/harbor/src/testing"
+ "github.com/stretchr/testify/suite"
+)
+
+func deleteChartVersion(projectName, chartName, version string) {
+ url := fmt.Sprintf("/api/chartrepo/%s/charts/%s/%s", projectName, chartName, version)
+ req, _ := http.NewRequest(http.MethodDelete, url, nil)
+
+ next := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ })
+
+ rr := httptest.NewRecorder()
+ h := New(next)
+ h.ServeHTTP(util.NewCustomResponseWriter(rr), req)
+}
+
+func uploadChartVersion(projectID int64, projectName, chartName, version string) {
+ url := fmt.Sprintf("/api/chartrepo/%s/charts/", projectName)
+ req, _ := http.NewRequest(http.MethodPost, url, nil)
+
+ info := &util.ChartVersionInfo{
+ ProjectID: projectID,
+ Namespace: projectName,
+ ChartName: chartName,
+ Version: version,
+ }
+ *req = *req.WithContext(util.NewChartVersionInfoContext(req.Context(), info))
+
+ next := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ w.WriteHeader(http.StatusCreated)
+ })
+
+ rr := httptest.NewRecorder()
+ h := New(next)
+ h.ServeHTTP(util.NewCustomResponseWriter(rr), req)
+}
+
+func mockChartController() (*httptest.Server, *chartserver.Controller, error) {
+ mockServer := httptest.NewServer(htesting.MockChartRepoHandler)
+
+ var oldController, newController *chartserver.Controller
+ url, err := url.Parse(mockServer.URL)
+ if err == nil {
+ newController, err = chartserver.NewController(url)
+ }
+
+ if err != nil {
+ mockServer.Close()
+ return nil, nil, err
+ }
+
+ chartController() // Trigger the sync.Once initialization so the override below is not replaced later
+
+ // Override current controller and keep the old one for restoring
+ oldController = controller
+ controller = newController
+
+ return mockServer, oldController, nil
+}
+
+type HandlerSuite struct {
+ htesting.Suite
+ oldController *chartserver.Controller
+ mockChartServer *httptest.Server
+}
+
+func (suite *HandlerSuite) SetupTest() {
+ mockServer, oldController, err := mockChartController()
+ suite.Nil(err, "Mock chart controller failed")
+
+ suite.oldController = oldController
+ suite.mockChartServer = mockServer
+}
+
+func (suite *HandlerSuite) TearDownTest() {
+ for _, table := range []string{
+ "quota", "quota_usage",
+ } {
+ dao.ClearTable(table)
+ }
+
+ controller = suite.oldController
+ suite.mockChartServer.Close()
+}
+
+func (suite *HandlerSuite) TestUpload() {
+ suite.WithProject(func(projectID int64, projectName string) {
+ uploadChartVersion(projectID, projectName, "harbor", "0.2.1")
+ suite.AssertResourceUsage(1, types.ResourceCount, projectID)
+
+ // harbor:0.2.0 exists in repo1, upload it again
+ uploadChartVersion(projectID, projectName, "harbor", "0.2.0")
+ suite.AssertResourceUsage(1, types.ResourceCount, projectID)
+ }, "repo1")
+}
+
+func (suite *HandlerSuite) TestDelete() {
+ suite.WithProject(func(projectID int64, projectName string) {
+ uploadChartVersion(projectID, projectName, "harbor", "0.2.1")
+ suite.AssertResourceUsage(1, types.ResourceCount, projectID)
+
+ deleteChartVersion(projectName, "harbor", "0.2.1")
+ suite.AssertResourceUsage(0, types.ResourceCount, projectID)
+ }, "repo1")
+}
+
+func TestRunHandlerSuite(t *testing.T) {
+ suite.Run(t, new(HandlerSuite))
+}
diff --git a/src/core/middlewares/chart/util.go b/src/core/middlewares/chart/util.go
new file mode 100644
index 000000000..03b899498
--- /dev/null
+++ b/src/core/middlewares/chart/util.go
@@ -0,0 +1,116 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chart
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+
+ "github.com/goharbor/harbor/src/chartserver"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "github.com/goharbor/harbor/src/pkg/types"
+ "k8s.io/helm/pkg/chartutil"
+ "k8s.io/helm/pkg/proto/hapi/chart"
+)
+
+const (
+ formFieldNameForChart = "chart"
+)
+
+var (
+ controller *chartserver.Controller
+ controllerErr error
+ controllerOnce sync.Once
+)
+
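+// chartController returns the shared chart server controller, initializing it exactly once via sync.Once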
+func chartController() (*chartserver.Controller, error) {
+ controllerOnce.Do(func() {
+ addr, err := config.GetChartMuseumEndpoint()
+ if err != nil {
+ controllerErr = fmt.Errorf("failed to get the endpoint URL of chart storage server: %s", err.Error())
+ return
+ }
+
+ addr = strings.TrimSuffix(addr, "/")
+ url, err := url.Parse(addr)
+ if err != nil {
+ controllerErr = errors.New("endpoint URL of chart storage server is malformed")
+ return
+ }
+
+ ctr, err := chartserver.NewController(url)
+ if err != nil {
+ controllerErr = fmt.Errorf("failed to initialize chart API controller: %v", err)
+ return
+ }
+
+ controller = ctr
+
+ log.Debugf("Chart storage server is set to %s", url.String())
+ log.Info("API controller for chart repository server is successfully initialized")
+ })
+
+ return controller, controllerErr
+}
+
+func chartVersionExists(namespace, chartName, version string) bool {
+ ctr, err := chartController()
+ if err != nil {
+ return false
+ }
+
+ chartVersion, err := ctr.GetChartVersion(namespace, chartName, version)
+ if err != nil {
+ log.Debugf("Get chart %s of version %s in namespace %s failed, error: %v", chartName, version, namespace, err)
+ return false
+ }
+
+ return !chartVersion.Removed
+}
+
+// computeResourcesForChartVersionCreation returns the count resource required for the chart package.
+// No count is required if the chart version already exists in the project.
+func computeResourcesForChartVersionCreation(req *http.Request) (types.ResourceList, error) {
+ info, ok := util.ChartVersionInfoFromContext(req.Context())
+ if !ok {
+ return nil, errors.New("chart version info missing")
+ }
+
+ if chartVersionExists(info.Namespace, info.ChartName, info.Version) {
+ log.Debugf("Chart %s with version %s in namespace %s exists", info.ChartName, info.Version, info.Namespace)
+ return nil, nil
+ }
+
+ return types.ResourceList{types.ResourceCount: 1}, nil
+}
+
+func parseChart(req *http.Request) (*chart.Chart, error) {
+ chartFile, _, err := req.FormFile(formFieldNameForChart)
+ if err != nil {
+ return nil, err
+ }
+
+ chart, err := chartutil.LoadArchive(chartFile)
+ if err != nil {
+ return nil, fmt.Errorf("load chart from archive failed: %s", err.Error())
+ }
+
+ return chart, nil
+}
diff --git a/src/core/middlewares/config.go b/src/core/middlewares/config.go
new file mode 100644
index 000000000..8f0dcb3c0
--- /dev/null
+++ b/src/core/middlewares/config.go
@@ -0,0 +1,37 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middlewares
+
+// const variables
+const (
+ CHART = "chart"
+ READONLY = "readonly"
+ URL = "url"
+ MUITIPLEMANIFEST = "manifest"
+ LISTREPO = "listrepo"
+ CONTENTTRUST = "contenttrust"
+ VULNERABLE = "vulnerable"
+ SIZEQUOTA = "sizequota"
+ COUNTQUOTA = "countquota"
+)
+
+// ChartMiddlewares are the middlewares for the chart server
+var ChartMiddlewares = []string{CHART}
+
+// Middlewares lists the middlewares in the order they are applied
+var Middlewares = []string{READONLY, URL, MUITIPLEMANIFEST, LISTREPO, CONTENTTRUST, VULNERABLE, SIZEQUOTA, COUNTQUOTA}
+
+// MiddlewaresLocal ...
+var MiddlewaresLocal = []string{SIZEQUOTA, COUNTQUOTA}
diff --git a/src/core/middlewares/contenttrust/handler.go b/src/core/middlewares/contenttrust/handler.go
new file mode 100644
index 000000000..bcc4de44a
--- /dev/null
+++ b/src/core/middlewares/contenttrust/handler.go
@@ -0,0 +1,101 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package contenttrust
+
+import (
+ "github.com/goharbor/harbor/src/common/utils"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/common/utils/notary"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "net/http"
+)
+
+// NotaryEndpoint ...
+var NotaryEndpoint = ""
+
+type contentTrustHandler struct {
+ next http.Handler
+}
+
+// New ...
+func New(next http.Handler) http.Handler {
+ return &contentTrustHandler{
+ next: next,
+ }
+}
+
+// ServeHTTP ...
+func (cth contentTrustHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ imgRaw := req.Context().Value(util.ImageInfoCtxKey)
+ if imgRaw == nil || !config.WithNotary() {
+ cth.next.ServeHTTP(rw, req)
+ return
+ }
+ img, _ := req.Context().Value(util.ImageInfoCtxKey).(util.ImageInfo)
+ if img.Digest == "" {
+ cth.next.ServeHTTP(rw, req)
+ return
+ }
+ if !util.GetPolicyChecker().ContentTrustEnabled(img.ProjectName) {
+ cth.next.ServeHTTP(rw, req)
+ return
+ }
+ match, err := matchNotaryDigest(img)
+ if err != nil {
+ http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", "Failed in communication with Notary please check the log"), http.StatusInternalServerError)
+ return
+ }
+ if !match {
+ log.Debugf("digest mismatch, failing the response.")
+ http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", "The image is not signed in Notary."), http.StatusPreconditionFailed)
+ return
+ }
+ cth.next.ServeHTTP(rw, req)
+}
+
+func matchNotaryDigest(img util.ImageInfo) (bool, error) {
+ if NotaryEndpoint == "" {
+ NotaryEndpoint = config.InternalNotaryEndpoint()
+ }
+ targets, err := notary.GetInternalTargets(NotaryEndpoint, util.TokenUsername, img.Repository)
+ if err != nil {
+ return false, err
+ }
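+ // Compare against each notary target: by digest when the reference is itself a digest, otherwise by tag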
+ for _, t := range targets {
+ if utils.IsDigest(img.Reference) {
+ d, err := notary.DigestFromTarget(t)
+ if err != nil {
+ return false, err
+ }
+ if img.Digest == d {
+ return true, nil
+ }
+ } else {
+ if t.Tag == img.Reference {
+ log.Debugf("found reference: %s in notary, try to match digest.", img.Reference)
+ d, err := notary.DigestFromTarget(t)
+ if err != nil {
+ return false, err
+ }
+ if img.Digest == d {
+ return true, nil
+ }
+ }
+ }
+ }
+ log.Debugf("image: %#v, not found in notary", img)
+ return false, nil
+}
diff --git a/src/core/middlewares/contenttrust/handler_test.go b/src/core/middlewares/contenttrust/handler_test.go
new file mode 100644
index 000000000..d7767cac1
--- /dev/null
+++ b/src/core/middlewares/contenttrust/handler_test.go
@@ -0,0 +1,63 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package contenttrust
+
+import (
+ "github.com/goharbor/harbor/src/common"
+ notarytest "github.com/goharbor/harbor/src/common/utils/notary/test"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "github.com/stretchr/testify/assert"
+ "net/http/httptest"
+ "os"
+ "testing"
+)
+
+var endpoint = "10.117.4.142"
+var notaryServer *httptest.Server
+
+var admiralEndpoint = "http://127.0.0.1:8282"
+var token = ""
+
+func TestMain(m *testing.M) {
+ notaryServer = notarytest.NewNotaryServer(endpoint)
+ defer notaryServer.Close()
+ NotaryEndpoint = notaryServer.URL
+ var defaultConfig = map[string]interface{}{
+ common.ExtEndpoint: "https://" + endpoint,
+ common.WithNotary: true,
+ common.TokenExpiration: 30,
+ }
+ config.InitWithSettings(defaultConfig)
+ result := m.Run()
+ if result != 0 {
+ os.Exit(result)
+ }
+}
+
+func TestMatchNotaryDigest(t *testing.T) {
+ assert := assert.New(t)
+ // The data from common/utils/notary/helper_test.go
+ img1 := util.ImageInfo{Repository: "notary-demo/busybox", Reference: "1.0", ProjectName: "notary-demo", Digest: "sha256:1359608115b94599e5641638bac5aef1ddfaa79bb96057ebf41ebc8d33acf8a7"}
+ img2 := util.ImageInfo{Repository: "notary-demo/busybox", Reference: "2.0", ProjectName: "notary-demo", Digest: "sha256:12345678"}
+
+ res1, err := matchNotaryDigest(img1)
+ assert.Nil(err, "Unexpected error: %v, image: %#v", err, img1)
+ assert.True(res1)
+
+ res2, err := matchNotaryDigest(img2)
+ assert.Nil(err, "Unexpected error: %v, image: %#v, take 2", err, img2)
+ assert.False(res2)
+}
diff --git a/src/core/middlewares/countquota/builder.go b/src/core/middlewares/countquota/builder.go
new file mode 100644
index 000000000..089c4a5d6
--- /dev/null
+++ b/src/core/middlewares/countquota/builder.go
@@ -0,0 +1,100 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package countquota
+
+import (
+ "fmt"
+ "net/http"
+ "strconv"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/middlewares/interceptor"
+ "github.com/goharbor/harbor/src/core/middlewares/interceptor/quota"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+)
+
+var (
+ defaultBuilders = []interceptor.Builder{
+ &manifestDeletionBuilder{},
+ &manifestCreationBuilder{},
+ }
+)
+
+type manifestDeletionBuilder struct{}
+
+func (*manifestDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
+ if match, _, _ := util.MatchDeleteManifest(req); !match {
+ return nil, nil
+ }
+
+ info, ok := util.ManifestInfoFromContext(req.Context())
+ if !ok {
+ var err error
+ info, err = util.ParseManifestInfoFromPath(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse manifest, error %v", err)
+ }
+
+ // Manifest info will be used by computeResourcesForManifestDeletion
+ *req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info)))
+ }
+
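+ // On a successful delete (202), remove the artifact row and release one count from the project quota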
+ opts := []quota.Option{
+ quota.EnforceResources(config.QuotaPerProjectEnable()),
+ quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
+ quota.WithAction(quota.SubtractAction),
+ quota.StatusCode(http.StatusAccepted),
+ quota.MutexKeys(info.MutexKey("count")),
+ quota.OnResources(computeResourcesForManifestDeletion),
+ quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
+ return dao.DeleteArtifactByDigest(info.ProjectID, info.Repository, info.Digest)
+ }),
+ }
+
+ return quota.New(opts...), nil
+}
+
+type manifestCreationBuilder struct{}
+
+func (*manifestCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
+ if match, _, _ := util.MatchPushManifest(req); !match {
+ return nil, nil
+ }
+
+ info, ok := util.ManifestInfoFromContext(req.Context())
+ if !ok {
+ var err error
+ info, err = util.ParseManifestInfo(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse manifest, error %v", err)
+ }
+
+ // Manifest info will be used by computeResourcesForManifestCreation
+ *req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info)))
+ }
+
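+ // On a successful push (201), the computed count is charged and afterManifestCreated runs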
+ opts := []quota.Option{
+ quota.EnforceResources(config.QuotaPerProjectEnable()),
+ quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
+ quota.WithAction(quota.AddAction),
+ quota.StatusCode(http.StatusCreated),
+ quota.MutexKeys(info.MutexKey("count")),
+ quota.OnResources(computeResourcesForManifestCreation),
+ quota.OnFulfilled(afterManifestCreated),
+ }
+
+ return quota.New(opts...), nil
+}
diff --git a/src/core/middlewares/countquota/handler.go b/src/core/middlewares/countquota/handler.go
new file mode 100644
index 000000000..1b05a4cf5
--- /dev/null
+++ b/src/core/middlewares/countquota/handler.go
@@ -0,0 +1,83 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package countquota
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/middlewares/interceptor"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+)
+
+type countQuotaHandler struct {
+ builders []interceptor.Builder
+ next http.Handler
+}
+
+// New ...
+func New(next http.Handler, builders ...interceptor.Builder) http.Handler {
+ if len(builders) == 0 {
+ builders = defaultBuilders
+ }
+
+ return &countQuotaHandler{
+ builders: builders,
+ next: next,
+ }
+}
+
+// ServeHTTP serves the manifest request, delegating to the first matched interceptor
+func (h *countQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ interceptor, err := h.getInterceptor(req)
+ if err != nil {
+ log.Warningf("Error occurred when to handle request in count quota handler: %v", err)
+ http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in count quota handler: %v", err)),
+ http.StatusInternalServerError)
+ return
+ }
+
+ if interceptor == nil {
+ h.next.ServeHTTP(rw, req)
+ return
+ }
+
+ if err := interceptor.HandleRequest(req); err != nil {
+ log.Warningf("Error occurred when to handle request in count quota handler: %v", err)
+ http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in count quota handler: %v", err)),
+ http.StatusInternalServerError)
+ return
+ }
+
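+ // Let the registry handle the request first, then settle the quota based on its response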
+ h.next.ServeHTTP(rw, req)
+
+ interceptor.HandleResponse(rw, req)
+}
+
+func (h *countQuotaHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) {
+ for _, builder := range h.builders {
+ interceptor, err := builder.Build(req)
+ if err != nil {
+ return nil, err
+ }
+
+ if interceptor != nil {
+ return interceptor, nil
+ }
+ }
+
+ return nil, nil
+}
diff --git a/src/core/middlewares/countquota/handler_test.go b/src/core/middlewares/countquota/handler_test.go
new file mode 100644
index 000000000..a2ebb5a69
--- /dev/null
+++ b/src/core/middlewares/countquota/handler_test.go
@@ -0,0 +1,304 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package countquota
+
+import (
+ "fmt"
+ "math/rand"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/docker/distribution"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "github.com/goharbor/harbor/src/pkg/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/suite"
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+func getProjectCountUsage(projectID int64) (int64, error) {
+ usage := models.QuotaUsage{Reference: "project", ReferenceID: fmt.Sprintf("%d", projectID)}
+ err := dao.GetOrmer().Read(&usage, "reference", "reference_id")
+ if err != nil {
+ return 0, err
+ }
+ used, err := types.NewResourceList(usage.Used)
+ if err != nil {
+ return 0, err
+ }
+
+ return used[types.ResourceCount], nil
+}
+
+func randomString(n int) string {
+ const letterBytes = "abcdefghijklmnopqrstuvwxyz"
+
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = letterBytes[rand.Intn(len(letterBytes))]
+ }
+
+ return string(b)
+}
+
+func doDeleteManifestRequest(projectID int64, projectName, name, dgt string, next ...http.HandlerFunc) int {
+ repository := fmt.Sprintf("%s/%s", projectName, name)
+
+ url := fmt.Sprintf("/v2/%s/manifests/%s", repository, dgt)
+ req, _ := http.NewRequest("DELETE", url, nil)
+
+ ctx := util.NewManifestInfoContext(req.Context(), &util.ManifestInfo{
+ ProjectID: projectID,
+ Repository: repository,
+ Digest: dgt,
+ })
+
+ rr := httptest.NewRecorder()
+
+ var n http.HandlerFunc
+ if len(next) > 0 {
+ n = next[0]
+ } else {
+ n = func(w http.ResponseWriter, req *http.Request) {
+ w.WriteHeader(http.StatusAccepted)
+ }
+ }
+
+ h := New(http.HandlerFunc(n))
+ h.ServeHTTP(util.NewCustomResponseWriter(rr), req.WithContext(ctx))
+
+ return rr.Code
+}
+
+func doPutManifestRequest(projectID int64, projectName, name, tag, dgt string, next ...http.HandlerFunc) int {
+ repository := fmt.Sprintf("%s/%s", projectName, name)
+
+ url := fmt.Sprintf("/v2/%s/manifests/%s", repository, tag)
+ req, _ := http.NewRequest("PUT", url, nil)
+
+ ctx := util.NewManifestInfoContext(req.Context(), &util.ManifestInfo{
+ ProjectID: projectID,
+ Repository: repository,
+ Tag: tag,
+ Digest: dgt,
+ References: []distribution.Descriptor{
+ {Digest: digest.FromString(randomString(15))},
+ {Digest: digest.FromString(randomString(15))},
+ },
+ })
+
+ rr := httptest.NewRecorder()
+
+ var n http.HandlerFunc
+ if len(next) > 0 {
+ n = next[0]
+ } else {
+ n = func(w http.ResponseWriter, req *http.Request) {
+ w.WriteHeader(http.StatusCreated)
+ }
+ }
+
+ h := New(http.HandlerFunc(n))
+ h.ServeHTTP(util.NewCustomResponseWriter(rr), req.WithContext(ctx))
+
+ return rr.Code
+}
+
+type HandlerSuite struct {
+ suite.Suite
+}
+
+func (suite *HandlerSuite) addProject(projectName string) int64 {
+ projectID, err := dao.AddProject(models.Project{
+ Name: projectName,
+ OwnerID: 1,
+ })
+
+ suite.Nil(err, fmt.Sprintf("Add project failed for %s", projectName))
+
+ return projectID
+}
+
+func (suite *HandlerSuite) checkCountUsage(expected, projectID int64) {
+ count, err := getProjectCountUsage(projectID)
+ suite.Nil(err, fmt.Sprintf("Failed to get count usage of project %d, error: %v", projectID, err))
+ suite.Equal(expected, count, "Failed to check count usage for project %d", projectID)
+}
+
+func (suite *HandlerSuite) TearDownTest() {
+ for _, table := range []string{
+ "artifact", "blob",
+ "artifact_blob", "project_blob",
+ "quota", "quota_usage",
+ } {
+ dao.ClearTable(table)
+ }
+}
+
+func (suite *HandlerSuite) TestPutManifestCreated() {
+ projectName := randomString(5)
+
+ projectID := suite.addProject(projectName)
+ defer func() {
+ dao.DeleteProject(projectID)
+ }()
+
+ dgt := digest.FromString(randomString(15)).String()
+ code := doPutManifestRequest(projectID, projectName, "photon", "latest", dgt)
+ suite.Equal(http.StatusCreated, code)
+ suite.checkCountUsage(1, projectID)
+
+ total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: dgt})
+ suite.Nil(err)
+ suite.Equal(int64(1), total, "Artifact should be created")
+
+ // Push the same manifest again with tag photon:dev
+ code = doPutManifestRequest(projectID, projectName, "photon", "dev", dgt)
+ suite.Equal(http.StatusCreated, code)
+ suite.checkCountUsage(2, projectID)
+
+ total, err = dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: dgt})
+ suite.Nil(err)
+ suite.Equal(int64(2), total, "Artifact should be created")
+
+ // Push photon:latest again with a new manifest digest
+ newDgt := digest.FromString(randomString(15)).String()
+ code = doPutManifestRequest(projectID, projectName, "photon", "latest", newDgt)
+ suite.Equal(http.StatusCreated, code)
+ suite.checkCountUsage(2, projectID)
+
+ total, err = dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: newDgt})
+ suite.Nil(err)
+ suite.Equal(int64(1), total, "Artifact should be updated")
+}
+
+func (suite *HandlerSuite) TestPutManifestFailed() {
+ projectName := randomString(5)
+
+ projectID := suite.addProject(projectName)
+ defer func() {
+ dao.DeleteProject(projectID)
+ }()
+
+ next := func(w http.ResponseWriter, req *http.Request) {
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+
+ dgt := digest.FromString(randomString(15)).String()
+ code := doPutManifestRequest(projectID, projectName, "photon", "latest", dgt, next)
+ suite.Equal(http.StatusInternalServerError, code)
+ suite.checkCountUsage(0, projectID)
+
+ total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{Digest: dgt})
+ suite.Nil(err)
+ suite.Equal(int64(0), total, "Artifact should not be created")
+}
+
+func (suite *HandlerSuite) TestDeleteManifestAccepted() {
+ projectName := randomString(5)
+
+ projectID := suite.addProject(projectName)
+ defer func() {
+ dao.DeleteProject(projectID)
+ }()
+
+ dgt := digest.FromString(randomString(15)).String()
+ code := doPutManifestRequest(projectID, projectName, "photon", "latest", dgt)
+ suite.Equal(http.StatusCreated, code)
+ suite.checkCountUsage(1, projectID)
+
+ code = doDeleteManifestRequest(projectID, projectName, "photon", dgt)
+ suite.Equal(http.StatusAccepted, code)
+ suite.checkCountUsage(0, projectID)
+}
+
+func (suite *HandlerSuite) TestDeleteManifestFailed() {
+ projectName := randomString(5)
+
+ projectID := suite.addProject(projectName)
+ defer func() {
+ dao.DeleteProject(projectID)
+ }()
+
+ dgt := digest.FromString(randomString(15)).String()
+ code := doPutManifestRequest(projectID, projectName, "photon", "latest", dgt)
+ suite.Equal(http.StatusCreated, code)
+ suite.checkCountUsage(1, projectID)
+
+ next := func(w http.ResponseWriter, req *http.Request) {
+ w.WriteHeader(http.StatusInternalServerError)
+ }
+
+ code = doDeleteManifestRequest(projectID, projectName, "photon", dgt, next)
+ suite.Equal(http.StatusInternalServerError, code)
+ suite.checkCountUsage(1, projectID)
+}
+
+func (suite *HandlerSuite) TestDeleteManifestInMultiProjects() {
+ projectName := randomString(5)
+
+ projectID := suite.addProject(projectName)
+ defer func() {
+ dao.DeleteProject(projectID)
+ }()
+
+ dgt := digest.FromString(randomString(15)).String()
+ code := doPutManifestRequest(projectID, projectName, "photon", "latest", dgt)
+ suite.Equal(http.StatusCreated, code)
+ suite.checkCountUsage(1, projectID)
+
+ {
+ projectName := randomString(5)
+
+ projectID := suite.addProject(projectName)
+ defer func() {
+ dao.DeleteProject(projectID)
+ }()
+
+ code := doPutManifestRequest(projectID, projectName, "photon", "latest", dgt)
+ suite.Equal(http.StatusCreated, code)
+ suite.checkCountUsage(1, projectID)
+
+ code = doDeleteManifestRequest(projectID, projectName, "photon", dgt)
+ suite.Equal(http.StatusAccepted, code)
+ suite.checkCountUsage(0, projectID)
+ }
+
+ code = doDeleteManifestRequest(projectID, projectName, "photon", dgt)
+ suite.Equal(http.StatusAccepted, code)
+ suite.checkCountUsage(0, projectID)
+}
+
+func TestMain(m *testing.M) {
+ config.Init()
+ dao.PrepareTestForPostgresSQL()
+
+ if result := m.Run(); result != 0 {
+ os.Exit(result)
+ }
+}
+
+func TestRunHandlerSuite(t *testing.T) {
+ suite.Run(t, new(HandlerSuite))
+}
diff --git a/src/core/middlewares/countquota/util.go b/src/core/middlewares/countquota/util.go
new file mode 100644
index 000000000..8275cb7ae
--- /dev/null
+++ b/src/core/middlewares/countquota/util.go
@@ -0,0 +1,118 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package countquota
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/quota"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "github.com/goharbor/harbor/src/pkg/types"
+)
+
+// computeResourcesForManifestCreation returns the count resource required for the manifest
+// no count is required if the tag already exists in the repository of the project
+func computeResourcesForManifestCreation(req *http.Request) (types.ResourceList, error) {
+ info, ok := util.ManifestInfoFromContext(req.Context())
+ if !ok {
+ return nil, errors.New("manifest info missing")
+ }
+
+ // count quota is required only when pushing a new tag
+ if info.IsNewTag() {
+ return quota.ResourceList{quota.ResourceCount: 1}, nil
+ }
+
+ return nil, nil
+}
+
+// computeResourcesForManifestDeletion returns the count resource that will be released when the manifest is deleted
+// the result is the total number of artifacts with the same digest in the repository of the project
+func computeResourcesForManifestDeletion(req *http.Request) (types.ResourceList, error) {
+ info, ok := util.ManifestInfoFromContext(req.Context())
+ if !ok {
+ return nil, errors.New("manifest info missing")
+ }
+
+ total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{
+ PID: info.ProjectID,
+ Repo: info.Repository,
+ Digest: info.Digest,
+ })
+
+ if err != nil {
+ return nil, fmt.Errorf("error occurred when get artifacts %v ", err)
+ }
+
+ return types.ResourceList{types.ResourceCount: total}, nil
+}
+
+// afterManifestCreated is the handler called after the manifest is created successfully
+// it creates or updates the artifact info in the db, and then attaches the blobs to the artifact
+func afterManifestCreated(w http.ResponseWriter, req *http.Request) error {
+ info, ok := util.ManifestInfoFromContext(req.Context())
+ if !ok {
+ return errors.New("manifest info missing")
+ }
+
+ artifact := info.Artifact()
+ if artifact.ID == 0 {
+ if _, err := dao.AddArtifact(artifact); err != nil {
+ return fmt.Errorf("error to add artifact, %v", err)
+ }
+ } else {
+ if err := dao.UpdateArtifact(artifact); err != nil {
+ return fmt.Errorf("error to update artifact, %v", err)
+ }
+ }
+
+ return attachBlobsToArtifact(info)
+}
+
+// attachBlobsToArtifact attaches the blobs from the manifest to the artifact
+func attachBlobsToArtifact(info *util.ManifestInfo) error {
+ self := &models.ArtifactAndBlob{
+ DigestAF: info.Digest,
+ DigestBlob: info.Digest,
+ }
+
+ artifactBlobs := append([]*models.ArtifactAndBlob{}, self)
+
+ for _, reference := range info.References {
+ artifactBlob := &models.ArtifactAndBlob{
+ DigestAF: info.Digest,
+ DigestBlob: reference.Digest.String(),
+ }
+
+ artifactBlobs = append(artifactBlobs, artifactBlob)
+ }
+
+ if err := dao.AddArtifactNBlobs(artifactBlobs); err != nil {
+ if strings.Contains(err.Error(), dao.ErrDupRows.Error()) {
+ log.Warning("the artifact and blobs have already in the DB, it maybe an existing image with different tag")
+ return nil
+ }
+
+ return fmt.Errorf("error to add artifact and blobs in proxy response handler, %v", err)
+ }
+
+ return nil
+}
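For reference, the create-or-update flow in afterManifestCreated boils down to an upsert keyed on the tag. Below is a minimal standalone sketch of that pattern; the in-memory store and the Artifact struct are illustrative stand-ins for Harbor's DAO and models, not its actual API.

```go
// Standalone sketch of the create-or-update (upsert) pattern used by
// afterManifestCreated, with an in-memory store standing in for the DAO.
package main

import "fmt"

type Artifact struct {
	ID     int64
	Repo   string
	Tag    string
	Digest string
}

type store struct {
	nextID    int64
	artifacts map[string]*Artifact // keyed by repo:tag
}

func (s *store) upsert(a *Artifact) {
	key := a.Repo + ":" + a.Tag
	if existing, ok := s.artifacts[key]; ok {
		// the same repo:tag already exists: update the digest in place
		existing.Digest = a.Digest
		return
	}
	s.nextID++
	a.ID = s.nextID
	s.artifacts[key] = a
}

func main() {
	s := &store{artifacts: map[string]*Artifact{}}
	s.upsert(&Artifact{Repo: "library/photon", Tag: "latest", Digest: "sha256:aaa"})
	s.upsert(&Artifact{Repo: "library/photon", Tag: "latest", Digest: "sha256:bbb"})
	fmt.Println(len(s.artifacts)) // 1: the tag was overwritten, not duplicated
}
```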
diff --git a/src/core/middlewares/inlet.go b/src/core/middlewares/inlet.go
new file mode 100644
index 000000000..3e0f30eef
--- /dev/null
+++ b/src/core/middlewares/inlet.go
@@ -0,0 +1,57 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middlewares
+
+import (
+ "errors"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/filter"
+ "github.com/goharbor/harbor/src/core/middlewares/registryproxy"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "net/http"
+)
+
+var head http.Handler
+var proxy http.Handler
+
+// Init initializes the proxy instance and handler chain.
+func Init() error {
+ proxy = registryproxy.New()
+ if proxy == nil {
+ return errors.New("get nil when to create proxy")
+ }
+ return nil
+}
+
+// Handle handles the request.
+func Handle(rw http.ResponseWriter, req *http.Request) {
+ securityCtx, err := filter.GetSecurityContext(req)
+ if err != nil {
+ log.Errorf("failed to get security context in middlerware: %v", err)
+ // error to get security context, use the default chain.
+ head = New(Middlewares).Create().Then(proxy)
+ } else {
+ // true: the request is from 127.0.0.1, only quota middlewares are applied to request
+ // false: the request is from outside, all of middlewares are applied to the request.
+ if securityCtx.IsSolutionUser() {
+ head = New(MiddlewaresLocal).Create().Then(proxy)
+ } else {
+ head = New(Middlewares).Create().Then(proxy)
+ }
+ }
+
+ customResW := util.NewCustomResponseWriter(rw)
+ head.ServeHTTP(customResW, req)
+}
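A standalone sketch of the per-request chain selection that Handle performs, using plain http.Handler composition instead of the alice package; isLocal stands in for securityCtx.IsSolutionUser() and is an assumption for illustration.

```go
// Minimal sketch: pick a shorter middleware chain for trusted local requests,
// the full chain for everything else, then serve through the chosen head.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type middleware func(http.Handler) http.Handler

func chain(h http.Handler, mws ...middleware) http.Handler {
	for i := len(mws) - 1; i >= 0; i-- {
		h = mws[i](h)
	}
	return h
}

func tag(name string) middleware {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Add("X-Chain", name) // record which middlewares ran
			next.ServeHTTP(w, r)
		})
	}
}

func main() {
	proxy := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
	full := chain(proxy, tag("readonly"), tag("quota"))
	local := chain(proxy, tag("quota"))

	for _, isLocal := range []bool{true, false} {
		head := full
		if isLocal {
			head = local
		}
		rr := httptest.NewRecorder()
		head.ServeHTTP(rr, httptest.NewRequest("GET", "/v2/_catalog", nil))
		fmt.Println(isLocal, rr.Header()["X-Chain"]) // true [quota] / false [readonly quota]
	}
}
```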
diff --git a/src/core/middlewares/interceptor/interceptor.go b/src/core/middlewares/interceptor/interceptor.go
new file mode 100644
index 000000000..ab8cf6ec6
--- /dev/null
+++ b/src/core/middlewares/interceptor/interceptor.go
@@ -0,0 +1,48 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interceptor
+
+import (
+ "net/http"
+)
+
+// Builder interceptor builder
+type Builder interface {
+ // Build builds an interceptor from the http.Request
+ // (nil, nil) must be returned if the builder does not match the request
+ Build(*http.Request) (Interceptor, error)
+}
+
+// Interceptor interceptor for middleware
+type Interceptor interface {
+ // HandleRequest ...
+ HandleRequest(*http.Request) error
+
+ // HandleResponse won't return any error
+ HandleResponse(http.ResponseWriter, *http.Request)
+}
+
+// ResponseInterceptorFunc ...
+type ResponseInterceptorFunc func(w http.ResponseWriter, r *http.Request)
+
+// HandleRequest no-op HandleRequest
+func (f ResponseInterceptorFunc) HandleRequest(*http.Request) error {
+ return nil
+}
+
+// HandleResponse calls f(w, r).
+func (f ResponseInterceptorFunc) HandleResponse(w http.ResponseWriter, r *http.Request) {
+ f(w, r)
+}
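To make the Builder/Interceptor contract concrete, here is a minimal self-contained sketch: a builder that matches only DELETE requests and returns (nil, nil) for everything else. The types mirror the interfaces above but all concrete names are illustrative.

```go
// Sketch of the Builder/Interceptor contract used by the quota middlewares.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type Interceptor interface {
	HandleRequest(*http.Request) error
	HandleResponse(http.ResponseWriter, *http.Request)
}

type Builder interface {
	Build(*http.Request) (Interceptor, error)
}

type logInterceptor struct{}

func (logInterceptor) HandleRequest(req *http.Request) error {
	fmt.Println("before:", req.Method, req.URL.Path)
	return nil
}

func (logInterceptor) HandleResponse(w http.ResponseWriter, req *http.Request) {
	fmt.Println("after:", req.Method, req.URL.Path)
}

type deleteBuilder struct{}

func (deleteBuilder) Build(req *http.Request) (Interceptor, error) {
	if req.Method != http.MethodDelete {
		return nil, nil // builder does not match the request
	}
	return logInterceptor{}, nil
}

func main() {
	var b Builder = deleteBuilder{}
	for _, method := range []string{http.MethodGet, http.MethodDelete} {
		req := httptest.NewRequest(method, "/v2/library/photon/manifests/latest", nil)
		i, _ := b.Build(req)
		if i == nil {
			fmt.Println("skip:", method)
			continue
		}
		i.HandleRequest(req)
		i.HandleResponse(httptest.NewRecorder(), req)
	}
}
```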
diff --git a/src/core/middlewares/interceptor/quota/options.go b/src/core/middlewares/interceptor/quota/options.go
new file mode 100644
index 000000000..ddf102a74
--- /dev/null
+++ b/src/core/middlewares/interceptor/quota/options.go
@@ -0,0 +1,150 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quota
+
+import (
+ "net/http"
+
+ "github.com/goharbor/harbor/src/common/quota"
+ "github.com/goharbor/harbor/src/pkg/types"
+)
+
+// Option ...
+type Option func(*Options)
+
+// Action ...
+type Action string
+
+const (
+ // AddAction action to add resources
+ AddAction Action = "add"
+ // SubtractAction action to subtract resources
+ SubtractAction Action = "subtract"
+)
+
+// Options ...
+type Options struct {
+ enforceResources *bool
+
+ Action Action
+ Manager *quota.Manager
+ MutexKeys []string
+ Resources types.ResourceList
+ StatusCode int
+
+ OnResources func(*http.Request) (types.ResourceList, error)
+ OnFulfilled func(http.ResponseWriter, *http.Request) error
+ OnRejected func(http.ResponseWriter, *http.Request) error
+ OnFinally func(http.ResponseWriter, *http.Request) error
+}
+
+// EnforceResources ...
+func (opts *Options) EnforceResources() bool {
+ return opts.enforceResources != nil && *opts.enforceResources
+}
+
+func boolPtr(v bool) *bool {
+ return &v
+}
+
+func newOptions(opt ...Option) Options {
+ opts := Options{}
+
+ for _, o := range opt {
+ o(&opts)
+ }
+
+ if opts.Action == "" {
+ opts.Action = AddAction
+ }
+
+ if opts.StatusCode == 0 {
+ opts.StatusCode = http.StatusOK
+ }
+
+ if opts.enforceResources == nil {
+ opts.enforceResources = boolPtr(true)
+ }
+
+ return opts
+}
+
+// EnforceResources sets the interceptor enforceResources
+func EnforceResources(enforceResources bool) Option {
+ return func(o *Options) {
+ o.enforceResources = boolPtr(enforceResources)
+ }
+}
+
+// WithAction sets the interceptor action
+func WithAction(a Action) Option {
+ return func(o *Options) {
+ o.Action = a
+ }
+}
+
+// Manager sets the interceptor manager
+func Manager(m *quota.Manager) Option {
+ return func(o *Options) {
+ o.Manager = m
+ }
+}
+
+// WithManager sets the interceptor manager by reference and referenceID
+func WithManager(reference, referenceID string) Option {
+ return func(o *Options) {
+ m, err := quota.NewManager(reference, referenceID)
+ if err != nil {
+ return
+ }
+
+ o.Manager = m
+ }
+}
+
+// MutexKeys sets the interceptor mutex keys
+func MutexKeys(keys ...string) Option {
+ return func(o *Options) {
+ o.MutexKeys = keys
+ }
+}
+
+// Resources sets the interceptor resources
+func Resources(r types.ResourceList) Option {
+ return func(o *Options) {
+ o.Resources = r
+ }
+}
+
+// StatusCode sets the interceptor status code
+func StatusCode(c int) Option {
+ return func(o *Options) {
+ o.StatusCode = c
+ }
+}
+
+// OnResources sets the interceptor on resources function
+func OnResources(f func(*http.Request) (types.ResourceList, error)) Option {
+ return func(o *Options) {
+ o.OnResources = f
+ }
+}
+
+// OnFulfilled sets the success handler for the interceptor
+func OnFulfilled(f func(http.ResponseWriter, *http.Request) error) Option {
+ return func(o *Options) {
+ o.OnFulfilled = f
+ }
+}
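The file above is a textbook functional-options setup: newOptions applies the caller's overrides, then fills defaults for anything left unset. A minimal standalone sketch of the same pattern, with illustrative names rather than this package's API:

```go
// Functional options: zero-value defaults filled by the constructor,
// callers override only what they need.
package main

import (
	"fmt"
	"net/http"
)

type Options struct {
	Action     string
	StatusCode int
}

type Option func(*Options)

func WithAction(a string) Option { return func(o *Options) { o.Action = a } }
func StatusCode(c int) Option    { return func(o *Options) { o.StatusCode = c } }

func newOptions(opts ...Option) Options {
	o := Options{}
	for _, opt := range opts {
		opt(&o)
	}
	// fill defaults for anything the caller left unset
	if o.Action == "" {
		o.Action = "add"
	}
	if o.StatusCode == 0 {
		o.StatusCode = http.StatusOK
	}
	return o
}

func main() {
	fmt.Printf("%+v\n", newOptions())                                         // {Action:add StatusCode:200}
	fmt.Printf("%+v\n", newOptions(WithAction("subtract"), StatusCode(202)))  // {Action:subtract StatusCode:202}
}
```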
diff --git a/src/core/middlewares/interceptor/quota/quota.go b/src/core/middlewares/interceptor/quota/quota.go
new file mode 100644
index 000000000..607f58dde
--- /dev/null
+++ b/src/core/middlewares/interceptor/quota/quota.go
@@ -0,0 +1,189 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quota
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/common/utils/redis"
+ "github.com/goharbor/harbor/src/core/middlewares/interceptor"
+ "github.com/goharbor/harbor/src/pkg/types"
+)
+
+// New returns a quota interceptor built from the given options
+func New(opts ...Option) interceptor.Interceptor {
+ options := newOptions(opts...)
+
+ return "aInterceptor{opts: &options}
+}
+
+type statusRecorder interface {
+ Status() int
+}
+
+type quotaInterceptor struct {
+ opts *Options
+ resources types.ResourceList
+ mutexes []*redis.Mutex
+}
+
+// HandleRequest ...
+func (qi *quotaInterceptor) HandleRequest(req *http.Request) (err error) {
+ defer func() {
+ if err != nil {
+ qi.freeMutexes()
+ }
+ }()
+
+ err = qi.requireMutexes()
+ if err != nil {
+ return
+ }
+
+ err = qi.computeResources(req)
+ if err != nil {
+ return
+ }
+
+ err = qi.reserve()
+ if err != nil {
+ log.Errorf("Failed to %s resources, error: %v", qi.opts.Action, err)
+ }
+
+ return
+}
+
+// HandleResponse ...
+func (qi *quotaInterceptor) HandleResponse(w http.ResponseWriter, req *http.Request) {
+ defer qi.freeMutexes()
+
+ sr, ok := w.(statusRecorder)
+ if !ok {
+ return
+ }
+
+ opts := qi.opts
+
+ switch sr.Status() {
+ case opts.StatusCode:
+ if opts.OnFulfilled != nil {
+ if err := opts.OnFulfilled(w, req); err != nil {
+ log.Errorf("Failed to handle on fulfilled, error: %v", err)
+ }
+ }
+ default:
+ if err := qi.unreserve(); err != nil {
+ log.Errorf("Failed to %s resources, error: %v", opts.Action, err)
+ }
+
+ if opts.OnRejected != nil {
+ if err := opts.OnRejected(w, req); err != nil {
+ log.Errorf("Failed to handle on rejected, error: %v", err)
+ }
+ }
+ }
+
+ if opts.OnFinally != nil {
+ if err := opts.OnFinally(w, req); err != nil {
+ log.Errorf("Failed to handle on finally, error: %v", err)
+ }
+ }
+}
+
+func (qi *quotaInterceptor) requireMutexes() error {
+ if !qi.opts.EnforceResources() {
+ // Do nothing with locks when the quota interceptor does not enforce resources
+ return nil
+ }
+
+ for _, key := range qi.opts.MutexKeys {
+ m, err := redis.RequireLock(key)
+ if err != nil {
+ return err
+ }
+ qi.mutexes = append(qi.mutexes, m)
+ }
+
+ return nil
+}
+
+func (qi *quotaInterceptor) freeMutexes() {
+ for i := len(qi.mutexes) - 1; i >= 0; i-- {
+ if err := redis.FreeLock(qi.mutexes[i]); err != nil {
+ log.Error(err)
+ }
+ }
+}
+
+func (qi *quotaInterceptor) computeResources(req *http.Request) error {
+ if !qi.opts.EnforceResources() {
+ // Do nothing in compute resources when the quota interceptor does not enforce resources
+ return nil
+ }
+
+ qi.resources = qi.opts.Resources
+ if len(qi.resources) == 0 && qi.opts.OnResources != nil {
+ resources, err := qi.opts.OnResources(req)
+ if err != nil {
+ return fmt.Errorf("failed to compute the resources for quota, error: %v", err)
+ }
+
+ qi.resources = resources
+ }
+
+ return nil
+}
+
+func (qi *quotaInterceptor) reserve() error {
+ if !qi.opts.EnforceResources() {
+ // Do nothing in reserve resources when the quota interceptor does not enforce resources
+ return nil
+ }
+
+ if len(qi.resources) == 0 {
+ return nil
+ }
+
+ switch qi.opts.Action {
+ case AddAction:
+ return qi.opts.Manager.AddResources(qi.resources)
+ case SubtractAction:
+ return qi.opts.Manager.SubtractResources(qi.resources)
+ }
+
+ return nil
+}
+
+func (qi *quotaInterceptor) unreserve() error {
+ if !qi.opts.EnforceResources() {
+ // Do nothing in unreserve resources when the quota interceptor does not enforce resources
+ return nil
+ }
+
+ if len(qi.resources) == 0 {
+ return nil
+ }
+
+ switch qi.opts.Action {
+ case AddAction:
+ return qi.opts.Manager.SubtractResources(qi.resources)
+ case SubtractAction:
+ return qi.opts.Manager.AddResources(qi.resources)
+ }
+
+ return nil
+}
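The interceptor's life cycle is a two-phase reserve/settle: resources are reserved before the backend handles the request, and the reservation is undone in HandleResponse when the status code is not the expected one. A standalone sketch of that flow, with a plain int64 counter standing in for the quota manager:

```go
// Reserve before the backend runs; roll back if it did not answer with the
// expected status. All names here are illustrative.
package main

import "fmt"

type reservation struct {
	used     *int64
	amount   int64
	expected int
}

func (r *reservation) reserve() { *r.used += r.amount }

func (r *reservation) settle(status int) {
	if status != r.expected {
		*r.used -= r.amount // rollback: the backend rejected the request
	}
}

func main() {
	var used int64

	ok := reservation{used: &used, amount: 100, expected: 201}
	ok.reserve()
	ok.settle(201)    // fulfilled: reservation kept
	fmt.Println(used) // 100

	bad := reservation{used: &used, amount: 50, expected: 201}
	bad.reserve()
	bad.settle(500)   // rejected: reservation rolled back
	fmt.Println(used) // still 100
}
```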
diff --git a/src/core/middlewares/interface.go b/src/core/middlewares/interface.go
new file mode 100644
index 000000000..4ca772f43
--- /dev/null
+++ b/src/core/middlewares/interface.go
@@ -0,0 +1,22 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package middlewares
+
+import "github.com/justinas/alice"
+
+// ChainCreator ...
+type ChainCreator interface {
+ Create(middlewares []string) *alice.Chain
+}
diff --git a/src/core/middlewares/listrepo/handler.go b/src/core/middlewares/listrepo/handler.go
new file mode 100644
index 000000000..9cc2a2ae0
--- /dev/null
+++ b/src/core/middlewares/listrepo/handler.go
@@ -0,0 +1,104 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package listrepo
+
+import (
+ "encoding/json"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "net/http"
+ "net/http/httptest"
+ "regexp"
+ "strconv"
+)
+
+const (
+ catalogURLPattern = `/v2/_catalog`
+)
+
+type listReposHandler struct {
+ next http.Handler
+}
+
+// New ...
+func New(next http.Handler) http.Handler {
+ return &listReposHandler{
+ next: next,
+ }
+}
+
+// ServeHTTP ...
+func (lrh listReposHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ var rec *httptest.ResponseRecorder
+ listReposFlag := matchListRepos(req)
+ if listReposFlag {
+ rec = httptest.NewRecorder()
+ lrh.next.ServeHTTP(rec, req)
+ if rec.Result().StatusCode != http.StatusOK {
+ util.CopyResp(rec, rw)
+ return
+ }
+ var ctlg struct {
+ Repositories []string `json:"repositories"`
+ }
+ decoder := json.NewDecoder(rec.Body)
+ if err := decoder.Decode(&ctlg); err != nil {
+ log.Errorf("Decode repositories error: %v", err)
+ util.CopyResp(rec, rw)
+ return
+ }
+ var entries []string
+ for _, repo := range ctlg.Repositories {
+ log.Debugf("the repo in the response %s", repo)
+ if dao.RepositoryExists(repo) {
+ entries = append(entries, repo)
+ }
+ }
+ type Repos struct {
+ Repositories []string `json:"repositories"`
+ }
+ resp := &Repos{Repositories: entries}
+ respJSON, err := json.Marshal(resp)
+ if err != nil {
+ log.Errorf("Encode repositories error: %v", err)
+ util.CopyResp(rec, rw)
+ return
+ }
+
+ for k, v := range rec.Header() {
+ rw.Header()[k] = v
+ }
+ clen := len(respJSON)
+ rw.Header().Set(http.CanonicalHeaderKey("Content-Length"), strconv.Itoa(clen))
+ rw.Write(respJSON)
+ return
+ }
+ lrh.next.ServeHTTP(rw, req)
+}
+
+// matchListRepos checks if the request looks like a request to list repositories.
+func matchListRepos(req *http.Request) bool {
+ if req.Method != http.MethodGet {
+ return false
+ }
+ re := regexp.MustCompile(catalogURLPattern)
+ return re.MatchString(req.URL.Path)
+}
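The handler above records the downstream /v2/_catalog response and rewrites its body before it reaches the client. A self-contained sketch of that record-and-rewrite technique using httptest.ResponseRecorder; the exists predicate stands in for dao.RepositoryExists:

```go
// Capture the upstream JSON response with a recorder, filter it, and write
// the filtered body (with a corrected Content-Length) to the real writer.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"strconv"
)

type catalog struct {
	Repositories []string `json:"repositories"`
}

func filterCatalog(next http.Handler, exists func(string) bool) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rec := httptest.NewRecorder()
		next.ServeHTTP(rec, req) // capture the upstream response

		var c catalog
		if err := json.NewDecoder(rec.Body).Decode(&c); err != nil {
			http.Error(rw, err.Error(), http.StatusInternalServerError)
			return
		}
		kept := []string{}
		for _, repo := range c.Repositories {
			if exists(repo) {
				kept = append(kept, repo)
			}
		}
		body, _ := json.Marshal(catalog{Repositories: kept})
		rw.Header().Set("Content-Length", strconv.Itoa(len(body)))
		rw.Write(body)
	})
}

func main() {
	next := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		json.NewEncoder(rw).Encode(catalog{Repositories: []string{"a", "b", "c"}})
	})
	known := map[string]bool{"a": true, "c": true}
	h := filterCatalog(next, func(repo string) bool { return known[repo] })

	rr := httptest.NewRecorder()
	h.ServeHTTP(rr, httptest.NewRequest("GET", "/v2/_catalog", nil))
	fmt.Println(rr.Body.String()) // {"repositories":["a","c"]}
}
```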
diff --git a/src/core/middlewares/listrepo/handler_test.go b/src/core/middlewares/listrepo/handler_test.go
new file mode 100644
index 000000000..70bbbeaf9
--- /dev/null
+++ b/src/core/middlewares/listrepo/handler_test.go
@@ -0,0 +1,37 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package listrepo
+
+import (
+ "github.com/stretchr/testify/assert"
+ "net/http"
+ "testing"
+)
+
+func TestMatchListRepos(t *testing.T) {
+ assert := assert.New(t)
+ req1, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/_catalog", nil)
+ res1 := matchListRepos(req1)
+ assert.False(res1, "%s %v is not a request to list repos", req1.Method, req1.URL)
+
+ req2, _ := http.NewRequest("GET", "http://127.0.0.1:5000/v2/_catalog", nil)
+ res2 := matchListRepos(req2)
+ assert.True(res2, "%s %v is a request to list repos", req2.Method, req2.URL)
+
+ req3, _ := http.NewRequest("GET", "https://192.168.0.5:443/v1/_catalog", nil)
+ res3 := matchListRepos(req3)
+ assert.False(res3, "%s %v is not a request to pull manifest", req3.Method, req3.URL)
+
+}
diff --git a/src/core/middlewares/multiplmanifest/handler.go b/src/core/middlewares/multiplmanifest/handler.go
new file mode 100644
index 000000000..d0126696c
--- /dev/null
+++ b/src/core/middlewares/multiplmanifest/handler.go
@@ -0,0 +1,48 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package multiplmanifest
+
+import (
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "net/http"
+ "strings"
+)
+
+type multipleManifestHandler struct {
+ next http.Handler
+}
+
+// New ...
+func New(next http.Handler) http.Handler {
+ return &multipleManifestHandler{
+ next: next,
+ }
+}
+
+// ServeHTTP blocks requests from the docker client to upload a manifest list, which Harbor does not support so far.
+func (mh multipleManifestHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ match, _, _ := util.MatchPushManifest(req)
+ if match {
+ contentType := req.Header.Get("Content-type")
+ // application/vnd.docker.distribution.manifest.list.v2+json
+ if strings.Contains(contentType, "manifest.list.v2") {
+ log.Debugf("Content-type: %s is not supported, failing the response.", contentType)
+ http.Error(rw, util.MarshalError("UNSUPPORTED_MEDIA_TYPE", "Manifest.list is not supported."), http.StatusUnsupportedMediaType)
+ return
+ }
+ }
+ mh.next.ServeHTTP(rw, req)
+}
diff --git a/src/core/middlewares/readonly/hanlder.go b/src/core/middlewares/readonly/hanlder.go
new file mode 100644
index 000000000..be77ac285
--- /dev/null
+++ b/src/core/middlewares/readonly/hanlder.go
@@ -0,0 +1,45 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package readonly
+
+import (
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "net/http"
+)
+
+type readonlyHandler struct {
+ next http.Handler
+}
+
+// New ...
+func New(next http.Handler) http.Handler {
+ return &readonlyHandler{
+ next: next,
+ }
+}
+
+// ServeHTTP ...
+func (rh readonlyHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if config.ReadOnly() {
+ if req.Method == http.MethodDelete || req.Method == http.MethodPost || req.Method == http.MethodPatch || req.Method == http.MethodPut {
+ log.Warningf("The request is prohibited in readonly mode, url is: %s", req.URL.Path)
+ http.Error(rw, util.MarshalError("DENIED", "The system is in read only mode. Any modification is prohibited."), http.StatusForbidden)
+ return
+ }
+ }
+ rh.next.ServeHTTP(rw, req)
+}
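The read-only gate generalizes to a small reusable middleware: reject mutating HTTP methods whenever a predicate reports the system is read-only. A standalone sketch, with the predicate standing in for config.ReadOnly():

```go
// Reject POST/PUT/PATCH/DELETE with 403 while the readOnly predicate is true.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func readOnlyGate(readOnly func() bool, next http.Handler) http.Handler {
	mutating := map[string]bool{
		http.MethodPost: true, http.MethodPut: true,
		http.MethodPatch: true, http.MethodDelete: true,
	}
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		if readOnly() && mutating[req.Method] {
			http.Error(rw, "the system is in read-only mode", http.StatusForbidden)
			return
		}
		next.ServeHTTP(rw, req)
	})
}

func main() {
	next := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.WriteHeader(http.StatusOK)
	})
	h := readOnlyGate(func() bool { return true }, next)

	for _, method := range []string{http.MethodGet, http.MethodDelete} {
		rr := httptest.NewRecorder()
		h.ServeHTTP(rr, httptest.NewRequest(method, "/v2/library/photon/manifests/latest", nil))
		fmt.Println(method, rr.Code) // GET 200, DELETE 403
	}
}
```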
diff --git a/src/core/middlewares/registryproxy/handler.go b/src/core/middlewares/registryproxy/handler.go
new file mode 100644
index 000000000..72a9f02f0
--- /dev/null
+++ b/src/core/middlewares/registryproxy/handler.go
@@ -0,0 +1,61 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package registryproxy
+
+import (
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/config"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+)
+
+type proxyHandler struct {
+ handler http.Handler
+}
+
+// New ...
+func New(urls ...string) http.Handler {
+ var registryURL string
+ var err error
+ if len(urls) > 1 {
+ log.Errorf("the parm, urls should have only 0 or 1 elements")
+ return nil
+ }
+ if len(urls) == 0 {
+ registryURL, err = config.RegistryURL()
+ if err != nil {
+ log.Error(err)
+ return nil
+ }
+ } else {
+ registryURL = urls[0]
+ }
+ targetURL, err := url.Parse(registryURL)
+ if err != nil {
+ log.Error(err)
+ return nil
+ }
+
+ return &proxyHandler{
+ handler: httputil.NewSingleHostReverseProxy(targetURL),
+ }
+
+}
+
+// ServeHTTP ...
+func (ph proxyHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ ph.handler.ServeHTTP(rw, req)
+}
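The proxy handler is a thin wrapper over httputil.NewSingleHostReverseProxy. A runnable sketch of the same mechanism, pointed at a local httptest server instead of the registry URL from config:

```go
// Forward inbound requests to a single backend via the stdlib reverse proxy.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/http/httputil"
	"net/url"
)

func main() {
	backend := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		fmt.Fprintf(rw, "backend saw %s", req.URL.Path)
	}))
	defer backend.Close()

	target, err := url.Parse(backend.URL)
	if err != nil {
		panic(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(target)

	rr := httptest.NewRecorder()
	proxy.ServeHTTP(rr, httptest.NewRequest("GET", "/v2/_catalog", nil))
	fmt.Println(rr.Body.String()) // backend saw /v2/_catalog
}
```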
diff --git a/src/core/middlewares/sizequota/builder.go b/src/core/middlewares/sizequota/builder.go
new file mode 100644
index 000000000..a6e1ecf92
--- /dev/null
+++ b/src/core/middlewares/sizequota/builder.go
@@ -0,0 +1,212 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sizequota
+
+import (
+ "fmt"
+ "net/http"
+ "strconv"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/middlewares/interceptor"
+ "github.com/goharbor/harbor/src/core/middlewares/interceptor/quota"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+)
+
+var (
+ defaultBuilders = []interceptor.Builder{
+ &blobStreamUploadBuilder{},
+ &blobStorageQuotaBuilder{},
+ &manifestCreationBuilder{},
+ &manifestDeletionBuilder{},
+ }
+)
+
+// blobStreamUploadBuilder interceptor builder for the request PATCH /v2/<name>/blobs/uploads/<uuid>
+type blobStreamUploadBuilder struct{}
+
+func (*blobStreamUploadBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
+ if !match(req, http.MethodPatch, blobUploadURLRe) {
+ return nil, nil
+ }
+
+ s := blobUploadURLRe.FindStringSubmatch(req.URL.Path)
+ uuid := s[2]
+
+ onResponse := func(w http.ResponseWriter, req *http.Request) {
+ size, err := parseUploadedBlobSize(w)
+ if err != nil {
+ log.Errorf("failed to parse uploaded blob size for upload %s", uuid)
+ return
+ }
+
+ ok, err := setUploadedBlobSize(uuid, size)
+ if err != nil {
+ log.Errorf("failed to update blob update size for upload %s, error: %v", uuid, err)
+ return
+ }
+
+ if !ok {
+ // TODO: discuss what to do here.
+ log.Errorf("failed to set chunk %s size %d in redis, which prevents setting the correct quota for the artifact", uuid, size)
+ }
+ }
+
+ return interceptor.ResponseInterceptorFunc(onResponse), nil
+}
+
+// blobStorageQuotaBuilder interceptor builder for these requests
+// PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
+// POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository name>
+type blobStorageQuotaBuilder struct{}
+
+func (*blobStorageQuotaBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
+ parseBlobInfo := getBlobInfoParser(req)
+ if parseBlobInfo == nil {
+ return nil, nil
+ }
+
+ info, err := parseBlobInfo(req)
+ if err != nil {
+ return nil, err
+ }
+
+ // replace req with blob info context
+ *req = *(req.WithContext(util.NewBlobInfoContext(req.Context(), info)))
+
+ opts := []quota.Option{
+ quota.EnforceResources(config.QuotaPerProjectEnable()),
+ quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
+ quota.WithAction(quota.AddAction),
+ quota.StatusCode(http.StatusCreated), // NOTICE: mount blob and complete blob upload both return 201 on success
+ quota.OnResources(computeResourcesForBlob),
+ quota.MutexKeys(info.MutexKey()),
+ quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
+ return syncBlobInfoToProject(info)
+ }),
+ }
+
+ return quota.New(opts...), nil
+}
+
+// manifestCreationBuilder interceptor builder for the request PUT /v2/<name>/manifests/<reference>
+type manifestCreationBuilder struct{}
+
+func (*manifestCreationBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
+ if match, _, _ := util.MatchPushManifest(req); !match {
+ return nil, nil
+ }
+
+ info, err := util.ParseManifestInfo(req)
+ if err != nil {
+ return nil, err
+ }
+
+ // Replace request with manifests info context
+ *req = *req.WithContext(util.NewManifestInfoContext(req.Context(), info))
+
+ opts := []quota.Option{
+ quota.EnforceResources(config.QuotaPerProjectEnable()),
+ quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
+ quota.WithAction(quota.AddAction),
+ quota.StatusCode(http.StatusCreated),
+ quota.OnResources(computeResourcesForManifestCreation),
+ quota.MutexKeys(info.MutexKey("size")),
+ quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
+ // the manifest was created, sync the manifest itself as a blob to the blob and project_blob tables
+ blobInfo, err := parseBlobInfoFromManifest(req)
+ if err != nil {
+ return err
+ }
+
+ if err := syncBlobInfoToProject(blobInfo); err != nil {
+ return err
+ }
+
+ // sync the blobs from the manifest that are not yet in the project to the project_blob table
+ blobs, err := info.GetBlobsNotInProject()
+ if err != nil {
+ return err
+ }
+
+ _, err = dao.AddBlobsToProject(info.ProjectID, blobs...)
+
+ return err
+ }),
+ }
+
+ return quota.New(opts...), nil
+}
+
+// manifestDeletionBuilder interceptor builder for the request DELETE /v2/<name>/manifests/<reference>
+type manifestDeletionBuilder struct{}
+
+func (*manifestDeletionBuilder) Build(req *http.Request) (interceptor.Interceptor, error) {
+ if match, _, _ := util.MatchDeleteManifest(req); !match {
+ return nil, nil
+ }
+
+ info, ok := util.ManifestInfoFromContext(req.Context())
+ if !ok {
+ var err error
+ info, err = util.ParseManifestInfoFromPath(req)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse manifest, error %v", err)
+ }
+
+ // The manifest info will be used by computeResourcesForManifestDeletion
+ *req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info)))
+ }
+
+ blobs, err := dao.GetBlobsByArtifact(info.Digest)
+ if err != nil {
+ return nil, fmt.Errorf("failed to query blobs of %s, error: %v", info.Digest, err)
+ }
+
+ mutexKeys := []string{info.MutexKey("size")}
+ for _, blob := range blobs {
+ mutexKeys = append(mutexKeys, info.BlobMutexKey(blob))
+ }
+
+ opts := []quota.Option{
+ quota.EnforceResources(config.QuotaPerProjectEnable()),
+ quota.WithManager("project", strconv.FormatInt(info.ProjectID, 10)),
+ quota.WithAction(quota.SubtractAction),
+ quota.StatusCode(http.StatusAccepted),
+ quota.OnResources(computeResourcesForManifestDeletion),
+ quota.MutexKeys(mutexKeys...),
+ quota.OnFulfilled(func(http.ResponseWriter, *http.Request) error {
+ blobs := info.ExclusiveBlobs
+
+ total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{
+ PID: info.ProjectID,
+ Digest: info.Digest,
+ })
+ if err == nil && total > 0 {
+ blob, err := dao.GetBlob(info.Digest)
+ if err == nil {
+ blobs = append(blobs, blob)
+ }
+ }
+
+ return dao.RemoveBlobsFromProject(info.ProjectID, blobs...)
+ }),
+ }
+
+ return quota.New(opts...), nil
+}
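Each builder decides whether it applies by matching the HTTP method plus a URL regex, then pulls path parameters out of the submatches. A sketch of that matching step; the pattern below is an assumption made for illustration, since the real blobUploadURLRe is defined elsewhere in this package:

```go
// Method + URL-regex matching, as the builders use to claim a request.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"regexp"
)

// Illustrative pattern: repository name, then the upload session uuid.
var blobUploadURLRe = regexp.MustCompile(`^/v2/(.+)/blobs/uploads/([a-zA-Z0-9-_.=]+)$`)

func match(req *http.Request, method string, re *regexp.Regexp) bool {
	return req.Method == method && re.MatchString(req.URL.Path)
}

func main() {
	req := httptest.NewRequest(http.MethodPatch,
		"/v2/library/photon/blobs/uploads/3f9e6b12-0c44-4a3b-9f2d-111111111111", nil)

	if match(req, http.MethodPatch, blobUploadURLRe) {
		s := blobUploadURLRe.FindStringSubmatch(req.URL.Path)
		fmt.Println("repository:", s[1]) // library/photon
		fmt.Println("uuid:", s[2])       // 3f9e6b12-0c44-4a3b-9f2d-111111111111
	}
}
```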
diff --git a/src/core/middlewares/sizequota/handler.go b/src/core/middlewares/sizequota/handler.go
new file mode 100644
index 000000000..244e55589
--- /dev/null
+++ b/src/core/middlewares/sizequota/handler.go
@@ -0,0 +1,83 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sizequota
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/middlewares/interceptor"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+)
+
+type sizeQuotaHandler struct {
+ builders []interceptor.Builder
+ next http.Handler
+}
+
+// New ...
+func New(next http.Handler, builders ...interceptor.Builder) http.Handler {
+ if len(builders) == 0 {
+ builders = defaultBuilders
+ }
+
+ return &sizeQuotaHandler{
+ builders: builders,
+ next: next,
+ }
+}
+
+// ServeHTTP ...
+func (h *sizeQuotaHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ interceptor, err := h.getInterceptor(req)
+ if err != nil {
+ log.Warningf("Error occurred when to handle request in size quota handler: %v", err)
+ http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in size quota handler: %v", err)),
+ http.StatusInternalServerError)
+ return
+ }
+
+ if interceptor == nil {
+ h.next.ServeHTTP(rw, req)
+ return
+ }
+
+ if err := interceptor.HandleRequest(req); err != nil {
+ log.Warningf("Error occurred when to handle request in size quota handler: %v", err)
+ http.Error(rw, util.MarshalError("InternalError", fmt.Sprintf("Error occurred when to handle request in size quota handler: %v", err)),
+ http.StatusInternalServerError)
+ return
+ }
+
+ h.next.ServeHTTP(rw, req)
+
+ interceptor.HandleResponse(rw, req)
+}
+
+func (h *sizeQuotaHandler) getInterceptor(req *http.Request) (interceptor.Interceptor, error) {
+ for _, builder := range h.builders {
+ interceptor, err := builder.Build(req)
+ if err != nil {
+ return nil, err
+ }
+
+ if interceptor != nil {
+ return interceptor, nil
+ }
+ }
+
+ return nil, nil
+}
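HandleResponse can only branch on the outcome because the writer passed through the chain satisfies the statusRecorder interface. A standalone sketch of such a status-capturing ResponseWriter; this mirrors what util.NewCustomResponseWriter is used for here, without claiming its exact API:

```go
// Wrap a ResponseWriter so the status code written by the backend can be
// inspected after the fact by an interceptor.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

type statusWriter struct {
	http.ResponseWriter
	status int
}

func (w *statusWriter) WriteHeader(code int) {
	w.status = code // remember the status before passing it on
	w.ResponseWriter.WriteHeader(code)
}

func (w *statusWriter) Status() int { return w.status }

func main() {
	next := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.WriteHeader(http.StatusCreated)
	})

	sw := &statusWriter{ResponseWriter: httptest.NewRecorder()}
	next.ServeHTTP(sw, httptest.NewRequest(http.MethodPut, "/v2/library/photon/manifests/latest", nil))

	// after the backend ran, the interceptor can branch on the status
	fmt.Println(sw.Status() == http.StatusCreated) // true
}
```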
diff --git a/src/core/middlewares/sizequota/handler_test.go b/src/core/middlewares/sizequota/handler_test.go
new file mode 100644
index 000000000..e2b2bb309
--- /dev/null
+++ b/src/core/middlewares/sizequota/handler_test.go
@@ -0,0 +1,710 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sizequota
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strconv"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/manifest"
+ "github.com/docker/distribution/manifest/schema2"
+ "github.com/goharbor/harbor/src/common"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/middlewares/countquota"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "github.com/goharbor/harbor/src/pkg/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/suite"
+)
+
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+func genUUID() string {
+ b := make([]byte, 16)
+
+ if _, err := rand.Read(b); err != nil {
+ return ""
+ }
+
+ return fmt.Sprintf("%X-%X-%X-%X-%X", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
+}
+
+func getProjectCountUsage(projectID int64) (int64, error) {
+ usage := models.QuotaUsage{Reference: "project", ReferenceID: fmt.Sprintf("%d", projectID)}
+ err := dao.GetOrmer().Read(&usage, "reference", "reference_id")
+ if err != nil {
+ return 0, err
+ }
+ used, err := types.NewResourceList(usage.Used)
+ if err != nil {
+ return 0, err
+ }
+
+ return used[types.ResourceCount], nil
+}
+
+func getProjectStorageUsage(projectID int64) (int64, error) {
+ usage := models.QuotaUsage{Reference: "project", ReferenceID: fmt.Sprintf("%d", projectID)}
+ err := dao.GetOrmer().Read(&usage, "reference", "reference_id")
+ if err != nil {
+ return 0, err
+ }
+ used, err := types.NewResourceList(usage.Used)
+ if err != nil {
+ return 0, err
+ }
+
+ return used[types.ResourceStorage], nil
+}
+
+func randomString(n int) string {
+ const letterBytes = "abcdefghijklmnopqrstuvwxyz"
+
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = letterBytes[rand.Intn(len(letterBytes))]
+ }
+
+ return string(b)
+}
+
+func makeManifest(configSize int64, layerSizes []int64) schema2.Manifest {
+ manifest := schema2.Manifest{
+ Versioned: manifest.Versioned{SchemaVersion: 2, MediaType: schema2.MediaTypeManifest},
+ Config: distribution.Descriptor{
+ MediaType: schema2.MediaTypeImageConfig,
+ Size: configSize,
+ Digest: digest.FromString(randomString(15)),
+ },
+ }
+
+ for _, size := range layerSizes {
+ manifest.Layers = append(manifest.Layers, distribution.Descriptor{
+ MediaType: schema2.MediaTypeLayer,
+ Size: size,
+ Digest: digest.FromString(randomString(15)),
+ })
+ }
+
+ return manifest
+}
+
+func manifestWithAdditionalLayers(raw schema2.Manifest, layerSizes []int64) schema2.Manifest {
+ var manifest schema2.Manifest
+
+ manifest.Versioned = raw.Versioned
+ manifest.Config = raw.Config
+ manifest.Layers = append(manifest.Layers, raw.Layers...)
+
+ for _, size := range layerSizes {
+ manifest.Layers = append(manifest.Layers, distribution.Descriptor{
+ MediaType: schema2.MediaTypeLayer,
+ Size: size,
+ Digest: digest.FromString(randomString(15)),
+ })
+ }
+
+ return manifest
+}
+
+func digestOfManifest(manifest schema2.Manifest) string {
+ bytes, _ := json.Marshal(manifest)
+
+ return digest.FromBytes(bytes).String()
+}
+
+func sizeOfManifest(manifest schema2.Manifest) int64 {
+ bytes, _ := json.Marshal(manifest)
+
+ return int64(len(bytes))
+}
+
+func sizeOfImage(manifest schema2.Manifest) int64 {
+ totalSizeOfLayers := manifest.Config.Size
+
+ for _, layer := range manifest.Layers {
+ totalSizeOfLayers += layer.Size
+ }
+
+ return sizeOfManifest(manifest) + totalSizeOfLayers
+}
+
+func doHandle(req *http.Request, next ...http.HandlerFunc) int {
+ rr := httptest.NewRecorder()
+
+ var n http.HandlerFunc
+ if len(next) > 0 {
+ n = next[0]
+ } else {
+ n = func(w http.ResponseWriter, req *http.Request) {
+ w.WriteHeader(http.StatusCreated)
+ }
+ }
+
+ h := New(http.HandlerFunc(n))
+ h.ServeHTTP(util.NewCustomResponseWriter(rr), req)
+
+ return rr.Code
+}
+
+func patchBlobUpload(projectName, name, uuid, blobDigest string, chunkSize int64) {
+ repository := fmt.Sprintf("%s/%s", projectName, name)
+
+ url := fmt.Sprintf("/v2/%s/blobs/uploads/%s?digest=%s", repository, uuid, blobDigest)
+ req, _ := http.NewRequest(http.MethodPatch, url, nil)
+
+ doHandle(req, func(w http.ResponseWriter, req *http.Request) {
+ w.WriteHeader(http.StatusAccepted)
+ w.Header().Add("Range", fmt.Sprintf("0-%d", chunkSize-1))
+ })
+}
+
+func putBlobUpload(projectName, name, uuid, blobDigest string, blobSize ...int64) {
+ repository := fmt.Sprintf("%s/%s", projectName, name)
+
+ url := fmt.Sprintf("/v2/%s/blobs/uploads/%s?digest=%s", repository, uuid, blobDigest)
+ req, _ := http.NewRequest(http.MethodPut, url, nil)
+ if len(blobSize) > 0 {
+ req.Header.Add("Content-Length", strconv.FormatInt(blobSize[0], 10))
+ }
+
+ doHandle(req, func(w http.ResponseWriter, req *http.Request) {
+ w.WriteHeader(http.StatusCreated)
+ })
+}
+
+func mountBlob(projectName, name, blobDigest, fromRepository string) {
+ repository := fmt.Sprintf("%s/%s", projectName, name)
+
+ url := fmt.Sprintf("/v2/%s/blobs/uploads/?mount=%s&from=%s", repository, blobDigest, fromRepository)
+ req, _ := http.NewRequest(http.MethodPost, url, nil)
+
+ doHandle(req, func(w http.ResponseWriter, req *http.Request) {
+ w.WriteHeader(http.StatusCreated)
+ })
+}
+
+func deleteManifest(projectName, name, digest string, accepted ...func() bool) {
+ repository := fmt.Sprintf("%s/%s", projectName, name)
+
+ url := fmt.Sprintf("/v2/%s/manifests/%s", repository, digest)
+ req, _ := http.NewRequest(http.MethodDelete, url, nil)
+
+ next := countquota.New(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ if len(accepted) > 0 {
+ if accepted[0]() {
+ w.WriteHeader(http.StatusAccepted)
+ } else {
+ w.WriteHeader(http.StatusNotFound)
+ }
+
+ return
+ }
+
+ w.WriteHeader(http.StatusAccepted)
+ }))
+
+ rr := httptest.NewRecorder()
+ h := New(next)
+ h.ServeHTTP(util.NewCustomResponseWriter(rr), req)
+}
+
+func putManifest(projectName, name, tag string, manifest schema2.Manifest) {
+ repository := fmt.Sprintf("%s/%s", projectName, name)
+
+ buf, _ := json.Marshal(manifest)
+
+ url := fmt.Sprintf("/v2/%s/manifests/%s", repository, tag)
+ req, _ := http.NewRequest(http.MethodPut, url, bytes.NewReader(buf))
+ req.Header.Add("Content-Type", manifest.MediaType)
+
+ next := countquota.New(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ w.WriteHeader(http.StatusCreated)
+ }))
+
+ rr := httptest.NewRecorder()
+ h := New(next)
+ h.ServeHTTP(util.NewCustomResponseWriter(rr), req)
+}
+
+func pushImage(projectName, name, tag string, manifest schema2.Manifest) {
+ putBlobUpload(projectName, name, genUUID(), manifest.Config.Digest.String(), manifest.Config.Size)
+ for _, layer := range manifest.Layers {
+ putBlobUpload(projectName, name, genUUID(), layer.Digest.String(), layer.Size)
+ }
+
+ putManifest(projectName, name, tag, manifest)
+}
+
+func withProject(f func(int64, string)) {
+ projectName := randomString(5)
+
+ projectID, err := dao.AddProject(models.Project{
+ Name: projectName,
+ OwnerID: 1,
+ })
+ if err != nil {
+ panic(err)
+ }
+
+ defer func() {
+ dao.DeleteProject(projectID)
+ }()
+
+ f(projectID, projectName)
+}
+
+type HandlerSuite struct {
+ suite.Suite
+}
+
+func (suite *HandlerSuite) checkCountUsage(expected, projectID int64) {
+ count, err := getProjectCountUsage(projectID)
+ suite.Nil(err, fmt.Sprintf("Failed to get count usage of project %d, error: %v", projectID, err))
+ suite.Equal(expected, count, "Failed to check count usage for project %d", projectID)
+}
+
+func (suite *HandlerSuite) checkStorageUsage(expected, projectID int64) {
+ value, err := getProjectStorageUsage(projectID)
+ suite.Nil(err, fmt.Sprintf("Failed to get storage usage of project %d, error: %v", projectID, err))
+ suite.Equal(expected, value, "Failed to check storage usage for project %d", projectID)
+}
+
+func (suite *HandlerSuite) TearDownTest() {
+ for _, table := range []string{
+ "artifact", "blob",
+ "artifact_blob", "project_blob",
+ "quota", "quota_usage",
+ } {
+ dao.ClearTable(table)
+ }
+}
+
+func (suite *HandlerSuite) TestPatchBlobUpload() {
+ withProject(func(projectID int64, projectName string) {
+ uuid := genUUID()
+ blobDigest := digest.FromString(randomString(15)).String()
+ patchBlobUpload(projectName, "photon", uuid, blobDigest, 1024)
+ size, err := getUploadedBlobSize(uuid)
+ suite.Nil(err)
+ suite.Equal(int64(1024), size)
+ })
+}
+
+func (suite *HandlerSuite) TestPutBlobUpload() {
+ withProject(func(projectID int64, projectName string) {
+ uuid := genUUID()
+ blobDigest := digest.FromString(randomString(15)).String()
+ putBlobUpload(projectName, "photon", uuid, blobDigest, 1024)
+ suite.checkStorageUsage(1024, projectID)
+
+ blob, err := dao.GetBlob(blobDigest)
+ suite.Nil(err)
+ suite.Equal(int64(1024), blob.Size)
+ })
+}
+
+func (suite *HandlerSuite) TestPutBlobUploadWithPatch() {
+ withProject(func(projectID int64, projectName string) {
+ uuid := genUUID()
+ blobDigest := digest.FromString(randomString(15)).String()
+ patchBlobUpload(projectName, "photon", uuid, blobDigest, 1024)
+
+ putBlobUpload(projectName, "photon", uuid, blobDigest)
+ suite.checkStorageUsage(1024, projectID)
+
+ blob, err := dao.GetBlob(blobDigest)
+ suite.Nil(err)
+ suite.Equal(int64(1024), blob.Size)
+ })
+}
+
+func (suite *HandlerSuite) TestMountBlob() {
+ withProject(func(projectID int64, projectName string) {
+ blobDigest := digest.FromString(randomString(15)).String()
+ putBlobUpload(projectName, "photon", genUUID(), blobDigest, 1024)
+ suite.checkStorageUsage(1024, projectID)
+
+ repository := fmt.Sprintf("%s/%s", projectName, "photon")
+
+ withProject(func(projectID int64, projectName string) {
+ mountBlob(projectName, "harbor", blobDigest, repository)
+ suite.checkStorageUsage(1024, projectID)
+ })
+ })
+}
+
+func (suite *HandlerSuite) TestPutManifestCreated() {
+ withProject(func(projectID int64, projectName string) {
+ manifest := makeManifest(100, []int64{100, 100})
+
+ putBlobUpload(projectName, "photon", genUUID(), manifest.Config.Digest.String(), manifest.Config.Size)
+ for _, layer := range manifest.Layers {
+ putBlobUpload(projectName, "photon", genUUID(), layer.Digest.String(), layer.Size)
+ }
+
+ putManifest(projectName, "photon", "latest", manifest)
+
+ suite.checkStorageUsage(int64(300+sizeOfManifest(manifest)), projectID)
+ })
+}
+
+func (suite *HandlerSuite) TestDeleteManifest() {
+ withProject(func(projectID int64, projectName string) {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ size := sizeOfImage(manifest)
+
+ pushImage(projectName, "photon", "latest", manifest)
+ suite.checkStorageUsage(size, projectID)
+
+ deleteManifest(projectName, "photon", digestOfManifest(manifest))
+ suite.checkStorageUsage(0, projectID)
+ })
+}
+
+func (suite *HandlerSuite) TestImageOverwrite() {
+ withProject(func(projectID int64, projectName string) {
+ manifest1 := makeManifest(1, []int64{2, 3, 4, 5})
+ size1 := sizeOfImage(manifest1)
+ pushImage(projectName, "photon", "latest", manifest1)
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(size1, projectID)
+
+ manifest2 := makeManifest(1, []int64{2, 3, 4, 5})
+ size2 := sizeOfImage(manifest2)
+ pushImage(projectName, "photon", "latest", manifest2)
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(size1+size2, projectID)
+
+ manifest3 := makeManifest(1, []int64{2, 3, 4, 5})
+ size3 := sizeOfImage(manifest3)
+ pushImage(projectName, "photon", "latest", manifest3)
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(size1+size2+size3, projectID)
+ })
+}
+
+func (suite *HandlerSuite) TestPushImageMultiTimes() {
+ withProject(func(projectID int64, projectName string) {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ size := sizeOfImage(manifest)
+
+ pushImage(projectName, "photon", "latest", manifest)
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(size, projectID)
+
+ pushImage(projectName, "photon", "latest", manifest)
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(size, projectID)
+
+ pushImage(projectName, "photon", "latest", manifest)
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(size, projectID)
+ })
+}
+
+func (suite *HandlerSuite) TestPushImageToSameRepository() {
+ withProject(func(projectID int64, projectName string) {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ size := sizeOfImage(manifest)
+
+ pushImage(projectName, "photon", "latest", manifest)
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(size, projectID)
+
+ pushImage(projectName, "photon", "dev", manifest)
+ suite.checkCountUsage(2, projectID)
+ suite.checkStorageUsage(size, projectID)
+ })
+}
+
+func (suite *HandlerSuite) TestPushImageToDifferentRepositories() {
+ withProject(func(projectID int64, projectName string) {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ size := sizeOfImage(manifest)
+
+ pushImage(projectName, "mysql", "latest", manifest)
+ suite.checkStorageUsage(size, projectID)
+
+ pushImage(projectName, "redis", "latest", manifest)
+ suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID)
+
+ pushImage(projectName, "postgres", "latest", manifest)
+ suite.checkStorageUsage(size+2*sizeOfManifest(manifest), projectID)
+ })
+}
+
+func (suite *HandlerSuite) TestPushImageToDifferentProjects() {
+ withProject(func(projectID int64, projectName string) {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ size := sizeOfImage(manifest)
+
+ pushImage(projectName, "mysql", "latest", manifest)
+ suite.checkStorageUsage(size, projectID)
+
+ withProject(func(id int64, name string) {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ size := sizeOfImage(manifest)
+
+ pushImage(name, "mysql", "latest", manifest)
+ suite.checkStorageUsage(size, id)
+
+ suite.checkStorageUsage(size, projectID)
+ })
+ })
+}
+
+func (suite *HandlerSuite) TestDeleteManifestShareLayersInSameRepository() {
+ withProject(func(projectID int64, projectName string) {
+ manifest1 := makeManifest(1, []int64{2, 3, 4, 5})
+ size1 := sizeOfImage(manifest1)
+
+ pushImage(projectName, "mysql", "latest", manifest1)
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(size1, projectID)
+
+ manifest2 := manifestWithAdditionalLayers(manifest1, []int64{6, 7})
+ pushImage(projectName, "mysql", "dev", manifest2)
+ suite.checkCountUsage(2, projectID)
+
+ totalSize := size1 + sizeOfManifest(manifest2) + 6 + 7
+ suite.checkStorageUsage(totalSize, projectID)
+
+ deleteManifest(projectName, "mysql", digestOfManifest(manifest1))
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(totalSize-sizeOfManifest(manifest1), projectID)
+ })
+}
+
+func (suite *HandlerSuite) TestDeleteManifestShareLayersInDifferentRepositories() {
+ withProject(func(projectID int64, projectName string) {
+ manifest1 := makeManifest(1, []int64{2, 3, 4, 5})
+ size1 := sizeOfImage(manifest1)
+
+ pushImage(projectName, "mysql", "latest", manifest1)
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(size1, projectID)
+
+ pushImage(projectName, "mysql", "dev", manifest1)
+ suite.checkCountUsage(2, projectID)
+ suite.checkStorageUsage(size1, projectID)
+
+ manifest2 := manifestWithAdditionalLayers(manifest1, []int64{6, 7})
+ pushImage(projectName, "mariadb", "latest", manifest2)
+ suite.checkCountUsage(3, projectID)
+
+ totalSize := size1 + sizeOfManifest(manifest2) + 6 + 7
+ suite.checkStorageUsage(totalSize, projectID)
+
+ deleteManifest(projectName, "mysql", digestOfManifest(manifest1))
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(totalSize-sizeOfManifest(manifest1), projectID)
+ })
+}
+
+func (suite *HandlerSuite) TestDeleteManifestInSameRepository() {
+ withProject(func(projectID int64, projectName string) {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ size := sizeOfImage(manifest)
+
+ pushImage(projectName, "photon", "latest", manifest)
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(size, projectID)
+
+ pushImage(projectName, "photon", "dev", manifest)
+ suite.checkCountUsage(2, projectID)
+ suite.checkStorageUsage(size, projectID)
+
+ deleteManifest(projectName, "photon", digestOfManifest(manifest))
+ suite.checkCountUsage(0, projectID)
+ suite.checkStorageUsage(0, projectID)
+ })
+}
+
+func (suite *HandlerSuite) TestDeleteManifestInDifferentRepositories() {
+ withProject(func(projectID int64, projectName string) {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ size := sizeOfImage(manifest)
+
+ pushImage(projectName, "mysql", "latest", manifest)
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(size, projectID)
+
+ pushImage(projectName, "mysql", "5.6", manifest)
+ suite.checkCountUsage(2, projectID)
+ suite.checkStorageUsage(size, projectID)
+
+ pushImage(projectName, "redis", "latest", manifest)
+ suite.checkCountUsage(3, projectID)
+ suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID)
+
+ deleteManifest(projectName, "redis", digestOfManifest(manifest))
+ suite.checkCountUsage(2, projectID)
+ suite.checkStorageUsage(size, projectID)
+
+ pushImage(projectName, "redis", "latest", manifest)
+ suite.checkCountUsage(3, projectID)
+ suite.checkStorageUsage(size+sizeOfManifest(manifest), projectID)
+ })
+}
+
+func (suite *HandlerSuite) TestDeleteManifestInDifferentProjects() {
+ withProject(func(projectID int64, projectName string) {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ size := sizeOfImage(manifest)
+
+ pushImage(projectName, "mysql", "latest", manifest)
+ suite.checkStorageUsage(size, projectID)
+
+ withProject(func(id int64, name string) {
+ pushImage(name, "mysql", "latest", manifest)
+ suite.checkStorageUsage(size, id)
+
+ suite.checkStorageUsage(size, projectID)
+ deleteManifest(projectName, "mysql", digestOfManifest(manifest))
+ suite.checkCountUsage(0, projectID)
+ suite.checkStorageUsage(0, projectID)
+ })
+
+ })
+}
+
+func (suite *HandlerSuite) TestPushDeletePush() {
+ withProject(func(projectID int64, projectName string) {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ size := sizeOfImage(manifest)
+
+ pushImage(projectName, "photon", "latest", manifest)
+ suite.checkStorageUsage(size, projectID)
+
+ deleteManifest(projectName, "photon", digestOfManifest(manifest))
+ suite.checkStorageUsage(0, projectID)
+
+ pushImage(projectName, "photon", "latest", manifest)
+ suite.checkStorageUsage(size, projectID)
+ })
+}
+
+func (suite *HandlerSuite) TestPushImageRace() {
+ withProject(func(projectID int64, projectName string) {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ size := sizeOfImage(manifest)
+
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ pushImage(projectName, "photon", "latest", manifest)
+ }()
+ }
+ wg.Wait()
+
+ suite.checkCountUsage(1, projectID)
+ suite.checkStorageUsage(size, projectID)
+ })
+}
+
+func (suite *HandlerSuite) TestDeleteImageRace() {
+ withProject(func(projectID int64, projectName string) {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ pushImage(projectName, "photon", "latest", manifest)
+
+ count := 100
+ size := sizeOfImage(manifest)
+ for i := 0; i < count; i++ {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ pushImage(projectName, "mysql", fmt.Sprintf("tag%d", i), manifest)
+ size += sizeOfImage(manifest)
+ }
+
+ suite.checkCountUsage(int64(count+1), projectID)
+ suite.checkStorageUsage(size, projectID)
+
+ var wg sync.WaitGroup
+ for i := 0; i < 10; i++ {
+ wg.Add(1)
+ go func(i int) {
+ defer wg.Done()
+ deleteManifest(projectName, "photon", digestOfManifest(manifest), func() bool {
+ return i == 0
+ })
+ }(i)
+ }
+ wg.Wait()
+
+ suite.checkCountUsage(int64(count), projectID)
+ suite.checkStorageUsage(size-sizeOfImage(manifest), projectID)
+ })
+}
+
+func (suite *HandlerSuite) TestDisableProjectQuota() {
+ withProject(func(projectID int64, projectName string) {
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ pushImage(projectName, "photon", "latest", manifest)
+
+ quotas, err := dao.ListQuotas(&models.QuotaQuery{
+ Reference: "project",
+ ReferenceID: strconv.FormatInt(projectID, 10),
+ })
+
+ suite.Nil(err)
+ suite.Len(quotas, 1)
+ })
+
+ withProject(func(projectID int64, projectName string) {
+ cfg := config.GetCfgManager()
+ cfg.Set(common.QuotaPerProjectEnable, false)
+ defer cfg.Set(common.QuotaPerProjectEnable, true)
+
+ manifest := makeManifest(1, []int64{2, 3, 4, 5})
+ pushImage(projectName, "photon", "latest", manifest)
+
+ quotas, err := dao.ListQuotas(&models.QuotaQuery{
+ Reference: "project",
+ ReferenceID: strconv.FormatInt(projectID, 10),
+ })
+
+ suite.Nil(err)
+ suite.Len(quotas, 0)
+ })
+}
+
+func TestMain(m *testing.M) {
+ config.Init()
+ dao.PrepareTestForPostgresSQL()
+
+ if result := m.Run(); result != 0 {
+ os.Exit(result)
+ }
+}
+
+func TestRunHandlerSuite(t *testing.T) {
+ suite.Run(t, new(HandlerSuite))
+}
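(The quota arithmetic in these tests relies on size helpers defined earlier in this test file. A minimal sketch of their assumed semantics, where sizeOfManifest is the marshalled manifest length and sizeOfImage adds the config and layer sizes on top of it:

    // Sketch only; the real helpers live earlier in this test file.
    func sizeOfManifest(m schema2.Manifest) int64 {
        buf, _ := json.Marshal(m) // the manifest blob as stored by the registry
        return int64(len(buf))
    }

    func sizeOfImage(m schema2.Manifest) int64 {
        size := sizeOfManifest(m) + m.Config.Size // manifest blob + config blob
        for _, layer := range m.Layers {
            size += layer.Size // plus every layer blob
        }
        return size
    }

This is why pushing the same manifest to a second repository, as in TestPushImageToDifferentRepositories, grows usage by exactly sizeOfManifest: config and layer blobs are shared within the project, but each repository stores its own manifest blob.)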
diff --git a/src/core/middlewares/sizequota/util.go b/src/core/middlewares/sizequota/util.go
new file mode 100644
index 000000000..edcf92631
--- /dev/null
+++ b/src/core/middlewares/sizequota/util.go
@@ -0,0 +1,330 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sizequota
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/garyburd/redigo/redis"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "github.com/goharbor/harbor/src/pkg/types"
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ blobUploadURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)blobs/uploads/([a-zA-Z0-9-_.=]+)/?$`)
+ initiateBlobUploadURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)blobs/uploads/?$`)
+)
+
+// parseUploadedBlobSize parses the blob stream upload response and returns the size of the uploaded blob
+func parseUploadedBlobSize(w http.ResponseWriter) (int64, error) {
+	// The Range header indicates the current progress of the upload.
+ // https://github.com/opencontainers/distribution-spec/blob/master/spec.md#get-blob-upload
+ r := w.Header().Get("Range")
+
+	parts := strings.Split(r, "-")
+	if len(parts) != 2 {
+		return 0, fmt.Errorf("invalid Range header %q in blob upload response", r)
+	}
+
+	size, err := strconv.ParseInt(parts[1], 10, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	// the docker registry reports the inclusive end offset (size - 1) in the response
+ if size > 0 {
+ size = size + 1
+ }
+
+ return size, nil
+}
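(As an illustration, not part of the patch: after a registry has accepted 100 bytes of a chunked upload it responds with Range: 0-99, and this helper reports 100:

    w := httptest.NewRecorder() // any http.ResponseWriter will do
    w.Header().Set("Range", "0-99")
    size, err := parseUploadedBlobSize(w) // size == 100, err == nil
)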
+
+// setUploadedBlobSize updates the size of the stream upload blob
+func setUploadedBlobSize(uuid string, size int64) (bool, error) {
+ conn, err := util.GetRegRedisCon()
+ if err != nil {
+ return false, err
+ }
+ defer conn.Close()
+
+ key := fmt.Sprintf("upload:%s:size", uuid)
+ reply, err := redis.String(conn.Do("SET", key, size))
+ if err != nil {
+ return false, err
+ }
+	return reply == "OK", nil
+}
+
+// getUploadedBlobSize returns the size of the stream upload blob
+func getUploadedBlobSize(uuid string) (int64, error) {
+ conn, err := util.GetRegRedisCon()
+ if err != nil {
+ return 0, err
+ }
+ defer conn.Close()
+
+ key := fmt.Sprintf("upload:%s:size", uuid)
+ size, err := redis.Int64(conn.Do("GET", key))
+ if err != nil {
+ return 0, err
+ }
+
+ return size, nil
+}
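(Both helpers read and write the same key, e.g. upload:c2a7e8f0:size for a hypothetical upload UUID c2a7e8f0; this is what lets parseBlobSize below fall back to the recorded stream size when the final PUT carries no usable Content-Length.)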
+
+// parseBlobSize returns the blob size from the blob upload complete request
+func parseBlobSize(req *http.Request, uuid string) (int64, error) {
+ size, err := strconv.ParseInt(req.Header.Get("Content-Length"), 10, 64)
+ if err == nil && size != 0 {
+ return size, nil
+ }
+
+ return getUploadedBlobSize(uuid)
+}
+
+// match returns true if the request method equals method and the path matches re
+func match(req *http.Request, method string, re *regexp.Regexp) bool {
+ return req.Method == method && re.MatchString(req.URL.Path)
+}
+
+// parseBlobInfoFromComplete returns blob info from blob upload complete request
+func parseBlobInfoFromComplete(req *http.Request) (*util.BlobInfo, error) {
+ if !match(req, http.MethodPut, blobUploadURLRe) {
+ return nil, fmt.Errorf("not match url %s for blob upload complete", req.URL.Path)
+ }
+
+ s := blobUploadURLRe.FindStringSubmatch(req.URL.Path)
+ repository, uuid := s[1][:len(s[1])-1], s[2]
+
+ projectName, _ := utils.ParseRepository(repository)
+ project, err := dao.GetProjectByName(projectName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err)
+ }
+ if project == nil {
+ return nil, fmt.Errorf("project %s not found", projectName)
+ }
+
+ dgt, err := digest.Parse(req.FormValue("digest"))
+ if err != nil {
+ return nil, fmt.Errorf("blob digest invalid for upload %s", uuid)
+ }
+
+ size, err := parseBlobSize(req, uuid)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get content length of blob upload %s, error: %v", uuid, err)
+ }
+
+ return &util.BlobInfo{
+ ProjectID: project.ProjectID,
+ Repository: repository,
+ Digest: dgt.String(),
+ Size: size,
+ }, nil
+}
+
+// parseBlobInfoFromManifest returns blob info from the put manifest request
+func parseBlobInfoFromManifest(req *http.Request) (*util.BlobInfo, error) {
+ info, ok := util.ManifestInfoFromContext(req.Context())
+ if !ok {
+ manifest, err := util.ParseManifestInfo(req)
+ if err != nil {
+ return nil, err
+ }
+
+ info = manifest
+
+		// swap in a request whose context carries the manifest info
+ *req = *(req.WithContext(util.NewManifestInfoContext(req.Context(), info)))
+ }
+
+ return &util.BlobInfo{
+ ProjectID: info.ProjectID,
+ Repository: info.Repository,
+ Digest: info.Descriptor.Digest.String(),
+ Size: info.Descriptor.Size,
+ ContentType: info.Descriptor.MediaType,
+ }, nil
+}
+
+// parseBlobInfoFromMount returns blob info from blob mount request
+func parseBlobInfoFromMount(req *http.Request) (*util.BlobInfo, error) {
+ if !match(req, http.MethodPost, initiateBlobUploadURLRe) {
+ return nil, fmt.Errorf("not match url %s for mount blob", req.URL.Path)
+ }
+
+ if req.FormValue("mount") == "" || req.FormValue("from") == "" {
+ return nil, fmt.Errorf("not match url %s for mount blob", req.URL.Path)
+ }
+
+ dgt, err := digest.Parse(req.FormValue("mount"))
+ if err != nil {
+ return nil, errors.New("mount must be digest")
+ }
+
+ s := initiateBlobUploadURLRe.FindStringSubmatch(req.URL.Path)
+ repository := strings.TrimSuffix(s[1], "/")
+
+ projectName, _ := utils.ParseRepository(repository)
+ project, err := dao.GetProjectByName(projectName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err)
+ }
+ if project == nil {
+ return nil, fmt.Errorf("project %s not found", projectName)
+ }
+
+ blob, err := dao.GetBlob(dgt.String())
+ if err != nil {
+ return nil, fmt.Errorf("failed to get blob %s, error: %v", dgt.String(), err)
+ }
+ if blob == nil {
+ return nil, fmt.Errorf("the blob in the mount request with digest: %s doesn't exist", dgt.String())
+ }
+
+ return &util.BlobInfo{
+ ProjectID: project.ProjectID,
+ Repository: repository,
+ Digest: dgt.String(),
+ Size: blob.Size,
+ }, nil
+}
+
+// getBlobInfoParser returns the blob info parse function for the request:
+// parseBlobInfoFromComplete when the request matches PUT /v2/<name>/blobs/uploads/<uuid>?digest=<digest>
+// parseBlobInfoFromMount when the request matches POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository>
+func getBlobInfoParser(req *http.Request) func(*http.Request) (*util.BlobInfo, error) {
+ if match(req, http.MethodPut, blobUploadURLRe) {
+ if req.FormValue("digest") != "" {
+ return parseBlobInfoFromComplete
+ }
+ }
+
+ if match(req, http.MethodPost, initiateBlobUploadURLRe) {
+ if req.FormValue("mount") != "" && req.FormValue("from") != "" {
+ return parseBlobInfoFromMount
+ }
+ }
+
+ return nil
+}
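(A hedged sketch of how a size-quota interceptor might consume this dispatcher; the handler wiring itself is not part of this hunk, and the "QUOTA" error code is illustrative:

    if parse := getBlobInfoParser(req); parse != nil {
        info, err := parse(req)
        if err != nil {
            http.Error(rw, util.MarshalError("QUOTA", err.Error()), http.StatusInternalServerError)
            return
        }
        // stash the blob info so computeResourcesForBlob can find it later
        *req = *(req.WithContext(util.NewBlobInfoContext(req.Context(), info)))
    }
)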
+
+// computeResourcesForBlob returns the storage required for the blob; no storage is required if the blob already exists in the project
+func computeResourcesForBlob(req *http.Request) (types.ResourceList, error) {
+ info, ok := util.BlobInfoFromContext(req.Context())
+ if !ok {
+ return nil, errors.New("blob info missing")
+ }
+
+ exist, err := info.BlobExists()
+ if err != nil {
+ return nil, err
+ }
+
+ if exist {
+ return nil, nil
+ }
+
+ return types.ResourceList{types.ResourceStorage: info.Size}, nil
+}
+
+// computeResourcesForManifestCreation returns the storage resource required for the manifest:
+// no storage is required if the manifest already exists in the project;
+// otherwise the result is the size of the manifest itself plus the sizes of its blobs not yet in the project
+func computeResourcesForManifestCreation(req *http.Request) (types.ResourceList, error) {
+ info, ok := util.ManifestInfoFromContext(req.Context())
+ if !ok {
+ return nil, errors.New("manifest info missing")
+ }
+
+ exist, err := info.ManifestExists()
+ if err != nil {
+ return nil, err
+ }
+
+ // manifest exist in project, so no storage quota required
+ if exist {
+ return nil, nil
+ }
+
+ blobs, err := info.GetBlobsNotInProject()
+ if err != nil {
+ return nil, err
+ }
+
+ size := info.Descriptor.Size
+
+ for _, blob := range blobs {
+ size += blob.Size
+ }
+
+ return types.ResourceList{types.ResourceStorage: size}, nil
+}
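(As a worked example with hypothetical numbers: creating a manifest whose descriptor reports 428 bytes, with layers of 2, 3, 4 and 5 bytes of which only the 3-byte layer is already in the project, requires 428 + 2 + 4 + 5 = 439 bytes of storage quota; a repeated push of the same manifest to the same repository requires none.)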
+
+// computeResourcesForManifestDeletion returns the storage resource that will be released when the manifest is deleted:
+// the size of the manifest itself plus the sizes of its blobs that are not used by other manifests in the project
+func computeResourcesForManifestDeletion(req *http.Request) (types.ResourceList, error) {
+ info, ok := util.ManifestInfoFromContext(req.Context())
+ if !ok {
+ return nil, errors.New("manifest info missing")
+ }
+
+ blobs, err := dao.GetExclusiveBlobs(info.ProjectID, info.Repository, info.Digest)
+ if err != nil {
+ return nil, err
+ }
+
+ info.ExclusiveBlobs = blobs
+
+ blob, err := dao.GetBlob(info.Digest)
+ if err != nil {
+ return nil, err
+ }
+
+ // manifest size will always be released
+ size := blob.Size
+
+ for _, blob := range blobs {
+ size = size + blob.Size
+ }
+
+ return types.ResourceList{types.ResourceStorage: size}, nil
+}
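(Deletion mirrors creation: the manifest blob's size is always released, while layer and config blobs are only released when GetExclusiveBlobs reports that no other manifest in the project still references them; this is exactly the behavior the TestDeleteManifestShareLayers* cases above assert.)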
+
+// syncBlobInfoToProject creates the blob and adds it to the project
+func syncBlobInfoToProject(info *util.BlobInfo) error {
+ _, blob, err := dao.GetOrCreateBlob(&models.Blob{
+ Digest: info.Digest,
+ ContentType: info.ContentType,
+ Size: info.Size,
+ CreationTime: time.Now(),
+ })
+ if err != nil {
+ return err
+ }
+
+ if _, err := dao.AddBlobToProject(blob.ID, info.ProjectID); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/src/core/middlewares/url/handler.go b/src/core/middlewares/url/handler.go
new file mode 100644
index 000000000..07e1a0f3f
--- /dev/null
+++ b/src/core/middlewares/url/handler.go
@@ -0,0 +1,74 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package url
+
+import (
+ "context"
+ "fmt"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ coreutils "github.com/goharbor/harbor/src/core/utils"
+ "net/http"
+ "strings"
+)
+
+type urlHandler struct {
+ next http.Handler
+}
+
+// New ...
+func New(next http.Handler) http.Handler {
+ return &urlHandler{
+ next: next,
+ }
+}
+
+// ServeHTTP ...
+func (uh urlHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ log.Debugf("in url handler, path: %s", req.URL.Path)
+ flag, repository, reference := util.MatchPullManifest(req)
+ if flag {
+ components := strings.SplitN(repository, "/", 2)
+ if len(components) < 2 {
+ http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("Bad repository name: %s", repository)), http.StatusBadRequest)
+ return
+ }
+
+ client, err := coreutils.NewRepositoryClientForUI(util.TokenUsername, repository)
+ if err != nil {
+ log.Errorf("Error creating repository Client: %v", err)
+ http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("Failed due to internal Error: %v", err)), http.StatusInternalServerError)
+ return
+ }
+ digest, _, err := client.ManifestExist(reference)
+ if err != nil {
+ log.Errorf("Failed to get digest for reference: %s, error: %v", reference, err)
+ http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("Failed due to internal Error: %v", err)), http.StatusInternalServerError)
+ return
+ }
+
+ img := util.ImageInfo{
+ Repository: repository,
+ Reference: reference,
+ ProjectName: components[0],
+ Digest: digest,
+ }
+
+ log.Debugf("image info of the request: %#v", img)
+ ctx := context.WithValue(req.Context(), util.ImageInfoCtxKey, img)
+ req = req.WithContext(ctx)
+ }
+ uh.next.ServeHTTP(rw, req)
+}
diff --git a/src/core/middlewares/util/response.go b/src/core/middlewares/util/response.go
new file mode 100644
index 000000000..48e3f0cda
--- /dev/null
+++ b/src/core/middlewares/util/response.go
@@ -0,0 +1,59 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "net/http"
+)
+
+// CustomResponseWriter wraps an http.ResponseWriter and records the written response code in status
+type CustomResponseWriter struct {
+ http.ResponseWriter
+ status int
+ wroteHeader bool
+}
+
+// NewCustomResponseWriter ...
+func NewCustomResponseWriter(w http.ResponseWriter) *CustomResponseWriter {
+ return &CustomResponseWriter{ResponseWriter: w}
+}
+
+// Status ...
+func (w *CustomResponseWriter) Status() int {
+ return w.status
+}
+
+// Header ...
+func (w *CustomResponseWriter) Header() http.Header {
+ return w.ResponseWriter.Header()
+}
+
+// Write ...
+func (w *CustomResponseWriter) Write(p []byte) (n int, err error) {
+ if !w.wroteHeader {
+ w.WriteHeader(http.StatusOK)
+ }
+ return w.ResponseWriter.Write(p)
+}
+
+// WriteHeader records the status code once and forwards it to the underlying writer
+func (w *CustomResponseWriter) WriteHeader(code int) {
+	if w.wroteHeader {
+		return
+	}
+	w.status = code
+	w.wroteHeader = true
+	w.ResponseWriter.WriteHeader(code)
+}
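(A minimal usage sketch; the middleware wiring is assumed, not shown here. Wrap the downstream writer, let the registry respond, then branch on the recorded status:

    rw := NewCustomResponseWriter(w) // w is the original http.ResponseWriter
    next.ServeHTTP(rw, req)
    if rw.Status() == http.StatusCreated {
        // e.g. commit a quota reservation only after a successful upload
    }
)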
diff --git a/src/replication/adapter/image_registry_test.go b/src/core/middlewares/util/response_test.go
similarity index 61%
rename from src/replication/adapter/image_registry_test.go
rename to src/core/middlewares/util/response_test.go
index 157471d41..40ec59c4e 100644
--- a/src/replication/adapter/image_registry_test.go
+++ b/src/core/middlewares/util/response_test.go
@@ -12,35 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package adapter
+package util
import (
+ "net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
)
-// TODO add UT
-
-func TestIsDigest(t *testing.T) {
- cases := []struct {
- str string
- isDigest bool
- }{
- {
- str: "",
- isDigest: false,
- },
- {
- str: "latest",
- isDigest: false,
- },
- {
- str: "sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
- isDigest: true,
- },
- }
- for _, c := range cases {
- assert.Equal(t, c.isDigest, isDigest(c.str))
- }
+func TestCustomResponseWriter(t *testing.T) {
+ rw := httptest.NewRecorder()
+ customResW := CustomResponseWriter{ResponseWriter: rw}
+ customResW.WriteHeader(501)
+ assert.Equal(t, customResW.Status(), 501)
}
diff --git a/src/core/middlewares/util/util.go b/src/core/middlewares/util/util.go
new file mode 100644
index 000000000..7b8d2839e
--- /dev/null
+++ b/src/core/middlewares/util/util.go
@@ -0,0 +1,528 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/manifest/schema1"
+ "github.com/docker/distribution/manifest/schema2"
+ "github.com/garyburd/redigo/redis"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils"
+ "github.com/goharbor/harbor/src/common/utils/clair"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/promgr"
+ "github.com/goharbor/harbor/src/pkg/scan/whitelist"
+ "github.com/opencontainers/go-digest"
+)
+
+type contextKey string
+
+const (
+ // ImageInfoCtxKey the context key for image information
+ ImageInfoCtxKey = contextKey("ImageInfo")
+ // TokenUsername ...
+ // TODO: temp solution, remove after vmware/harbor#2242 is resolved.
+ TokenUsername = "harbor-core"
+
+ // blobInfoKey the context key for blob info
+ blobInfoKey = contextKey("BlobInfo")
+ // chartVersionInfoKey the context key for chart version info
+ chartVersionInfoKey = contextKey("ChartVersionInfo")
+ // manifestInfoKey the context key for manifest info
+ manifestInfoKey = contextKey("ManifestInfo")
+
+ // DialConnectionTimeout ...
+ DialConnectionTimeout = 30 * time.Second
+ // DialReadTimeout ...
+ DialReadTimeout = time.Minute + 10*time.Second
+ // DialWriteTimeout ...
+ DialWriteTimeout = 10 * time.Second
+)
+
+var (
+ manifestURLRe = regexp.MustCompile(`^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)manifests/([\w][\w.:-]{0,127})`)
+)
+
+// ChartVersionInfo ...
+type ChartVersionInfo struct {
+ ProjectID int64
+ Namespace string
+ ChartName string
+ Version string
+}
+
+// MutexKey returns mutex key of the chart version
+func (info *ChartVersionInfo) MutexKey(suffix ...string) string {
+ a := []string{"quota", info.Namespace, "chart", info.ChartName, "version", info.Version}
+
+ return strings.Join(append(a, suffix...), ":")
+}
+
+// ImageInfo ...
+type ImageInfo struct {
+ Repository string
+ Reference string
+ ProjectName string
+ Digest string
+}
+
+// BlobInfo ...
+type BlobInfo struct {
+ ProjectID int64
+ ContentType string
+ Size int64
+ Repository string
+ Digest string
+
+ blobExist bool
+ blobExistErr error
+ blobExistOnce sync.Once
+}
+
+// BlobExists returns true when blob exists in the project
+func (info *BlobInfo) BlobExists() (bool, error) {
+ info.blobExistOnce.Do(func() {
+ info.blobExist, info.blobExistErr = dao.HasBlobInProject(info.ProjectID, info.Digest)
+ })
+
+ return info.blobExist, info.blobExistErr
+}
+
+// MutexKey returns mutex key of the blob
+func (info *BlobInfo) MutexKey(suffix ...string) string {
+ projectName, _ := utils.ParseRepository(info.Repository)
+ a := []string{"quota", projectName, "blob", info.Digest}
+
+ return strings.Join(append(a, suffix...), ":")
+}
+
+// ManifestInfo ...
+type ManifestInfo struct {
+ // basic information of a manifest
+ ProjectID int64
+ Repository string
+ Tag string
+ Digest string
+
+ References []distribution.Descriptor
+ Descriptor distribution.Descriptor
+
+	// manifestExist indicates whether the manifest, indexed by (repository, digest), already exists in the DB
+ manifestExist bool
+ manifestExistErr error
+ manifestExistOnce sync.Once
+
+	// artifact is the artifact indexed by (repository, tag) in the DB
+ artifact *models.Artifact
+ artifactErr error
+ artifactOnce sync.Once
+
+	// ExclusiveBlobs are the blobs that belong to this manifest only,
+	// excluding blobs shared with other manifests in the same repo (project/repository).
+ ExclusiveBlobs []*models.Blob
+}
+
+// MutexKey returns mutex key of the manifest
+func (info *ManifestInfo) MutexKey(suffix ...string) string {
+ projectName, _ := utils.ParseRepository(info.Repository)
+ var a []string
+
+ if info.Tag != "" {
+		// the tag is not empty for PUT /v2/<name>/manifests/<tag>;
+		// lock by tag to compute the count resource required by quota
+ a = []string{"quota", projectName, "manifest", info.Tag}
+ } else {
+ a = []string{"quota", projectName, "manifest", info.Digest}
+ }
+
+ return strings.Join(append(a, suffix...), ":")
+}
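(For example, a push of library/photon:latest locks on quota:library:manifest:latest, while a delete by digest locks on quota:library:manifest:sha256:..., and any suffix arguments are appended as further colon-separated segments.)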
+
+// BlobMutexKey returns mutex key of the blob in manifest
+func (info *ManifestInfo) BlobMutexKey(blob *models.Blob, suffix ...string) string {
+ projectName, _ := utils.ParseRepository(info.Repository)
+ a := []string{"quota", projectName, "blob", blob.Digest}
+
+ return strings.Join(append(a, suffix...), ":")
+}
+
+// GetBlobsNotInProject returns the blobs of the manifest which are not in the project
+func (info *ManifestInfo) GetBlobsNotInProject() ([]*models.Blob, error) {
+ var digests []string
+ for _, reference := range info.References {
+ digests = append(digests, reference.Digest.String())
+ }
+
+ blobs, err := dao.GetBlobsNotInProject(info.ProjectID, digests...)
+ if err != nil {
+ return nil, err
+ }
+
+ return blobs, nil
+}
+
+func (info *ManifestInfo) fetchArtifact() (*models.Artifact, error) {
+ info.artifactOnce.Do(func() {
+ info.artifact, info.artifactErr = dao.GetArtifact(info.Repository, info.Tag)
+ })
+
+ return info.artifact, info.artifactErr
+}
+
+// IsNewTag returns true if the tag of the manifest does not exist in the project
+func (info *ManifestInfo) IsNewTag() bool {
+ artifact, _ := info.fetchArtifact()
+
+ return artifact == nil
+}
+
+// Artifact returns artifact of the manifest
+func (info *ManifestInfo) Artifact() *models.Artifact {
+ result := &models.Artifact{
+ PID: info.ProjectID,
+ Repo: info.Repository,
+ Tag: info.Tag,
+ Digest: info.Digest,
+ Kind: "Docker-Image",
+ }
+
+ if artifact, _ := info.fetchArtifact(); artifact != nil {
+ result.ID = artifact.ID
+ result.CreationTime = artifact.CreationTime
+ result.PushTime = time.Now()
+ }
+
+ return result
+}
+
+// ManifestExists returns true if the manifest exists in the repository
+func (info *ManifestInfo) ManifestExists() (bool, error) {
+ info.manifestExistOnce.Do(func() {
+ total, err := dao.GetTotalOfArtifacts(&models.ArtifactQuery{
+ PID: info.ProjectID,
+ Repo: info.Repository,
+ Digest: info.Digest,
+ })
+
+ info.manifestExist = total > 0
+ info.manifestExistErr = err
+ })
+
+ return info.manifestExist, info.manifestExistErr
+}
+
+// JSONError wraps a concrete Code and Message; it is readable by the docker daemon.
+type JSONError struct {
+ Code string `json:"code,omitempty"`
+ Message string `json:"message,omitempty"`
+ Detail string `json:"detail,omitempty"`
+}
+
+// MarshalError ...
+func MarshalError(code, msg string) string {
+ var tmpErrs struct {
+ Errors []JSONError `json:"errors,omitempty"`
+ }
+ tmpErrs.Errors = append(tmpErrs.Errors, JSONError{
+ Code: code,
+ Message: msg,
+ Detail: msg,
+ })
+ str, err := json.Marshal(tmpErrs)
+ if err != nil {
+ log.Debugf("failed to marshal json error, %v", err)
+ return msg
+ }
+ return string(str)
+}
+
+// MatchManifestURL ...
+func MatchManifestURL(req *http.Request) (bool, string, string) {
+ s := manifestURLRe.FindStringSubmatch(req.URL.Path)
+ if len(s) == 3 {
+ s[1] = strings.TrimSuffix(s[1], "/")
+ return true, s[1], s[2]
+ }
+ return false, "", ""
+}
+
+// MatchPullManifest checks if the request looks like a request to pull a manifest. If it does, it returns the repository and tag/sha256 digest as the 2nd and 3rd return values
+func MatchPullManifest(req *http.Request) (bool, string, string) {
+ if req.Method != http.MethodGet {
+ return false, "", ""
+ }
+ return MatchManifestURL(req)
+}
+
+// MatchPushManifest checks if the request looks like a request to push a manifest. If it does, it returns the repository and tag/sha256 digest as the 2nd and 3rd return values
+func MatchPushManifest(req *http.Request) (bool, string, string) {
+ if req.Method != http.MethodPut {
+ return false, "", ""
+ }
+ return MatchManifestURL(req)
+}
+
+// MatchDeleteManifest checks if the request looks like a request to delete a manifest; only a digest is accepted as the reference
+func MatchDeleteManifest(req *http.Request) (match bool, repository string, reference string) {
+ if req.Method != http.MethodDelete {
+ return
+ }
+
+ match, repository, reference = MatchManifestURL(req)
+ if _, err := digest.Parse(reference); err != nil {
+ // Delete manifest only accept digest as reference
+ match = false
+
+ return
+ }
+
+ return
+}
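(For instance, DELETE /v2/library/photon/manifests/sha256:3e17b6... matches and yields ("library/photon", "sha256:3e17b6..."), whereas DELETE /v2/library/photon/manifests/latest does not match, since manifest deletion only accepts a digest reference.)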
+
+// CopyResp ...
+func CopyResp(rec *httptest.ResponseRecorder, rw http.ResponseWriter) {
+ for k, v := range rec.Header() {
+ rw.Header()[k] = v
+ }
+ rw.WriteHeader(rec.Result().StatusCode)
+ rw.Write(rec.Body.Bytes())
+}
+
+// PolicyChecker checks the policy of a project by project name, to determine whether the image's status needs to be checked under this project.
+type PolicyChecker interface {
+	// ContentTrustEnabled returns whether a project has enabled content trust.
+ ContentTrustEnabled(name string) bool
+	// VulnerablePolicy returns whether a project has its vulnerability policy enabled, along with the severity threshold and the CVE whitelist.
+ VulnerablePolicy(name string) (bool, models.Severity, models.CVEWhitelist)
+}
+
+// PmsPolicyChecker ...
+type PmsPolicyChecker struct {
+ pm promgr.ProjectManager
+}
+
+// ContentTrustEnabled ...
+func (pc PmsPolicyChecker) ContentTrustEnabled(name string) bool {
+ project, err := pc.pm.Get(name)
+ if err != nil {
+ log.Errorf("Unexpected error when getting the project, error: %v", err)
+ return true
+ }
+ return project.ContentTrustEnabled()
+}
+
+// VulnerablePolicy ...
+func (pc PmsPolicyChecker) VulnerablePolicy(name string) (bool, models.Severity, models.CVEWhitelist) {
+ project, err := pc.pm.Get(name)
+ wl := models.CVEWhitelist{}
+ if err != nil {
+ log.Errorf("Unexpected error when getting the project, error: %v", err)
+ return true, models.SevUnknown, wl
+ }
+ mgr := whitelist.NewDefaultManager()
+ if project.ReuseSysCVEWhitelist() {
+ w, err := mgr.GetSys()
+ if err != nil {
+ return project.VulPrevented(), clair.ParseClairSev(project.Severity()), wl
+ }
+ wl = *w
+ } else {
+ w, err := mgr.Get(project.ProjectID)
+ if err != nil {
+ return project.VulPrevented(), clair.ParseClairSev(project.Severity()), wl
+ }
+ wl = *w
+ }
+	return project.VulPrevented(), clair.ParseClairSev(project.Severity()), wl
+}
+
+// NewPMSPolicyChecker returns an instance of a PmsPolicyChecker
+func NewPMSPolicyChecker(pm promgr.ProjectManager) PolicyChecker {
+ return &PmsPolicyChecker{
+ pm: pm,
+ }
+}
+
+// GetPolicyChecker ...
+func GetPolicyChecker() PolicyChecker {
+ return NewPMSPolicyChecker(config.GlobalProjectMgr)
+}
+
+// GetRegRedisCon ...
+func GetRegRedisCon() (redis.Conn, error) {
+ // FOR UT
+ if os.Getenv("UTTEST") == "true" {
+ return redis.Dial(
+ "tcp",
+ fmt.Sprintf("%s:%d", os.Getenv("REDIS_HOST"), 6379),
+ redis.DialConnectTimeout(DialConnectionTimeout),
+ redis.DialReadTimeout(DialReadTimeout),
+ redis.DialWriteTimeout(DialWriteTimeout),
+ )
+ }
+ return redis.DialURL(
+ config.GetRedisOfRegURL(),
+ redis.DialConnectTimeout(DialConnectionTimeout),
+ redis.DialReadTimeout(DialReadTimeout),
+ redis.DialWriteTimeout(DialWriteTimeout),
+ )
+}
+
+// BlobInfoFromContext returns blob info from context
+func BlobInfoFromContext(ctx context.Context) (*BlobInfo, bool) {
+ info, ok := ctx.Value(blobInfoKey).(*BlobInfo)
+ return info, ok
+}
+
+// ChartVersionInfoFromContext returns chart version info from context
+func ChartVersionInfoFromContext(ctx context.Context) (*ChartVersionInfo, bool) {
+ info, ok := ctx.Value(chartVersionInfoKey).(*ChartVersionInfo)
+ return info, ok
+}
+
+// ImageInfoFromContext returns image info from context
+func ImageInfoFromContext(ctx context.Context) (*ImageInfo, bool) {
+ info, ok := ctx.Value(ImageInfoCtxKey).(*ImageInfo)
+ return info, ok
+}
+
+// ManifestInfoFromContext returns manifest info from context
+func ManifestInfoFromContext(ctx context.Context) (*ManifestInfo, bool) {
+ info, ok := ctx.Value(manifestInfoKey).(*ManifestInfo)
+ return info, ok
+}
+
+// NewBlobInfoContext returns context with blob info
+func NewBlobInfoContext(ctx context.Context, info *BlobInfo) context.Context {
+ return context.WithValue(ctx, blobInfoKey, info)
+}
+
+// NewChartVersionInfoContext returns context with chart version info
+func NewChartVersionInfoContext(ctx context.Context, info *ChartVersionInfo) context.Context {
+ return context.WithValue(ctx, chartVersionInfoKey, info)
+}
+
+// NewImageInfoContext returns context with image info
+func NewImageInfoContext(ctx context.Context, info *ImageInfo) context.Context {
+ return context.WithValue(ctx, ImageInfoCtxKey, info)
+}
+
+// NewManifestInfoContext returns context with manifest info
+func NewManifestInfoContext(ctx context.Context, info *ManifestInfo) context.Context {
+ return context.WithValue(ctx, manifestInfoKey, info)
+}
+
+// ParseManifestInfo parses the manifest info from the request
+func ParseManifestInfo(req *http.Request) (*ManifestInfo, error) {
+ match, repository, reference := MatchManifestURL(req)
+ if !match {
+ return nil, fmt.Errorf("not match url %s for manifest", req.URL.Path)
+ }
+
+ var tag string
+ if _, err := digest.Parse(reference); err != nil {
+ tag = reference
+ }
+
+ mediaType := req.Header.Get("Content-Type")
+ if mediaType != schema1.MediaTypeManifest &&
+ mediaType != schema1.MediaTypeSignedManifest &&
+ mediaType != schema2.MediaTypeManifest {
+ return nil, fmt.Errorf("unsupported content type for manifest: %s", mediaType)
+ }
+
+ if req.Body == nil {
+ return nil, fmt.Errorf("body missing")
+ }
+
+ body, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+		log.Warningf("Error occurred while copying the manifest body: %v", err)
+ return nil, err
+ }
+ req.Body = ioutil.NopCloser(bytes.NewBuffer(body))
+
+ manifest, desc, err := distribution.UnmarshalManifest(mediaType, body)
+ if err != nil {
+		log.Warningf("Error occurred while unmarshalling the manifest: %v", err)
+ return nil, err
+ }
+
+ projectName, _ := utils.ParseRepository(repository)
+ project, err := dao.GetProjectByName(projectName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err)
+ }
+ if project == nil {
+ return nil, fmt.Errorf("project %s not found", projectName)
+ }
+
+ return &ManifestInfo{
+ ProjectID: project.ProjectID,
+ Repository: repository,
+ Tag: tag,
+ Digest: desc.Digest.String(),
+ References: manifest.References(),
+ Descriptor: desc,
+ }, nil
+}
+
+// ParseManifestInfoFromPath parses the manifest info from the request path
+func ParseManifestInfoFromPath(req *http.Request) (*ManifestInfo, error) {
+ match, repository, reference := MatchManifestURL(req)
+ if !match {
+ return nil, fmt.Errorf("not match url %s for manifest", req.URL.Path)
+ }
+
+ projectName, _ := utils.ParseRepository(repository)
+ project, err := dao.GetProjectByName(projectName)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get project %s, error: %v", projectName, err)
+ }
+ if project == nil {
+ return nil, fmt.Errorf("project %s not found", projectName)
+ }
+
+ info := &ManifestInfo{
+ ProjectID: project.ProjectID,
+ Repository: repository,
+ }
+
+ dgt, err := digest.Parse(reference)
+ if err != nil {
+ info.Tag = reference
+ } else {
+ info.Digest = dgt.String()
+ }
+
+ return info, nil
+}
diff --git a/src/core/proxy/interceptor_test.go b/src/core/middlewares/util/util_test.go
similarity index 54%
rename from src/core/proxy/interceptor_test.go
rename to src/core/middlewares/util/util_test.go
index ab73f27bf..e02229ad9 100644
--- a/src/core/proxy/interceptor_test.go
+++ b/src/core/middlewares/util/util_test.go
@@ -1,30 +1,49 @@
-package proxy
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
import (
- "github.com/goharbor/harbor/src/common"
- "github.com/goharbor/harbor/src/common/models"
- notarytest "github.com/goharbor/harbor/src/common/utils/notary/test"
- testutils "github.com/goharbor/harbor/src/common/utils/test"
- "github.com/goharbor/harbor/src/core/config"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
+ "bytes"
+ "encoding/json"
"net/http"
"net/http/httptest"
"os"
+ "reflect"
"testing"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/manifest"
+ "github.com/docker/distribution/manifest/schema2"
+ "github.com/goharbor/harbor/src/common"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils"
+ notarytest "github.com/goharbor/harbor/src/common/utils/notary/test"
+ testutils "github.com/goharbor/harbor/src/common/utils/test"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
var endpoint = "10.117.4.142"
var notaryServer *httptest.Server
-var admiralEndpoint = "http://127.0.0.1:8282"
-var token = ""
-
func TestMain(m *testing.M) {
+ testutils.InitDatabaseFromEnv()
notaryServer = notarytest.NewNotaryServer(endpoint)
defer notaryServer.Close()
- NotaryEndpoint = notaryServer.URL
var defaultConfig = map[string]interface{}{
common.ExtEndpoint: "https://" + endpoint,
common.WithNotary: true,
@@ -125,22 +144,6 @@ func TestMatchPushManifest(t *testing.T) {
assert.Equal("14.04", tag8)
}
-func TestMatchListRepos(t *testing.T) {
- assert := assert.New(t)
- req1, _ := http.NewRequest("POST", "http://127.0.0.1:5000/v2/_catalog", nil)
- res1 := MatchListRepos(req1)
- assert.False(res1, "%s %v is not a request to list repos", req1.Method, req1.URL)
-
- req2, _ := http.NewRequest("GET", "http://127.0.0.1:5000/v2/_catalog", nil)
- res2 := MatchListRepos(req2)
- assert.True(res2, "%s %v is a request to list repos", req2.Method, req2.URL)
-
- req3, _ := http.NewRequest("GET", "https://192.168.0.5:443/v1/_catalog", nil)
- res3 := MatchListRepos(req3)
- assert.False(res3, "%s %v is not a request to pull manifest", req3.Method, req3.URL)
-
-}
-
func TestPMSPolicyChecker(t *testing.T) {
var defaultConfigAdmiral = map[string]interface{}{
common.ExtEndpoint: "https://" + endpoint,
@@ -157,7 +160,6 @@ func TestPMSPolicyChecker(t *testing.T) {
if err := config.Init(); err != nil {
panic(err)
}
- testutils.InitDatabaseFromEnv()
config.Upload(defaultConfigAdmiral)
@@ -166,9 +168,10 @@ func TestPMSPolicyChecker(t *testing.T) {
Name: name,
OwnerID: 1,
Metadata: map[string]string{
- models.ProMetaEnableContentTrust: "true",
- models.ProMetaPreventVul: "true",
- models.ProMetaSeverity: "low",
+ models.ProMetaEnableContentTrust: "true",
+ models.ProMetaPreventVul: "true",
+ models.ProMetaSeverity: "low",
+ models.ProMetaReuseSysCVEWhitelist: "false",
},
})
require.Nil(t, err)
@@ -178,26 +181,12 @@ func TestPMSPolicyChecker(t *testing.T) {
}
}(id)
- contentTrustFlag := getPolicyChecker().contentTrustEnabled("project_for_test_get_sev_low")
+ contentTrustFlag := GetPolicyChecker().ContentTrustEnabled("project_for_test_get_sev_low")
assert.True(t, contentTrustFlag)
- projectVulnerableEnabled, projectVulnerableSeverity := getPolicyChecker().vulnerablePolicy("project_for_test_get_sev_low")
+ projectVulnerableEnabled, projectVulnerableSeverity, wl := GetPolicyChecker().VulnerablePolicy("project_for_test_get_sev_low")
assert.True(t, projectVulnerableEnabled)
assert.Equal(t, projectVulnerableSeverity, models.SevLow)
-}
-
-func TestMatchNotaryDigest(t *testing.T) {
- assert := assert.New(t)
- // The data from common/utils/notary/helper_test.go
- img1 := imageInfo{"notary-demo/busybox", "1.0", "notary-demo", "sha256:1359608115b94599e5641638bac5aef1ddfaa79bb96057ebf41ebc8d33acf8a7"}
- img2 := imageInfo{"notary-demo/busybox", "2.0", "notary-demo", "sha256:12345678"}
-
- res1, err := matchNotaryDigest(img1)
- assert.Nil(err, "Unexpected error: %v, image: %#v", err, img1)
- assert.True(res1)
-
- res2, err := matchNotaryDigest(img2)
- assert.Nil(err, "Unexpected error: %v, image: %#v, take 2", err, img2)
- assert.False(res2)
+ assert.Empty(t, wl.Items)
}
func TestCopyResp(t *testing.T) {
@@ -206,21 +195,207 @@ func TestCopyResp(t *testing.T) {
rec2 := httptest.NewRecorder()
rec1.Header().Set("X-Test", "mytest")
rec1.WriteHeader(418)
- copyResp(rec1, rec2)
+ CopyResp(rec1, rec2)
assert.Equal(418, rec2.Result().StatusCode)
assert.Equal("mytest", rec2.Header().Get("X-Test"))
}
func TestMarshalError(t *testing.T) {
assert := assert.New(t)
- js1 := marshalError("PROJECT_POLICY_VIOLATION", "Not Found")
+ js1 := MarshalError("PROJECT_POLICY_VIOLATION", "Not Found")
assert.Equal("{\"errors\":[{\"code\":\"PROJECT_POLICY_VIOLATION\",\"message\":\"Not Found\",\"detail\":\"Not Found\"}]}", js1)
- js2 := marshalError("DENIED", "The action is denied")
+ js2 := MarshalError("DENIED", "The action is denied")
assert.Equal("{\"errors\":[{\"code\":\"DENIED\",\"message\":\"The action is denied\",\"detail\":\"The action is denied\"}]}", js2)
}
-func TestIsDigest(t *testing.T) {
- assert := assert.New(t)
- assert.False(isDigest("latest"))
- assert.True(isDigest("sha256:1359608115b94599e5641638bac5aef1ddfaa79bb96057ebf41ebc8d33acf8a7"))
+func makeManifest(configSize int64, layerSizes []int64) schema2.Manifest {
+ manifest := schema2.Manifest{
+ Versioned: manifest.Versioned{SchemaVersion: 2, MediaType: schema2.MediaTypeManifest},
+ Config: distribution.Descriptor{
+ MediaType: schema2.MediaTypeImageConfig,
+ Size: configSize,
+ Digest: digest.FromString(utils.GenerateRandomString()),
+ },
+ }
+
+ for _, size := range layerSizes {
+ manifest.Layers = append(manifest.Layers, distribution.Descriptor{
+ MediaType: schema2.MediaTypeLayer,
+ Size: size,
+ Digest: digest.FromString(utils.GenerateRandomString()),
+ })
+ }
+
+ return manifest
+}
+
+func getDescriptor(manifest schema2.Manifest) distribution.Descriptor {
+ buf, _ := json.Marshal(manifest)
+ _, desc, _ := distribution.UnmarshalManifest(manifest.Versioned.MediaType, buf)
+ return desc
+}
+
+func TestParseManifestInfo(t *testing.T) {
+ manifest := makeManifest(1, []int64{2, 3, 4})
+
+ tests := []struct {
+ name string
+ req func() *http.Request
+ want *ManifestInfo
+ wantErr bool
+ }{
+ {
+ "ok",
+ func() *http.Request {
+ buf, _ := json.Marshal(manifest)
+ req, _ := http.NewRequest(http.MethodPut, "/v2/library/photon/manifests/latest", bytes.NewReader(buf))
+ req.Header.Add("Content-Type", manifest.MediaType)
+
+ return req
+ },
+ &ManifestInfo{
+ ProjectID: 1,
+ Repository: "library/photon",
+ Tag: "latest",
+ Digest: getDescriptor(manifest).Digest.String(),
+ References: manifest.References(),
+ Descriptor: getDescriptor(manifest),
+ },
+ false,
+ },
+ {
+ "bad content type",
+ func() *http.Request {
+ buf, _ := json.Marshal(manifest)
+ req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader(buf))
+ req.Header.Add("Content-Type", "application/json")
+
+ return req
+ },
+ nil,
+ true,
+ },
+ {
+ "bad manifest",
+ func() *http.Request {
+ req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader([]byte("")))
+ req.Header.Add("Content-Type", schema2.MediaTypeManifest)
+
+ return req
+ },
+ nil,
+ true,
+ },
+ {
+ "body missing",
+ func() *http.Request {
+ req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", nil)
+ req.Header.Add("Content-Type", schema2.MediaTypeManifest)
+
+ return req
+ },
+ nil,
+ true,
+ },
+ {
+ "project not found",
+ func() *http.Request {
+
+ buf, _ := json.Marshal(manifest)
+
+ req, _ := http.NewRequest(http.MethodPut, "/v2/notfound/photon/manifests/latest", bytes.NewReader(buf))
+ req.Header.Add("Content-Type", manifest.MediaType)
+
+ return req
+ },
+ nil,
+ true,
+ },
+ {
+ "url not match",
+ func() *http.Request {
+ buf, _ := json.Marshal(manifest)
+ req, _ := http.NewRequest(http.MethodPut, "/v2/library/photon/manifest/latest", bytes.NewReader(buf))
+ req.Header.Add("Content-Type", manifest.MediaType)
+
+ return req
+ },
+ nil,
+ true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := ParseManifestInfo(tt.req())
+ if (err != nil) != tt.wantErr {
+ t.Errorf("ParseManifestInfo() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("ParseManifestInfo() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestParseManifestInfoFromPath(t *testing.T) {
+ mustRequest := func(method, url string) *http.Request {
+ req, _ := http.NewRequest(method, url, nil)
+ return req
+ }
+
+ type args struct {
+ req *http.Request
+ }
+ tests := []struct {
+ name string
+ args args
+ want *ManifestInfo
+ wantErr bool
+ }{
+ {
+ "ok for digest",
+ args{mustRequest(http.MethodDelete, "/v2/library/photon/manifests/sha256:3e17b60ab9d92d953fb8ebefa25624c0d23fb95f78dde5572285d10158044059")},
+ &ManifestInfo{
+ ProjectID: 1,
+ Repository: "library/photon",
+ Digest: "sha256:3e17b60ab9d92d953fb8ebefa25624c0d23fb95f78dde5572285d10158044059",
+ },
+ false,
+ },
+ {
+ "ok for tag",
+ args{mustRequest(http.MethodDelete, "/v2/library/photon/manifests/latest")},
+ &ManifestInfo{
+ ProjectID: 1,
+ Repository: "library/photon",
+ Tag: "latest",
+ },
+ false,
+ },
+ {
+ "project not found",
+ args{mustRequest(http.MethodDelete, "/v2/notfound/photon/manifests/latest")},
+ nil,
+ true,
+ },
+ {
+ "url not match",
+ args{mustRequest(http.MethodDelete, "/v2/library/photon/manifest/latest")},
+ nil,
+ true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, err := ParseManifestInfoFromPath(tt.args.req)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("ParseManifestInfoFromPath() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("ParseManifestInfoFromPath() = %v, want %v", got, tt.want)
+ }
+ })
+ }
}
diff --git a/src/core/middlewares/vulnerable/handler.go b/src/core/middlewares/vulnerable/handler.go
new file mode 100644
index 000000000..67d1b97ce
--- /dev/null
+++ b/src/core/middlewares/vulnerable/handler.go
@@ -0,0 +1,80 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vulnerable
+
+import (
+ "fmt"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/middlewares/util"
+ "github.com/goharbor/harbor/src/pkg/scan"
+ "net/http"
+)
+
+type vulnerableHandler struct {
+ next http.Handler
+}
+
+// New ...
+func New(next http.Handler) http.Handler {
+ return &vulnerableHandler{
+ next: next,
+ }
+}
+
+// ServeHTTP ...
+func (vh vulnerableHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ imgRaw := req.Context().Value(util.ImageInfoCtxKey)
+ if imgRaw == nil || !config.WithClair() {
+ vh.next.ServeHTTP(rw, req)
+ return
+ }
+ img, _ := req.Context().Value(util.ImageInfoCtxKey).(util.ImageInfo)
+ if img.Digest == "" {
+ vh.next.ServeHTTP(rw, req)
+ return
+ }
+ projectVulnerableEnabled, projectVulnerableSeverity, wl := util.GetPolicyChecker().VulnerablePolicy(img.ProjectName)
+ if !projectVulnerableEnabled {
+ vh.next.ServeHTTP(rw, req)
+ return
+ }
+ vl, err := scan.VulnListByDigest(img.Digest)
+ if err != nil {
+ log.Errorf("Failed to get the vulnerability list, error: %v", err)
+ http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", "Failed to get vulnerabilities."), http.StatusPreconditionFailed)
+ return
+ }
+ filtered := vl.ApplyWhitelist(wl)
+ msg := vh.filterMsg(img, filtered)
+ log.Info(msg)
+ if int(vl.Severity()) >= int(projectVulnerableSeverity) {
+		log.Debugf("the image severity %q is higher than the project setting %q, failing the response.", vl.Severity(), projectVulnerableSeverity)
+ http.Error(rw, util.MarshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("The severity of vulnerability of the image: %q is equal or higher than the threshold in project setting: %q.", vl.Severity(), projectVulnerableSeverity)), http.StatusPreconditionFailed)
+ return
+ }
+ vh.next.ServeHTTP(rw, req)
+}
+
+func (vh vulnerableHandler) filterMsg(img util.ImageInfo, filtered scan.VulnerabilityList) string {
+	filterMsg := fmt.Sprintf("Image: %s/%s:%s, digest: %s, vulnerabilities filtered by whitelist:", img.ProjectName, img.Repository, img.Reference, img.Digest)
+ if len(filtered) == 0 {
+ filterMsg = fmt.Sprintf("%s none.", filterMsg)
+ }
+ for _, v := range filtered {
+ filterMsg = fmt.Sprintf("%s ID: %s, severity: %s;", filterMsg, v.ID, v.Severity)
+ }
+ return filterMsg
+}
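(In effect, assuming Severity() reflects the list that remains after whitelisted CVE IDs are filtered out: with a project severity threshold of high, an image whose worst remaining vulnerability is medium is pulled normally, while one at high or critical is rejected with 412 Precondition Failed and a PROJECT_POLICY_VIOLATION body.)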
diff --git a/src/core/notifier/event/event.go b/src/core/notifier/event/event.go
new file mode 100644
index 000000000..088a8d2e7
--- /dev/null
+++ b/src/core/notifier/event/event.go
@@ -0,0 +1,154 @@
+package event
+
+import (
+ "time"
+
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/notifier"
+ "github.com/goharbor/harbor/src/core/notifier/model"
+ notifyModel "github.com/goharbor/harbor/src/pkg/notification/model"
+ "github.com/pkg/errors"
+)
+
+// Event to publish
+type Event struct {
+ Topic string
+ Data interface{}
+}
+
+// Metadata is the event raw data to be processed
+type Metadata interface {
+ Resolve(event *Event) error
+}
+
+// ImageDelMetaData defines images deleting related event data
+type ImageDelMetaData struct {
+ Project *models.Project
+ Tags []string
+ OccurAt time.Time
+ Operator string
+ RepoName string
+}
+
+// Resolve image deleting metadata into common image event
+func (i *ImageDelMetaData) Resolve(evt *Event) error {
+ data := &model.ImageEvent{
+ EventType: notifyModel.EventTypeDeleteImage,
+ Project: i.Project,
+ OccurAt: i.OccurAt,
+ Operator: i.Operator,
+ RepoName: i.RepoName,
+ }
+ for _, t := range i.Tags {
+ res := &model.ImgResource{Tag: t}
+ data.Resource = append(data.Resource, res)
+ }
+ evt.Topic = model.DeleteImageTopic
+ evt.Data = data
+ return nil
+}
+
+// ImagePushMetaData defines images pushing related event data
+type ImagePushMetaData struct {
+ Project *models.Project
+ Tag string
+ Digest string
+ OccurAt time.Time
+ Operator string
+ RepoName string
+}
+
+// Resolve image pushing metadata into common image event
+func (i *ImagePushMetaData) Resolve(evt *Event) error {
+ data := &model.ImageEvent{
+ EventType: notifyModel.EventTypePushImage,
+ Project: i.Project,
+ OccurAt: i.OccurAt,
+ Operator: i.Operator,
+ RepoName: i.RepoName,
+ Resource: []*model.ImgResource{
+ {
+ Tag: i.Tag,
+ Digest: i.Digest,
+ },
+ },
+ }
+
+ evt.Topic = model.PushImageTopic
+ evt.Data = data
+ return nil
+}
+
+// ImagePullMetaData defines images pulling related event data
+type ImagePullMetaData struct {
+ Project *models.Project
+ Tag string
+ Digest string
+ OccurAt time.Time
+ Operator string
+ RepoName string
+}
+
+// Resolve image pulling metadata into common image event
+func (i *ImagePullMetaData) Resolve(evt *Event) error {
+ data := &model.ImageEvent{
+ EventType: notifyModel.EventTypePullImage,
+ Project: i.Project,
+ OccurAt: i.OccurAt,
+ Operator: i.Operator,
+ RepoName: i.RepoName,
+ Resource: []*model.ImgResource{
+ {
+ Tag: i.Tag,
+ Digest: i.Digest,
+ },
+ },
+ }
+
+ evt.Topic = model.PullImageTopic
+ evt.Data = data
+ return nil
+}
+
+// HookMetaData defines hook notification related event data
+type HookMetaData struct {
+ PolicyID int64
+ EventType string
+ Target *models.EventTarget
+ Payload *model.Payload
+}
+
+// Resolve hook metadata into hook event
+func (h *HookMetaData) Resolve(evt *Event) error {
+ data := &model.HookEvent{
+ PolicyID: h.PolicyID,
+ EventType: h.EventType,
+ Target: h.Target,
+ Payload: h.Payload,
+ }
+
+ evt.Topic = h.Target.Type
+ evt.Data = data
+ return nil
+}
+
+// Build an event by metadata
+func (e *Event) Build(metadata ...Metadata) error {
+ for _, md := range metadata {
+ if err := md.Resolve(e); err != nil {
+ log.Debugf("failed to resolve event metadata: %v", md)
+ return errors.Wrap(err, "failed to resolve event metadata")
+ }
+ }
+ return nil
+}
+
+// Publish an event
+func (e *Event) Publish() error {
+ if err := notifier.Publish(e.Topic, e.Data); err != nil {
+ log.Debugf("failed to publish topic %s with event: %v", e.Topic, e.Data)
+ return errors.Wrap(err, "failed to publish event")
+ }
+ return nil
+}
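(A short sketch of the intended call pattern; the field values below are hypothetical:

    evt := &event.Event{}
    err := evt.Build(&event.ImagePushMetaData{
        Project:  project, // *models.Project, assumed already loaded
        Tag:      "latest",
        Digest:   "sha256:abcd", // placeholder digest
        OccurAt:  time.Now(),
        Operator: "admin",
        RepoName: "library/photon",
    })
    if err == nil {
        err = evt.Publish() // fans out to subscribers of PushImageTopic
    }
)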
diff --git a/src/core/notifier/event/event_test.go b/src/core/notifier/event/event_test.go
new file mode 100644
index 000000000..21e0d8d23
--- /dev/null
+++ b/src/core/notifier/event/event_test.go
@@ -0,0 +1,212 @@
+package event
+
+import (
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/models"
+ notifierModel "github.com/goharbor/harbor/src/core/notifier/model"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestImagePushEvent_Build(t *testing.T) {
+ type args struct {
+ imgPushMetadata *ImagePushMetaData
+ hookMetadata *HookMetaData
+ }
+
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ want *Event
+ }{
+ {
+ name: "Build Image Push Event",
+ args: args{
+ imgPushMetadata: &ImagePushMetaData{
+ Project: &models.Project{ProjectID: 1, Name: "library"},
+ Tag: "v1.0",
+ Digest: "abcd",
+ OccurAt: time.Now(),
+ Operator: "admin",
+ RepoName: "library/alpine",
+ },
+ },
+ want: &Event{
+ Topic: notifierModel.PushImageTopic,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ event := &Event{}
+ err := event.Build(tt.args.imgPushMetadata)
+ if tt.wantErr {
+ require.NotNil(t, err, "Error: %s", err)
+ return
+ }
+ assert.Equal(t, tt.want.Topic, event.Topic)
+ })
+ }
+}
+
+func TestImagePullEvent_Build(t *testing.T) {
+ type args struct {
+ imgPullMetadata *ImagePullMetaData
+ }
+
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ want *Event
+ }{
+ {
+ name: "Build Image Pull Event",
+ args: args{
+ imgPullMetadata: &ImagePullMetaData{
+ Project: &models.Project{ProjectID: 1, Name: "library"},
+ Tag: "v1.0",
+ Digest: "abcd",
+ OccurAt: time.Now(),
+ Operator: "admin",
+ RepoName: "library/alpine",
+ },
+ },
+ want: &Event{
+ Topic: notifierModel.PullImageTopic,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ event := &Event{}
+ err := event.Build(tt.args.imgPullMetadata)
+ if tt.wantErr {
+ require.NotNil(t, err, "Error: %s", err)
+ return
+ }
+ assert.Equal(t, tt.want.Topic, event.Topic)
+ })
+ }
+}
+
+func TestImageDelEvent_Build(t *testing.T) {
+ type args struct {
+ imgDelMetadata *ImageDelMetaData
+ }
+
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ want *Event
+ }{
+ {
+ name: "Build Image Delete Event",
+ args: args{
+ imgDelMetadata: &ImageDelMetaData{
+ Project: &models.Project{ProjectID: 1, Name: "library"},
+ Tags: []string{"v1.0"},
+ OccurAt: time.Now(),
+ Operator: "admin",
+ RepoName: "library/alpine",
+ },
+ },
+ want: &Event{
+ Topic: notifierModel.DeleteImageTopic,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ event := &Event{}
+ err := event.Build(tt.args.imgDelMetadata)
+ if tt.wantErr {
+ require.NotNil(t, err, "Error: %s", err)
+ return
+ }
+ assert.Equal(t, tt.want.Topic, event.Topic)
+ })
+ }
+}
+
+func TestHookEvent_Build(t *testing.T) {
+ type args struct {
+ hookMetadata *HookMetaData
+ }
+
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ want *Event
+ }{
+ {
+ name: "Build HTTP Hook Event",
+ args: args{
+ hookMetadata: &HookMetaData{
+ PolicyID: 1,
+ EventType: "pushImage",
+ Target: &models.EventTarget{
+ Type: "http",
+ Address: "http://127.0.0.1",
+ },
+ Payload: nil,
+ },
+ },
+ want: &Event{
+ Topic: notifierModel.WebhookTopic,
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ event := &Event{}
+ err := event.Build(tt.args.hookMetadata)
+ if tt.wantErr {
+ require.NotNil(t, err, "Error: %s", err)
+ return
+ }
+ assert.Equal(t, tt.want.Topic, event.Topic)
+ })
+ }
+}
+
+func TestEvent_Publish(t *testing.T) {
+ type args struct {
+ event *Event
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "Publish Error 1",
+ args: args{
+ event: &Event{
+ Topic: notifierModel.WebhookTopic,
+ Data: nil,
+ },
+ },
+ wantErr: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := tt.args.event.Publish()
+ if tt.wantErr {
+ require.NotNil(t, err, "Error: %s", err)
+ return
+ }
+ })
+ }
+}
diff --git a/src/core/notifier/handler/notification/http_handler.go b/src/core/notifier/handler/notification/http_handler.go
new file mode 100755
index 000000000..9795a7c2b
--- /dev/null
+++ b/src/core/notifier/handler/notification/http_handler.go
@@ -0,0 +1,59 @@
+package notification
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/goharbor/harbor/src/common/job/models"
+ "github.com/goharbor/harbor/src/core/notifier/model"
+ "github.com/goharbor/harbor/src/jobservice/job"
+ "github.com/goharbor/harbor/src/pkg/notification"
+)
+
+// HTTPHandler preprocesses HTTP event data and starts the hook processing
+type HTTPHandler struct {
+}
+
+// Handle handles the HTTP event
+func (h *HTTPHandler) Handle(value interface{}) error {
+ if value == nil {
+ return errors.New("HTTPHandler cannot handle nil value")
+ }
+
+ event, ok := value.(*model.HookEvent)
+ if !ok || event == nil {
+ return errors.New("invalid notification http event")
+ }
+
+ return h.process(event)
+}
+
+// IsStateful ...
+func (h *HTTPHandler) IsStateful() bool {
+ return false
+}
+
+func (h *HTTPHandler) process(event *model.HookEvent) error {
+ j := &models.JobData{
+ Metadata: &models.JobMetadata{
+ JobKind: job.KindGeneric,
+ },
+ }
+ j.Name = job.WebhookJob
+
+ payload, err := json.Marshal(event.Payload)
+ if err != nil {
+ return fmt.Errorf("marshal from payload %v failed: %v", event.Payload, err)
+ }
+
+ j.Parameters = map[string]interface{}{
+ "payload": string(payload),
+ "address": event.Target.Address,
+ // Users can define an auth header in the notification (webhook) policy.
+ // If set, it is sent as a header of the HTTP request.
+ "auth_header": event.Target.AuthHeader,
+ "skip_cert_verify": event.Target.SkipCertVerify,
+ }
+ return notification.HookManager.StartHook(event, j)
+}
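The `process` step wraps the marshaled payload, target address, auth header, and cert-verification flag into a generic `WEBHOOK` job and hands it to the hook manager. A hedged sketch of driving the handler end to end, mirroring the faked manager in the test file that follows (the stub and its output are illustrative; the real `HookManager` enqueues the job in jobservice):

```go
package main

import (
	"fmt"
	"time"

	jobModels "github.com/goharbor/harbor/src/common/job/models"
	cModels "github.com/goharbor/harbor/src/common/models"
	"github.com/goharbor/harbor/src/core/notifier/handler/notification"
	"github.com/goharbor/harbor/src/core/notifier/model"
	pkgNotification "github.com/goharbor/harbor/src/pkg/notification"
)

// stubHookManager satisfies the StartHook contract used above,
// like the faked manager in the test file below.
type stubHookManager struct{}

func (s *stubHookManager) StartHook(evt *model.HookEvent, j *jobModels.JobData) error {
	fmt.Printf("would enqueue %s job for %s\n", j.Name, evt.Target.Address)
	return nil
}

func main() {
	pkgNotification.HookManager = &stubHookManager{}
	h := &notification.HTTPHandler{}
	err := h.Handle(&model.HookEvent{
		PolicyID:  1,
		EventType: "pushImage",
		Target: &cModels.EventTarget{
			Type:    "http",
			Address: "http://127.0.0.1:8080",
		},
		Payload: &model.Payload{OccurAt: time.Now().Unix()},
	})
	fmt.Println("handle returned:", err)
}
```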
diff --git a/src/core/notifier/handler/notification/http_handler_test.go b/src/core/notifier/handler/notification/http_handler_test.go
new file mode 100644
index 000000000..c7d5ef3ae
--- /dev/null
+++ b/src/core/notifier/handler/notification/http_handler_test.go
@@ -0,0 +1,97 @@
+package notification
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/goharbor/harbor/src/common/job/models"
+ cModels "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/core/notifier/event"
+ "github.com/goharbor/harbor/src/core/notifier/model"
+ "github.com/goharbor/harbor/src/pkg/notification"
+ "github.com/stretchr/testify/require"
+)
+
+type fakedHookManager struct {
+}
+
+func (f *fakedHookManager) StartHook(event *model.HookEvent, job *models.JobData) error {
+ return nil
+}
+
+func TestHTTPHandler_Handle(t *testing.T) {
+ hookMgr := notification.HookManager
+ defer func() {
+ notification.HookManager = hookMgr
+ }()
+ notification.HookManager = &fakedHookManager{}
+
+ handler := &HTTPHandler{}
+
+ type args struct {
+ event *event.Event
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "HTTPHandler_Handle Want Error 1",
+ args: args{
+ event: &event.Event{
+ Topic: "http",
+ Data: nil,
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "HTTPHandler_Handle Want Error 2",
+ args: args{
+ event: &event.Event{
+ Topic: "http",
+ Data: &model.ImageEvent{},
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "HTTPHandler_Handle 1",
+ args: args{
+ event: &event.Event{
+ Topic: "http",
+ Data: &model.HookEvent{
+ PolicyID: 1,
+ EventType: "pushImage",
+ Target: &cModels.EventTarget{
+ Type: "http",
+ Address: "http://127.0.0.1:8080",
+ },
+ Payload: &model.Payload{
+ OccurAt: time.Now().Unix(),
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := handler.Handle(tt.args.event.Data)
+ if tt.wantErr {
+ require.NotNil(t, err, "Error: %s", err)
+ return
+ }
+ })
+ }
+}
+
+func TestHTTPHandler_IsStateful(t *testing.T) {
+ handler := &HTTPHandler{}
+ assert.False(t, handler.IsStateful())
+}
diff --git a/src/core/notifier/handler/notification/image_handler.go b/src/core/notifier/handler/notification/image_handler.go
new file mode 100644
index 000000000..f9dc12468
--- /dev/null
+++ b/src/core/notifier/handler/notification/image_handler.go
@@ -0,0 +1,18 @@
+package notification
+
+// ImagePreprocessHandler preprocesses image event data
+type ImagePreprocessHandler struct {
+}
+
+// Handle preprocesses image event data and then publishes the hook event
+func (h *ImagePreprocessHandler) Handle(value interface{}) error {
+ if err := preprocessAndSendImageHook(value); err != nil {
+ return err
+ }
+ return nil
+}
+
+// IsStateful ...
+func (h *ImagePreprocessHandler) IsStateful() bool {
+ return false
+}
diff --git a/src/core/notifier/handler/notification/image_handler_test.go b/src/core/notifier/handler/notification/image_handler_test.go
new file mode 100644
index 000000000..ae1696b44
--- /dev/null
+++ b/src/core/notifier/handler/notification/image_handler_test.go
@@ -0,0 +1,193 @@
+package notification
+
+import (
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/notifier/model"
+ "github.com/goharbor/harbor/src/pkg/notification"
+ notificationModel "github.com/goharbor/harbor/src/pkg/notification/model"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/require"
+)
+
+type fakedNotificationPlyMgr struct {
+}
+
+func (f *fakedNotificationPlyMgr) Create(*models.NotificationPolicy) (int64, error) {
+ return 0, nil
+}
+
+func (f *fakedNotificationPlyMgr) List(id int64) ([]*models.NotificationPolicy, error) {
+ return nil, nil
+}
+
+func (f *fakedNotificationPlyMgr) Get(id int64) (*models.NotificationPolicy, error) {
+ return nil, nil
+}
+
+func (f *fakedNotificationPlyMgr) GetByNameAndProjectID(string, int64) (*models.NotificationPolicy, error) {
+ return nil, nil
+}
+
+func (f *fakedNotificationPlyMgr) Update(*models.NotificationPolicy) error {
+ return nil
+}
+
+func (f *fakedNotificationPlyMgr) Delete(int64) error {
+ return nil
+}
+
+func (f *fakedNotificationPlyMgr) Test(*models.NotificationPolicy) error {
+ return nil
+}
+
+func (f *fakedNotificationPlyMgr) GetRelatedPolices(id int64, eventType string) ([]*models.NotificationPolicy, error) {
+ if id == 1 {
+ return []*models.NotificationPolicy{
+ {
+ ID: 1,
+ EventTypes: []string{
+ notificationModel.EventTypePullImage,
+ notificationModel.EventTypePushImage,
+ },
+ Targets: []models.EventTarget{
+ {
+ Type: "http",
+ Address: "http://127.0.0.1:8080",
+ },
+ },
+ },
+ }, nil
+ }
+ if id == 2 {
+ return nil, nil
+ }
+ return nil, errors.New("")
+}
+
+func TestMain(m *testing.M) {
+ dao.PrepareTestForPostgresSQL()
+ os.Exit(m.Run())
+}
+
+func TestImagePreprocessHandler_Handle(t *testing.T) {
+ PolicyMgr := notification.PolicyMgr
+ defer func() {
+ notification.PolicyMgr = PolicyMgr
+ }()
+ notification.PolicyMgr = &fakedNotificationPlyMgr{}
+
+ handler := &ImagePreprocessHandler{}
+ config.Init()
+
+ type args struct {
+ data interface{}
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "ImagePreprocessHandler Want Error 1",
+ args: args{
+ data: nil,
+ },
+ wantErr: true,
+ },
+ {
+ name: "ImagePreprocessHandler Want Error 2",
+ args: args{
+ data: &model.ImageEvent{},
+ },
+ wantErr: true,
+ },
+ {
+ name: "ImagePreprocessHandler Want Error 3",
+ args: args{
+ data: &model.ImageEvent{
+ Resource: []*model.ImgResource{
+ {
+ Tag: "v1.0",
+ },
+ },
+ Project: &models.Project{
+ ProjectID: 3,
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "ImagePreprocessHandler Want Error 4",
+ args: args{
+ data: &model.ImageEvent{
+ Resource: []*model.ImgResource{
+ {
+ Tag: "v1.0",
+ },
+ },
+ Project: &models.Project{
+ ProjectID: 1,
+ },
+ },
+ },
+ wantErr: true,
+ },
+ // No handlers registered for handling topic http
+ {
+ name: "ImagePreprocessHandler Want Error 5",
+ args: args{
+ data: &model.ImageEvent{
+ RepoName: "test/alpine",
+ Resource: []*model.ImgResource{
+ {
+ Tag: "v1.0",
+ },
+ },
+ Project: &models.Project{
+ ProjectID: 1,
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "ImagePreprocessHandler 2",
+ args: args{
+ data: &model.ImageEvent{
+ Resource: []*model.ImgResource{
+ {
+ Tag: "v1.0",
+ },
+ },
+ Project: &models.Project{
+ ProjectID: 2,
+ },
+ },
+ },
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ err := handler.Handle(tt.args.data)
+ if tt.wantErr {
+ require.NotNil(t, err, "Error: %s", err)
+ return
+ }
+ assert.Nil(t, err)
+ })
+ }
+}
+
+func TestImagePreprocessHandler_IsStateful(t *testing.T) {
+ handler := &ImagePreprocessHandler{}
+ assert.False(t, handler.IsStateful())
+}
diff --git a/src/core/notifier/handler/notification/processor.go b/src/core/notifier/handler/notification/processor.go
new file mode 100644
index 000000000..513640fd2
--- /dev/null
+++ b/src/core/notifier/handler/notification/processor.go
@@ -0,0 +1,174 @@
+package notification
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/notifier/event"
+ notifyModel "github.com/goharbor/harbor/src/core/notifier/model"
+ "github.com/goharbor/harbor/src/pkg/notification"
+)
+
+// getNameFromImgRepoFullName gets the image name from the repo full name in the format `projectName/imageName`
+func getNameFromImgRepoFullName(repo string) string {
+ idx := strings.Index(repo, "/")
+ return repo[idx+1:]
+}
+
+func buildImageResourceURL(extURL, repoName, tag string) (string, error) {
+ resURL := fmt.Sprintf("%s/%s:%s", extURL, repoName, tag)
+ return resURL, nil
+}
+
+func constructImagePayload(event *notifyModel.ImageEvent) (*notifyModel.Payload, error) {
+ repoName := event.RepoName
+ if repoName == "" {
+ return nil, fmt.Errorf("invalid %s event with empty repo name", event.EventType)
+ }
+
+ repoType := models.ProjectPrivate
+ if event.Project.IsPublic() {
+ repoType = models.ProjectPublic
+ }
+
+ imageName := getNameFromImgRepoFullName(repoName)
+
+ payload := &notifyModel.Payload{
+ Type: event.EventType,
+ OccurAt: event.OccurAt.Unix(),
+ EventData: &notifyModel.EventData{
+ Repository: &notifyModel.Repository{
+ Name: imageName,
+ Namespace: event.Project.Name,
+ RepoFullName: repoName,
+ RepoType: repoType,
+ },
+ },
+ Operator: event.Operator,
+ }
+
+ repoRecord, err := dao.GetRepositoryByName(repoName)
+ if err != nil {
+ log.Errorf("failed to get repository with name %s: %v", repoName, err)
+ return nil, err
+ }
+ // once the repo has been deleted, the repo record may no longer be retrievable
+ if repoRecord == nil {
+ log.Debugf("cannot find repository info with repo %s", repoName)
+ } else {
+ payload.EventData.Repository.DateCreated = repoRecord.CreationTime.Unix()
+ }
+
+ extURL, err := config.ExtURL()
+ if err != nil {
+ return nil, fmt.Errorf("get external endpoint failed: %v", err)
+ }
+
+ for _, res := range event.Resource {
+ tag := res.Tag
+ digest := res.Digest
+
+ if tag == "" {
+ log.Errorf("invalid notification event with empty tag: %v", event)
+ continue
+ }
+
+ resURL, err := buildImageResourceURL(extURL, event.RepoName, tag)
+ if err != nil {
+ log.Errorf("get resource URL failed: %v", err)
+ continue
+ }
+
+ resource := &notifyModel.Resource{
+ Tag: tag,
+ Digest: digest,
+ ResourceURL: resURL,
+ }
+ payload.EventData.Resources = append(payload.EventData.Resources, resource)
+ }
+
+ return payload, nil
+}
+
+// send hooks by publishing events to the topic of the specified target type (notify type)
+func sendHookWithPolicies(policies []*models.NotificationPolicy, payload *notifyModel.Payload, eventType string) error {
+ for _, ply := range policies {
+ targets := ply.Targets
+ for _, target := range targets {
+ evt := &event.Event{}
+ hookMetadata := &event.HookMetaData{
+ EventType: eventType,
+ PolicyID: ply.ID,
+ Payload: payload,
+ Target: &target,
+ }
+ if err := evt.Build(hookMetadata); err != nil {
+ log.Errorf("failed to build hook notify event metadata: %v", err)
+ return err
+ }
+ if err := evt.Publish(); err != nil {
+ log.Errorf("failed to publish hook notify event: %v", err)
+ return err
+ }
+
+ log.Debugf("published image event %s by topic %s", payload.Type, target.Type)
+ }
+ }
+ return nil
+}
+
+func resolveImageEventData(value interface{}) (*notifyModel.ImageEvent, error) {
+ imgEvent, ok := value.(*notifyModel.ImageEvent)
+ if !ok || imgEvent == nil {
+ return nil, errors.New("invalid image event")
+ }
+
+ if len(imgEvent.Resource) == 0 {
+ return nil, fmt.Errorf("empty event resouece data in image event: %v", imgEvent)
+ }
+
+ return imgEvent, nil
+}
+
+// preprocessAndSendImageHook preprocesses image event data and sends hooks according to the notification policy targets
+func preprocessAndSendImageHook(value interface{}) error {
+ // if the notification feature is disabled globally, return directly
+ if !config.NotificationEnable() {
+ log.Debug("notification feature is not enabled")
+ return nil
+ }
+
+ imgEvent, err := resolveImageEventData(value)
+ if err != nil {
+ return err
+ }
+
+ policies, err := notification.PolicyMgr.GetRelatedPolices(imgEvent.Project.ProjectID, imgEvent.EventType)
+ if err != nil {
+ log.Errorf("failed to find policy for %s event: %v", imgEvent.EventType, err)
+ return err
+ }
+ // if no policy covering the event type is found in the project, return directly
+ if len(policies) == 0 {
+ log.Debugf("cannot find policy for %s event: %v", imgEvent.EventType, imgEvent)
+ return nil
+ }
+
+ payload, err := constructImagePayload(imgEvent)
+ if err != nil {
+ return err
+ }
+
+ err = sendHookWithPolicies(policies, payload, imgEvent.EventType)
+ if err != nil {
+ return err
+ }
+
+ return nil
+
+}
diff --git a/src/core/notifier/model/event.go b/src/core/notifier/model/event.go
new file mode 100755
index 000000000..67889e751
--- /dev/null
+++ b/src/core/notifier/model/event.go
@@ -0,0 +1,61 @@
+package model
+
+import (
+ "time"
+
+ "github.com/goharbor/harbor/src/common/models"
+)
+
+// ImageEvent is image related event data to publish
+type ImageEvent struct {
+ EventType string
+ Project *models.Project
+ Resource []*ImgResource
+ OccurAt time.Time
+ Operator string
+ RepoName string
+}
+
+// ImgResource include image digest and tag
+type ImgResource struct {
+ Digest string
+ Tag string
+}
+
+// HookEvent is hook related event data to publish
+type HookEvent struct {
+ PolicyID int64
+ EventType string
+ Target *models.EventTarget
+ Payload *Payload
+}
+
+// Payload of notification event
+type Payload struct {
+ Type string `json:"type"`
+ OccurAt int64 `json:"occur_at"`
+ EventData *EventData `json:"event_data,omitempty"`
+ Operator string `json:"operator"`
+}
+
+// EventData of notification event payload
+type EventData struct {
+ Resources []*Resource `json:"resources"`
+ Repository *Repository `json:"repository"`
+}
+
+// Resource describes the resource that triggered the notification
+type Resource struct {
+ Digest string `json:"digest,omitempty"`
+ Tag string `json:"tag"`
+ ResourceURL string `json:"resource_url,omitempty"`
+}
+
+// Repository info of notification event
+type Repository struct {
+ DateCreated int64 `json:"date_created,omitempty"`
+ Name string `json:"name"`
+ Namespace string `json:"namespace"`
+ RepoFullName string `json:"repo_full_name"`
+ RepoType string `json:"repo_type"`
+}
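The `Payload` tree above is the wire format of the webhook body. A short sketch, with illustrative values, of what marshaling one produces; `omitempty` fields drop out when unset:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/goharbor/harbor/src/core/notifier/model"
)

func main() {
	p := &model.Payload{
		Type:    "pushImage",
		OccurAt: 1561532400,
		EventData: &model.EventData{
			Resources: []*model.Resource{
				{Tag: "v1.0", Digest: "abcd"},
			},
			Repository: &model.Repository{
				Name:         "alpine",
				Namespace:    "library",
				RepoFullName: "library/alpine",
				RepoType:     "public",
			},
		},
		Operator: "admin",
	}
	b, _ := json.Marshal(p)
	// Yields, roughly:
	// {"type":"pushImage","occur_at":1561532400,
	//  "event_data":{"resources":[{"tag":"v1.0","digest":"abcd"}],
	//  "repository":{...}},"operator":"admin"}
	fmt.Println(string(b))
}
```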
diff --git a/src/core/notifier/model/topic.go b/src/core/notifier/model/topic.go
new file mode 100644
index 000000000..7278858b8
--- /dev/null
+++ b/src/core/notifier/model/topic.go
@@ -0,0 +1,26 @@
+package model
+
+// Define global topic names
+const (
+ // PushImageTopic is topic for push image event
+ PushImageTopic = "OnPushImage"
+ // PullImageTopic is topic for pull image event
+ PullImageTopic = "OnPullImage"
+ // DeleteImageTopic is topic for delete image event
+ DeleteImageTopic = "OnDeleteImage"
+ // UploadChartTopic is topic for upload chart event
+ UploadChartTopic = "OnUploadChart"
+ // DownloadChartTopic is topic for download chart event
+ DownloadChartTopic = "OnDownloadChart"
+ // DeleteChartTopic is topic for delete chart event
+ DeleteChartTopic = "OnDeleteChart"
+ // ScanningFailedTopic is topic for scanning failed event
+ ScanningFailedTopic = "OnScanningFailed"
+ // ScanningCompletedTopic is topic for scanning completed event
+ ScanningCompletedTopic = "OnScanningCompleted"
+
+ // WebhookTopic is topic for sending webhook payload
+ WebhookTopic = "http"
+ // EmailTopic is topic for sending email payload
+ EmailTopic = "email"
+)
diff --git a/src/core/notifier/topic/topics.go b/src/core/notifier/topic/topics.go
new file mode 100644
index 000000000..2762da259
--- /dev/null
+++ b/src/core/notifier/topic/topics.go
@@ -0,0 +1,28 @@
+package topic
+
+import (
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/notifier"
+ "github.com/goharbor/harbor/src/core/notifier/handler/notification"
+ "github.com/goharbor/harbor/src/core/notifier/model"
+)
+
+// Subscribe topics
+func init() {
+ handlersMap := map[string][]notifier.NotificationHandler{
+ model.PushImageTopic: {&notification.ImagePreprocessHandler{}},
+ model.PullImageTopic: {&notification.ImagePreprocessHandler{}},
+ model.DeleteImageTopic: {&notification.ImagePreprocessHandler{}},
+ model.WebhookTopic: {&notification.HTTPHandler{}},
+ }
+
+ for t, handlers := range handlersMap {
+ for _, handler := range handlers {
+ if err := notifier.Subscribe(t, handler); err != nil {
+ log.Errorf("failed to subscribe topic %s: %v", t, err)
+ continue
+ }
+ log.Debugf("topic %s is subscribed", t)
+ }
+ }
+}
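With these subscriptions in place, any publish to an image topic fans out to `ImagePreprocessHandler`, which in turn publishes to the `http` topic served by `HTTPHandler`. A sketch of kicking that chain off by hand, assuming only the packages in this diff; the blank import runs the `init` above, and the field values are illustrative:

```go
package main

import (
	"time"

	"github.com/goharbor/harbor/src/common/models"
	"github.com/goharbor/harbor/src/core/notifier"
	"github.com/goharbor/harbor/src/core/notifier/model"
	_ "github.com/goharbor/harbor/src/core/notifier/topic" // runs the subscriptions above
	notificationModel "github.com/goharbor/harbor/src/pkg/notification/model"
)

func main() {
	// Publishing to a subscribed topic dispatches to the registered
	// handler chain.
	_ = notifier.Publish(model.PullImageTopic, &model.ImageEvent{
		EventType: notificationModel.EventTypePullImage,
		Project:   &models.Project{ProjectID: 1, Name: "library"},
		Resource:  []*model.ImgResource{{Tag: "v1.0", Digest: "abcd"}},
		OccurAt:   time.Now(),
		Operator:  "admin",
		RepoName:  "library/alpine",
	})
}
```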
diff --git a/src/core/notifier/topics.go b/src/core/notifier/topics.go
deleted file mode 100644
index 23aca94cf..000000000
--- a/src/core/notifier/topics.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package notifier
-
-import (
- "github.com/goharbor/harbor/src/common"
-)
-
-// Define global topic names
-const (
- // ScanAllPolicyTopic is for notifying the change of scanning all policy.
- ScanAllPolicyTopic = common.ScanAllPolicy
-)
diff --git a/src/core/promgr/pmsdriver/local/local.go b/src/core/promgr/pmsdriver/local/local.go
index b02b19cbd..4706f3f43 100644
--- a/src/core/promgr/pmsdriver/local/local.go
+++ b/src/core/promgr/pmsdriver/local/local.go
@@ -20,7 +20,6 @@ import (
"time"
"github.com/goharbor/harbor/src/common/dao"
- "github.com/goharbor/harbor/src/common/dao/group"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils"
errutil "github.com/goharbor/harbor/src/common/utils/error"
@@ -132,19 +131,16 @@ func (d *driver) Update(projectIDOrName interface{},
func (d *driver) List(query *models.ProjectQueryParam) (*models.ProjectQueryResult, error) {
var total int64
var projects []*models.Project
- var groupDNCondition string
-
- // List with LDAP group projects
+ var groupIDs []int
if query != nil && query.Member != nil {
- groupDNCondition = group.GetGroupDNQueryCondition(query.Member.GroupList)
+ groupIDs = query.Member.GroupIDs
}
-
- count, err := dao.GetTotalGroupProjects(groupDNCondition, query)
+ count, err := dao.GetTotalGroupProjects(groupIDs, query)
if err != nil {
return nil, err
}
total = int64(count)
- projects, err = dao.GetGroupProjects(groupDNCondition, query)
+ projects, err = dao.GetGroupProjects(groupIDs, query)
if err != nil {
return nil, err
}
diff --git a/src/core/promgr/promgr.go b/src/core/promgr/promgr.go
index b2de26ff3..3ac8f6ca8 100644
--- a/src/core/promgr/promgr.go
+++ b/src/core/promgr/promgr.go
@@ -16,6 +16,7 @@ package promgr
import (
"fmt"
+ "github.com/goharbor/harbor/src/pkg/scan/whitelist"
"strconv"
"github.com/goharbor/harbor/src/common/models"
@@ -44,6 +45,7 @@ type defaultProjectManager struct {
pmsDriver pmsdriver.PMSDriver
metaMgrEnabled bool // if metaMgrEnabled is enabled, metaMgr will be used to CURD metadata
metaMgr metamgr.ProjectMetadataManager
+ whitelistMgr whitelist.Manager
}
// NewDefaultProjectManager returns an instance of defaultProjectManager,
@@ -56,6 +58,7 @@ func NewDefaultProjectManager(driver pmsdriver.PMSDriver, metaMgrEnabled bool) P
}
if metaMgrEnabled {
mgr.metaMgr = metamgr.NewDefaultProjectMetadataManager()
+ mgr.whitelistMgr = whitelist.NewDefaultManager()
}
return mgr
}
@@ -77,6 +80,11 @@ func (d *defaultProjectManager) Get(projectIDOrName interface{}) (*models.Projec
for k, v := range meta {
project.Metadata[k] = v
}
+ wl, err := d.whitelistMgr.Get(project.ProjectID)
+ if err != nil {
+ return nil, err
+ }
+ project.CVEWhitelist = *wl
}
return project, nil
}
@@ -85,9 +93,12 @@ func (d *defaultProjectManager) Create(project *models.Project) (int64, error) {
if err != nil {
return 0, err
}
- if len(project.Metadata) > 0 && d.metaMgrEnabled {
- if err = d.metaMgr.Add(id, project.Metadata); err != nil {
- log.Errorf("failed to add metadata for project %s: %v", project.Name, err)
+ if d.metaMgrEnabled {
+ if err := d.whitelistMgr.CreateEmpty(id); err != nil {
+ log.Errorf("failed to create an empty CVE whitelist for project %s: %v", project.Name, err)
+ }
+ if len(project.Metadata) > 0 {
+ if err = d.metaMgr.Add(id, project.Metadata); err != nil {
+ log.Errorf("failed to add metadata for project %s: %v", project.Name, err)
+ }
}
}
return id, nil
@@ -110,37 +121,40 @@ func (d *defaultProjectManager) Delete(projectIDOrName interface{}) error {
}
func (d *defaultProjectManager) Update(projectIDOrName interface{}, project *models.Project) error {
- if len(project.Metadata) > 0 && d.metaMgrEnabled {
- pro, err := d.Get(projectIDOrName)
- if err != nil {
+ pro, err := d.Get(projectIDOrName)
+ if err != nil {
+ return err
+ }
+ if pro == nil {
+ return fmt.Errorf("project %v not found", projectIDOrName)
+ }
+ // TODO transaction?
+ if d.metaMgrEnabled {
+ if err := d.whitelistMgr.Set(pro.ProjectID, project.CVEWhitelist); err != nil {
return err
}
- if pro == nil {
- return fmt.Errorf("project %v not found", projectIDOrName)
- }
-
- // TODO transaction?
- metaNeedUpdated := map[string]string{}
- metaNeedCreated := map[string]string{}
- if pro.Metadata == nil {
- pro.Metadata = map[string]string{}
- }
- for key, value := range project.Metadata {
- _, exist := pro.Metadata[key]
- if exist {
- metaNeedUpdated[key] = value
- } else {
- metaNeedCreated[key] = value
+ if len(project.Metadata) > 0 {
+ metaNeedUpdated := map[string]string{}
+ metaNeedCreated := map[string]string{}
+ if pro.Metadata == nil {
+ pro.Metadata = map[string]string{}
+ }
+ for key, value := range project.Metadata {
+ _, exist := pro.Metadata[key]
+ if exist {
+ metaNeedUpdated[key] = value
+ } else {
+ metaNeedCreated[key] = value
+ }
+ }
+ if err = d.metaMgr.Add(pro.ProjectID, metaNeedCreated); err != nil {
+ return err
+ }
+ if err = d.metaMgr.Update(pro.ProjectID, metaNeedUpdated); err != nil {
+ return err
}
}
- if err = d.metaMgr.Add(pro.ProjectID, metaNeedCreated); err != nil {
- return err
- }
- if err = d.metaMgr.Update(pro.ProjectID, metaNeedUpdated); err != nil {
- return err
- }
}
-
return d.pmsDriver.Update(projectIDOrName, project)
}
@@ -179,6 +193,7 @@ func (d *defaultProjectManager) List(query *models.ProjectQueryParam) (*models.P
project.Metadata = meta
}
}
+ // the CVE whitelist is deliberately not populated for list results
return result, nil
}
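Only three methods of `whitelist.Manager` are exercised in this file: `CreateEmpty` on project creation, `Set` on update, and `Get` on read. A sketch of the interface shape as inferred from those call sites; this is reconstructed, not quoted from the `whitelist` package, so treat the names and signatures as assumptions:

```go
package sketch

import "github.com/goharbor/harbor/src/common/models"

// inferredWhitelistManager is reconstructed from the promgr.go call
// sites above; the real declaration lives in src/pkg/scan/whitelist
// and may differ.
type inferredWhitelistManager interface {
	CreateEmpty(projectID int64) error
	Set(projectID int64, wl models.CVEWhitelist) error
	Get(projectID int64) (*models.CVEWhitelist, error)
}
```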
diff --git a/src/core/proxy/interceptors.go b/src/core/proxy/interceptors.go
deleted file mode 100644
index b8a3fe3b8..000000000
--- a/src/core/proxy/interceptors.go
+++ /dev/null
@@ -1,397 +0,0 @@
-package proxy
-
-import (
- "encoding/json"
-
- "github.com/goharbor/harbor/src/common/dao"
- "github.com/goharbor/harbor/src/common/models"
- "github.com/goharbor/harbor/src/common/utils/clair"
- "github.com/goharbor/harbor/src/common/utils/log"
- "github.com/goharbor/harbor/src/common/utils/notary"
- "github.com/goharbor/harbor/src/core/config"
- "github.com/goharbor/harbor/src/core/promgr"
- coreutils "github.com/goharbor/harbor/src/core/utils"
-
- "context"
- "fmt"
- "net/http"
- "net/http/httptest"
- "regexp"
- "strconv"
- "strings"
-)
-
-type contextKey string
-
-const (
- manifestURLPattern = `^/v2/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)+)manifests/([\w][\w.:-]{0,127})`
- catalogURLPattern = `/v2/_catalog`
- imageInfoCtxKey = contextKey("ImageInfo")
- // TODO: temp solution, remove after vmware/harbor#2242 is resolved.
- tokenUsername = "harbor-core"
-)
-
-// Record the docker deamon raw response.
-var rec *httptest.ResponseRecorder
-
-// NotaryEndpoint , exported for testing.
-var NotaryEndpoint = ""
-
-// MatchPullManifest checks if the request looks like a request to pull manifest. If it is returns the image and tag/sha256 digest as 2nd and 3rd return values
-func MatchPullManifest(req *http.Request) (bool, string, string) {
- // TODO: add user agent check.
- if req.Method != http.MethodGet {
- return false, "", ""
- }
- return matchManifestURL(req)
-}
-
-// MatchPushManifest checks if the request looks like a request to push manifest. If it is returns the image and tag/sha256 digest as 2nd and 3rd return values
-func MatchPushManifest(req *http.Request) (bool, string, string) {
- if req.Method != http.MethodPut {
- return false, "", ""
- }
- return matchManifestURL(req)
-}
-
-func matchManifestURL(req *http.Request) (bool, string, string) {
- re := regexp.MustCompile(manifestURLPattern)
- s := re.FindStringSubmatch(req.URL.Path)
- if len(s) == 3 {
- s[1] = strings.TrimSuffix(s[1], "/")
- return true, s[1], s[2]
- }
- return false, "", ""
-}
-
-// MatchListRepos checks if the request looks like a request to list repositories.
-func MatchListRepos(req *http.Request) bool {
- if req.Method != http.MethodGet {
- return false
- }
- re := regexp.MustCompile(catalogURLPattern)
- s := re.FindStringSubmatch(req.URL.Path)
- if len(s) == 1 {
- return true
- }
- return false
-}
-
-// policyChecker checks the policy of a project by project name, to determine if it's needed to check the image's status under this project.
-type policyChecker interface {
- // contentTrustEnabled returns whether a project has enabled content trust.
- contentTrustEnabled(name string) bool
- // vulnerablePolicy returns whether a project has enabled vulnerable, and the project's severity.
- vulnerablePolicy(name string) (bool, models.Severity)
-}
-
-type pmsPolicyChecker struct {
- pm promgr.ProjectManager
-}
-
-func (pc pmsPolicyChecker) contentTrustEnabled(name string) bool {
- project, err := pc.pm.Get(name)
- if err != nil {
- log.Errorf("Unexpected error when getting the project, error: %v", err)
- return true
- }
- return project.ContentTrustEnabled()
-}
-func (pc pmsPolicyChecker) vulnerablePolicy(name string) (bool, models.Severity) {
- project, err := pc.pm.Get(name)
- if err != nil {
- log.Errorf("Unexpected error when getting the project, error: %v", err)
- return true, models.SevUnknown
- }
- return project.VulPrevented(), clair.ParseClairSev(project.Severity())
-}
-
-// newPMSPolicyChecker returns an instance of an pmsPolicyChecker
-func newPMSPolicyChecker(pm promgr.ProjectManager) policyChecker {
- return &pmsPolicyChecker{
- pm: pm,
- }
-}
-
-func getPolicyChecker() policyChecker {
- return newPMSPolicyChecker(config.GlobalProjectMgr)
-}
-
-type imageInfo struct {
- repository string
- reference string
- projectName string
- digest string
-}
-
-type urlHandler struct {
- next http.Handler
-}
-
-func (uh urlHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- log.Debugf("in url handler, path: %s", req.URL.Path)
- flag, repository, reference := MatchPullManifest(req)
- if flag {
- components := strings.SplitN(repository, "/", 2)
- if len(components) < 2 {
- http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("Bad repository name: %s", repository)), http.StatusBadRequest)
- return
- }
-
- client, err := coreutils.NewRepositoryClientForUI(tokenUsername, repository)
- if err != nil {
- log.Errorf("Error creating repository Client: %v", err)
- http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("Failed due to internal Error: %v", err)), http.StatusInternalServerError)
- return
- }
- digest, _, err := client.ManifestExist(reference)
- if err != nil {
- log.Errorf("Failed to get digest for reference: %s, error: %v", reference, err)
- http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("Failed due to internal Error: %v", err)), http.StatusInternalServerError)
- return
- }
-
- img := imageInfo{
- repository: repository,
- reference: reference,
- projectName: components[0],
- digest: digest,
- }
-
- log.Debugf("image info of the request: %#v", img)
- ctx := context.WithValue(req.Context(), imageInfoCtxKey, img)
- req = req.WithContext(ctx)
- }
- uh.next.ServeHTTP(rw, req)
-}
-
-type readonlyHandler struct {
- next http.Handler
-}
-
-func (rh readonlyHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- if config.ReadOnly() {
- if req.Method == http.MethodDelete || req.Method == http.MethodPost || req.Method == http.MethodPatch || req.Method == http.MethodPut {
- log.Warningf("The request is prohibited in readonly mode, url is: %s", req.URL.Path)
- http.Error(rw, marshalError("DENIED", "The system is in read only mode. Any modification is prohibited."), http.StatusForbidden)
- return
- }
- }
- rh.next.ServeHTTP(rw, req)
-}
-
-type multipleManifestHandler struct {
- next http.Handler
-}
-
-// The handler is responsible for blocking request to upload manifest list by docker client, which is not supported so far by Harbor.
-func (mh multipleManifestHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- match, _, _ := MatchPushManifest(req)
- if match {
- contentType := req.Header.Get("Content-type")
- // application/vnd.docker.distribution.manifest.list.v2+json
- if strings.Contains(contentType, "manifest.list.v2") {
- log.Debugf("Content-type: %s is not supported, failing the response.", contentType)
- http.Error(rw, marshalError("UNSUPPORTED_MEDIA_TYPE", "Manifest.list is not supported."), http.StatusUnsupportedMediaType)
- return
- }
- }
- mh.next.ServeHTTP(rw, req)
-}
-
-type listReposHandler struct {
- next http.Handler
-}
-
-func (lrh listReposHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- listReposFlag := MatchListRepos(req)
- if listReposFlag {
- rec = httptest.NewRecorder()
- lrh.next.ServeHTTP(rec, req)
- if rec.Result().StatusCode != http.StatusOK {
- copyResp(rec, rw)
- return
- }
- var ctlg struct {
- Repositories []string `json:"repositories"`
- }
- decoder := json.NewDecoder(rec.Body)
- if err := decoder.Decode(&ctlg); err != nil {
- log.Errorf("Decode repositories error: %v", err)
- copyResp(rec, rw)
- return
- }
- var entries []string
- for repo := range ctlg.Repositories {
- log.Debugf("the repo in the response %s", ctlg.Repositories[repo])
- exist := dao.RepositoryExists(ctlg.Repositories[repo])
- if exist {
- entries = append(entries, ctlg.Repositories[repo])
- }
- }
- type Repos struct {
- Repositories []string `json:"repositories"`
- }
- resp := &Repos{Repositories: entries}
- respJSON, err := json.Marshal(resp)
- if err != nil {
- log.Errorf("Encode repositories error: %v", err)
- copyResp(rec, rw)
- return
- }
-
- for k, v := range rec.Header() {
- rw.Header()[k] = v
- }
- clen := len(respJSON)
- rw.Header().Set(http.CanonicalHeaderKey("Content-Length"), strconv.Itoa(clen))
- rw.Write(respJSON)
- return
- }
- lrh.next.ServeHTTP(rw, req)
-}
-
-type contentTrustHandler struct {
- next http.Handler
-}
-
-func (cth contentTrustHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- imgRaw := req.Context().Value(imageInfoCtxKey)
- if imgRaw == nil || !config.WithNotary() {
- cth.next.ServeHTTP(rw, req)
- return
- }
- img, _ := req.Context().Value(imageInfoCtxKey).(imageInfo)
- if img.digest == "" {
- cth.next.ServeHTTP(rw, req)
- return
- }
- if !getPolicyChecker().contentTrustEnabled(img.projectName) {
- cth.next.ServeHTTP(rw, req)
- return
- }
- match, err := matchNotaryDigest(img)
- if err != nil {
- http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", "Failed in communication with Notary please check the log"), http.StatusInternalServerError)
- return
- }
- if !match {
- log.Debugf("digest mismatch, failing the response.")
- http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", "The image is not signed in Notary."), http.StatusPreconditionFailed)
- return
- }
- cth.next.ServeHTTP(rw, req)
-}
-
-type vulnerableHandler struct {
- next http.Handler
-}
-
-func (vh vulnerableHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- imgRaw := req.Context().Value(imageInfoCtxKey)
- if imgRaw == nil || !config.WithClair() {
- vh.next.ServeHTTP(rw, req)
- return
- }
- img, _ := req.Context().Value(imageInfoCtxKey).(imageInfo)
- if img.digest == "" {
- vh.next.ServeHTTP(rw, req)
- return
- }
- projectVulnerableEnabled, projectVulnerableSeverity := getPolicyChecker().vulnerablePolicy(img.projectName)
- if !projectVulnerableEnabled {
- vh.next.ServeHTTP(rw, req)
- return
- }
- overview, err := dao.GetImgScanOverview(img.digest)
- if err != nil {
- log.Errorf("failed to get ImgScanOverview with repo: %s, reference: %s, digest: %s. Error: %v", img.repository, img.reference, img.digest, err)
- http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", "Failed to get ImgScanOverview."), http.StatusPreconditionFailed)
- return
- }
- // severity is 0 means that the image fails to scan or not scanned successfully.
- if overview == nil || overview.Sev == 0 {
- log.Debugf("cannot get the image scan overview info, failing the response.")
- http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", "Cannot get the image severity."), http.StatusPreconditionFailed)
- return
- }
- imageSev := overview.Sev
- if imageSev >= int(projectVulnerableSeverity) {
- log.Debugf("the image severity: %q is higher then project setting: %q, failing the response.", models.Severity(imageSev), projectVulnerableSeverity)
- http.Error(rw, marshalError("PROJECT_POLICY_VIOLATION", fmt.Sprintf("The severity of vulnerability of the image: %q is equal or higher than the threshold in project setting: %q.", models.Severity(imageSev), projectVulnerableSeverity)), http.StatusPreconditionFailed)
- return
- }
- vh.next.ServeHTTP(rw, req)
-}
-
-func matchNotaryDigest(img imageInfo) (bool, error) {
- if NotaryEndpoint == "" {
- NotaryEndpoint = config.InternalNotaryEndpoint()
- }
- targets, err := notary.GetInternalTargets(NotaryEndpoint, tokenUsername, img.repository)
- if err != nil {
- return false, err
- }
- for _, t := range targets {
- if isDigest(img.reference) {
- d, err := notary.DigestFromTarget(t)
- if err != nil {
- return false, err
- }
- if img.digest == d {
- return true, nil
- }
- } else {
- if t.Tag == img.reference {
- log.Debugf("found reference: %s in notary, try to match digest.", img.reference)
- d, err := notary.DigestFromTarget(t)
- if err != nil {
- return false, err
- }
- if img.digest == d {
- return true, nil
- }
- }
- }
- }
- log.Debugf("image: %#v, not found in notary", img)
- return false, nil
-}
-
-// A sha256 is a string with 64 characters.
-func isDigest(ref string) bool {
- return strings.HasPrefix(ref, "sha256:") && len(ref) == 71
-}
-
-func copyResp(rec *httptest.ResponseRecorder, rw http.ResponseWriter) {
- for k, v := range rec.Header() {
- rw.Header()[k] = v
- }
- rw.WriteHeader(rec.Result().StatusCode)
- rw.Write(rec.Body.Bytes())
-}
-
-func marshalError(code, msg string) string {
- var tmpErrs struct {
- Errors []JSONError `json:"errors,omitempty"`
- }
- tmpErrs.Errors = append(tmpErrs.Errors, JSONError{
- Code: code,
- Message: msg,
- Detail: msg,
- })
-
- str, err := json.Marshal(tmpErrs)
- if err != nil {
- log.Debugf("failed to marshal json error, %v", err)
- return msg
- }
- return string(str)
-}
-
-// JSONError wraps a concrete Code and Message, it's readable for docker deamon.
-type JSONError struct {
- Code string `json:"code,omitempty"`
- Message string `json:"message,omitempty"`
- Detail string `json:"detail,omitempty"`
-}
diff --git a/src/core/proxy/proxy.go b/src/core/proxy/proxy.go
deleted file mode 100644
index eadbfed38..000000000
--- a/src/core/proxy/proxy.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package proxy
-
-import (
- "github.com/goharbor/harbor/src/core/config"
-
- "fmt"
- "net/http"
- "net/http/httputil"
- "net/url"
-)
-
-// Proxy is the instance of the reverse proxy in this package.
-var Proxy *httputil.ReverseProxy
-
-var handlers handlerChain
-
-type handlerChain struct {
- head http.Handler
-}
-
-// Init initialize the Proxy instance and handler chain.
-func Init(urls ...string) error {
- var err error
- var registryURL string
- if len(urls) > 1 {
- return fmt.Errorf("the parm, urls should have only 0 or 1 elements")
- }
- if len(urls) == 0 {
- registryURL, err = config.RegistryURL()
- if err != nil {
- return err
- }
- } else {
- registryURL = urls[0]
- }
- targetURL, err := url.Parse(registryURL)
- if err != nil {
- return err
- }
- Proxy = httputil.NewSingleHostReverseProxy(targetURL)
- handlers = handlerChain{head: readonlyHandler{next: urlHandler{next: multipleManifestHandler{next: listReposHandler{next: contentTrustHandler{next: vulnerableHandler{next: Proxy}}}}}}}
- return nil
-}
-
-// Handle handles the request.
-func Handle(rw http.ResponseWriter, req *http.Request) {
- handlers.head.ServeHTTP(rw, req)
-}
diff --git a/src/core/router.go b/src/core/router.go
old mode 100644
new mode 100755
index 1c4c31f3f..7e01b934e
--- a/src/core/router.go
+++ b/src/core/router.go
@@ -15,17 +15,16 @@
package main
import (
+ "github.com/astaxie/beego"
"github.com/goharbor/harbor/src/common"
"github.com/goharbor/harbor/src/core/api"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/controllers"
"github.com/goharbor/harbor/src/core/service/notifications/admin"
- "github.com/goharbor/harbor/src/core/service/notifications/clair"
"github.com/goharbor/harbor/src/core/service/notifications/jobs"
"github.com/goharbor/harbor/src/core/service/notifications/registry"
+ "github.com/goharbor/harbor/src/core/service/notifications/scheduler"
"github.com/goharbor/harbor/src/core/service/token"
-
- "github.com/astaxie/beego"
)
func initRouters() {
@@ -67,6 +66,7 @@ func initRouters() {
beego.Router("/api/ping", &api.SystemInfoAPI{}, "get:Ping")
beego.Router("/api/search", &api.SearchAPI{})
beego.Router("/api/projects/", &api.ProjectAPI{}, "get:List;post:Post")
+ beego.Router("/api/projects/:id([0-9]+)/summary", &api.ProjectAPI{}, "get:Summary")
beego.Router("/api/projects/:id([0-9]+)/logs", &api.ProjectAPI{}, "get:Logs")
beego.Router("/api/projects/:id([0-9]+)/_deletable", &api.ProjectAPI{}, "get:Deletable")
beego.Router("/api/projects/:id([0-9]+)/metadatas/?:name", &api.MetadataAPI{}, "get:Get")
@@ -76,6 +76,9 @@ func initRouters() {
beego.Router("/api/projects/:pid([0-9]+)/robots", &api.RobotAPI{}, "post:Post;get:List")
beego.Router("/api/projects/:pid([0-9]+)/robots/:id([0-9]+)", &api.RobotAPI{}, "get:Get;put:Put;delete:Delete")
+ beego.Router("/api/quotas", &api.QuotaAPI{}, "get:List")
+ beego.Router("/api/quotas/:id([0-9]+)", &api.QuotaAPI{}, "get:Get;put:Put")
+
beego.Router("/api/repositories", &api.RepositoryAPI{}, "get:Get")
beego.Router("/api/repositories/*", &api.RepositoryAPI{}, "delete:Delete;put:Put")
beego.Router("/api/repositories/*/labels", &api.RepositoryLabelAPI{}, "get:GetOfRepository;post:AddToRepository")
@@ -96,6 +99,8 @@ func initRouters() {
beego.Router("/api/system/gc/:id([0-9]+)/log", &api.GCAPI{}, "get:GetLog")
beego.Router("/api/system/gc/schedule", &api.GCAPI{}, "get:Get;put:Put;post:Post")
beego.Router("/api/system/scanAll/schedule", &api.ScanAllAPI{}, "get:Get;put:Put;post:Post")
+ beego.Router("/api/system/CVEWhitelist", &api.SysCVEWhitelistAPI{}, "get:Get;put:Put")
+ beego.Router("/api/system/oidc/ping", &api.OIDCAPI{}, "post:Ping")
beego.Router("/api/logs", &api.LogAPI{})
@@ -108,6 +113,14 @@ func initRouters() {
beego.Router("/api/replication/policies", &api.ReplicationPolicyAPI{}, "get:List;post:Create")
beego.Router("/api/replication/policies/:id([0-9]+)", &api.ReplicationPolicyAPI{}, "get:Get;put:Update;delete:Delete")
+ beego.Router("/api/projects/:pid([0-9]+)/webhook/policies", &api.NotificationPolicyAPI{}, "get:List;post:Post")
+ beego.Router("/api/projects/:pid([0-9]+)/webhook/policies/:id([0-9]+)", &api.NotificationPolicyAPI{})
+ beego.Router("/api/projects/:pid([0-9]+)/webhook/policies/test", &api.NotificationPolicyAPI{}, "post:Test")
+
+ beego.Router("/api/projects/:pid([0-9]+)/webhook/lasttrigger", &api.NotificationPolicyAPI{}, "get:ListGroupByEventType")
+
+ beego.Router("/api/projects/:pid([0-9]+)/webhook/jobs/", &api.NotificationJobAPI{}, "get:List")
+
beego.Router("/api/internal/configurations", &api.ConfigAPI{}, "get:GetInternalConfig;put:Put")
beego.Router("/api/configurations", &api.ConfigAPI{}, "get:Get;put:Put")
beego.Router("/api/statistics", &api.StatisticAPI{})
@@ -121,14 +134,18 @@ func initRouters() {
beego.Router("/api/internal/syncregistry", &api.InternalAPI{}, "post:SyncRegistry")
beego.Router("/api/internal/renameadmin", &api.InternalAPI{}, "post:RenameAdmin")
+ beego.Router("/api/internal/switchquota", &api.InternalAPI{}, "put:SwitchQuota")
+ beego.Router("/api/internal/syncquota", &api.InternalAPI{}, "post:SyncQuota")
// external service that hosted on harbor process:
beego.Router("/service/notifications", ®istry.NotificationHandler{})
- beego.Router("/service/notifications/clair", &clair.Handler{}, "post:Handle")
beego.Router("/service/notifications/jobs/scan/:id([0-9]+)", &jobs.Handler{}, "post:HandleScan")
beego.Router("/service/notifications/jobs/adminjob/:id([0-9]+)", &admin.Handler{}, "post:HandleAdminJob")
beego.Router("/service/notifications/jobs/replication/:id([0-9]+)", &jobs.Handler{}, "post:HandleReplicationScheduleJob")
beego.Router("/service/notifications/jobs/replication/task/:id([0-9]+)", &jobs.Handler{}, "post:HandleReplicationTask")
+ beego.Router("/service/notifications/jobs/webhook/:id([0-9]+)", &jobs.Handler{}, "post:HandleNotificationJob")
+ beego.Router("/service/notifications/jobs/retention/task/:id([0-9]+)", &jobs.Handler{}, "post:HandleRetentionTask")
+ beego.Router("/service/notifications/schedules/:id([0-9]+)", &scheduler.Handler{}, "post:Handle")
beego.Router("/service/token", &token.Handler{})
beego.Router("/api/registries", &api.RegistryAPI{}, "get:List;post:Post")
@@ -138,6 +155,16 @@ func initRouters() {
beego.Router("/api/registries/:id/info", &api.RegistryAPI{}, "get:GetInfo")
beego.Router("/api/registries/:id/namespace", &api.RegistryAPI{}, "get:GetNamespace")
+ beego.Router("/api/retentions/metadatas", &api.RetentionAPI{}, "get:GetMetadatas")
+ beego.Router("/api/retentions/:id", &api.RetentionAPI{}, "get:GetRetention")
+ beego.Router("/api/retentions", &api.RetentionAPI{}, "post:CreateRetention")
+ beego.Router("/api/retentions/:id", &api.RetentionAPI{}, "put:UpdateRetention")
+ beego.Router("/api/retentions/:id/executions", &api.RetentionAPI{}, "post:TriggerRetentionExec")
+ beego.Router("/api/retentions/:id/executions/:eid", &api.RetentionAPI{}, "patch:OperateRetentionExec")
+ beego.Router("/api/retentions/:id/executions", &api.RetentionAPI{}, "get:ListRetentionExecs")
+ beego.Router("/api/retentions/:id/executions/:eid/tasks", &api.RetentionAPI{}, "get:ListRetentionExecTasks")
+ beego.Router("/api/retentions/:id/executions/:eid/tasks/:tid", &api.RetentionAPI{}, "get:GetRetentionExecTaskLog")
+
beego.Router("/v2/*", &controllers.RegistryProxy{}, "*:Handle")
// APIs for chart repository
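The new webhook routes register List/Post on the policy collection plus the `test` and `lasttrigger` helpers. A hedged sketch of exercising the list route over HTTP; the host and credentials are illustrative, and the policy JSON schema is not shown in this diff:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet,
		"https://harbor.example.com/api/projects/1/webhook/policies", nil)
	req.SetBasicAuth("admin", "password") // illustrative credentials
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```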
diff --git a/src/core/service/notifications/clair/handler.go b/src/core/service/notifications/clair/handler.go
deleted file mode 100644
index 0c96e6768..000000000
--- a/src/core/service/notifications/clair/handler.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2018 Project Harbor Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clair
-
-import (
- "encoding/json"
- "time"
-
- "github.com/goharbor/harbor/src/common/dao"
- "github.com/goharbor/harbor/src/common/models"
- "github.com/goharbor/harbor/src/common/utils"
- "github.com/goharbor/harbor/src/common/utils/clair"
- "github.com/goharbor/harbor/src/common/utils/log"
- "github.com/goharbor/harbor/src/core/api"
- "github.com/goharbor/harbor/src/core/config"
-)
-
-const (
- rescanInterval = 15 * time.Minute
-)
-
-var (
- clairClient *clair.Client
-)
-
-// Handler handles reqeust on /service/notifications/clair/, which listens to clair's notifications.
-// When there's unexpected error it will silently fail without removing the notification such that it will be triggered again.
-type Handler struct {
- api.BaseController
-}
-
-// Handle ...
-func (h *Handler) Handle() {
- if clairClient == nil {
- clairClient = clair.NewClient(config.ClairEndpoint(), nil)
- }
- var ne models.ClairNotificationEnvelope
- if err := json.Unmarshal(h.Ctx.Input.CopyBody(1<<32), &ne); err != nil {
- log.Errorf("Failed to decode the request: %v", err)
- return
- }
- log.Debugf("Received notification from Clair, name: %s", ne.Notification.Name)
- notification, err := clairClient.GetNotification(ne.Notification.Name)
- if err != nil {
- log.Errorf("Failed to get notification details from Clair, name: %s, err: %v", ne.Notification.Name, err)
- return
- }
- ns := make(map[string]bool)
- if old := notification.Old; old != nil {
- if vuln := old.Vulnerability; vuln != nil {
- log.Debugf("old vulnerability namespace: %s", vuln.NamespaceName)
- ns[vuln.NamespaceName] = true
- }
- }
- if newNotification := notification.New; newNotification != nil {
- if vuln := newNotification.Vulnerability; vuln != nil {
- log.Debugf("new vulnerability namespace: %s", vuln.NamespaceName)
- ns[vuln.NamespaceName] = true
- }
- }
- for k, v := range ns {
- if v {
- if err := dao.SetClairVulnTimestamp(k, time.Now()); err == nil {
- log.Debugf("Updated the timestamp for namespaces: %s", k)
- } else {
- log.Warningf("Failed to update the timestamp for namespaces: %s, error: %v", k, err)
- }
- }
- }
- if utils.ScanOverviewMarker().Check() {
- go func() {
- <-time.After(rescanInterval)
- l, err := dao.ListImgScanOverviews()
- if err != nil {
- log.Errorf("Failed to list scan overview records, error: %v", err)
- return
- }
- for _, e := range l {
- if err := clair.UpdateScanOverview(e.Digest, e.DetailsKey, config.ClairEndpoint()); err != nil {
- log.Errorf("Failed to refresh scan overview for image: %s", e.Digest)
- } else {
- log.Debugf("Refreshed scan overview for record with digest: %s", e.Digest)
- }
- }
- }()
- utils.ScanOverviewMarker().Mark()
- } else {
- log.Debugf("There is a rescan scheduled at %v already, skip.", utils.ScanOverviewMarker().Next())
- }
- if err := clairClient.DeleteNotification(ne.Notification.Name); err != nil {
- log.Warningf("Failed to remove notification from Clair, name: %s", ne.Notification.Name)
- } else {
- log.Debugf("Removed notification from Clair, name: %s", ne.Notification.Name)
- }
-}
diff --git a/src/core/service/notifications/jobs/handler.go b/src/core/service/notifications/jobs/handler.go
old mode 100644
new mode 100755
index 2ddf06b27..47377f9cc
--- a/src/core/service/notifications/jobs/handler.go
+++ b/src/core/service/notifications/jobs/handler.go
@@ -16,6 +16,7 @@ package jobs
import (
"encoding/json"
+ "time"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/job"
@@ -23,6 +24,9 @@ import (
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/api"
+ jjob "github.com/goharbor/harbor/src/jobservice/job"
+ "github.com/goharbor/harbor/src/pkg/notification"
+ "github.com/goharbor/harbor/src/pkg/retention"
"github.com/goharbor/harbor/src/replication"
"github.com/goharbor/harbor/src/replication/operation/hook"
"github.com/goharbor/harbor/src/replication/policy/scheduler"
@@ -30,12 +34,11 @@ import (
var statusMap = map[string]string{
job.JobServiceStatusPending: models.JobPending,
+ job.JobServiceStatusScheduled: models.JobScheduled,
job.JobServiceStatusRunning: models.JobRunning,
job.JobServiceStatusStopped: models.JobStopped,
- job.JobServiceStatusCancelled: models.JobCanceled,
job.JobServiceStatusError: models.JobError,
job.JobServiceStatusSuccess: models.JobFinished,
- job.JobServiceStatusScheduled: models.JobScheduled,
}
// Handler handles reqeust on /service/notifications/jobs/*, which listens to the webhook of jobservice.
@@ -44,6 +47,7 @@ type Handler struct {
id int64
status string
rawStatus string
+ checkIn string
}
// Prepare ...
@@ -71,6 +75,7 @@ func (h *Handler) Prepare() {
return
}
h.status = status
+ h.checkIn = data.CheckIn
}
// HandleScan handles the webhook of scan job
@@ -97,7 +102,71 @@ func (h *Handler) HandleReplicationScheduleJob() {
func (h *Handler) HandleReplicationTask() {
log.Debugf("received replication task status update event: task-%d, status-%s", h.id, h.status)
if err := hook.UpdateTask(replication.OperationCtl, h.id, h.rawStatus); err != nil {
- log.Errorf("Failed to update replication task status, id: %d, status: %s", h.id, h.status)
+ log.Errorf("failed to update the status of the replication task %d: %v", h.id, err)
+ h.SendInternalServerError(err)
+ return
+ }
+}
+
+// HandleRetentionTask handles the webhook of retention task
+func (h *Handler) HandleRetentionTask() {
+ taskID := h.id
+ status := h.rawStatus
+ log.Debugf("received retention task status update event: task-%d, status-%s", taskID, status)
+ mgr := &retention.DefaultManager{}
+ // handle checkin
+ if h.checkIn != "" {
+ var retainObj struct {
+ Total int `json:"total"`
+ Retained int `json:"retained"`
+ }
+ if err := json.Unmarshal([]byte(h.checkIn), &retainObj); err != nil {
+ log.Errorf("failed to resolve checkin of retention task %d: %v", taskID, err)
+ return
+ }
+ task := &retention.Task{
+ ID: taskID,
+ Total: retainObj.Total,
+ Retained: retainObj.Retained,
+ }
+ if err := mgr.UpdateTask(task, "Total", "Retained"); err != nil {
+ log.Errorf("failed to update of retention task %d: %v", taskID, err)
+ h.SendInternalServerError(err)
+ return
+ }
+ return
+ }
+
+ // handle status updating
+ if err := mgr.UpdateTaskStatus(taskID, status); err != nil {
+ log.Errorf("failed to update the status of retention task %d: %v", taskID, err)
+ h.SendInternalServerError(err)
+ return
+ }
+ // if the status is the final status, update the end time
+ if status == jjob.StoppedStatus.String() || status == jjob.SuccessStatus.String() ||
+ status == jjob.ErrorStatus.String() {
+ task := &retention.Task{
+ ID: taskID,
+ EndTime: time.Now(),
+ }
+ if err := mgr.UpdateTask(task, "EndTime"); err != nil {
+ log.Errorf("failed to update of retention task %d: %v", taskID, err)
+ h.SendInternalServerError(err)
+ return
+ }
+ }
+}
+
+// HandleNotificationJob handles the hook of notification job
+func (h *Handler) HandleNotificationJob() {
+ log.Debugf("received notification job status update event: job-%d, status-%s", h.id, h.status)
+ if err := notification.JobMgr.Update(&models.NotificationJob{
+ ID: h.id,
+ Status: h.status,
+ UpdateTime: time.Now(),
+ }, "Status", "UpdateTime"); err != nil {
+ log.Errorf("Failed to update notification job status, id: %d, status: %s", h.id, h.status)
h.SendInternalServerError(err)
return
}
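`HandleRetentionTask` treats a non-empty check-in as a progress report and everything else as a status update. The check-in body it unmarshals is the two-field JSON below; how the retention job composes and sends it is outside this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// retainReport mirrors the check-in JSON that HandleRetentionTask
// expects: the total number of candidates and how many were retained.
type retainReport struct {
	Total    int `json:"total"`
	Retained int `json:"retained"`
}

func main() {
	b, _ := json.Marshal(retainReport{Total: 100, Retained: 30})
	fmt.Println(string(b)) // {"total":100,"retained":30}
}
```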
diff --git a/src/core/service/notifications/registry/handler.go b/src/core/service/notifications/registry/handler.go
old mode 100644
new mode 100755
index d3530f979..eb581ef1e
--- a/src/core/service/notifications/registry/handler.go
+++ b/src/core/service/notifications/registry/handler.go
@@ -27,6 +27,7 @@ import (
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/api"
"github.com/goharbor/harbor/src/core/config"
+ notifierEvt "github.com/goharbor/harbor/src/core/notifier/event"
coreutils "github.com/goharbor/harbor/src/core/utils"
"github.com/goharbor/harbor/src/replication"
"github.com/goharbor/harbor/src/replication/adapter"
@@ -111,11 +112,30 @@ func (n *NotificationHandler) Post() {
}()
}
- if !coreutils.WaitForManifestReady(repository, tag, 5) {
+ if !coreutils.WaitForManifestReady(repository, tag, 6) {
log.Errorf("Manifest for image %s:%s is not ready, skip the follow up actions.", repository, tag)
return
}
+ // build and publish image push event
+ evt := &notifierEvt.Event{}
+ imgPushMetadata := &notifierEvt.ImagePushMetaData{
+ Project: pro,
+ Tag: tag,
+ Digest: event.Target.Digest,
+ RepoName: event.Target.Repository,
+ OccurAt: time.Now(),
+ Operator: event.Actor.Name,
+ }
+ if err := evt.Build(imgPushMetadata); err != nil {
+ // do not return when building event metadata failed
+ log.Errorf("failed to build image push event metadata: %v", err)
+ }
+ if err := evt.Publish(); err != nil {
+ // do not return when publishing event failed
+ log.Errorf("failed to publish image push event: %v", err)
+ }
+
// TODO: handle image delete event and chart event
go func() {
e := &rep_event.Event{
@@ -148,12 +168,70 @@ func (n *NotificationHandler) Post() {
}
}
if action == "pull" {
+ // build and publish image pull event
+ evt := &notifierEvt.Event{}
+ imgPullMetadata := &notifierEvt.ImagePullMetaData{
+ Project: pro,
+ Tag: tag,
+ Digest: event.Target.Digest,
+ RepoName: event.Target.Repository,
+ OccurAt: time.Now(),
+ Operator: event.Actor.Name,
+ }
+ if err := evt.Build(imgPullMetadata); err != nil {
+ // do not return when building event metadata failed
+ log.Errorf("failed to build image push event metadata: %v", err)
+ }
+ if err := evt.Publish(); err != nil {
+ // do not return when publishing event failed
+ log.Errorf("failed to publish image pull event: %v", err)
+ }
+
go func() {
log.Debugf("Increase the repository %s pull count.", repository)
if err := dao.IncreasePullCount(repository); err != nil {
log.Errorf("Error happens when increasing pull count: %v", repository)
}
}()
+
+ // update the artifact pull time, and ignore events without a tag.
+ if tag != "" {
+ go func() {
+ artifactQuery := &models.ArtifactQuery{
+ PID: pro.ProjectID,
+ Repo: repository,
+ }
+
+ // handle pull by tag or digest
+ pullByDigest := utils.IsDigest(tag)
+ if pullByDigest {
+ artifactQuery.Digest = tag
+ } else {
+ artifactQuery.Tag = tag
+ }
+
+ afs, err := dao.ListArtifacts(artifactQuery)
+ if err != nil {
+ log.Errorf("Error occurred when to get artifact %v", err)
+ return
+ }
+ if len(afs) > 0 {
+ log.Warningf("get multiple artifact records when to update pull time with query :%d-%s-%s, "+
+ "all of them will be updated.", artifactQuery.PID, artifactQuery.Repo, artifactQuery.Tag)
+ }
+
+ // TODO: figure out how to do a batch update in Postgres, as beego ORM doesn't support updating multiple rows the way insert does.
+ for _, af := range afs {
+ log.Debugf("Update the artifact: %s pull time.", af.Repo)
+ af.PullTime = time.Now()
+ if err := dao.UpdateArtifactPullTime(af); err != nil {
+ log.Errorf("Error happens when updating the pull time of artifact: %d-%s, with err: %v",
+ artifactQuery.PID, artifactQuery.Repo, err)
+ }
+ }
+ }()
+ }
+
}
}
}
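
For context, the push/pull hunks above deliberately log-and-continue: a failure while building or publishing a notifier event must never abort the registry notification flow. A minimal, self-contained sketch of that pattern (the Event/metadata types here are hypothetical stand-ins, not Harbor's real notifier API):

package main

import (
	"errors"
	"log"
	"time"
)

// Hypothetical stand-ins for the notifier event types used above.
type ImagePushMetaData struct {
	RepoName string
	Tag      string
	OccurAt  time.Time
}

type Event struct{ topic string }

func (e *Event) Build(m ImagePushMetaData) error {
	if m.RepoName == "" {
		return errors.New("empty repository name")
	}
	e.topic = "image_push"
	return nil
}

func (e *Event) Publish() error { return nil }

func main() {
	evt := &Event{}
	meta := ImagePushMetaData{RepoName: "library/alpine", Tag: "latest", OccurAt: time.Now()}
	// Errors are logged, not returned: webhook delivery is best-effort
	// and must not fail the registry notification handler.
	if err := evt.Build(meta); err != nil {
		log.Printf("failed to build image push event metadata: %v", err)
	}
	if err := evt.Publish(); err != nil {
		log.Printf("failed to publish image push event: %v", err)
	}
}
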
diff --git a/src/core/service/notifications/scheduler/handler.go b/src/core/service/notifications/scheduler/handler.go
new file mode 100644
index 000000000..b07cfd5b6
--- /dev/null
+++ b/src/core/service/notifications/scheduler/handler.go
@@ -0,0 +1,79 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scheduler
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/goharbor/harbor/src/common/job/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/api"
+ "github.com/goharbor/harbor/src/pkg/scheduler"
+ "github.com/goharbor/harbor/src/pkg/scheduler/hook"
+)
+
+// Handler handles the scheduler requests
+type Handler struct {
+ api.BaseController
+}
+
+// Handle ...
+func (h *Handler) Handle() {
+ log.Debugf("received scheduler hook event for schedule %s", h.GetStringFromPath(":id"))
+
+ var data models.JobStatusChange
+ if err := json.Unmarshal(h.Ctx.Input.CopyBody(1<<32), &data); err != nil {
+ log.Errorf("failed to decode hook event: %v", err)
+ return
+ }
+ // status update
+ if len(data.CheckIn) == 0 {
+ schedulerID, err := h.GetInt64FromPath(":id")
+ if err != nil {
+ log.Errorf("failed to get the schedule ID: %v", err)
+ return
+ }
+ if err := hook.GlobalController.UpdateStatus(schedulerID, data.Status); err != nil {
+ h.SendInternalServerError(fmt.Errorf("failed to update status of job %s: %v", data.JobID, err))
+ return
+ }
+ log.Debugf("handle status update hook event for schedule %s completed", h.GetStringFromPath(":id"))
+ return
+ }
+
+ // run callback function
+ // just log the error message if anything goes wrong while handling the check-in request
+ params := map[string]interface{}{}
+ if err := json.Unmarshal([]byte(data.CheckIn), &params); err != nil {
+ log.Errorf("failed to unmarshal parameters from check-in message: %v", err)
+ return
+ }
+ callbackFuncNameParam, exist := params[scheduler.JobParamCallbackFunc]
+ if !exist {
+ log.Error("cannot get the parameter \"callback_func_name\" from the check in message")
+ return
+ }
+ callbackFuncName, ok := callbackFuncNameParam.(string)
+ if !ok || len(callbackFuncName) == 0 {
+ log.Errorf("invalid \"callback_func_name\": %v", callbackFuncName)
+ return
+ }
+ if err := hook.GlobalController.Run(callbackFuncName, params[scheduler.JobParamCallbackFuncParams]); err != nil {
+ log.Errorf("failed to run the callback function %s: %v", callbackFuncName, err)
+ return
+ }
+ log.Debugf("callback function %s called for schedule %s", callbackFuncName, h.GetStringFromPath(":id"))
+}
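
The handler above branches on whether the hook carries a check-in payload; when it does, the payload is expected to be a JSON object whose "callback_func_name" entry names the registered callback (that key name is taken from the handler's own error message; the parameters key below is only illustrative). A standalone sketch of the same parsing:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Illustrative check-in message; only "callback_func_name" is known
	// from the handler above, the "params" key is an assumption.
	checkIn := `{"callback_func_name":"retention_callback","params":{"policy_id":7}}`

	params := map[string]interface{}{}
	if err := json.Unmarshal([]byte(checkIn), &params); err != nil {
		panic(err)
	}
	name, ok := params["callback_func_name"].(string)
	if !ok || len(name) == 0 {
		panic("invalid callback_func_name")
	}
	fmt.Println("would run callback:", name) // would run callback: retention_callback
}
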
diff --git a/src/core/utils/retag.go b/src/core/utils/retag.go
index b53f5b713..449a758b7 100644
--- a/src/core/utils/retag.go
+++ b/src/core/utils/retag.go
@@ -28,13 +28,13 @@ import (
// Retag tags an image to another
func Retag(srcImage, destImage *models.Image) error {
isSameRepo := getRepoName(srcImage) == getRepoName(destImage)
- srcClient, err := NewRepositoryClientForUI("harbor-ui", getRepoName(srcImage))
+ srcClient, err := NewRepositoryClientForLocal("harbor-ui", getRepoName(srcImage))
if err != nil {
return err
}
destClient := srcClient
if !isSameRepo {
- destClient, err = NewRepositoryClientForUI("harbor-ui", getRepoName(destImage))
+ destClient, err = NewRepositoryClientForLocal("harbor-ui", getRepoName(destImage))
if err != nil {
return err
}
diff --git a/src/core/utils/utils.go b/src/core/utils/utils.go
index 5959c4514..e55f8a010 100644
--- a/src/core/utils/utils.go
+++ b/src/core/utils/utils.go
@@ -17,6 +17,7 @@ package utils
import (
"net/http"
+ "os"
"time"
"github.com/goharbor/harbor/src/common/utils/log"
@@ -33,7 +34,20 @@ func NewRepositoryClientForUI(username, repository string) (*registry.Repository
if err != nil {
return nil, err
}
+ return newRepositoryClient(endpoint, username, repository)
+}
+
+// NewRepositoryClientForLocal creates a repository client that can only be used to
+// access the internal registry with 127.0.0.1
+func NewRepositoryClientForLocal(username, repository string) (*registry.Repository, error) {
+ // 127.0.0.1:8080 is not reachable because core is not enabled in the UT env.
+ if os.Getenv("UTTEST") == "true" {
+ return NewRepositoryClientForUI(username, repository)
+ }
+ return newRepositoryClient(config.LocalCoreURL(), username, repository)
+}
+
+func newRepositoryClient(endpoint, username, repository string) (*registry.Repository, error) {
uam := &auth.UserAgentModifier{
UserAgent: "harbor-registry-client",
}
@@ -48,14 +62,19 @@ func NewRepositoryClientForUI(username, repository string) (*registry.Repository
// WaitForManifestReady implements exponential sleep to wait until the manifest is ready in the registry.
// This is a workaround for https://github.com/docker/distribution/issues/2625
func WaitForManifestReady(repository string, tag string, maxRetry int) bool {
- // The initial wait interval, hard-coded to 50ms
- interval := 50 * time.Millisecond
+ // The initial wait interval is hard-coded to 80ms; the interval sequence is 80ms, 200ms, 500ms, 1.25s, 3.124999936s
+ interval := 80 * time.Millisecond
repoClient, err := NewRepositoryClientForUI("harbor-core", repository)
if err != nil {
log.Errorf("Failed to create repo client.")
return false
}
for i := 0; i < maxRetry; i++ {
+ if i != 0 {
+ log.Warningf("manifest for image %s:%s is not ready, retry after %v", repository, tag, interval)
+ time.Sleep(interval)
+ interval = time.Duration(int64(float32(interval) * 2.5))
+ }
_, exist, err := repoClient.ManifestExist(tag)
if err != nil {
log.Errorf("Unexpected error when checking manifest existence, image: %s:%s, error: %v", repository, tag, err)
@@ -64,9 +83,6 @@ func WaitForManifestReady(repository string, tag string, maxRetry int) bool {
if exist {
return true
}
- log.Warningf("manifest for image %s:%s is not ready, retry after %v", repository, tag, interval)
- time.Sleep(interval)
- interval = interval * 2
}
return false
}
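
The interval sequence in the new comment can be reproduced exactly; the float32 conversion in the multiply step is what turns the last value into 3.124999936s instead of a clean 3.125s. A quick standalone check:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors the retry loop above: start at 80ms and multiply by 2.5
	// via float32, printing 80ms, 200ms, 500ms, 1.25s, 3.124999936s.
	interval := 80 * time.Millisecond
	for i := 0; i < 5; i++ {
		fmt.Println(interval)
		interval = time.Duration(int64(float32(interval) * 2.5))
	}
}

With maxRetry raised to 6, the handler sleeps before retries 2 through 6, so the worst-case total wait is roughly 5.2s before giving up.
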
diff --git a/src/core/views/404.tpl b/src/core/views/404.tpl
index 88213a5d5..e6d0d6f2e 100644
--- a/src/core/views/404.tpl
+++ b/src/core/views/404.tpl
@@ -67,7 +67,7 @@ a.underline, .underline{
Page Not Found
diff --git a/src/go.mod b/src/go.mod
new file mode 100644
index 000000000..fdc8554c8
--- /dev/null
+++ b/src/go.mod
@@ -0,0 +1,85 @@
+module github.com/goharbor/harbor/src
+
+go 1.12
+
+replace github.com/goharbor/harbor => ../
+
+require (
+ github.com/Knetic/govaluate v3.0.0+incompatible // indirect
+ github.com/Masterminds/semver v1.4.2
+ github.com/Microsoft/go-winio v0.4.12 // indirect
+ github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
+ github.com/Unknwon/goconfig v0.0.0-20160216183935-5f601ca6ef4d // indirect
+ github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
+ github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190726115642-cd293c93fd97
+ github.com/astaxie/beego v1.9.0
+ github.com/aws/aws-sdk-go v1.19.47
+ github.com/beego/i18n v0.0.0-20140604031826-e87155e8f0c0
+ github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect
+ github.com/bitly/go-simplejson v0.5.0 // indirect
+ github.com/bmatcuk/doublestar v1.1.1
+ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
+ github.com/bugsnag/bugsnag-go v1.5.2 // indirect
+ github.com/bugsnag/panicwrap v1.2.0 // indirect
+ github.com/casbin/casbin v1.7.0
+ github.com/cenkalti/backoff v2.1.1+incompatible // indirect
+ github.com/cloudflare/cfssl v0.0.0-20190510060611-9c027c93ba9e // indirect
+ github.com/coreos/go-oidc v2.0.0+incompatible
+ github.com/dghubble/sling v1.1.0
+ github.com/dgrijalva/jwt-go v3.2.0+incompatible
+ github.com/docker/distribution v2.7.1+incompatible
+ github.com/docker/docker v1.13.1 // indirect
+ github.com/docker/go v0.0.0-20160303222718-d30aec9fd63c // indirect
+ github.com/docker/go-connections v0.4.0 // indirect
+ github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 // indirect
+ github.com/docker/go-units v0.4.0 // indirect
+ github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7
+ github.com/garyburd/redigo v1.6.0
+ github.com/ghodss/yaml v1.0.0
+ github.com/go-sql-driver/mysql v1.4.1
+ github.com/gobwas/glob v0.2.3 // indirect
+ github.com/gocraft/work v0.5.1
+ github.com/gofrs/uuid v3.2.0+incompatible // indirect
+ github.com/golang-migrate/migrate v3.3.0+incompatible
+ github.com/gomodule/redigo v2.0.0+incompatible
+ github.com/google/certificate-transparency-go v1.0.21 // indirect
+ github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 // indirect
+ github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect
+ github.com/gorilla/handlers v1.3.0
+ github.com/gorilla/mux v1.6.2
+ github.com/graph-gophers/dataloader v5.0.0+incompatible
+ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
+ github.com/jinzhu/gorm v1.9.8 // indirect
+ github.com/justinas/alice v0.0.0-20171023064455-03f45bd4b7da
+ github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
+ github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
+ github.com/lib/pq v1.1.0
+ github.com/mattn/go-runewidth v0.0.4 // indirect
+ github.com/miekg/pkcs11 v0.0.0-20170220202408-7283ca79f35e // indirect
+ github.com/olekukonko/tablewriter v0.0.1
+ github.com/opencontainers/go-digest v1.0.0-rc0
+ github.com/opencontainers/image-spec v1.0.1 // indirect
+ github.com/opentracing/opentracing-go v1.1.0 // indirect
+ github.com/pkg/errors v0.8.1
+ github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
+ github.com/prometheus/client_golang v0.9.4 // indirect
+ github.com/robfig/cron v1.0.0
+ github.com/sirupsen/logrus v1.4.1 // indirect
+ github.com/spf13/viper v1.4.0 // indirect
+ github.com/stretchr/testify v1.3.0
+ github.com/theupdateframework/notary v0.6.1
+ golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c
+ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421
+ gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 // indirect
+ gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
+ gopkg.in/fatih/pool.v2 v2.0.0 // indirect
+ gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
+ gopkg.in/inf.v0 v0.9.1 // indirect
+ gopkg.in/ldap.v2 v2.5.0
+ gopkg.in/square/go-jose.v2 v2.3.0 // indirect
+ gopkg.in/yaml.v2 v2.2.2
+ k8s.io/api v0.0.0-20190222213804-5cb15d344471
+ k8s.io/apimachinery v0.0.0-20180704011316-f534d624797b
+ k8s.io/client-go v8.0.0+incompatible
+ k8s.io/helm v2.9.1+incompatible
+)
diff --git a/src/go.sum b/src/go.sum
new file mode 100644
index 000000000..5ac4284ff
--- /dev/null
+++ b/src/go.sum
@@ -0,0 +1,396 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
+cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/Knetic/govaluate v3.0.0+incompatible h1:7o6+MAPhYTCF0+fdvoz1xDedhRb4f6s9Tn1Tt7/WTEg=
+github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc=
+github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/Unknwon/goconfig v0.0.0-20160216183935-5f601ca6ef4d h1:RjxaKUAINjr+fYbaYjpdBUZc8R3+wF/Yr2XkDHho4Sg=
+github.com/Unknwon/goconfig v0.0.0-20160216183935-5f601ca6ef4d/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw=
+github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 h1:w1UutsfOrms1J05zt7ISrnJIXKzwaspym5BTKGx93EI=
+github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190726115642-cd293c93fd97 h1:bNE5ID4C3YOkROfvBjXJUG53gyb+8az3TQN02LqnGBk=
+github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190726115642-cd293c93fd97/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
+github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/astaxie/beego v1.9.0 h1:tPzS+D1oCLi+SEb/TLNRNYpCjaMVfAGoy9OTLwS5ul4=
+github.com/astaxie/beego v1.9.0/go.mod h1:0R4++1tUqERR0WYFWdfkcrsyoVBCG4DgpDGokT3yb+U=
+github.com/aws/aws-sdk-go v1.19.47 h1:ZEze0mpk8Fttrsz6UNLqhH/jRGYbMPfWFA2ILas4AmM=
+github.com/aws/aws-sdk-go v1.19.47/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
+github.com/beego/i18n v0.0.0-20140604031826-e87155e8f0c0 h1:fQaDnUQvBXHHQdGBu9hz8nPznB4BeiPQokvmQVjmNEw=
+github.com/beego/i18n v0.0.0-20140604031826-e87155e8f0c0/go.mod h1:KLeFCpAMq2+50NkXC8iiJxLLiiTfTqrGtKEVm+2fk7s=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
+github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bmatcuk/doublestar v1.1.1 h1:YroD6BJCZBYx06yYFEWvUuKVWQn3vLLQAVmDmvTSaiQ=
+github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/bugsnag/bugsnag-go v1.5.2 h1:fdaGJJEReigPzSE6HajOhpJwE2IEP/TdHDHXKGeOJtc=
+github.com/bugsnag/bugsnag-go v1.5.2/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/panicwrap v1.2.0 h1:OzrKrRvXis8qEvOkfcxNcYbOd2O7xXS2nnKMEMABFQA=
+github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/casbin/casbin v1.7.0 h1:PuzlE8w0JBg/DhIqnkF1Dewf3z+qmUZMVN07PonvVUQ=
+github.com/casbin/casbin v1.7.0/go.mod h1:c67qKN6Oum3UF5Q1+BByfFxkwKvhwW57ITjqwtzR1KE=
+github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY=
+github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/cfssl v0.0.0-20190510060611-9c027c93ba9e h1:ZtyhUG4s94BMUCdgvRZySr/AXYL5CDcjxhIV/83xJog=
+github.com/cloudflare/cfssl v0.0.0-20190510060611-9c027c93ba9e/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-oidc v2.0.0+incompatible h1:+RStIopZ8wooMx+Vs5Bt8zMXxV1ABl5LbakNExNmZIg=
+github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denisenkom/go-mssqldb v0.0.0-20190423183735-731ef375ac02 h1:PS3xfVPa8N84AzoWZHFCbA0+ikz4f4skktfjQoNMsgk=
+github.com/denisenkom/go-mssqldb v0.0.0-20190423183735-731ef375ac02/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
+github.com/dghubble/sling v1.1.0 h1:DLu20Bq2qsB9cI5Hldaxj+TMPEaPpPE8IR2kvD22Atg=
+github.com/dghubble/sling v1.1.0/go.mod h1:ZcPRuLm0qrcULW2gOrjXrAWgf76sahqSyxXyVOvkunE=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=
+github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go v0.0.0-20160303222718-d30aec9fd63c h1:Ggg7IiOtghyZzn3ozi31kPHpV6qSjMgmesXaWCijYNM=
+github.com/docker/go v0.0.0-20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK5QUlFslkQu2yW2TKzFZcXq/leZfM0UH5Q=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82 h1:X0fj836zx99zFu83v/M79DuBn84IL/Syx1SY6Y5ZEMA=
+github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
+github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc=
+github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gocraft/work v0.5.1 h1:3bRjMiOo6N4zcRgZWV3Y7uX7R22SF+A9bPTk4xRXr34=
+github.com/gocraft/work v0.5.1/go.mod h1:pc3n9Pb5FAESPPGfM0nL+7Q1xtgtRnF8rr/azzhQVlM=
+github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
+github.com/golang-migrate/migrate v3.3.0+incompatible h1:RuACw4Vio/z4aebypBmpU9xKKmSiZBiHOx/Ro1QLcYc=
+github.com/golang-migrate/migrate v3.3.0+incompatible/go.mod h1:IsVUlFN5puWOmXrqjgGUfIRIbU7mr8oNBE2tyERd9Wk=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
+github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/certificate-transparency-go v1.0.21 h1:Yf1aXowfZ2nuboBsg7iYGLmwsOARdV86pfH3g95wXmE=
+github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 h1:zLTLjkaOFEFIOxY5BWLFLwh+cL8vOBW4XJ2aqLE/Tf0=
+github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=
+github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/handlers v1.3.0 h1:tsg9qP3mjt1h4Roxp+M1paRjrVBfPSOpBuVclh6YluI=
+github.com/gorilla/handlers v1.3.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/graph-gophers/dataloader v5.0.0+incompatible h1:R+yjsbrNq1Mo3aPG+Z/EKYrXrXXUNJHOgbRt+U6jOug=
+github.com/graph-gophers/dataloader v5.0.0+incompatible/go.mod h1:jk4jk0c5ZISbKaMe8WsVopGB5/15GvGHMdMdPtwlRp4=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/jinzhu/gorm v1.9.8 h1:n5uvxqLepIP2R1XF7pudpt9Rv8I3m7G9trGxJVjLZ5k=
+github.com/jinzhu/gorm v1.9.8/go.mod h1:bdqTT3q6dhSph2K3pWxrHP6nqxuAp2yQ3KFtc3U3F84=
+github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a h1:eeaG9XMUvRBYXJi4pg1ZKM7nxc5AfXfojeLLW7O5J3k=
+github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.0.0 h1:6WV8LvwPpDhKjo5U9O6b4+xdG/jTXNPwlDme/MTo8Ns=
+github.com/jinzhu/now v1.0.0/go.mod h1:oHTiXerJ20+SfYcrdlBO7rzZRJWGwSTQ0iUY2jI6Gfc=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/justinas/alice v0.0.0-20171023064455-03f45bd4b7da h1:5y58+OCjoHCYB8182mpf/dEsq0vwTKPOo4zGfH0xW9A=
+github.com/justinas/alice v0.0.0-20171023064455-03f45bd4b7da/go.mod h1:oLH0CmIaxCGXD67VKGR5AacGXZSMznlmeqM8RzPrcY8=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/lib/pq v1.1.0 h1:/5u4a+KGJptBRqGzPvYQL9p0d/tPR4S31+Tnzj9lEO4=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
+github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
+github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/miekg/pkcs11 v0.0.0-20170220202408-7283ca79f35e h1:Gp+x7hv/aFRJUV6O0nu77E8N0T5PPfJGXjzQ9qgxVvE=
+github.com/miekg/pkcs11 v0.0.0-20170220202408-7283ca79f35e/go.mod h1:WCBAbTOdfhHhz7YXujeZMF7owC4tPb1naKFsgfUISjo=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=
+github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/opencontainers/go-digest v1.0.0-rc0 h1:YHPGfp+qlmg7loi376Jk5jNEgjgUUIdXGFsel8aFHnA=
+github.com/opencontainers/go-digest v1.0.0-rc0/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU=
+github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v0.9.4 h1:Y8E/JaaPbmFSW2V81Ab/d8yZFYQQGbni1b1jPcG9Y6A=
+github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/robfig/cron v1.0.0 h1:slmQxIUH6U9ruw4XoJ7C2pyyx4yYeiHx8S9pNootHsM=
+github.com/robfig/cron v1.0.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/theupdateframework/notary v0.6.1 h1:7wshjstgS9x9F5LuB1L5mBI2xNMObWqjz+cjWoom6l0=
+github.com/theupdateframework/notary v0.6.1/go.mod h1:MOfgIfmox8s7/7fduvB2xyPPMJCrjRLRizA8OFwpnKY=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 h1:nn6Zav2sOQHCFJHEspya8KqxhFwKci30UxHy3HXPTyQ=
+gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/dancannon/gorethink.v3 v3.0.5 h1:/g7PWP7zUS6vSNmHSDbjCHQh1Rqn8Jy6zSMQxAsBSMQ=
+gopkg.in/dancannon/gorethink.v3 v3.0.5/go.mod h1:GXsi1e3N2OcKhcP6nsYABTiUejbWMFO4GY5a4pEaeEc=
+gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg=
+gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU=
+gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk=
+gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ldap.v2 v2.5.0 h1:1rO3ojzsHUk+gq4ZYhC4Pg+EzWaaKIV8+DJwExS5/QQ=
+gopkg.in/ldap.v2 v2.5.0/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.3.0 h1:nLzhkFyl5bkblqYBoiWJUt5JkWOzmiaBtCxdJAqJd3U=
+gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+k8s.io/api v0.0.0-20190222213804-5cb15d344471 h1:MzQGt8qWQCR+39kbYRd0uQqsvSidpYqJLFeWiJ9l4OE=
+k8s.io/api v0.0.0-20190222213804-5cb15d344471/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
+k8s.io/apimachinery v0.0.0-20180704011316-f534d624797b h1:IEJ1jhyB5TOkHdq5dBEdef+MV3YAK9UYckpKYXI4Vsw=
+k8s.io/apimachinery v0.0.0-20180704011316-f534d624797b/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
+k8s.io/client-go v8.0.0+incompatible h1:tTI4hRmb1DRMl4fG6Vclfdi6nTM82oIrTT7HfitmxC4=
+k8s.io/client-go v8.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
+k8s.io/helm v2.9.1+incompatible h1:IafoSdCxLzN1yqabsnwwAMSyjuplWVO/jy+MTyHMLIE=
+k8s.io/helm v2.9.1+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI=
diff --git a/src/jobservice/common/rds/keys.go b/src/jobservice/common/rds/keys.go
index db6ae90e0..4f9f09ba4 100644
--- a/src/jobservice/common/rds/keys.go
+++ b/src/jobservice/common/rds/keys.go
@@ -34,7 +34,7 @@ func RedisKeyScheduled(namespace string) string {
// RedisKeyLastPeriodicEnqueue returns the key of the timestamp of the last periodic enqueue.
func RedisKeyLastPeriodicEnqueue(namespace string) string {
- return RedisNamespacePrefix(namespace) + "last_periodic_enqueue"
+ return RedisNamespacePrefix(namespace) + "last_periodic_enqueue_h"
}
// KeyNamespacePrefix returns the base key derived from the namespace.
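
Renaming the suffix to "_h" gives the value a brand-new key, so whatever was stored under the old "last_periodic_enqueue" name is simply left behind rather than read back with the wrong shape (the suffix suggests the value is now a Redis hash, though that is an inference from the name). A sketch of the resulting key, assuming the common "namespace:" prefix shape; the real logic lives in RedisNamespacePrefix:

package main

import (
	"fmt"
	"strings"
)

// Assumed stand-in for RedisNamespacePrefix: append ":" if missing.
func redisNamespacePrefix(ns string) string {
	if !strings.HasSuffix(ns, ":") {
		ns += ":"
	}
	return ns
}

func main() {
	// Prints: harbor_job_service_namespace:last_periodic_enqueue_h
	fmt.Println(redisNamespacePrefix("harbor_job_service_namespace") + "last_periodic_enqueue_h")
}
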
diff --git a/src/jobservice/config.yml b/src/jobservice/config.yml
index 562317698..745e53f8b 100644
--- a/src/jobservice/config.yml
+++ b/src/jobservice/config.yml
@@ -19,8 +19,8 @@ worker_pool:
redis_pool:
#redis://[arbitrary_username:password@]ipaddress:port/database_index
#or ipaddress:port[,weight,password,database_index]
- redis_url: "localhost:6379"
- namespace: "harbor_job_service"
+ redis_url: "redis://localhost:6379/2"
+ namespace: "harbor_job_service_namespace"
#Loggers for the running job
job_loggers:
@@ -29,11 +29,11 @@ job_loggers:
- name: "FILE"
level: "DEBUG"
settings: # Customized settings of logger
- base_dir: "/Users/szou/tmp/job_logs"
+ base_dir: "/tmp/job_logs"
sweeper:
duration: 1 #days
settings: # Customized settings of sweeper
- work_dir: "/Users/szou/tmp/job_logs"
+ work_dir: "/tmp/job_logs"
#Loggers for the job service
loggers:
diff --git a/src/jobservice/config/config.go b/src/jobservice/config/config.go
index eec9c10f1..56614737b 100644
--- a/src/jobservice/config/config.go
+++ b/src/jobservice/config/config.go
@@ -24,7 +24,7 @@ import (
"strings"
"github.com/goharbor/harbor/src/jobservice/common/utils"
- "gopkg.in/yaml.v2"
+ yaml "gopkg.in/yaml.v2"
)
const (
@@ -37,6 +37,7 @@ const (
jobServiceRedisURL = "JOB_SERVICE_POOL_REDIS_URL"
jobServiceRedisNamespace = "JOB_SERVICE_POOL_REDIS_NAMESPACE"
jobServiceAuthSecret = "JOBSERVICE_SECRET"
+ coreURL = "CORE_URL"
// JobServiceProtocolHTTPS points to the 'https' protocol
JobServiceProtocolHTTPS = "https"
@@ -163,6 +164,11 @@ func GetAuthSecret() string {
return utils.ReadEnv(jobServiceAuthSecret)
}
+// GetCoreURL gets the core URL from the env
+func GetCoreURL() string {
+ return utils.ReadEnv(coreURL)
+}
+
// GetUIAuthSecret get the auth secret of UI side
func GetUIAuthSecret() string {
return utils.ReadEnv(uiAuthSecret)
diff --git a/src/jobservice/config/config_test.go b/src/jobservice/config/config_test.go
index 7385156fc..d6676b2e6 100644
--- a/src/jobservice/config/config_test.go
+++ b/src/jobservice/config/config_test.go
@@ -14,11 +14,12 @@
package config
import (
+ "os"
+ "testing"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
- "os"
- "testing"
)
// ConfigurationTestSuite tests the configuration loading
@@ -84,6 +85,7 @@ func (suite *ConfigurationTestSuite) TestConfigLoadingWithEnv() {
)
assert.Equal(suite.T(), "js_secret", GetAuthSecret(), "expect auth secret 'js_secret' but got '%s'", GetAuthSecret())
assert.Equal(suite.T(), "core_secret", GetUIAuthSecret(), "expect auth secret 'core_secret' but got '%s'", GetUIAuthSecret())
+ assert.Equal(suite.T(), "core_url", GetCoreURL(), "expect core url 'core_url' but got '%s'", GetCoreURL())
}
// TestDefaultConfig ...
@@ -134,6 +136,7 @@ func setENV() error {
err = os.Setenv("JOB_SERVICE_POOL_REDIS_NAMESPACE", "ut_namespace")
err = os.Setenv("JOBSERVICE_SECRET", "js_secret")
err = os.Setenv("CORE_SECRET", "core_secret")
+ err = os.Setenv("CORE_URL", "core_url")
return err
}
diff --git a/src/jobservice/hook/hook_client.go b/src/jobservice/hook/hook_client.go
index 075614322..820880f7b 100644
--- a/src/jobservice/hook/hook_client.go
+++ b/src/jobservice/hook/hook_client.go
@@ -21,18 +21,10 @@ import (
"io/ioutil"
"net"
"net/http"
- "net/url"
- "os"
"strings"
"time"
"context"
- "github.com/goharbor/harbor/src/jobservice/common/utils"
-)
-
-const (
- proxyEnvHTTP = "http_proxy"
- proxyEnvHTTPS = "https_proxy"
)
// Client for handling the hook events
@@ -60,19 +52,7 @@ func NewClient(ctx context.Context) Client {
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
- }
-
- // Get the http/https proxies
- proxyAddr, ok := os.LookupEnv(proxyEnvHTTP)
- if !ok {
- proxyAddr, ok = os.LookupEnv(proxyEnvHTTPS)
- }
-
- if ok && !utils.IsEmptyStr(proxyAddr) {
- proxyURL, err := url.Parse(proxyAddr)
- if err == nil {
- transport.Proxy = http.ProxyURL(proxyURL)
- }
+ Proxy: http.ProxyFromEnvironment,
}
client := &http.Client{
diff --git a/src/jobservice/job/impl/notification/webhook_job.go b/src/jobservice/job/impl/notification/webhook_job.go
new file mode 100644
index 000000000..b8c56966b
--- /dev/null
+++ b/src/jobservice/job/impl/notification/webhook_job.go
@@ -0,0 +1,99 @@
+package notification
+
+import (
+ "bytes"
+ "fmt"
+ commonhttp "github.com/goharbor/harbor/src/common/http"
+ "github.com/goharbor/harbor/src/jobservice/job"
+ "github.com/goharbor/harbor/src/jobservice/logger"
+ "net/http"
+ "os"
+ "strconv"
+)
+
+// Max retry has the same meaning as max fails.
+const maxFails = "JOBSERVICE_WEBHOOK_JOB_MAX_RETRY"
+
+// WebhookJob implements the job interface; it sends notifications over HTTP or HTTPS.
+type WebhookJob struct {
+ client *http.Client
+ logger logger.Interface
+ ctx job.Context
+}
+
+// MaxFails returns how many times this job is allowed to fail; the value is read from the environment, falling back to 10.
+func (wj *WebhookJob) MaxFails() uint {
+ if maxFails, exist := os.LookupEnv(maxFails); exist {
+ result, err := strconv.ParseUint(maxFails, 10, 32)
+ // Unable to log error message because the logger isn't initialized when calling this function.
+ if err == nil {
+ return uint(result)
+ }
+ }
+
+ // The default max fails count is 10; the corresponding max retry interval is around 3h,
+ // large enough for most notifications to eventually succeed.
+ return 10
+}
+
+// ShouldRetry ...
+func (wj *WebhookJob) ShouldRetry() bool {
+ return true
+}
+
+// Validate implements the interface in job/Interface
+func (wj *WebhookJob) Validate(params job.Parameters) error {
+ return nil
+}
+
+// Run implements the interface in job/Interface
+func (wj *WebhookJob) Run(ctx job.Context, params job.Parameters) error {
+ if err := wj.init(ctx, params); err != nil {
+ return err
+ }
+
+ return wj.execute(ctx, params)
+}
+
+// init webhook job
+func (wj *WebhookJob) init(ctx job.Context, params map[string]interface{}) error {
+ wj.logger = ctx.GetLogger()
+ wj.ctx = ctx
+
+ // default insecureSkipVerify is false
+ insecureSkipVerify := false
+ if v, ok := params["skip_cert_verify"]; ok {
+ insecureSkipVerify = v.(bool)
+ }
+ wj.client = &http.Client{
+ Transport: commonhttp.GetHTTPTransport(insecureSkipVerify),
+ }
+
+ return nil
+}
+
+// execute webhook job
+func (wj *WebhookJob) execute(ctx job.Context, params map[string]interface{}) error {
+ payload := params["payload"].(string)
+ address := params["address"].(string)
+
+ req, err := http.NewRequest(http.MethodPost, address, bytes.NewReader([]byte(payload)))
+ if err != nil {
+ return err
+ }
+ if v, ok := params["auth_header"]; ok && len(v.(string)) > 0 {
+ req.Header.Set("Authorization", v.(string))
+ }
+ req.Header.Set("Content-Type", "application/json")
+
+ resp, err := wj.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ return fmt.Errorf("webhook job(target: %s) response code is %d", address, resp.StatusCode)
+ }
+
+ return nil
+}
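
MaxFails above reads its retry budget from an environment variable and silently falls back to the default on malformed input. The same pattern in isolation, with an illustrative helper name; the env key and default mirror the code above:

```go
// Standalone sketch of the env-overridable retry budget used by WebhookJob.
package main

import (
	"fmt"
	"os"
	"strconv"
)

const defaultMaxFails = 10

func maxFailsFromEnv(key string) uint {
	if v, ok := os.LookupEnv(key); ok {
		if n, err := strconv.ParseUint(v, 10, 32); err == nil {
			return uint(n)
		}
	}
	return defaultMaxFails // unset or malformed: fall back to the default
}

func main() {
	fmt.Println(maxFailsFromEnv("JOBSERVICE_WEBHOOK_JOB_MAX_RETRY"))
}
```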
diff --git a/src/jobservice/job/impl/notification/webhook_job_test.go b/src/jobservice/job/impl/notification/webhook_job_test.go
new file mode 100644
index 000000000..d5a1db69a
--- /dev/null
+++ b/src/jobservice/job/impl/notification/webhook_job_test.go
@@ -0,0 +1,75 @@
+package notification
+
+import (
+ "github.com/goharbor/harbor/src/jobservice/job/impl"
+ "github.com/stretchr/testify/assert"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+)
+
+func TestMaxFails(t *testing.T) {
+ rep := &WebhookJob{}
+ // test default max fails
+ assert.Equal(t, uint(10), rep.MaxFails())
+
+ // test user defined max fails
+ _ = os.Setenv(maxFails, "15")
+ assert.Equal(t, uint(15), rep.MaxFails())
+
+ // test user defined wrong max fails
+ _ = os.Setenv(maxFails, "abc")
+ assert.Equal(t, uint(10), rep.MaxFails())
+}
+
+func TestShouldRetry(t *testing.T) {
+ rep := &WebhookJob{}
+ assert.True(t, rep.ShouldRetry())
+}
+
+func TestValidate(t *testing.T) {
+ rep := &WebhookJob{}
+ assert.Nil(t, rep.Validate(nil))
+}
+
+func TestRun(t *testing.T) {
+ rep := &WebhookJob{}
+
+ // test webhook request
+ ts := httptest.NewServer(
+ http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ body, _ := ioutil.ReadAll(r.Body)
+
+ // test request method
+ assert.Equal(t, http.MethodPost, r.Method)
+ // test request header
+ assert.Equal(t, "auth_test", r.Header.Get("Authorization"))
+ // test request body
+ assert.Equal(t, string(body), `{"key": "value"}`)
+ }))
+ defer ts.Close()
+ params := map[string]interface{}{
+ "skip_cert_verify": true,
+ "payload": `{"key": "value"}`,
+ "address": ts.URL,
+ "auth_header": "auth_test",
+ }
+ // test correct webhook response
+ assert.Nil(t, rep.Run(&impl.Context{}, params))
+
+ tsWrong := httptest.NewServer(
+ http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusUnauthorized)
+ }))
+ defer tsWrong.Close()
+ paramsWrong := map[string]interface{}{
+ "skip_cert_verify": true,
+ "payload": `{"key": "value"}`,
+ "address": tsWrong.URL,
+ "auth_header": "auth_test",
+ }
+ // test incorrect webhook response
+ assert.NotNil(t, rep.Run(&impl.Context{}, paramsWrong))
+}
diff --git a/src/jobservice/job/impl/replication/replication.go b/src/jobservice/job/impl/replication/replication.go
index 0543c90ab..849c5c0a2 100644
--- a/src/jobservice/job/impl/replication/replication.go
+++ b/src/jobservice/job/impl/replication/replication.go
@@ -34,6 +34,16 @@ import (
_ "github.com/goharbor/harbor/src/replication/adapter/native"
// register the Huawei adapter
_ "github.com/goharbor/harbor/src/replication/adapter/huawei"
+ // register the Google Gcr adapter
+ _ "github.com/goharbor/harbor/src/replication/adapter/googlegcr"
+ // register the AwsEcr adapter
+ _ "github.com/goharbor/harbor/src/replication/adapter/awsecr"
+ // register the AzureAcr adapter
+ _ "github.com/goharbor/harbor/src/replication/adapter/azurecr"
+ // register the AliACR adapter
+ _ "github.com/goharbor/harbor/src/replication/adapter/aliacr"
+ // register the Helm Hub adapter
+ _ "github.com/goharbor/harbor/src/replication/adapter/helmhub"
)
// Replication implements the job interface
diff --git a/src/jobservice/job/known_jobs.go b/src/jobservice/job/known_jobs.go
index 5fd50cde0..307141e2d 100644
--- a/src/jobservice/job/known_jobs.go
+++ b/src/jobservice/job/known_jobs.go
@@ -30,4 +30,8 @@ const (
Replication = "REPLICATION"
// ReplicationScheduler : the name of the replication scheduler job in job service
ReplicationScheduler = "IMAGE_REPLICATE"
+ // WebhookJob : the name of the webhook job in job service
+ WebhookJob = "WEBHOOK"
+ // Retention : the name of the retention job
+ Retention = "RETENTION"
)
diff --git a/src/jobservice/main.go b/src/jobservice/main.go
index a5a1706f3..ca146b102 100644
--- a/src/jobservice/main.go
+++ b/src/jobservice/main.go
@@ -19,7 +19,6 @@ import (
"errors"
"flag"
"fmt"
- "os"
"github.com/goharbor/harbor/src/common"
comcfg "github.com/goharbor/harbor/src/common/config"
@@ -64,7 +63,7 @@ func main() {
if utils.IsEmptyStr(secret) {
return nil, errors.New("empty auth secret")
}
- coreURL := os.Getenv("CORE_URL")
+ coreURL := config.GetCoreURL()
configURL := coreURL + common.CoreConfigPath
cfgMgr := comcfg.NewRESTCfgManager(configURL, secret)
jobCtx := impl.NewContext(ctx, cfgMgr)
diff --git a/src/jobservice/migration/manager.go b/src/jobservice/migration/manager.go
new file mode 100644
index 000000000..2c540d579
--- /dev/null
+++ b/src/jobservice/migration/manager.go
@@ -0,0 +1,153 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package migration
+
+import (
+ "github.com/Masterminds/semver"
+ "reflect"
+
+ "github.com/gomodule/redigo/redis"
+
+ "github.com/goharbor/harbor/src/jobservice/logger"
+ "github.com/pkg/errors"
+)
+
+// Manager for managing the related migrators
+type Manager interface {
+ // Register the specified migrator to the execution chain
+ Register(migratorFactory MigratorFactory)
+
+ // Migrate data
+ Migrate() error
+}
+
+// MigratorChainNode is a wrapper to append the migrator to the chain with a next reference
+type MigratorChainNode struct {
+ // Migrator implementation
+ migrator RDBMigrator
+ // Reference to the next migrator in the chain, if any
+ next *MigratorChainNode
+}
+
+// BasicManager is the default implementation of manager interface
+type BasicManager struct {
+ // The head of migrator chain
+ head *MigratorChainNode
+ // Pool for connecting to redis
+ pool *redis.Pool
+ // RDB namespace
+ namespace string
+}
+
+// New creates a basic manager
+func New(pool *redis.Pool, ns string) Manager {
+ return &BasicManager{
+ pool: pool,
+ namespace: ns,
+ }
+}
+
+// Register the migrator to the chain
+func (bm *BasicManager) Register(migratorFactory MigratorFactory) {
+ if migratorFactory == nil {
+ return // ignore, do nothing
+ }
+
+ migrator, err := migratorFactory(bm.pool, bm.namespace)
+ if err != nil {
+ logger.Errorf("migrator register error: %s", err)
+ return
+ }
+
+ newNode := &MigratorChainNode{
+ migrator: migrator,
+ next: nil,
+ }
+
+ if bm.head == nil {
+ bm.head = newNode
+ return
+ }
+
+ // Append to the tail so that earlier registrations are preserved
+ tail := bm.head
+ for tail.next != nil {
+ tail = tail.next
+ }
+ tail.next = newNode
+}
+
+// Migrate data
+func (bm *BasicManager) Migrate() error {
+ conn := bm.pool.Get()
+ defer func() {
+ _ = conn.Close()
+ }()
+
+ // Read schema version first
+ v, err := redis.String(conn.Do("GET", VersionKey(bm.namespace)))
+ if err != nil && err != redis.ErrNil {
+ return errors.Wrap(err, "read schema version failed")
+ }
+
+ if len(v) > 0 {
+ current, err := semver.NewVersion(v)
+ if err != nil {
+ return errors.Wrap(err, "malformed schema version")
+ }
+ nowV, _ := semver.NewVersion(SchemaVersion)
+
+ diff := nowV.Compare(current)
+ if diff < 0 {
+ return errors.Errorf("the schema version of migrator is smaller that the one in the rdb: %s<%s", nowV.String(), current.String())
+ } else if diff == 0 {
+ logger.Info("No migration needed")
+ return nil
+ }
+ }
+
+ if bm.head == nil {
+ logger.Warning("No migrator registered, passed migration")
+ return nil
+ }
+
+ logger.Info("Process for migrating data is started")
+
+ h := bm.head
+ for h != nil {
+ meta := h.migrator.Metadata()
+ if meta == nil {
+ // Make metadata required
+ return errors.Errorf("no metadata provided for the migrator %s", reflect.TypeOf(h.migrator).String())
+ }
+
+ logger.Infof("Migrate %s from %s to %s", meta.ObjectRef, meta.FromVersion, meta.ToVersion)
+ if err := h.migrator.Migrate(); err != nil {
+ return errors.Wrap(err, "migration chain calling failed")
+ }
+
+ // Next one if existing
+ h = h.next
+ }
+
+ // Set schema version
+ if _, err = conn.Do("SET", VersionKey(bm.namespace), SchemaVersion); err != nil {
+ return errors.Wrap(err, "write schema version failed")
+ }
+
+ logger.Infof("Data schema version upgraded to %s", SchemaVersion)
+
+ return nil
+}
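
Typical use of this manager is a register-then-migrate sequence; the runtime/bootstrap.go hunk later in this diff wires it up exactly like this:

```go
// Excerpt mirroring runtime/bootstrap.go below: register the policy migrator
// and run the chain before the worker pool starts.
rdbMigrator := migration.New(redisPool, namespace)
rdbMigrator.Register(migration.PolicyMigratorFactory)
if err := rdbMigrator.Migrate(); err != nil {
	// Migration failures are logged and do not block startup
	logger.Error(err)
}
```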
diff --git a/src/jobservice/migration/manager_test.go b/src/jobservice/migration/manager_test.go
new file mode 100644
index 000000000..c63b0a076
--- /dev/null
+++ b/src/jobservice/migration/manager_test.go
@@ -0,0 +1,200 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package migration
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/jobservice/common/rds"
+ "github.com/goharbor/harbor/src/jobservice/common/utils"
+ "github.com/goharbor/harbor/src/jobservice/job"
+ "github.com/goharbor/harbor/src/jobservice/tests"
+ "github.com/gomodule/redigo/redis"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+// ManagerTestSuite tests functions of manager
+type ManagerTestSuite struct {
+ suite.Suite
+
+ pool *redis.Pool
+ namespace string
+
+ manager Manager
+
+ jobID string
+ numericID int64
+}
+
+// TestManagerTestSuite is entry of executing ManagerTestSuite
+func TestManagerTestSuite(t *testing.T) {
+ suite.Run(t, new(ManagerTestSuite))
+}
+
+// SetupSuite sets up env for the test suite
+func (suite *ManagerTestSuite) SetupSuite() {
+ suite.pool = tests.GiveMeRedisPool()
+ suite.namespace = tests.GiveMeTestNamespace()
+
+ suite.manager = New(suite.pool, suite.namespace)
+}
+
+// SetupTest sets up env for each test case
+func (suite *ManagerTestSuite) SetupTest() {
+ // Mock fake data
+ conn := suite.pool.Get()
+ defer func() {
+ _ = conn.Close()
+ }()
+
+ id := utils.MakeIdentifier()
+ suite.jobID = id
+ // Mock stats of periodic job
+ args := []interface{}{
+ rds.KeyJobStats(suite.namespace, id),
+ "status_hook",
+ "http://core:8080/hook",
+ "id",
+ id,
+ "name",
+ job.ImageGC,
+ "kind",
+ job.KindPeriodic,
+ "unique",
+ 0,
+ "status",
+ job.SuccessStatus.String(), // v1.6 issue
+ "ref_link",
+ fmt.Sprintf("/api/v1/jobs/%s", id),
+ "enqueue_time",
+ time.Now().Unix(),
+ "update_time",
+ time.Now().Unix(),
+ "run_at",
+ time.Now().Add(5 * time.Minute).Unix(),
+ "cron_spec",
+ "0 0 17 * * *",
+ "multiple_executions", // V1.7
+ 1,
+ }
+ reply, err := redis.String(conn.Do("HMSET", args...))
+ require.NoError(suite.T(), err, "mock job stats data error")
+ require.Equal(suite.T(), "ok", strings.ToLower(reply), "ok expected")
+
+ // Mock periodic job policy object
+ params := make(map[string]interface{})
+ params["redis_url_reg"] = "redis://redis:6379/1"
+
+ policy := make(map[string]interface{})
+ policy["job_name"] = job.ImageGC
+ policy["job_params"] = params
+ policy["cron_spec"] = "0 0 17 * * *"
+
+ rawJSON, err := json.Marshal(&policy)
+ require.NoError(suite.T(), err, "mock periodic job policy error")
+
+ policy["cron_spec"] = "0 0 8 * * *"
+ duplicatedRawJSON, err := json.Marshal(&policy)
+ require.NoError(suite.T(), err, "mock duplicated periodic job policy error")
+
+ score := time.Now().Unix()
+ suite.numericID = score
+ zaddArgs := []interface{}{
+ rds.KeyPeriodicPolicy(suite.namespace),
+ score,
+ rawJSON,
+ score - 10,
+ duplicatedRawJSON, // duplicated one
+ }
+ count, err := redis.Int(conn.Do("ZADD", zaddArgs...))
+ require.NoError(suite.T(), err, "add raw policy error")
+ require.Equal(suite.T(), 2, count)
+
+ // Mock key score mapping
+ keyScoreArgs := []interface{}{
+ fmt.Sprintf("%s%s", rds.KeyNamespacePrefix(suite.namespace), "period:key_score"),
+ score,
+ id,
+ }
+
+ count, err = redis.Int(conn.Do("ZADD", keyScoreArgs...))
+ require.NoError(suite.T(), err, "add key score mapping error")
+ require.Equal(suite.T(), 1, count)
+}
+
+// TearDownTest cleans up env after each test case
+func (suite *ManagerTestSuite) TearDownTest() {
+ conn := suite.pool.Get()
+ defer func() {
+ _ = conn.Close()
+ }()
+
+ err := tests.ClearAll(suite.namespace, conn)
+ assert.NoError(suite.T(), err, "clear all of redis db error")
+}
+
+// TestManager test the basic functions of the manager
+func (suite *ManagerTestSuite) TestManager() {
+ require.NotNil(suite.T(), suite.manager, "nil migration manager")
+
+ suite.manager.Register(PolicyMigratorFactory)
+ err := suite.manager.Migrate()
+ require.NoError(suite.T(), err, "migrating rdb error")
+
+ // Check data
+ conn := suite.pool.Get()
+ defer func() {
+ _ = conn.Close()
+ }()
+
+ count, err := redis.Int(conn.Do("ZCARD", rds.KeyPeriodicPolicy(suite.namespace)))
+ assert.NoError(suite.T(), err, "get count of policies error")
+ assert.Equal(suite.T(), 1, count)
+
+ innerConn := suite.pool.Get()
+ p, err := getPeriodicPolicy(suite.numericID, innerConn, suite.namespace)
+ assert.NoError(suite.T(), err, "get migrated policy error")
+ assert.NotEmpty(suite.T(), p.ID, "ID of policy")
+ assert.NotEmpty(suite.T(), p.WebHookURL, "Web hook URL of policy")
+
+ key := fmt.Sprintf("%s%s", rds.KeyNamespacePrefix(suite.namespace), "period:key_score")
+ count, err = redis.Int(conn.Do("EXISTS", key))
+ assert.NoError(suite.T(), err, "check existence of key score mapping error")
+ assert.Equal(suite.T(), 0, count)
+
+ hmGetArgs := []interface{}{
+ rds.KeyJobStats(suite.namespace, suite.jobID),
+ "id",
+ "status",
+ "web_hook_url",
+ "numeric_policy_id",
+ "multiple_executions",
+ "status_hook",
+ }
+ fields, err := redis.Values(conn.Do("HMGET", hmGetArgs...))
+ assert.NoError(suite.T(), err, "check migrated job stats error")
+ assert.Equal(suite.T(), suite.jobID, toString(fields[0]), "check job ID")
+ assert.Equal(suite.T(), job.ScheduledStatus.String(), toString(fields[1]), "check job status")
+ assert.Equal(suite.T(), "http://core:8080/hook", toString(fields[2]), "check web hook URL")
+ assert.Equal(suite.T(), suite.numericID, toInt(fields[3]), "check numeric ID")
+ assert.Nil(suite.T(), fields[4], "'multiple_executions' removed")
+ assert.Nil(suite.T(), fields[5], "'status_hook' removed")
+}
diff --git a/src/jobservice/migration/migrator.go b/src/jobservice/migration/migrator.go
new file mode 100644
index 000000000..e7535c692
--- /dev/null
+++ b/src/jobservice/migration/migrator.go
@@ -0,0 +1,38 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package migration
+
+import (
+ "github.com/gomodule/redigo/redis"
+)
+
+// RDBMigrator defines the action to migrate redis data
+type RDBMigrator interface {
+ // Metadata info of the migrator
+ Metadata() *MigratorMeta
+
+ // Migrate executes the real migration work
+ Migrate() error
+}
+
+// MigratorMeta keeps the base info of the migrator
+type MigratorMeta struct {
+ FromVersion string
+ ToVersion string
+ ObjectRef string
+}
+
+// MigratorFactory is factory function to create RDBMigrator interface
+type MigratorFactory func(pool *redis.Pool, namespace string) (RDBMigrator, error)
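
A migrator plugs into the manager by satisfying RDBMigrator and exposing a MigratorFactory. A minimal, do-nothing example; the migrator name and versions are hypothetical:

```go
package migration

import "github.com/gomodule/redigo/redis"

type noopMigrator struct {
	pool      *redis.Pool
	namespace string
}

// NoopMigratorFactory satisfies MigratorFactory.
func NoopMigratorFactory(pool *redis.Pool, namespace string) (RDBMigrator, error) {
	return &noopMigrator{pool: pool, namespace: namespace}, nil
}

// Metadata is required by the manager; migrators without it are rejected.
func (m *noopMigrator) Metadata() *MigratorMeta {
	return &MigratorMeta{
		FromVersion: "1.8.0",
		ToVersion:   "1.8.1",
		ObjectRef:   "{namespace}:example",
	}
}

// Migrate is where a real migrator reads and rewrites keys; see
// migrator_v180.go below for a full implementation.
func (m *noopMigrator) Migrate() error {
	return nil
}
```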
diff --git a/src/jobservice/migration/migrator_v180.go b/src/jobservice/migration/migrator_v180.go
new file mode 100644
index 000000000..2a9a1c6b6
--- /dev/null
+++ b/src/jobservice/migration/migrator_v180.go
@@ -0,0 +1,381 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package migration
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strconv"
+
+ "github.com/goharbor/harbor/src/jobservice/common/rds"
+ "github.com/goharbor/harbor/src/jobservice/common/utils"
+ "github.com/goharbor/harbor/src/jobservice/job"
+ "github.com/goharbor/harbor/src/jobservice/logger"
+ "github.com/goharbor/harbor/src/jobservice/period"
+ "github.com/gomodule/redigo/redis"
+ "github.com/pkg/errors"
+)
+
+// PolicyMigrator migrate the cron job policy to new schema
+type PolicyMigrator struct {
+ // namespace of rdb
+ namespace string
+
+ // Pool for connecting to redis
+ pool *redis.Pool
+}
+
+// PolicyMigratorFactory is a factory func to create PolicyMigrator
+func PolicyMigratorFactory(pool *redis.Pool, namespace string) (RDBMigrator, error) {
+ if pool == nil {
+ return nil, errors.New("PolicyMigratorFactory: missing pool")
+ }
+
+ if utils.IsEmptyStr(namespace) {
+ return nil, errors.New("PolicyMigratorFactory: missing namespace")
+ }
+
+ return &PolicyMigrator{
+ namespace: namespace,
+ pool: pool,
+ }, nil
+}
+
+// Metadata returns the base information of this migrator
+func (pm *PolicyMigrator) Metadata() *MigratorMeta {
+ return &MigratorMeta{
+ FromVersion: "<1.8.0",
+ ToVersion: "1.8.1",
+ ObjectRef: "{namespace}:period:policies",
+ }
+}
+
+// Migrate data
+func (pm *PolicyMigrator) Migrate() error {
+ conn := pm.pool.Get()
+ defer func() {
+ if err := conn.Close(); err != nil {
+ logger.Errorf("close redis connection error: %s", err)
+ }
+ }()
+
+ allJobIDs, err := getAllJobStatsIDs(conn, pm.namespace)
+ if err != nil {
+ return errors.Wrap(err, "get job stats list error")
+ }
+
+ args := []interface{}{
+ "id_placeholder",
+ "id",
+ "kind",
+ "status",
+ "status_hook", // valid for 1.6 and 1.7,
+ "multiple_executions", // valid for 1.7
+ "numeric_policy_id", // valid for 1.8
+ }
+
+ count := 0
+ for _, fullID := range allJobIDs {
+ args[0] = fullID
+ values, err := redis.Values(conn.Do("HMGET", args...))
+ if err != nil {
+ logger.Errorf("Get stats fields of job %s failed with error: %s", fullID, err)
+ continue
+ }
+
+ pID := toString(values[0])
+ kind := toString(values[1])
+
+ if !utils.IsEmptyStr(pID) && job.KindPeriodic == kind {
+ logger.Debugf("Periodic job found: %s", pID)
+
+ // Data requires migration
+ // Missing 'numeric_policy_id' which is introduced in 1.8
+ if values[5] == nil {
+ logger.Infof("Migrate periodic job stats data is started: %s", pID)
+
+ numericPolicyID, err := getScoreByID(pID, conn, pm.namespace)
+ if err != nil {
+ logger.Errorf("Get numeric ID of periodic job policy failed with error: %s", err)
+ continue
+ }
+
+ // Transaction
+ err = conn.Send("MULTI")
+ setArgs := []interface{}{
+ fullID,
+ "status",
+ job.ScheduledStatus.String(), // make sure the status of periodic job is "Scheduled"
+ "numeric_policy_id",
+ numericPolicyID,
+ }
+ // If a status hook exists
+ hookURL := toString(values[3])
+ if !utils.IsEmptyStr(hookURL) {
+ setArgs = append(setArgs, "web_hook_url", hookURL)
+ }
+ // Set fields
+ err = conn.Send("HMSET", setArgs...)
+
+ // Remove useless fields
+ rmArgs := []interface{}{
+ fullID,
+ "status_hook",
+ "multiple_executions",
+ }
+ err = conn.Send("HDEL", rmArgs...)
+
+ // Update periodic policy model
+ // conn is working, we need new conn
+ // this inner connection will be closed by the calling method
+ innerConn := pm.pool.Get()
+
+ policy, er := getPeriodicPolicy(numericPolicyID, innerConn, pm.namespace)
+ if er == nil {
+ policy.ID = pID
+ if !utils.IsEmptyStr(hookURL) {
+ // Copy web hook URL
+ policy.WebHookURL = hookURL
+ }
+
+ if rawJSON, er := policy.Serialize(); er == nil {
+ // Remove the old one first
+ err = conn.Send("ZREMRANGEBYSCORE", rds.KeyPeriodicPolicy(pm.namespace), numbericPolicyID, numbericPolicyID)
+ // Save back to the rdb
+ err = conn.Send("ZADD", rds.KeyPeriodicPolicy(pm.namespace), numbericPolicyID, rawJSON)
+ } else {
+ logger.Errorf("Serialize policy %s failed with error: %s", pID, er)
+ }
+ } else {
+ logger.Errorf("Get periodic policy %s failed with error: %s", pID, er)
+ }
+
+ // Check error before executing
+ if err != nil {
+ logger.Errorf("Build redis transaction failed with error: %s", err)
+ continue
+ }
+
+ // Exec
+ if _, err := conn.Do("EXEC"); err != nil {
+ logger.Errorf("Migrate periodic job %s failed with error: %s", pID, err)
+ continue
+ }
+
+ count++
+ logger.Infof("Migrate periodic job stats data is completed: %s", pID)
+ }
+ }
+ }
+
+ logger.Infof("Migrate %d periodic policies", count)
+
+ delScoreZset(conn, pm.namespace)
+
+ return clearDuplicatedPolicies(conn, pm.namespace)
+}
+
+// getAllJobStatsIDs gets the IDs of all existing jobs
+func getAllJobStatsIDs(conn redis.Conn, ns string) ([]string, error) {
+ pattern := rds.KeyJobStats(ns, "*")
+ args := []interface{}{
+ 0,
+ "MATCH",
+ pattern,
+ "COUNT",
+ 100,
+ }
+
+ allFullIDs := make([]interface{}, 0)
+
+ for {
+ // Use SCAN to iterate the IDs
+ values, err := redis.Values(conn.Do("SCAN", args...))
+ if err != nil {
+ return nil, err
+ }
+
+ // In case something wrong happened
+ if len(values) != 2 {
+ return nil, errors.Errorf("Invalid result returned for the SCAN command: %#v", values)
+ }
+
+ if fullIDs, ok := values[1].([]interface{}); ok {
+ allFullIDs = append(allFullIDs, fullIDs...)
+ }
+
+ // Check the next cursor
+ cur := toInt(values[0])
+ if cur == -1 {
+ // No valid next cursor was returned
+ return nil, errors.Errorf("Failed to get the next SCAN cursor: %#v", values[0])
+ }
+
+ if cur != 0 {
+ args[0] = cur
+ } else {
+ // end
+ break
+ }
+ }
+
+ IDs := make([]string, 0)
+ for _, fullIDValue := range allFullIDs {
+ if fullID, ok := fullIDValue.([]byte); ok {
+ IDs = append(IDs, string(fullID))
+ } else {
+ logger.Debugf("Invalid job stats key: %#v", fullIDValue)
+ }
+ }
+
+ return IDs, nil
+}
+
+// Get the score with the provided ID
+func getScoreByID(id string, conn redis.Conn, ns string) (int64, error) {
+ scoreKey := fmt.Sprintf("%s%s:%s", rds.KeyNamespacePrefix(ns), "period", "key_score")
+ return redis.Int64(conn.Do("ZSCORE", scoreKey, id))
+}
+
+// Get periodic policy object by the numeric ID
+func getPeriodicPolicy(numericID int64, conn redis.Conn, ns string) (*period.Policy, error) {
+ // close this inner connection here
+ defer func() {
+ if err := conn.Close(); err != nil {
+ logger.Errorf("close redis connection error: %s", err)
+ }
+ }()
+
+ bytes, err := redis.Values(conn.Do("ZRANGEBYSCORE", rds.KeyPeriodicPolicy(ns), numericID, numericID))
+ if err != nil {
+ return nil, err
+ }
+
+ p := &period.Policy{}
+ if len(bytes) > 0 {
+ if rawPolicy, ok := bytes[0].([]byte); ok {
+ if err = p.DeSerialize(rawPolicy); err == nil {
+ return p, nil
+ }
+ }
+ }
+
+ if err == nil {
+ err = errors.Errorf("invalid data for periodic policy %d: %#v", numericID, bytes)
+ }
+
+ return nil, err
+}
+
+// Clear the duplicated policy entries for the jobs "IMAGE_GC", "IMAGE_SCAN_ALL" and "IMAGE_REPLICATE"
+func clearDuplicatedPolicies(conn redis.Conn, ns string) error {
+ hash := make(map[string]interface{})
+
+ bytes, err := redis.Values(conn.Do("ZREVRANGE", rds.KeyPeriodicPolicy(ns), 0, -1, "WITHSCORES"))
+ if err != nil {
+ return err
+ }
+
+ count := 0
+ for i, l := 0, len(bytes); i < l; i = i + 2 {
+ rawPolicy := bytes[i].([]byte)
+ p := &period.Policy{}
+
+ if err := p.DeSerialize(rawPolicy); err != nil {
+ logger.Errorf("DeSerialize policy: %s; error: %s\n", rawPolicy, err)
+ continue
+ }
+
+ if p.JobName == job.ImageScanAllJob ||
+ p.JobName == job.ImageGC ||
+ p.JobName == job.ReplicationScheduler {
+ score, _ := strconv.ParseInt(string(bytes[i+1].([]byte)), 10, 64)
+
+ key := hashKey(p)
+ if _, exists := hash[key]; exists {
+ // Already existing, remove the duplicated one
+ res, err := redis.Int(conn.Do("ZREMRANGEBYSCORE", rds.KeyPeriodicPolicy(ns), score, score))
+ if err != nil || res == 0 {
+ logger.Errorf("Failed to clear duplicated periodic policy: %s-%s:%v", p.JobName, p.ID, score)
+ } else {
+ logger.Infof("Remove duplicated periodic policy: %s-%s:%v", p.JobName, p.ID, score)
+ count++
+ }
+ } else {
+ hash[key] = score
+ }
+ }
+ }
+
+ logger.Infof("Clear %d duplicated periodic policies", count)
+
+ return nil
+}
+
+// Remove the unused key
+func delScoreZset(conn redis.Conn, ns string) {
+ key := fmt.Sprintf("%s%s", rds.KeyNamespacePrefix(ns), "period:key_score")
+ reply, err := redis.Int(conn.Do("EXISTS", key))
+ if err == nil && reply == 1 {
+ reply, err = redis.Int(conn.Do("DEL", key))
+ if err == nil && reply > 0 {
+ logger.Infof("%s removed", key)
+ return // success
+ }
+ }
+
+ if err != nil {
+ // Just logged
+ logger.Errorf("Remove %s failed with error: %s", key, err)
+ }
+}
+
+func toString(v interface{}) string {
+ if v == nil {
+ return ""
+ }
+
+ if bytes, ok := v.([]byte); ok {
+ return string(bytes)
+ }
+
+ return ""
+}
+
+func toInt(v interface{}) int64 {
+ if v == nil {
+ return -1
+ }
+
+ if bytes, ok := v.([]byte); ok {
+ if intV, err := strconv.ParseInt(string(bytes), 10, 64); err == nil {
+ return intV
+ }
+ }
+
+ return -1
+}
+
+func hashKey(p *period.Policy) string {
+ key := p.JobName
+ if len(p.JobParameters) > 0 {
+ if bytes, err := json.Marshal(p.JobParameters); err == nil {
+ key = fmt.Sprintf("%s:%s", key, string(bytes))
+ }
+ }
+
+ return base64.StdEncoding.EncodeToString([]byte(key))
+}
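
getAllJobStatsIDs above iterates the keyspace with cursor-based SCAN rather than the blocking KEYS command. The same loop in a self-contained form; the package name is illustrative:

```go
package rdbscan

import "github.com/gomodule/redigo/redis"

// scanKeys collects all keys matching pattern, following the cursor until it
// wraps back to 0, exactly as getAllJobStatsIDs does above.
func scanKeys(conn redis.Conn, pattern string) ([]string, error) {
	keys := make([]string, 0)
	cursor := 0
	for {
		values, err := redis.Values(conn.Do("SCAN", cursor, "MATCH", pattern, "COUNT", 100))
		if err != nil {
			return nil, err
		}
		// values[0] is the next cursor, values[1] the page of matched keys
		if cursor, err = redis.Int(values[0], nil); err != nil {
			return nil, err
		}
		page, err := redis.Strings(values[1], nil)
		if err != nil {
			return nil, err
		}
		keys = append(keys, page...)
		if cursor == 0 {
			return keys, nil
		}
	}
}
```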
diff --git a/src/jobservice/migration/version.go b/src/jobservice/migration/version.go
new file mode 100644
index 000000000..028b9b6a9
--- /dev/null
+++ b/src/jobservice/migration/version.go
@@ -0,0 +1,31 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package migration
+
+import (
+ "fmt"
+
+ "github.com/goharbor/harbor/src/jobservice/common/rds"
+)
+
+const (
+ // SchemaVersion identifies the schema version of RDB
+ SchemaVersion = "1.8.1"
+)
+
+// VersionKey returns the key of redis schema
+func VersionKey(ns string) string {
+ return fmt.Sprintf("%s%s", rds.KeyNamespacePrefix(ns), "_schema_version")
+}
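
BasicManager.Migrate gates on this constant with a semver comparison: refuse to run against a newer stored schema, no-op when equal, migrate when older. A sketch of that gate in isolation, using the same github.com/Masterminds/semver package imported above:

```go
package migration

import (
	"fmt"

	"github.com/Masterminds/semver"
)

// needsMigration reports whether the stored schema is older than this
// migrator's SchemaVersion, and errors when it is newer.
func needsMigration(stored string) (bool, error) {
	current, err := semver.NewVersion(stored)
	if err != nil {
		return false, err
	}
	target := semver.MustParse(SchemaVersion)
	diff := target.Compare(current)
	if diff < 0 {
		return false, fmt.Errorf("schema in rdb (%s) is newer than this migrator (%s)", current, target)
	}
	return diff > 0, nil
}
```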
diff --git a/src/jobservice/period/enqueuer_test.go b/src/jobservice/period/enqueuer_test.go
index 5c349e2a8..5c3cc4aff 100644
--- a/src/jobservice/period/enqueuer_test.go
+++ b/src/jobservice/period/enqueuer_test.go
@@ -16,6 +16,12 @@ package period
import (
"context"
"fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/pkg/errors"
+
"github.com/goharbor/harbor/src/jobservice/common/rds"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/env"
@@ -26,9 +32,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
- "sync"
- "testing"
- "time"
)
// EnqueuerTestSuite tests functions of enqueuer
@@ -89,19 +92,30 @@ func (suite *EnqueuerTestSuite) TestEnqueuer() {
suite.enqueuer.stopChan <- true
}()
- <-time.After(1 * time.Second)
-
key := rds.RedisKeyScheduled(suite.namespace)
conn := suite.pool.Get()
defer func() {
_ = conn.Close()
}()
- count, err := redis.Int(conn.Do("ZCARD", key))
- require.Nil(suite.T(), err, "count scheduled: nil error expected but got %s", err)
- assert.Condition(suite.T(), func() bool {
- return count > 0
- }, "count of scheduled jobs should be greater than 0 but got %d", count)
+ tk := time.NewTicker(500 * time.Millisecond)
+ defer tk.Stop()
+
+ for {
+ select {
+ case <-tk.C:
+ count, err := redis.Int(conn.Do("ZCARD", key))
+ require.Nil(suite.T(), err, "count scheduled: nil error expected but got %s", err)
+ if assert.Condition(suite.T(), func() (success bool) {
+ return count > 0
+ }, "at least one job should be scheduled for the periodic job policy") {
+ return
+ }
+ case <-time.After(15 * time.Second):
+ require.NoError(suite.T(), errors.New("timeout (15s): expect at 1 scheduled job but still get nothing"))
+ return
+ }
+ }
}()
err := suite.enqueuer.start()
@@ -112,7 +126,7 @@ func (suite *EnqueuerTestSuite) prepare() {
now := time.Now()
minute := now.Minute()
- coreSpec := fmt.Sprintf("30,50 %d * * * *", minute+2)
+ coreSpec := fmt.Sprintf("0-59 %d * * * *", minute)
// Prepare one
p := &Policy{
diff --git a/src/jobservice/runtime/bootstrap.go b/src/jobservice/runtime/bootstrap.go
index 6e722bcd8..88dac6081 100644
--- a/src/jobservice/runtime/bootstrap.go
+++ b/src/jobservice/runtime/bootstrap.go
@@ -17,13 +17,14 @@ package runtime
import (
"context"
"fmt"
- "github.com/goharbor/harbor/src/jobservice/mgt"
"os"
"os/signal"
"sync"
"syscall"
"time"
+ "github.com/goharbor/harbor/src/pkg/scheduler"
+
"github.com/goharbor/harbor/src/jobservice/api"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/config"
@@ -32,13 +33,17 @@ import (
"github.com/goharbor/harbor/src/jobservice/hook"
"github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/jobservice/job/impl/gc"
+ "github.com/goharbor/harbor/src/jobservice/job/impl/notification"
"github.com/goharbor/harbor/src/jobservice/job/impl/replication"
"github.com/goharbor/harbor/src/jobservice/job/impl/sample"
"github.com/goharbor/harbor/src/jobservice/job/impl/scan"
"github.com/goharbor/harbor/src/jobservice/lcm"
"github.com/goharbor/harbor/src/jobservice/logger"
+ "github.com/goharbor/harbor/src/jobservice/mgt"
+ "github.com/goharbor/harbor/src/jobservice/migration"
"github.com/goharbor/harbor/src/jobservice/worker"
"github.com/goharbor/harbor/src/jobservice/worker/cworker"
+ "github.com/goharbor/harbor/src/pkg/retention"
"github.com/gomodule/redigo/redis"
"github.com/pkg/errors"
)
@@ -97,6 +102,14 @@ func (bs *Bootstrap) LoadAndRun(ctx context.Context, cancel context.CancelFunc)
// Get redis connection pool
redisPool := bs.getRedisPool(cfg.PoolConfig.RedisPoolCfg.RedisURL)
+ // Do data migration if necessary
+ rdbMigrator := migration.New(redisPool, namespace)
+ rdbMigrator.Register(migration.PolicyMigratorFactory)
+ if err := rdbMigrator.Migrate(); err != nil {
+ // Just logged, should not block the starting process
+ logger.Error(err)
+ }
+
// Create stats manager
manager = mgt.NewManager(ctx, namespace, redisPool)
// Create hook agent, it's a singleton object
@@ -229,11 +242,14 @@ func (bs *Bootstrap) loadAndRunRedisWorkerPool(
// Only for debugging and testing purpose
job.SampleJob: (*sample.Job)(nil),
// Functional jobs
- job.ImageScanJob: (*scan.ClairJob)(nil),
- job.ImageScanAllJob: (*scan.All)(nil),
- job.ImageGC: (*gc.GarbageCollector)(nil),
- job.Replication: (*replication.Replication)(nil),
- job.ReplicationScheduler: (*replication.Scheduler)(nil),
+ job.ImageScanJob: (*scan.ClairJob)(nil),
+ job.ImageScanAllJob: (*scan.All)(nil),
+ job.ImageGC: (*gc.GarbageCollector)(nil),
+ job.Replication: (*replication.Replication)(nil),
+ job.ReplicationScheduler: (*replication.Scheduler)(nil),
+ job.Retention: (*retention.Job)(nil),
+ scheduler.JobNameScheduler: (*scheduler.PeriodicJob)(nil),
+ job.WebhookJob: (*notification.WebhookJob)(nil),
}); err != nil {
// exit
return nil, err
@@ -249,9 +265,8 @@ func (bs *Bootstrap) loadAndRunRedisWorkerPool(
// Get a redis connection pool
func (bs *Bootstrap) getRedisPool(redisURL string) *redis.Pool {
return &redis.Pool{
- MaxActive: 6,
- MaxIdle: 6,
- Wait: true,
+ MaxIdle: 6,
+ Wait: true,
Dial: func() (redis.Conn, error) {
return redis.DialURL(
redisURL,
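
Dropping MaxActive leaves the pool with no cap on open connections; per redigo's documentation, Wait only takes effect when MaxActive is set, so Get no longer blocks waiting for a free slot. The resulting configuration in isolation (package name illustrative):

```go
package rdpool

import "github.com/gomodule/redigo/redis"

func newPool(redisURL string) *redis.Pool {
	return &redis.Pool{
		MaxIdle: 6,
		Wait:    true, // a no-op while MaxActive is left at its zero value
		Dial: func() (redis.Conn, error) {
			return redis.DialURL(redisURL)
		},
	}
}
```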
diff --git a/src/jobservice/runtime/bootstrap_test.go b/src/jobservice/runtime/bootstrap_test.go
index 74bfa9a59..257e58479 100644
--- a/src/jobservice/runtime/bootstrap_test.go
+++ b/src/jobservice/runtime/bootstrap_test.go
@@ -16,6 +16,7 @@ package runtime
import (
"context"
+ "fmt"
"github.com/goharbor/harbor/src/jobservice/common/utils"
"github.com/goharbor/harbor/src/jobservice/config"
"github.com/goharbor/harbor/src/jobservice/logger"
@@ -56,15 +57,16 @@ func (suite *BootStrapTestSuite) SetupSuite() {
// TearDownSuite clears the test suite
func (suite *BootStrapTestSuite) TearDownSuite() {
- suite.cancel()
-
pool := tests.GiveMeRedisPool()
conn := pool.Get()
defer func() {
_ = conn.Close()
}()
- _ = tests.ClearAll(tests.GiveMeTestNamespace(), conn)
+ err := tests.ClearAll(fmt.Sprintf("{%s}", tests.GiveMeTestNamespace()), conn)
+ require.NoError(suite.T(), err, "clear rdb error")
+
+ suite.cancel()
}
// TestBootStrapTestSuite is entry of go test
diff --git a/src/jobservice/worker/cworker/c_worker.go b/src/jobservice/worker/cworker/c_worker.go
index e9cafe0c1..6de36856f 100644
--- a/src/jobservice/worker/cworker/c_worker.go
+++ b/src/jobservice/worker/cworker/c_worker.go
@@ -17,6 +17,7 @@ package cworker
import (
"fmt"
"reflect"
+ "sync"
"time"
"github.com/gocraft/work"
@@ -30,7 +31,6 @@ import (
"github.com/goharbor/harbor/src/jobservice/worker"
"github.com/gomodule/redigo/redis"
"github.com/pkg/errors"
- "sync"
)
var (
@@ -66,7 +66,10 @@ type workerContext struct{}
// log the job
func (rpc *workerContext) logJob(job *work.Job, next work.NextMiddlewareFunc) error {
- jobInfo, _ := utils.SerializeJob(job)
+ jobCopy := *job
+ // as the args may contain sensitive information, ignore them when logging the detail
+ jobCopy.Args = nil
+ jobInfo, _ := utils.SerializeJob(&jobCopy)
logger.Infof("Job incoming: %s", jobInfo)
return next()
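
The middleware now serializes a copy of the job with Args blanked, so user-supplied parameters (which may hold credentials) never reach the log. The pattern in a generic, self-contained form; jobRecord stands in for work.Job, only the shape matters here:

```go
package worklog

import "encoding/json"

type jobRecord struct {
	Name string                 `json:"name"`
	Args map[string]interface{} `json:"args,omitempty"`
}

// safeJobLine serializes a copy with Args blanked so the original job stays
// intact and its arguments never reach the log.
func safeJobLine(j *jobRecord) (string, error) {
	cp := *j      // shallow copy; only the Args reference is overwritten
	cp.Args = nil // never serialize user-supplied arguments
	b, err := json.Marshal(&cp)
	return string(b), err
}
```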
diff --git a/src/jobservice/worker/cworker/c_worker_test.go b/src/jobservice/worker/cworker/c_worker_test.go
index e80810dc4..be7931c17 100644
--- a/src/jobservice/worker/cworker/c_worker_test.go
+++ b/src/jobservice/worker/cworker/c_worker_test.go
@@ -188,11 +188,23 @@ func (suite *CWorkerTestSuite) TestStopJob() {
t, err := suite.lcmCtl.New(genericJob)
require.NoError(suite.T(), err, "new job stats: nil error expected but got %s", err)
- time.Sleep(3 * time.Second)
+ tk := time.NewTicker(500 * time.Millisecond)
+ defer tk.Stop()
- latest, err := t.Status()
- require.NoError(suite.T(), err, "get latest status: nil error expected but got %s", err)
- assert.EqualValues(suite.T(), job.RunningStatus, latest, "expect job is running now")
+LOOP:
+ for {
+ select {
+ case <-tk.C:
+ latest, err := t.Status()
+ require.NoError(suite.T(), err, "get latest status: nil error expected but got %s", err)
+ if latest.Compare(job.RunningStatus) == 0 {
+ break LOOP
+ }
+ case <-time.After(30 * time.Second):
+ require.NoError(suite.T(), errors.New("check running status time out"))
+ break LOOP
+ }
+ }
err = suite.cWorker.StopJob(genericJob.Info.JobID)
require.NoError(suite.T(), err, "stop job: nil error expected but got %s", err)
@@ -255,7 +267,7 @@ func (j *fakeLongRunJob) Validate(params job.Parameters) error {
}
func (j *fakeLongRunJob) Run(ctx job.Context, params job.Parameters) error {
- time.Sleep(5 * time.Second)
+ time.Sleep(3 * time.Second)
if _, stopped := ctx.OPCommand(); stopped {
return nil
diff --git a/src/pkg/authproxy/http.go b/src/pkg/authproxy/http.go
new file mode 100644
index 000000000..baeed17cf
--- /dev/null
+++ b/src/pkg/authproxy/http.go
@@ -0,0 +1,65 @@
+package authproxy
+
+import (
+ "encoding/json"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ k8s_api_v1beta1 "k8s.io/api/authentication/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest"
+)
+
+// TokenReview ...
+func TokenReview(sessionID string, authProxyConfig *models.HTTPAuthProxy) (*k8s_api_v1beta1.TokenReview, error) {
+
+ // Init auth client with the auth proxy endpoint.
+ authClientCfg := &rest.Config{
+ Host: authProxyConfig.TokenReviewEndpoint,
+ ContentConfig: rest.ContentConfig{
+ GroupVersion: &schema.GroupVersion{},
+ NegotiatedSerializer: serializer.DirectCodecFactory{CodecFactory: scheme.Codecs},
+ },
+ BearerToken: sessionID,
+ TLSClientConfig: rest.TLSClientConfig{
+ Insecure: !authProxyConfig.VerifyCert,
+ },
+ }
+ authClient, err := rest.RESTClientFor(authClientCfg)
+ if err != nil {
+ return nil, err
+ }
+
+ // Do auth with the token.
+ tokenReviewRequest := &k8s_api_v1beta1.TokenReview{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "TokenReview",
+ APIVersion: "authentication.k8s.io/v1beta1",
+ },
+ Spec: k8s_api_v1beta1.TokenReviewSpec{
+ Token: sessionID,
+ },
+ }
+ res := authClient.Post().Body(tokenReviewRequest).Do()
+ err = res.Error()
+ if err != nil {
+ log.Errorf("fail to POST auth request, %v", err)
+ return nil, err
+ }
+ resRaw, err := res.Raw()
+ if err != nil {
+ log.Errorf("fail to get raw data of token review, %v", err)
+ return nil, err
+ }
+ // Parse the auth response, check the user name and authenticated status.
+ tokenReviewResponse := &k8s_api_v1beta1.TokenReview{}
+ err = json.Unmarshal(resRaw, &tokenReviewResponse)
+ if err != nil {
+ log.Errorf("fail to decode token review, %v", err)
+ return nil, err
+ }
+ return tokenReviewResponse, nil
+
+}
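
A hypothetical caller of TokenReview, checking the authenticated flag and extracting the username; the package name and helper are illustrative, while the Status fields follow the k8s.io/api/authentication/v1beta1 types returned above:

```go
package authcheck

import (
	"errors"

	"github.com/goharbor/harbor/src/common/models"
	"github.com/goharbor/harbor/src/pkg/authproxy"
)

// authenticate resolves the username behind a session ID, failing when the
// token review comes back unauthenticated.
func authenticate(sessionID string, cfg *models.HTTPAuthProxy) (string, error) {
	review, err := authproxy.TokenReview(sessionID, cfg)
	if err != nil {
		return "", err
	}
	if !review.Status.Authenticated {
		return "", errors.New("token review: request was not authenticated")
	}
	return review.Status.User.Username, nil
}
```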
diff --git a/src/pkg/clients/core/chart.go b/src/pkg/clients/core/chart.go
new file mode 100644
index 000000000..75d8c3983
--- /dev/null
+++ b/src/pkg/clients/core/chart.go
@@ -0,0 +1,40 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ "fmt"
+
+ "github.com/goharbor/harbor/src/chartserver"
+)
+
+func (c *client) ListAllCharts(project, repository string) ([]*chartserver.ChartVersion, error) {
+ url := c.buildURL(fmt.Sprintf("/api/chartrepo/%s/charts/%s", project, repository))
+ var charts []*chartserver.ChartVersion
+ if err := c.httpclient.Get(url, &charts); err != nil {
+ return nil, err
+ }
+ return charts, nil
+}
+
+func (c *client) DeleteChart(project, repository, version string) error {
+ url := c.buildURL(fmt.Sprintf("/api/chartrepo/%s/charts/%s/%s", project, repository, version))
+ return c.httpclient.Delete(url)
+}
+
+func (c *client) DeleteChartRepository(project, repository string) error {
+ url := c.buildURL(fmt.Sprintf("/api/chartrepo/%s/charts/%s", project, repository))
+ return c.httpclient.Delete(url)
+}
diff --git a/src/pkg/clients/core/client.go b/src/pkg/clients/core/client.go
new file mode 100644
index 000000000..2234fd17c
--- /dev/null
+++ b/src/pkg/clients/core/client.go
@@ -0,0 +1,65 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/goharbor/harbor/src/common/models"
+
+ "github.com/goharbor/harbor/src/chartserver"
+ chttp "github.com/goharbor/harbor/src/common/http"
+ "github.com/goharbor/harbor/src/common/http/modifier"
+)
+
+// Client defines the methods that a core client should implement.
+// Currently it contains only part of the full method collection;
+// expand it as needed
+type Client interface {
+ ImageClient
+ ChartClient
+}
+
+// ImageClient defines the methods that an image client should implement
+type ImageClient interface {
+ ListAllImages(project, repository string) ([]*models.TagResp, error)
+ DeleteImage(project, repository, tag string) error
+ DeleteImageRepository(project, repository string) error
+}
+
+// ChartClient defines the methods that a chart client should implement
+type ChartClient interface {
+ ListAllCharts(project, repository string) ([]*chartserver.ChartVersion, error)
+ DeleteChart(project, repository, version string) error
+ DeleteChartRepository(project, repository string) error
+}
+
+// New returns an instance of the client, the default implementation of Client
+func New(url string, httpclient *http.Client, authorizer modifier.Modifier) Client {
+ return &client{
+ url: url,
+ httpclient: chttp.NewClient(httpclient, authorizer),
+ }
+}
+
+type client struct {
+ url string
+ httpclient *chttp.Client
+}
+
+func (c *client) buildURL(path string) string {
+ return fmt.Sprintf("%s/%s", c.url, path)
+}
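
Putting the client together, a usage sketch: the URL is illustrative, the nil authorizer assumes anonymous access is acceptable, and TagResp is assumed to expose the tag name (the package name is illustrative):

```go
package example

import (
	"fmt"
	"net/http"

	"github.com/goharbor/harbor/src/pkg/clients/core"
)

func listTags() error {
	cli := core.New("http://core:8080", &http.Client{}, nil)
	tags, err := cli.ListAllImages("library", "hello-world")
	if err != nil {
		return err
	}
	for _, t := range tags {
		fmt.Println(t.Name) // assumes TagResp exposes the tag name
	}
	return nil
}
```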
diff --git a/src/pkg/clients/core/image.go b/src/pkg/clients/core/image.go
new file mode 100644
index 000000000..1b8811790
--- /dev/null
+++ b/src/pkg/clients/core/image.go
@@ -0,0 +1,40 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package core
+
+import (
+ "fmt"
+
+ "github.com/goharbor/harbor/src/common/models"
+)
+
+func (c *client) ListAllImages(project, repository string) ([]*models.TagResp, error) {
+ url := c.buildURL(fmt.Sprintf("/api/repositories/%s/%s/tags", project, repository))
+ var images []*models.TagResp
+ if err := c.httpclient.GetAndIteratePagination(url, &images); err != nil {
+ return nil, err
+ }
+ return images, nil
+}
+
+func (c *client) DeleteImage(project, repository, tag string) error {
+ url := c.buildURL(fmt.Sprintf("/api/repositories/%s/%s/tags/%s", project, repository, tag))
+ return c.httpclient.Delete(url)
+}
+
+func (c *client) DeleteImageRepository(project, repository string) error {
+ url := c.buildURL(fmt.Sprintf("/api/repositories/%s/%s", project, repository))
+ return c.httpclient.Delete(url)
+}
diff --git a/src/pkg/notification/hook/hook.go b/src/pkg/notification/hook/hook.go
new file mode 100755
index 000000000..8524a0e0e
--- /dev/null
+++ b/src/pkg/notification/hook/hook.go
@@ -0,0 +1,85 @@
+package hook
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ cJob "github.com/goharbor/harbor/src/common/job"
+ "github.com/goharbor/harbor/src/common/job/models"
+ cModels "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/core/notifier/model"
+ "github.com/goharbor/harbor/src/core/utils"
+ "github.com/goharbor/harbor/src/pkg/notification/job"
+ "github.com/goharbor/harbor/src/pkg/notification/job/manager"
+)
+
+// Manager send hook
+type Manager interface {
+ StartHook(*model.HookEvent, *models.JobData) error
+}
+
+// DefaultManager ...
+type DefaultManager struct {
+ jobMgr job.Manager
+ client cJob.Client
+}
+
+// NewHookManager ...
+func NewHookManager() *DefaultManager {
+ return &DefaultManager{
+ jobMgr: manager.NewDefaultManager(),
+ client: utils.GetJobServiceClient(),
+ }
+}
+
+// StartHook creates a notification job record in the database and submits it to the jobservice
+func (hm *DefaultManager) StartHook(event *model.HookEvent, data *models.JobData) error {
+ payload, err := json.Marshal(event.Payload)
+ if err != nil {
+ return err
+ }
+
+ t := time.Now()
+ id, err := hm.jobMgr.Create(&cModels.NotificationJob{
+ PolicyID: event.PolicyID,
+ EventType: event.EventType,
+ NotifyType: event.Target.Type,
+ Status: cModels.JobPending,
+ CreationTime: t,
+ UpdateTime: t,
+ JobDetail: string(payload),
+ })
+ if err != nil {
+ return fmt.Errorf("failed to create the job record for notification based on policy %d: %v", event.PolicyID, err)
+ }
+ statusHookURL := fmt.Sprintf("%s/service/notifications/jobs/webhook/%d", config.InternalCoreURL(), id)
+ data.StatusHook = statusHookURL
+
+ log.Debugf("created a notification job %d for the policy %d", id, event.PolicyID)
+
+ // submit hook job to jobservice
+ jobUUID, err := hm.client.SubmitJob(data)
+ if err != nil {
+ log.Errorf("failed to submit job with notification event: %v", err)
+ e := hm.jobMgr.Update(&cModels.NotificationJob{
+ ID: id,
+ Status: cModels.JobError,
+ }, "Status")
+ if e != nil {
+ log.Errorf("failed to update the notification job status %d: %v", id, e)
+ }
+ return err
+ }
+
+ if err = hm.jobMgr.Update(&cModels.NotificationJob{
+ ID: id,
+ UUID: jobUUID,
+ }, "UUID"); err != nil {
+ log.Errorf("failed to update the notification job %d: %v", id, err)
+ return err
+ }
+ return nil
+}
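
StartHook follows a record-then-submit shape: persist a pending record, submit the async job, then either attach the returned UUID or mark the record errored. The shape in isolation, with hypothetical Store and Queue interfaces standing in for the notification job manager and the jobservice client used above:

```go
package tracked

type Store interface {
	CreatePending(payload string) (int64, error)
	MarkError(id int64) error
	AttachUUID(id int64, uuid string) error
}

type Queue interface {
	Submit(payload string) (string, error)
}

// submitTracked persists a pending record, submits the async job, and either
// stores the returned UUID or marks the record as errored (best effort).
func submitTracked(store Store, queue Queue, payload string) error {
	id, err := store.CreatePending(payload)
	if err != nil {
		return err
	}
	uuid, err := queue.Submit(payload)
	if err != nil {
		_ = store.MarkError(id)
		return err
	}
	return store.AttachUUID(id, uuid)
}
```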
diff --git a/src/pkg/notification/job/manager.go b/src/pkg/notification/job/manager.go
new file mode 100755
index 000000000..da8ac8027
--- /dev/null
+++ b/src/pkg/notification/job/manager.go
@@ -0,0 +1,20 @@
+package job
+
+import (
+ "github.com/goharbor/harbor/src/common/models"
+)
+
+// Manager manages notification jobs recorded in database
+type Manager interface {
+ // Create creates a notification job
+ Create(job *models.NotificationJob) (int64, error)
+
+ // List lists notification jobs
+ List(...*models.NotificationJobQuery) (int64, []*models.NotificationJob, error)
+
+ // Update updates a notification job
+ Update(job *models.NotificationJob, props ...string) error
+
+ // ListJobsGroupByEventType lists the last triggered jobs grouped by event type
+ ListJobsGroupByEventType(policyID int64) ([]*models.NotificationJob, error)
+}
diff --git a/src/pkg/notification/job/manager/manager.go b/src/pkg/notification/job/manager/manager.go
new file mode 100755
index 000000000..8db3aecd6
--- /dev/null
+++ b/src/pkg/notification/job/manager/manager.go
@@ -0,0 +1,55 @@
+package manager
+
+import (
+ "fmt"
+
+ "github.com/goharbor/harbor/src/common/dao/notification"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/pkg/notification/job"
+)
+
+// DefaultManager is the default implementation of the job Manager interface
+type DefaultManager struct {
+}
+
+// NewDefaultManager ...
+func NewDefaultManager() job.Manager {
+ return &DefaultManager{}
+}
+
+// Create ...
+func (d *DefaultManager) Create(job *models.NotificationJob) (int64, error) {
+ return notification.AddNotificationJob(job)
+}
+
+// List ...
+func (d *DefaultManager) List(query ...*models.NotificationJobQuery) (int64, []*models.NotificationJob, error) {
+ total, err := notification.GetTotalCountOfNotificationJobs(query...)
+ if err != nil {
+ return 0, nil, err
+ }
+
+ executions, err := notification.GetNotificationJobs(query...)
+ if err != nil {
+ return 0, nil, err
+ }
+ return total, executions, nil
+}
+
+// Update ...
+func (d *DefaultManager) Update(job *models.NotificationJob, props ...string) error {
+ n, err := notification.UpdateNotificationJob(job, props...)
+ if err != nil {
+ return err
+ }
+
+ if n == 0 {
+ return fmt.Errorf("execution %d not found", job.ID)
+ }
+ return nil
+}
+
+// ListJobsGroupByEventType lists last triggered jobs group by event type
+func (d *DefaultManager) ListJobsGroupByEventType(policyID int64) ([]*models.NotificationJob, error) {
+ return notification.GetLastTriggerJobsGroupByEventType(policyID)
+}
diff --git a/src/pkg/notification/job/manager/manager_test.go b/src/pkg/notification/job/manager/manager_test.go
new file mode 100644
index 000000000..a373f618b
--- /dev/null
+++ b/src/pkg/notification/job/manager/manager_test.go
@@ -0,0 +1,22 @@
+package manager
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestNewDefaultManager(t *testing.T) {
+ tests := []struct {
+ name string
+ want *DefaultManager
+ }{
+ {want: &DefaultManager{}},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := NewDefaultManager(); !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("NewDefaultManager() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/src/pkg/notification/model/const.go b/src/pkg/notification/model/const.go
new file mode 100644
index 000000000..51b8288ee
--- /dev/null
+++ b/src/pkg/notification/model/const.go
@@ -0,0 +1,16 @@
+package model
+
+// const definitions
+const (
+ EventTypePushImage = "pushImage"
+ EventTypePullImage = "pullImage"
+ EventTypeDeleteImage = "deleteImage"
+ EventTypeUploadChart = "uploadChart"
+ EventTypeDeleteChart = "deleteChart"
+ EventTypeDownloadChart = "downloadChart"
+ EventTypeScanningCompleted = "scanningCompleted"
+ EventTypeScanningFailed = "scanningFailed"
+ EventTypeTestEndpoint = "testEndpoint"
+
+ NotifyTypeHTTP = "http"
+)
diff --git a/src/pkg/notification/notification.go b/src/pkg/notification/notification.go
new file mode 100755
index 000000000..4de7479d1
--- /dev/null
+++ b/src/pkg/notification/notification.go
@@ -0,0 +1,63 @@
+package notification
+
+import (
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/pkg/notification/hook"
+ "github.com/goharbor/harbor/src/pkg/notification/job"
+ jobMgr "github.com/goharbor/harbor/src/pkg/notification/job/manager"
+ "github.com/goharbor/harbor/src/pkg/notification/model"
+ "github.com/goharbor/harbor/src/pkg/notification/policy"
+ "github.com/goharbor/harbor/src/pkg/notification/policy/manager"
+)
+
+var (
+ // PolicyMgr is a global notification policy manager
+ PolicyMgr policy.Manager
+
+ // JobMgr is a notification job controller
+ JobMgr job.Manager
+
+ // HookManager is a hook manager
+ HookManager hook.Manager
+
+ // SupportedEventTypes is a map storing the supported event types, e.g. pushImage, pullImage, etc.
+ SupportedEventTypes map[string]struct{}
+
+ // SupportedNotifyTypes is a map storing the supported notification types, e.g. HTTP, Email, etc.
+ SupportedNotifyTypes map[string]struct{}
+)
+
+// Init ...
+func Init() {
+ // init notification policy manager
+ PolicyMgr = manager.NewDefaultManager()
+ // init hook manager
+ HookManager = hook.NewHookManager()
+ // init notification job manager
+ JobMgr = jobMgr.NewDefaultManager()
+
+ SupportedEventTypes = make(map[string]struct{})
+ SupportedNotifyTypes = make(map[string]struct{})
+
+ initSupportedEventType(
+ model.EventTypePushImage, model.EventTypePullImage, model.EventTypeDeleteImage,
+ model.EventTypeUploadChart, model.EventTypeDeleteChart, model.EventTypeDownloadChart,
+ model.EventTypeScanningCompleted, model.EventTypeScanningFailed,
+ )
+
+ initSupportedNotifyType(model.NotifyTypeHTTP)
+
+ log.Info("notification initialization completed")
+}
+
+func initSupportedEventType(eventTypes ...string) {
+ for _, eventType := range eventTypes {
+ SupportedEventTypes[eventType] = struct{}{}
+ }
+}
+
+func initSupportedNotifyType(notifyTypes ...string) {
+ for _, notifyType := range notifyTypes {
+ SupportedNotifyTypes[notifyType] = struct{}{}
+ }
+}
diff --git a/src/pkg/notification/policy/manager.go b/src/pkg/notification/policy/manager.go
new file mode 100755
index 000000000..d08ffc3bd
--- /dev/null
+++ b/src/pkg/notification/policy/manager.go
@@ -0,0 +1,25 @@
+package policy
+
+import (
+ "github.com/goharbor/harbor/src/common/models"
+)
+
+// Manager manages the notification policies
+type Manager interface {
+ // Create new policy
+ Create(*models.NotificationPolicy) (int64, error)
+ // List the policies, returns the policy list and error
+ List(int64) ([]*models.NotificationPolicy, error)
+ // Get policy with specified ID
+ Get(int64) (*models.NotificationPolicy, error)
+ // GetByNameAndProjectID get policy by the name and projectID
+ GetByNameAndProjectID(string, int64) (*models.NotificationPolicy, error)
+ // Update the specified policy
+ Update(*models.NotificationPolicy) error
+ // Delete the specified policy
+ Delete(int64) error
+ // Test the specified policy
+ Test(*models.NotificationPolicy) error
+ // GetRelatedPolices gets the policies in a project that are related to the given event type
+ GetRelatedPolices(int64, string) ([]*models.NotificationPolicy, error)
+}
diff --git a/src/pkg/notification/policy/manager/manager.go b/src/pkg/notification/policy/manager/manager.go
new file mode 100755
index 000000000..c4f6681c2
--- /dev/null
+++ b/src/pkg/notification/policy/manager/manager.go
@@ -0,0 +1,159 @@
+package manager
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/dao/notification"
+ commonhttp "github.com/goharbor/harbor/src/common/http"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ notifierModel "github.com/goharbor/harbor/src/core/notifier/model"
+ "github.com/goharbor/harbor/src/pkg/notification/model"
+)
+
+// DefaultManager ...
+type DefaultManager struct {
+}
+
+// NewDefaultManager ...
+func NewDefaultManager() *DefaultManager {
+ return &DefaultManager{}
+}
+
+// Create notification policy
+func (m *DefaultManager) Create(policy *models.NotificationPolicy) (int64, error) {
+ t := time.Now()
+ policy.CreationTime = t
+ policy.UpdateTime = t
+
+ err := policy.ConvertToDBModel()
+ if err != nil {
+ return 0, err
+ }
+ return notification.AddNotificationPolicy(policy)
+}
+
+// List the notification policies, returns the policy list and error
+func (m *DefaultManager) List(projectID int64) ([]*models.NotificationPolicy, error) {
+ policies := []*models.NotificationPolicy{}
+ persistedPolicies, err := notification.GetNotificationPolicies(projectID)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, policy := range persistedPolicies {
+ err := policy.ConvertFromDBModel()
+ if err != nil {
+ return nil, err
+ }
+ policies = append(policies, policy)
+ }
+
+ return policies, nil
+}
+
+// Get notification policy with specified ID
+func (m *DefaultManager) Get(id int64) (*models.NotificationPolicy, error) {
+ policy, err := notification.GetNotificationPolicy(id)
+ if err != nil {
+ return nil, err
+ }
+ if policy == nil {
+ return nil, nil
+ }
+ err = policy.ConvertFromDBModel()
+ return policy, err
+}
+
+// GetByNameAndProjectID gets the notification policy by name and project ID
+func (m *DefaultManager) GetByNameAndProjectID(name string, projectID int64) (*models.NotificationPolicy, error) {
+ policy, err := notification.GetNotificationPolicyByName(name, projectID)
+ if err != nil {
+ return nil, err
+ }
+ // guard against a nil policy to mirror Get()
+ if policy == nil {
+ return nil, nil
+ }
+ err = policy.ConvertFromDBModel()
+ return policy, err
+}
+
+// Update the specified notification policy
+func (m *DefaultManager) Update(policy *models.NotificationPolicy) error {
+ policy.UpdateTime = time.Now()
+ err := policy.ConvertToDBModel()
+ if err != nil {
+ return err
+ }
+ return notification.UpdateNotificationPolicy(policy)
+}
+
+// Delete the specified notification policy
+func (m *DefaultManager) Delete(policyID int64) error {
+ return notification.DeleteNotificationPolicy(policyID)
+}
+
+// Test the specified notification policy by sending a test payload to each of its targets
+func (m *DefaultManager) Test(policy *models.NotificationPolicy) error {
+ p, err := json.Marshal(notifierModel.Payload{
+ Type: model.EventTypeTestEndpoint,
+ })
+ if err != nil {
+ return err
+ }
+
+ // test every target instead of returning after the first one
+ for _, target := range policy.Targets {
+ switch target.Type {
+ case "http":
+ if err := m.policyHTTPTest(target.Address, target.SkipCertVerify, p); err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("invalid policy target type: %s", target.Type)
+ }
+ }
+ return nil
+}
+
+func (m *DefaultManager) policyHTTPTest(address string, skipCertVerify bool, p []byte) error {
+ // send the marshaled test payload as the request body
+ req, err := http.NewRequest(http.MethodPost, address, bytes.NewReader(p))
+ if err != nil {
+ return err
+ }
+
+ req.Header.Set("Content-Type", "application/json")
+
+ client := http.Client{
+ Transport: commonhttp.GetHTTPTransport(skipCertVerify),
+ }
+
+ resp, err := client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ log.Debugf("policy test succeeded with address %s, skipCertVerify: %v", address, skipCertVerify)
+
+ return nil
+}
+
+// GetRelatedPolices gets the enabled policies in the project that include the given event type
+func (m *DefaultManager) GetRelatedPolices(projectID int64, eventType string) ([]*models.NotificationPolicy, error) {
+ policies, err := m.List(projectID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get notification policies with projectID %d: %v", projectID, err)
+ }
+
+ var result []*models.NotificationPolicy
+
+ for _, ply := range policies {
+ if !ply.Enabled {
+ continue
+ }
+ for _, t := range ply.EventTypes {
+ if t != eventType {
+ continue
+ }
+ result = append(result, ply)
+ }
+ }
+ return result, nil
+}
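+
+// End-to-end sketch of the manager (illustrative values only):
+//
+//    m := NewDefaultManger()
+//    id, err := m.Create(&models.NotificationPolicy{Name: "p1", ProjectID: 1})
+//    ...
+//    related, err := m.GetRelatedPolices(1, model.EventTypePushImage)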
diff --git a/src/pkg/notification/policy/manager/manager_test.go b/src/pkg/notification/policy/manager/manager_test.go
new file mode 100644
index 000000000..9dfd6970f
--- /dev/null
+++ b/src/pkg/notification/policy/manager/manager_test.go
@@ -0,0 +1,22 @@
+package manager
+
+import (
+ "reflect"
+ "testing"
+)
+
+func TestNewDefaultManger(t *testing.T) {
+ tests := []struct {
+ name string
+ want *DefaultManager
+ }{
+ {want: &DefaultManager{}},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := NewDefaultManger(); !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("NewDefaultManger() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/src/pkg/project/manager.go b/src/pkg/project/manager.go
new file mode 100644
index 000000000..f4d5a8910
--- /dev/null
+++ b/src/pkg/project/manager.go
@@ -0,0 +1,61 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package project
+
+import (
+ "fmt"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+)
+
+// Manager is used for project management.
+// Currently, the interface only defines the methods needed for tag retention;
+// it will be expanded in a future refactoring.
+type Manager interface {
+ // List projects according to the query
+ List(...*models.ProjectQueryParam) ([]*models.Project, error)
+ // Get the project specified by the ID or name
+ Get(interface{}) (*models.Project, error)
+}
+
+// New returns a default implementation of Manager
+func New() Manager {
+ return &manager{}
+}
+
+type manager struct{}
+
+// List projects according to the query
+func (m *manager) List(query ...*models.ProjectQueryParam) ([]*models.Project, error) {
+ var q *models.ProjectQueryParam
+ if len(query) > 0 {
+ q = query[0]
+ }
+ return dao.GetProjects(q)
+}
+
+// Get the project specified by the ID or by the name
+func (m *manager) Get(idOrName interface{}) (*models.Project, error) {
+ id, ok := idOrName.(int64)
+ if ok {
+ return dao.GetProjectByID(id)
+ }
+ name, ok := idOrName.(string)
+ if ok {
+ return dao.GetProjectByName(name)
+ }
+ return nil, fmt.Errorf("invalid parameter: %v, should be ID(int64) or name(string)", idOrName)
+}
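+
+// Get accepts either an int64 ID or a string name; a brief sketch with
+// illustrative arguments:
+//
+//    mgr := New()
+//    byID, err := mgr.Get(int64(1))    // lookup by project ID
+//    byName, err := mgr.Get("library") // lookup by project name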
diff --git a/src/pkg/repository/manager.go b/src/pkg/repository/manager.go
new file mode 100644
index 000000000..3631baac3
--- /dev/null
+++ b/src/pkg/repository/manager.go
@@ -0,0 +1,61 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package repository
+
+import (
+ "github.com/goharbor/harbor/src/chartserver"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/pkg/project"
+)
+
+// Manager is used for repository management.
+// Currently, the interface only defines the methods needed for tag retention;
+// it will be expanded in a future refactoring.
+type Manager interface {
+ // List image repositories under the project specified by the ID
+ ListImageRepositories(projectID int64) ([]*models.RepoRecord, error)
+ // List chart repositories under the project specified by the ID
+ ListChartRepositories(projectID int64) ([]*chartserver.ChartInfo, error)
+}
+
+// New returns a default implementation of Manager
+func New(projectMgr project.Manager, chartCtl *chartserver.Controller) Manager {
+ return &manager{
+ projectMgr: projectMgr,
+ chartCtl: chartCtl,
+ }
+}
+
+type manager struct {
+ projectMgr project.Manager
+ chartCtl *chartserver.Controller
+}
+
+// List image repositories under the project specified by the ID
+func (m *manager) ListImageRepositories(projectID int64) ([]*models.RepoRecord, error) {
+ return dao.GetRepositories(&models.RepositoryQuery{
+ ProjectIDs: []int64{projectID},
+ })
+}
+
+// List chart repositories under the project specified by the ID
+func (m *manager) ListChartRepositories(projectID int64) ([]*chartserver.ChartInfo, error) {
+ // use a name that does not shadow the imported "project" package
+ pro, err := m.projectMgr.Get(projectID)
+ if err != nil {
+ return nil, err
+ }
+ return m.chartCtl.ListCharts(pro.Name)
+}
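+
+// Usage sketch (illustrative; projectMgr and chartCtl are assumed to be
+// constructed elsewhere):
+//
+//    repoMgr := New(projectMgr, chartCtl)
+//    images, err := repoMgr.ListImageRepositories(projectID)
+//    charts, err := repoMgr.ListChartRepositories(projectID)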
diff --git a/src/pkg/retention/controller.go b/src/pkg/retention/controller.go
new file mode 100644
index 000000000..d12ef3996
--- /dev/null
+++ b/src/pkg/retention/controller.go
@@ -0,0 +1,280 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package retention
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/goharbor/harbor/src/jobservice/job"
+ "github.com/goharbor/harbor/src/pkg/project"
+ "github.com/goharbor/harbor/src/pkg/repository"
+ "github.com/goharbor/harbor/src/pkg/retention/policy"
+ "github.com/goharbor/harbor/src/pkg/retention/q"
+ "github.com/goharbor/harbor/src/pkg/scheduler"
+)
+
+// APIController to handle the requests related with retention
+type APIController interface {
+ // Handle the related hooks from the job service and launch the corresponding actions if needed
+ //
+ // Arguments:
+ // PolicyID string : uuid of the retention policy
+ // event *job.StatusChange : event object sent by job service
+ //
+ // Returns:
+ // common error object if any errors occurred
+ HandleHook(policyID string, event *job.StatusChange) error
+
+ GetRetention(id int64) (*policy.Metadata, error)
+
+ CreateRetention(p *policy.Metadata) (int64, error)
+
+ UpdateRetention(p *policy.Metadata) error
+
+ DeleteRetention(id int64) error
+
+ TriggerRetentionExec(policyID int64, trigger string, dryRun bool) (int64, error)
+
+ OperateRetentionExec(eid int64, action string) error
+
+ GetRetentionExec(eid int64) (*Execution, error)
+
+ ListRetentionExecs(policyID int64, query *q.Query) ([]*Execution, error)
+
+ GetTotalOfRetentionExecs(policyID int64) (int64, error)
+
+ ListRetentionExecTasks(executionID int64, query *q.Query) ([]*Task, error)
+
+ GetTotalOfRetentionExecTasks(executionID int64) (int64, error)
+
+ GetRetentionExecTaskLog(taskID int64) ([]byte, error)
+}
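+
+// A sketch of the typical call flow against the interface (illustrative;
+// "ctl" is assumed to be built via NewAPIController below):
+//
+//    id, err := ctl.TriggerRetentionExec(policyID, ExecutionTriggerManual, true)
+//    exec, err := ctl.GetRetentionExec(id)
+//    tasks, err := ctl.ListRetentionExecTasks(id, nil)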
+
+// DefaultAPIController ...
+type DefaultAPIController struct {
+ manager Manager
+ launcher Launcher
+ projectManager project.Manager
+ repositoryMgr repository.Manager
+ scheduler scheduler.Scheduler
+}
+
+const (
+ // SchedulerCallback ...
+ SchedulerCallback = "SchedulerCallback"
+)
+
+// TriggerParam ...
+type TriggerParam struct {
+ PolicyID int64
+ Trigger string
+}
+
+// GetRetention gets the retention policy specified by the ID
+func (r *DefaultAPIController) GetRetention(id int64) (*policy.Metadata, error) {
+ return r.manager.GetPolicy(id)
+}
+
+// CreateRetention creates a retention policy, registering a schedule when the trigger is cron-based
+func (r *DefaultAPIController) CreateRetention(p *policy.Metadata) (int64, error) {
+ if p.Trigger.Kind == policy.TriggerKindSchedule {
+ cron, ok := p.Trigger.Settings[policy.TriggerSettingsCron]
+ if ok && len(cron.(string)) > 0 {
+ jobid, err := r.scheduler.Schedule(cron.(string), SchedulerCallback, TriggerParam{
+ PolicyID: p.ID,
+ Trigger: ExecutionTriggerSchedule,
+ })
+ if err != nil {
+ return 0, err
+ }
+ if p.Trigger.References == nil {
+ p.Trigger.References = map[string]interface{}{}
+ }
+ p.Trigger.References[policy.TriggerReferencesJobid] = jobid
+ }
+ }
+ return r.manager.CreatePolicy(p)
+}
+
+// UpdateRetention updates the retention policy, re-registering the schedule if the trigger changed
+func (r *DefaultAPIController) UpdateRetention(p *policy.Metadata) error {
+ p0, err := r.manager.GetPolicy(p.ID)
+ if err != nil {
+ return err
+ }
+ needUn := false
+ needSch := false
+
+ if p0.Trigger.Kind != p.Trigger.Kind {
+ if p0.Trigger.Kind == policy.TriggerKindSchedule {
+ needUn = true
+ }
+
+ if p.Trigger.Kind == policy.TriggerKindSchedule {
+ needSch = true
+ }
+ } else {
+ switch p.Trigger.Kind {
+ case policy.TriggerKindSchedule:
+ if p0.Trigger.Settings[policy.TriggerSettingsCron] != p.Trigger.Settings[policy.TriggerSettingsCron] {
+ // unschedule the old cron job
+ if len(p0.Trigger.Settings[policy.TriggerSettingsCron].(string)) > 0 {
+ needUn = true
+ }
+ // schedule the new cron job
+ if len(p.Trigger.Settings[policy.TriggerSettingsCron].(string)) > 0 {
+ // valid cron
+ needSch = true
+ }
+ }
+ case "":
+ // no trigger configured, nothing to (un)schedule
+ default:
+ return fmt.Errorf("unsupported trigger kind: %s", p.Trigger.Kind)
+ }
+ }
+ if needUn {
+ err = r.scheduler.UnSchedule(p0.Trigger.References[policy.TriggerReferencesJobid].(int64))
+ if err != nil {
+ return err
+ }
+ }
+ if needSch {
+ jobid, err := r.scheduler.Schedule(p.Trigger.Settings[policy.TriggerSettingsCron].(string), SchedulerCallback, TriggerParam{
+ PolicyID: p.ID,
+ Trigger: ExecutionTriggerSchedule,
+ })
+ if err != nil {
+ return err
+ }
+ p.Trigger.References[policy.TriggerReferencesJobid] = jobid
+ }
+
+ return r.manager.UpdatePolicy(p)
+}
+
+// DeleteRetention deletes the retention policy and its executions, unscheduling any cron job first
+func (r *DefaultAPIController) DeleteRetention(id int64) error {
+ p, err := r.manager.GetPolicy(id)
+ if err != nil {
+ return err
+ }
+ if p.Trigger.Kind == policy.TriggerKindSchedule && len(p.Trigger.Settings[policy.TriggerSettingsCron].(string)) > 0 {
+ err = r.scheduler.UnSchedule(p.Trigger.References[policy.TriggerReferencesJobid].(int64))
+ if err != nil {
+ return err
+ }
+ }
+
+ return r.manager.DeletePolicyAndExec(id)
+}
+
+// TriggerRetentionExec triggers an execution of the specified retention policy
+func (r *DefaultAPIController) TriggerRetentionExec(policyID int64, trigger string, dryRun bool) (int64, error) {
+ p, err := r.manager.GetPolicy(policyID)
+ if err != nil {
+ return 0, err
+ }
+
+ exec := &Execution{
+ PolicyID: policyID,
+ StartTime: time.Now(),
+ Trigger: trigger,
+ DryRun: dryRun,
+ }
+ id, err := r.manager.CreateExecution(exec)
+ if err != nil {
+ return 0, err
+ }
+ if _, err = r.launcher.Launch(p, id, dryRun); err != nil {
+ // clean up the execution record if the launch failed
+ _ = r.manager.DeleteExecution(id)
+ return 0, err
+ }
+ return id, nil
+}
+
+// OperateRetentionExec operates on the retention execution specified by the ID
+func (r *DefaultAPIController) OperateRetentionExec(eid int64, action string) error {
+ e, err := r.manager.GetExecution(eid)
+ if err != nil {
+ return err
+ }
+ if e == nil {
+ return fmt.Errorf("execution %d not found", eid)
+ }
+ switch action {
+ case "stop":
+ return r.launcher.Stop(eid)
+ default:
+ return fmt.Errorf("not support action %s", action)
+ }
+}
+
+// GetRetentionExec Get Retention Execution
+func (r *DefaultAPIController) GetRetentionExec(executionID int64) (*Execution, error) {
+ return r.manager.GetExecution(executionID)
+}
+
+// ListRetentionExecs List Retention Executions
+func (r *DefaultAPIController) ListRetentionExecs(policyID int64, query *q.Query) ([]*Execution, error) {
+ return r.manager.ListExecutions(policyID, query)
+}
+
+// GetTotalOfRetentionExecs Count Retention Executions
+func (r *DefaultAPIController) GetTotalOfRetentionExecs(policyID int64) (int64, error) {
+ return r.manager.GetTotalOfRetentionExecs(policyID)
+}
+
+// ListRetentionExecTasks lists the tasks of the specified retention execution
+func (r *DefaultAPIController) ListRetentionExecTasks(executionID int64, query *q.Query) ([]*Task, error) {
+ q1 := &q.TaskQuery{
+ ExecutionID: executionID,
+ }
+ if query != nil {
+ q1.PageSize = query.PageSize
+ q1.PageNumber = query.PageNumber
+ }
+ return r.manager.ListTasks(q1)
+}
+
+// GetTotalOfRetentionExecTasks counts the tasks of the specified retention execution
+func (r *DefaultAPIController) GetTotalOfRetentionExecTasks(executionID int64) (int64, error) {
+ return r.manager.GetTotalOfTasks(executionID)
+}
+
+// GetRetentionExecTaskLog Get Retention Execution Task Log
+func (r *DefaultAPIController) GetRetentionExecTaskLog(taskID int64) ([]byte, error) {
+ return r.manager.GetTaskLog(taskID)
+}
+
+// HandleHook handles the hook from the job service (not implemented yet)
+func (r *DefaultAPIController) HandleHook(policyID string, event *job.StatusChange) error {
+ panic("implement me")
+}
+
+// NewAPIController ...
+func NewAPIController(retentionMgr Manager, projectManager project.Manager, repositoryMgr repository.Manager, scheduler scheduler.Scheduler, retentionLauncher Launcher) APIController {
+ return &DefaultAPIController{
+ manager: retentionMgr,
+ launcher: retentionLauncher,
+ projectManager: projectManager,
+ repositoryMgr: repositoryMgr,
+ scheduler: scheduler,
+ }
+}
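+
+// Wiring sketch (illustrative; the scheduler and launcher instances are
+// assumed to come from their own constructors):
+//
+//    ctl := NewAPIController(NewManager(), project.New(),
+//        repository.New(projectMgr, chartCtl), sched, launcher)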
diff --git a/src/pkg/retention/controller_test.go b/src/pkg/retention/controller_test.go
new file mode 100644
index 000000000..28202dd71
--- /dev/null
+++ b/src/pkg/retention/controller_test.go
@@ -0,0 +1,234 @@
+package retention
+
+import (
+ "testing"
+
+ "github.com/goharbor/harbor/src/pkg/retention/dep"
+ "github.com/goharbor/harbor/src/pkg/retention/policy"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/stretchr/testify/suite"
+)
+
+type ControllerTestSuite struct {
+ suite.Suite
+
+ oldClient dep.Client
+}
+
+// SetupSuite ...
+func (s *ControllerTestSuite) SetupSuite() {
+
+}
+
+// TestController ...
+func TestController(t *testing.T) {
+ suite.Run(t, new(ControllerTestSuite))
+}
+
+func (s *ControllerTestSuite) TestPolicy() {
+ projectMgr := &fakeProjectManager{}
+ repositoryMgr := &fakeRepositoryManager{}
+ retentionScheduler := &fakeRetentionScheduler{}
+ retentionLauncher := &fakeLauncher{}
+ retentionMgr := NewManager()
+ c := NewAPIController(retentionMgr, projectMgr, repositoryMgr, retentionScheduler, retentionLauncher)
+
+ p1 := &policy.Metadata{
+ Algorithm: "or",
+ Rules: []rule.Metadata{
+ {
+ ID: 1,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: ".+",
+ },
+ },
+ },
+ },
+ {
+ ID: 2,
+ Priority: 1,
+ Template: "recentXdays",
+ Disabled: true,
+ Parameters: rule.Parameters{
+ "num": 3,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: ".+",
+ },
+ },
+ },
+ },
+ },
+ Trigger: &policy.Trigger{
+ Kind: "Schedule",
+ Settings: map[string]interface{}{
+ "cron": "* 22 11 * * *",
+ },
+ },
+ Scope: &policy.Scope{
+ Level: "project",
+ Reference: 1,
+ },
+ }
+
+ id, err := c.CreateRetention(p1)
+ s.Require().Nil(err)
+ s.Require().True(id > 0)
+
+ p1, err = c.GetRetention(id)
+ s.Require().Nil(err)
+ s.Require().EqualValues("project", p1.Scope.Level)
+ s.Require().True(p1.ID > 0)
+
+ p1.Scope.Level = "test"
+ err = c.UpdateRetention(p1)
+ s.Require().Nil(err)
+ p1, err = c.GetRetention(id)
+ s.Require().Nil(err)
+ s.Require().EqualValues("test", p1.Scope.Level)
+
+ err = c.DeleteRetention(id)
+ s.Require().Nil(err)
+
+ p1, err = c.GetRetention(id)
+ s.Require().Nil(err)
+ s.Require().Nil(p1)
+}
+
+func (s *ControllerTestSuite) TestExecution() {
+ projectMgr := &fakeProjectManager{}
+ repositoryMgr := &fakeRepositoryManager{}
+ retentionScheduler := &fakeRetentionScheduler{}
+ retentionLauncher := &fakeLauncher{}
+ retentionMgr := NewManager()
+ m := NewAPIController(retentionMgr, projectMgr, repositoryMgr, retentionScheduler, retentionLauncher)
+
+ p1 := &policy.Metadata{
+ Algorithm: "or",
+ Rules: []rule.Metadata{
+ {
+ ID: 1,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: ".+",
+ },
+ },
+ },
+ },
+ },
+ Trigger: &policy.Trigger{
+ Kind: "Schedule",
+ Settings: map[string]interface{}{
+ "cron": "* 22 11 * * *",
+ },
+ },
+ Scope: &policy.Scope{
+ Level: "project",
+ Reference: 1,
+ },
+ }
+
+ policyID, err := m.CreateRetention(p1)
+ s.Require().Nil(err)
+ s.Require().True(policyID > 0)
+
+ id, err := m.TriggerRetentionExec(policyID, ExecutionTriggerManual, false)
+ s.Require().Nil(err)
+ s.Require().True(id > 0)
+
+ e1, err := m.GetRetentionExec(id)
+ s.Require().Nil(err)
+ s.Require().NotNil(e1)
+ s.Require().EqualValues(id, e1.ID)
+
+ err = m.OperateRetentionExec(id, "stop")
+ s.Require().Nil(err)
+
+ es, err := m.ListRetentionExecs(policyID, nil)
+ s.Require().Nil(err)
+ s.Require().EqualValues(1, len(es))
+
+ ts, err := m.ListRetentionExecTasks(id, nil)
+ s.Require().Nil(err)
+ s.Require().EqualValues(0, len(ts))
+
+}
+
+type fakeRetentionScheduler struct {
+}
+
+func (f *fakeRetentionScheduler) Schedule(cron string, callbackFuncName string, params interface{}) (int64, error) {
+ return 111, nil
+}
+
+func (f *fakeRetentionScheduler) UnSchedule(id int64) error {
+ return nil
+}
+
+type fakeLauncher struct {
+}
+
+func (f *fakeLauncher) Stop(executionID int64) error {
+ return nil
+}
+
+func (f *fakeLauncher) Launch(policy *policy.Metadata, executionID int64, isDryRun bool) (int64, error) {
+ return 0, nil
+}
diff --git a/src/pkg/retention/dao/models/retention.go b/src/pkg/retention/dao/models/retention.go
new file mode 100644
index 000000000..8e7d94590
--- /dev/null
+++ b/src/pkg/retention/dao/models/retention.go
@@ -0,0 +1,62 @@
+package models
+
+import (
+ "time"
+
+ "github.com/astaxie/beego/orm"
+)
+
+// const definitions
+const (
+ ExecutionStatusInProgress string = "InProgress"
+ ExecutionStatusSucceed string = "Succeed"
+ ExecutionStatusFailed string = "Failed"
+ ExecutionStatusStopped string = "Stopped"
+)
+
+func init() {
+ orm.RegisterModel(
+ new(RetentionPolicy),
+ new(RetentionExecution),
+ new(RetentionTask),
+ )
+}
+
+// RetentionPolicy is the database model of a retention policy
+type RetentionPolicy struct {
+ ID int64 `orm:"pk;auto;column(id)" json:"id"`
+ // 'system', 'project' and 'repository'
+ ScopeLevel string
+ ScopeReference int64
+ TriggerKind string
+ // json format, include algorithm, rules, exclusions
+ Data string
+ CreateTime time.Time
+ UpdateTime time.Time
+}
+
+// RetentionExecution is the database model of a retention execution
+type RetentionExecution struct {
+ ID int64 `orm:"pk;auto;column(id)" json:"id"`
+ PolicyID int64 `orm:"column(policy_id)"`
+ DryRun bool
+ // manual, scheduled
+ Trigger string
+ StartTime time.Time
+ EndTime time.Time `orm:"-"`
+ Status string `orm:"-"`
+}
+
+// RetentionTask ...
+type RetentionTask struct {
+ ID int64 `orm:"pk;auto;column(id)"`
+ ExecutionID int64 `orm:"column(execution_id)"`
+ Repository string `orm:"column(repository)"`
+ JobID string `orm:"column(job_id)"`
+ Status string `orm:"column(status)"`
+ StatusCode int `orm:"column(status_code)"`
+ StartTime time.Time `orm:"column(start_time)"`
+ EndTime time.Time `orm:"column(end_time)"`
+ Total int `orm:"column(total)"`
+ Retained int `orm:"column(retained)"`
+}
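+
+// The Data column holds the full policy metadata serialized as JSON; a
+// persistence sketch (illustrative, mirroring the dao tests):
+//
+//    data, _ := json.Marshal(meta) // meta is a *policy.Metadata
+//    p := &RetentionPolicy{
+//        ScopeLevel:  meta.Scope.Level,
+//        TriggerKind: meta.Trigger.Kind,
+//        Data:        string(data),
+//    }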
diff --git a/src/pkg/retention/dao/retention.go b/src/pkg/retention/dao/retention.go
new file mode 100644
index 000000000..2c810923d
--- /dev/null
+++ b/src/pkg/retention/dao/retention.go
@@ -0,0 +1,291 @@
+package dao
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+
+ "github.com/astaxie/beego/orm"
+ "github.com/goharbor/harbor/src/common/dao"
+ jobmodels "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/pkg/retention/dao/models"
+ "github.com/goharbor/harbor/src/pkg/retention/q"
+)
+
+// CreatePolicy Create Policy
+func CreatePolicy(p *models.RetentionPolicy) (int64, error) {
+ o := dao.GetOrmer()
+ return o.Insert(p)
+}
+
+// UpdatePolicy Update Policy
+func UpdatePolicy(p *models.RetentionPolicy, cols ...string) error {
+ o := dao.GetOrmer()
+ _, err := o.Update(p, cols...)
+ return err
+}
+
+// DeletePolicyAndExec deletes the policy together with its executions and tasks
+func DeletePolicyAndExec(id int64) error {
+ o := dao.GetOrmer()
+ if _, err := o.Raw("delete from retention_task where execution_id in (select id from retention_execution where policy_id = ?) ", id).Exec(); err != nil {
+ return err
+ }
+ if _, err := o.Raw("delete from retention_execution where policy_id = ?", id).Exec(); err != nil {
+ return err
+ }
+ _, err := o.Delete(&models.RetentionPolicy{
+ ID: id,
+ })
+ return err
+}
+
+// GetPolicy Get Policy
+func GetPolicy(id int64) (*models.RetentionPolicy, error) {
+ o := dao.GetOrmer()
+ p := &models.RetentionPolicy{
+ ID: id,
+ }
+ if err := o.Read(p); err != nil {
+ return nil, err
+ }
+ return p, nil
+}
+
+// CreateExecution Create Execution
+func CreateExecution(e *models.RetentionExecution) (int64, error) {
+ o := dao.GetOrmer()
+ return o.Insert(e)
+}
+
+// UpdateExecution Update Execution
+func UpdateExecution(e *models.RetentionExecution, cols ...string) error {
+ o := dao.GetOrmer()
+ _, err := o.Update(e, cols...)
+ return err
+}
+
+// DeleteExecution Delete Execution
+func DeleteExecution(id int64) error {
+ o := dao.GetOrmer()
+ _, err := o.Delete(&models.RetentionExecution{
+ ID: id,
+ })
+ return err
+}
+
+// GetExecution Get Execution
+func GetExecution(id int64) (*models.RetentionExecution, error) {
+ o := dao.GetOrmer()
+ e := &models.RetentionExecution{
+ ID: id,
+ }
+ if err := o.Read(e); err != nil {
+ return nil, err
+ }
+ if err := fillStatus(e); err != nil {
+ return nil, err
+ }
+ return e, nil
+}
+
+// fillStatus fills in the aggregated execution status; precedence: InProgress > Stopped > Failed > Succeed
+func fillStatus(exec *models.RetentionExecution) error {
+ o := dao.GetOrmer()
+ var r orm.Params
+ if _, err := o.Raw("select status, count(*) num from retention_task where execution_id = ? group by status", exec.ID).
+ RowsToMap(&r, "status", "num"); err != nil {
+ return err
+ }
+ var (
+ total, running, succeed, failed, stopped int64
+ )
+ for k, s := range r {
+ v, err := strconv.ParseInt(s.(string), 10, 64)
+ if err != nil {
+ return err
+ }
+ total += v
+ switch k {
+ case jobmodels.JobScheduled, jobmodels.JobPending, jobmodels.JobRunning, jobmodels.JobRetrying:
+ running += v
+ case jobmodels.JobFinished:
+ succeed += v
+ case jobmodels.JobCanceled, jobmodels.JobStopped:
+ stopped += v
+ case jobmodels.JobError:
+ failed += v
+ }
+ }
+ if total == 0 {
+ exec.Status = models.ExecutionStatusSucceed
+ exec.EndTime = exec.StartTime
+ return nil
+ }
+ if running > 0 {
+ exec.Status = models.ExecutionStatusInProgress
+ } else if stopped > 0 {
+ exec.Status = models.ExecutionStatusStopped
+ } else if failed > 0 {
+ exec.Status = models.ExecutionStatusFailed
+ } else {
+ exec.Status = models.ExecutionStatusSucceed
+ }
+ if exec.Status != models.ExecutionStatusInProgress {
+ if err := o.Raw("select max(end_time) from retention_task where execution_id = ?", exec.ID).
+ QueryRow(&exec.EndTime); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ListExecutions List Executions
+func ListExecutions(policyID int64, query *q.Query) ([]*models.RetentionExecution, error) {
+ o := dao.GetOrmer()
+ qs := o.QueryTable(new(models.RetentionExecution))
+
+ qs = qs.Filter("policy_id", policyID)
+ qs = qs.OrderBy("-id")
+ if query != nil {
+ qs = qs.Limit(query.PageSize, (query.PageNumber-1)*query.PageSize)
+ }
+ var execs []*models.RetentionExecution
+ _, err := qs.All(&execs)
+ if err != nil {
+ return nil, err
+ }
+ for _, e := range execs {
+ if err := fillStatus(e); err != nil {
+ return nil, err
+ }
+ }
+ return execs, nil
+}
+
+// GetTotalOfRetentionExecs Count Executions
+func GetTotalOfRetentionExecs(policyID int64) (int64, error) {
+ o := dao.GetOrmer()
+ qs := o.QueryTable(new(models.RetentionExecution))
+
+ qs = qs.Filter("policy_id", policyID)
+ return qs.Count()
+}
+
+/*
+// ListExecHistories List Execution Histories
+func ListExecHistories(executionID int64, query *q.Query) ([]*models.RetentionTask, error) {
+ o := dao.GetOrmer()
+ qs := o.QueryTable(new(models.RetentionTask))
+ qs = qs.Filter("Execution_ID", executionID)
+ if query != nil {
+ qs = qs.Limit(query.PageSize, (query.PageNumber-1)*query.PageSize)
+ }
+ var tasks []*models.RetentionTask
+ _, err := qs.All(&tasks)
+ if err != nil {
+ return nil, err
+ }
+ return tasks, nil
+}
+
+// AppendExecHistory Append Execution History
+func AppendExecHistory(t *models.RetentionTask) (int64, error) {
+ o := dao.GetOrmer()
+ return o.Insert(t)
+}
+*/
+
+// CreateTask creates task record in database
+func CreateTask(task *models.RetentionTask) (int64, error) {
+ if task == nil {
+ return 0, errors.New("nil task")
+ }
+ return dao.GetOrmer().Insert(task)
+}
+
+// UpdateTask updates the task record in database
+func UpdateTask(task *models.RetentionTask, cols ...string) error {
+ if task == nil {
+ return errors.New("nil task")
+ }
+ if task.ID <= 0 {
+ return fmt.Errorf("invalid task ID: %d", task.ID)
+ }
+ _, err := dao.GetOrmer().Update(task, cols...)
+ return err
+}
+
+// UpdateTaskStatus updates the status of task whose status code is less than the statusCode provided
+func UpdateTaskStatus(taskID int64, status string, statusCode int) error {
+ _, err := dao.GetOrmer().QueryTable(&models.RetentionTask{}).
+ Filter("ID", taskID).
+ Filter("StatusCode__lt", statusCode).
+ Update(orm.Params{
+ "Status": status,
+ "StatusCode": statusCode,
+ })
+ return err
+}
+
+// DeleteTask deletes the task record specified by ID in database
+func DeleteTask(id int64) error {
+ _, err := dao.GetOrmer().Delete(&models.RetentionTask{
+ ID: id,
+ })
+ return err
+}
+
+// GetTask get the task record specified by ID in database
+func GetTask(id int64) (*models.RetentionTask, error) {
+ task := &models.RetentionTask{
+ ID: id,
+ }
+ if err := dao.GetOrmer().Read(task); err != nil {
+ return nil, err
+ }
+ return task, nil
+}
+
+// ListTask lists the tasks according to the query
+func ListTask(query ...*q.TaskQuery) ([]*models.RetentionTask, error) {
+ qs := dao.GetOrmer().QueryTable(&models.RetentionTask{})
+ if len(query) > 0 && query[0] != nil {
+ q := query[0]
+ if q.ExecutionID > 0 {
+ qs = qs.Filter("ExecutionID", q.ExecutionID)
+ }
+ if len(q.Status) > 0 {
+ qs = qs.Filter("Status", q.Status)
+ }
+ if q.PageSize > 0 {
+ qs = qs.Limit(q.PageSize)
+ if q.PageNumber > 0 {
+ qs = qs.Offset((q.PageNumber - 1) * q.PageSize)
+ }
+ }
+ }
+ tasks := []*models.RetentionTask{}
+ _, err := qs.All(&tasks)
+ return tasks, err
+}
+
+// GetTotalOfTasks Count tasks
+func GetTotalOfTasks(executionID int64) (int64, error) {
+ qs := dao.GetOrmer().QueryTable(&models.RetentionTask{})
+ qs = qs.Filter("ExecutionID", executionID)
+ return qs.Count()
+}
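+
+// Pagination sketch for ListTask (illustrative): page 2 with a page size
+// of 20 translates to Limit(20).Offset(20).
+//
+//    tasks, err := ListTask(&q.TaskQuery{
+//        ExecutionID: executionID,
+//        PageSize:    20,
+//        PageNumber:  2,
+//    })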
diff --git a/src/pkg/retention/dao/retention_test.go b/src/pkg/retention/dao/retention_test.go
new file mode 100644
index 000000000..597b86c40
--- /dev/null
+++ b/src/pkg/retention/dao/retention_test.go
@@ -0,0 +1,220 @@
+package dao
+
+import (
+ "encoding/json"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/pkg/retention/dao/models"
+ "github.com/goharbor/harbor/src/pkg/retention/policy"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/q"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMain(m *testing.M) {
+ dao.PrepareTestForPostgresSQL()
+ os.Exit(m.Run())
+}
+
+func TestPolicy(t *testing.T) {
+ p := &policy.Metadata{
+ Algorithm: "or",
+ Rules: []rule.Metadata{
+ {
+ ID: 1,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: ".+",
+ },
+ },
+ },
+ },
+ },
+ Trigger: &policy.Trigger{
+ Kind: "Schedule",
+ Settings: map[string]interface{}{
+ "cron": "* 22 11 * * *",
+ },
+ },
+ Scope: &policy.Scope{
+ Level: "project",
+ Reference: 1,
+ },
+ }
+ p1 := &models.RetentionPolicy{
+ ScopeLevel: p.Scope.Level,
+ TriggerKind: p.Trigger.Kind,
+ CreateTime: time.Now(),
+ UpdateTime: time.Now(),
+ }
+ data, _ := json.Marshal(p)
+ p1.Data = string(data)
+
+ id, err := CreatePolicy(p1)
+ assert.Nil(t, err)
+ assert.True(t, id > 0)
+
+ p1, err = GetPolicy(id)
+ assert.Nil(t, err)
+ assert.EqualValues(t, "project", p1.ScopeLevel)
+ assert.True(t, p1.ID > 0)
+
+ p1.ScopeLevel = "test"
+ err = UpdatePolicy(p1)
+ assert.Nil(t, err)
+ p1, err = GetPolicy(id)
+ assert.Nil(t, err)
+ assert.EqualValues(t, "test", p1.ScopeLevel)
+
+ err = DeletePolicyAndExec(id)
+ assert.Nil(t, err)
+
+ p1, err = GetPolicy(id)
+ assert.NotNil(t, err)
+ assert.True(t, strings.Contains(err.Error(), "no row found"))
+}
+
+func TestExecution(t *testing.T) {
+ p := &policy.Metadata{
+ Algorithm: "or",
+ Rules: []rule.Metadata{
+ {
+ ID: 1,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: ".+",
+ },
+ },
+ },
+ },
+ },
+ Trigger: &policy.Trigger{
+ Kind: "Schedule",
+ Settings: map[string]interface{}{
+ "cron": "* 22 11 * * *",
+ },
+ },
+ Scope: &policy.Scope{
+ Level: "project",
+ Reference: 1,
+ },
+ }
+ p1 := &models.RetentionPolicy{
+ ScopeLevel: p.Scope.Level,
+ TriggerKind: p.Trigger.Kind,
+ CreateTime: time.Now(),
+ UpdateTime: time.Now(),
+ }
+ data, _ := json.Marshal(p)
+ p1.Data = string(data)
+
+ policyID, err := CreatePolicy(p1)
+ assert.Nil(t, err)
+ assert.True(t, policyID > 0)
+
+ e := &models.RetentionExecution{
+ PolicyID: policyID,
+ DryRun: false,
+ Trigger: "manual",
+ StartTime: time.Now(),
+ }
+ id, err := CreateExecution(e)
+ assert.Nil(t, err)
+ assert.True(t, id > 0)
+
+ e1, err := GetExecution(id)
+ assert.Nil(t, err)
+ assert.NotNil(t, e1)
+ assert.EqualValues(t, id, e1.ID)
+
+ es, err := ListExecutions(policyID, nil)
+ assert.Nil(t, err)
+ assert.EqualValues(t, 1, len(es))
+}
+
+func TestTask(t *testing.T) {
+ task := &models.RetentionTask{
+ ExecutionID: 1,
+ Status: "pending",
+ }
+ // create
+ id, err := CreateTask(task)
+ require.Nil(t, err)
+
+ // get
+ tk, err := GetTask(id)
+ require.Nil(t, err)
+ require.Equal(t, id, tk.ID)
+ require.Equal(t, "pending", tk.Status)
+
+ // update
+ task.ID = id
+ task.Total = 1
+ err = UpdateTask(task, "Total")
+ require.Nil(t, err)
+
+ // update status
+ err = UpdateTaskStatus(id, "running", 1)
+ require.Nil(t, err)
+
+ // list
+ tasks, err := ListTask(&q.TaskQuery{
+ ExecutionID: 1,
+ Status: "running",
+ })
+ require.Nil(t, err)
+ require.Equal(t, 1, len(tasks))
+ assert.Equal(t, 1, tasks[0].Total)
+ assert.Equal(t, int64(1), tasks[0].ExecutionID)
+ assert.Equal(t, "running", tasks[0].Status)
+ assert.Equal(t, 1, tasks[0].StatusCode)
+
+ // delete
+ err = DeleteTask(id)
+ require.Nil(t, err)
+}
diff --git a/src/pkg/retention/dep/client.go b/src/pkg/retention/dep/client.go
new file mode 100644
index 000000000..9ccb951f7
--- /dev/null
+++ b/src/pkg/retention/dep/client.go
@@ -0,0 +1,181 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dep
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/goharbor/harbor/src/common/http/modifier/auth"
+ "github.com/goharbor/harbor/src/jobservice/config"
+ "github.com/goharbor/harbor/src/pkg/clients/core"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+// DefaultClient for the retention
+var DefaultClient = NewClient()
+
+// Client is designed to access core service to get required infos
+type Client interface {
+ // Get the tag candidates under the repository
+ //
+ // Arguments:
+ // repo *res.Repository : repository info
+ //
+ // Returns:
+ // []*res.Candidate : candidates returned
+ // error : common error if any errors occurred
+ GetCandidates(repo *res.Repository) ([]*res.Candidate, error)
+
+ // Delete the given repository
+ //
+ // Arguments:
+ // repo *res.Repository : repository info
+ //
+ // Returns:
+ // error : common error if any errors occurred
+ DeleteRepository(repo *res.Repository) error
+
+ // Delete the specified candidate
+ //
+ // Arguments:
+ // candidate *res.Candidate : the deleting candidate
+ //
+ // Returns:
+ // error : common error if any errors occurred
+ Delete(candidate *res.Candidate) error
+}
+
+// NewClient creates a basic client; an optional *http.Client can be passed in
+func NewClient(client ...*http.Client) Client {
+ var c *http.Client
+ if len(client) > 0 {
+ c = client[0]
+ }
+ if c == nil {
+ c = http.DefaultClient
+ }
+
+ // init core client
+ internalCoreURL := config.GetCoreURL()
+ jobserviceSecret := config.GetAuthSecret()
+ authorizer := auth.NewSecretAuthorizer(jobserviceSecret)
+ coreClient := core.New(internalCoreURL, c, authorizer)
+
+ return &basicClient{
+ internalCoreURL: internalCoreURL,
+ coreClient: coreClient,
+ }
+}
+
+// basicClient is the default implementation of the Client interface
+type basicClient struct {
+ internalCoreURL string
+ coreClient core.Client
+}
+
+// GetCandidates gets the tag candidates under the repository
+func (bc *basicClient) GetCandidates(repository *res.Repository) ([]*res.Candidate, error) {
+ if repository == nil {
+ return nil, errors.New("repository is nil")
+ }
+ candidates := make([]*res.Candidate, 0)
+ switch repository.Kind {
+ case res.Image:
+ images, err := bc.coreClient.ListAllImages(repository.Namespace, repository.Name)
+ if err != nil {
+ return nil, err
+ }
+ for _, image := range images {
+ labels := make([]string, 0)
+ for _, label := range image.Labels {
+ labels = append(labels, label.Name)
+ }
+ candidate := &res.Candidate{
+ Kind: res.Image,
+ Namespace: repository.Namespace,
+ Repository: repository.Name,
+ Tag: image.Name,
+ Labels: labels,
+ CreationTime: image.Created.Unix(),
+ PulledTime: image.PullTime.Unix(),
+ PushedTime: image.PushTime.Unix(),
+ }
+ candidates = append(candidates, candidate)
+ }
+ /*
+ case res.Chart:
+ charts, err := bc.coreClient.ListAllCharts(repository.Namespace, repository.Name)
+ if err != nil {
+ return nil, err
+ }
+ for _, chart := range charts {
+ labels := make([]string, 0)
+ for _, label := range chart.Labels {
+ labels = append(labels, label.Name)
+ }
+ candidate := &res.Candidate{
+ Kind: res.Chart,
+ Namespace: repository.Namespace,
+ Repository: repository.Name,
+ Tag: chart.Name,
+ Labels: labels,
+ CreationTime: chart.Created.Unix(),
+ PushedTime: ,
+ PulledTime: ,
+ }
+ candidates = append(candidates, candidate)
+ }
+ */
+ default:
+ return nil, fmt.Errorf("unsupported repository kind: %s", repository.Kind)
+ }
+ return candidates, nil
+}
+
+// DeleteRepository deletes the specified repository
+func (bc *basicClient) DeleteRepository(repo *res.Repository) error {
+ if repo == nil {
+ return errors.New("repository is nil")
+ }
+ switch repo.Kind {
+ case res.Image:
+ return bc.coreClient.DeleteImageRepository(repo.Namespace, repo.Name)
+ /*
+ case res.Chart:
+ return bc.coreClient.DeleteChartRepository(repo.Namespace, repo.Name)
+ */
+ default:
+ return fmt.Errorf("unsupported repository kind: %s", repo.Kind)
+ }
+}
+
+// Delete deletes the specified candidate
+func (bc *basicClient) Delete(candidate *res.Candidate) error {
+ if candidate == nil {
+ return errors.New("candidate is nil")
+ }
+ switch candidate.Kind {
+ case res.Image:
+ return bc.coreClient.DeleteImage(candidate.Namespace, candidate.Repository, candidate.Tag)
+ /*
+ case res.Chart:
+ return bc.coreClient.DeleteChart(candidate.Namespace, candidate.Repository, candidate.Tag)
+ */
+ default:
+ return fmt.Errorf("unsupported candidate kind: %s", candidate.Kind)
+ }
+}
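+
+// Usage sketch (illustrative): retention jobs go through the package-level
+// DefaultClient rather than constructing their own client.
+//
+//    repo := &res.Repository{Kind: res.Image, Namespace: "library", Name: "hello-world"}
+//    candidates, err := dep.DefaultClient.GetCandidates(repo)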
diff --git a/src/pkg/retention/dep/client_test.go b/src/pkg/retention/dep/client_test.go
new file mode 100644
index 000000000..071cc230c
--- /dev/null
+++ b/src/pkg/retention/dep/client_test.go
@@ -0,0 +1,138 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dep
+
+import (
+ "testing"
+
+ "github.com/goharbor/harbor/src/chartserver"
+ jmodels "github.com/goharbor/harbor/src/common/job/models"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/jobservice/job"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/goharbor/harbor/src/testing/clients"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+ "k8s.io/helm/pkg/proto/hapi/chart"
+ "k8s.io/helm/pkg/repo"
+)
+
+type fakeCoreClient struct {
+ clients.DumbCoreClient
+}
+
+func (f *fakeCoreClient) ListAllImages(project, repository string) ([]*models.TagResp, error) {
+ image := &models.TagResp{}
+ image.Name = "latest"
+ return []*models.TagResp{image}, nil
+}
+
+func (f *fakeCoreClient) ListAllCharts(project, repository string) ([]*chartserver.ChartVersion, error) {
+ metadata := &chart.Metadata{
+ Name: "1.0",
+ }
+ chart := &chartserver.ChartVersion{}
+ chart.ChartVersion = repo.ChartVersion{
+ Metadata: metadata,
+ }
+ return []*chartserver.ChartVersion{chart}, nil
+}
+
+type fakeJobserviceClient struct{}
+
+func (f *fakeJobserviceClient) SubmitJob(*jmodels.JobData) (string, error) {
+ return "1", nil
+}
+func (f *fakeJobserviceClient) GetJobLog(uuid string) ([]byte, error) {
+ return nil, nil
+}
+func (f *fakeJobserviceClient) PostAction(uuid, action string) error {
+ return nil
+}
+func (f *fakeJobserviceClient) GetExecutions(uuid string) ([]job.Stats, error) {
+ return nil, nil
+}
+
+type clientTestSuite struct {
+ suite.Suite
+}
+
+func (c *clientTestSuite) TestGetCandidates() {
+ client := &basicClient{}
+ client.coreClient = &fakeCoreClient{}
+ var repository *res.Repository
+ // nil repository
+ candidates, err := client.GetCandidates(repository)
+ require.NotNil(c.T(), err)
+
+ // image repository
+ repository = &res.Repository{}
+ repository.Kind = res.Image
+ repository.Namespace = "library"
+ repository.Name = "hello-world"
+ candidates, err = client.GetCandidates(repository)
+ require.Nil(c.T(), err)
+ assert.Equal(c.T(), 1, len(candidates))
+ assert.Equal(c.T(), res.Image, candidates[0].Kind)
+ assert.Equal(c.T(), "library", candidates[0].Namespace)
+ assert.Equal(c.T(), "hello-world", candidates[0].Repository)
+ assert.Equal(c.T(), "latest", candidates[0].Tag)
+
+ /*
+ // chart repository
+ repository.Kind = res.Chart
+ repository.Namespace = "goharbor"
+ repository.Name = "harbor"
+ candidates, err = client.GetCandidates(repository)
+ require.Nil(c.T(), err)
+ assert.Equal(c.T(), 1, len(candidates))
+ assert.Equal(c.T(), res.Chart, candidates[0].Kind)
+ assert.Equal(c.T(), "goharbor", candidates[0].Namespace)
+ assert.Equal(c.T(), "1.0", candidates[0].Tag)
+ */
+}
+
+func (c *clientTestSuite) TestDelete() {
+ client := &basicClient{}
+ client.coreClient = &fakeCoreClient{}
+
+ var candidate *res.Candidate
+ // nil candidate
+ err := client.Delete(candidate)
+ require.NotNil(c.T(), err)
+
+ // image
+ candidate = &res.Candidate{}
+ candidate.Kind = res.Image
+ err = client.Delete(candidate)
+ require.Nil(c.T(), err)
+
+ /*
+ // chart
+ candidate.Kind = res.Chart
+ err = client.Delete(candidate)
+ require.Nil(c.T(), err)
+ */
+
+ // unsupported type
+ candidate.Kind = "unsupported"
+ err = client.Delete(candidate)
+ require.NotNil(c.T(), err)
+}
+
+func TestClientTestSuite(t *testing.T) {
+ suite.Run(t, new(clientTestSuite))
+}
diff --git a/src/pkg/retention/job.go b/src/pkg/retention/job.go
new file mode 100644
index 000000000..533e9dddc
--- /dev/null
+++ b/src/pkg/retention/job.go
@@ -0,0 +1,271 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package retention
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/goharbor/harbor/src/jobservice/job"
+ "github.com/goharbor/harbor/src/jobservice/logger"
+ "github.com/goharbor/harbor/src/pkg/retention/dep"
+ "github.com/goharbor/harbor/src/pkg/retention/policy"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/lwp"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/olekukonko/tablewriter"
+ "github.com/pkg/errors"
+)
+
+const (
+ actionMarkRetain = "RETAIN"
+ actionMarkDeletion = "DEL"
+ actionMarkError = "ERR"
+)
+
+// Job of running retention process
+type Job struct{}
+
+// MaxFails of the job
+func (pj *Job) MaxFails() uint {
+ return 3
+}
+
+// ShouldRetry indicates job can be retried if failed
+func (pj *Job) ShouldRetry() bool {
+ return true
+}
+
+// Validate the parameters
+func (pj *Job) Validate(params job.Parameters) (err error) {
+ if _, err = getParamRepo(params); err == nil {
+ if _, err = getParamMeta(params); err == nil {
+ _, err = getParamDryRun(params)
+ }
+ }
+
+ return
+}
+
+// Run the job
+func (pj *Job) Run(ctx job.Context, params job.Parameters) error {
+ // get the logger from the job context
+ myLogger := ctx.GetLogger()
+
+ // parameters were already validated by Validate(), so the errors can be safely ignored
+ repo, _ := getParamRepo(params)
+ liteMeta, _ := getParamMeta(params)
+ isDryRun, _ := getParamDryRun(params)
+
+ // Log stage: start
+ repoPath := fmt.Sprintf("%s/%s", repo.Namespace, repo.Name)
+ myLogger.Infof("Run retention process.\n Repository: %s \n Rule Algorithm: %s \n Dry Run: %v", repoPath, liteMeta.Algorithm, isDryRun)
+
+ // Stop check point 1:
+ if isStopped(ctx) {
+ logStop(myLogger)
+ return nil
+ }
+
+ // Retrieve all the candidates under the specified repository
+ allCandidates, err := dep.DefaultClient.GetCandidates(repo)
+ if err != nil {
+ return logError(myLogger, err)
+ }
+
+ // Log stage: load candidates
+ myLogger.Infof("Load %d candidates from repository %s", len(allCandidates), repoPath)
+
+ // Build the processor
+ builder := policy.NewBuilder(allCandidates)
+ processor, err := builder.Build(liteMeta, isDryRun)
+ if err != nil {
+ return logError(myLogger, err)
+ }
+
+ // Stop check point 2:
+ if isStopped(ctx) {
+ logStop(myLogger)
+ return nil
+ }
+
+ // Run the flow
+ results, err := processor.Process(allCandidates)
+ if err != nil {
+ return logError(myLogger, err)
+ }
+
+ // Log stage: results with table view
+ logResults(myLogger, allCandidates, results)
+
+ // report the total and retained counts via job check-in
+ return saveRetainNum(ctx, results, allCandidates)
+}
+
+func saveRetainNum(ctx job.Context, results []*res.Result, allCandidates []*res.Candidate) error {
+ // each result represents a deletion attempt; a nil error means the candidate was deleted
+ var delNum int
+ for _, r := range results {
+ if r.Error == nil {
+ delNum++
+ }
+ }
+ retainObj := struct {
+ Total int `json:"total"`
+ Retained int `json:"retained"`
+ }{
+ Total: len(allCandidates),
+ Retained: len(allCandidates) - delNum,
+ }
+ c, err := json.Marshal(retainObj)
+ if err != nil {
+ return err
+ }
+ _ = ctx.Checkin(string(c))
+ return nil
+}
+
+func logResults(logger logger.Interface, all []*res.Candidate, results []*res.Result) {
+ hash := make(map[string]error, len(results))
+ for _, r := range results {
+ if r.Target != nil {
+ hash[r.Target.Hash()] = r.Error
+ }
+ }
+
+ op := func(art *res.Candidate) string {
+ if e, exists := hash[art.Hash()]; exists {
+ if e != nil {
+ return actionMarkError
+ }
+
+ return actionMarkDeletion
+ }
+
+ return actionMarkRetain
+ }
+
+ var buf bytes.Buffer
+
+ // start with length 0 (capacity len(all)); rows are appended below
+ data := make([][]string, 0, len(all))
+
+ for _, c := range all {
+ row := []string{
+ arn(c),
+ c.Kind,
+ strings.Join(c.Labels, ","),
+ t(c.PushedTime),
+ t(c.PulledTime),
+ t(c.CreationTime),
+ op(c),
+ }
+ data = append(data, row)
+ }
+
+ table := tablewriter.NewWriter(&buf)
+ table.SetHeader([]string{"Artifact", "Kind", "Labels", "PushedTime", "PulledTime", "CreatedTime", "Retention"})
+ table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
+ table.SetCenterSeparator("|")
+ table.AppendBulk(data)
+ table.Render()
+
+ logger.Infof("\n%s", buf.String())
+
+ // log all the concrete errors, if any
+ for _, r := range results {
+ if r.Error != nil {
+ logger.Infof("Retention error for artifact %s:%s : %s", r.Target.Kind, arn(r.Target), r.Error)
+ }
+ }
+}
+
+func arn(art *res.Candidate) string {
+ return fmt.Sprintf("%s/%s:%s", art.Namespace, art.Repository, art.Tag)
+}
+
+func t(tm int64) string {
+ return time.Unix(tm, 0).Format("2006/01/02 15:04:05")
+}
+
+func isStopped(ctx job.Context) (stopped bool) {
+ cmd, ok := ctx.OPCommand()
+ stopped = ok && cmd == job.StopCommand
+
+ return
+}
+
+func logStop(logger logger.Interface) {
+ logger.Info("Retention job is stopped")
+}
+
+func logError(logger logger.Interface, err error) error {
+ wrappedErr := errors.Wrap(err, "retention job")
+ logger.Error(wrappedErr)
+
+ return wrappedErr
+}
+
+func getParamDryRun(params job.Parameters) (bool, error) {
+ v, ok := params[ParamDryRun]
+ if !ok {
+ return false, errors.Errorf("missing parameter: %s", ParamDryRun)
+ }
+
+ dryRun, ok := v.(bool)
+ if !ok {
+ return false, errors.Errorf("invalid parameter: %s", ParamDryRun)
+ }
+
+ return dryRun, nil
+}
+
+func getParamRepo(params job.Parameters) (*res.Repository, error) {
+ v, ok := params[ParamRepo]
+ if !ok {
+ return nil, errors.Errorf("missing parameter: %s", ParamRepo)
+ }
+
+ repoJSON, ok := v.(string)
+ if !ok {
+ return nil, errors.Errorf("invalid parameter: %s", ParamRepo)
+ }
+
+ repo := &res.Repository{}
+ if err := repo.FromJSON(repoJSON); err != nil {
+ return nil, errors.Wrap(err, "parse repository from JSON")
+ }
+
+ return repo, nil
+}
+
+func getParamMeta(params job.Parameters) (*lwp.Metadata, error) {
+ v, ok := params[ParamMeta]
+ if !ok {
+ return nil, errors.Errorf("missing parameter: %s", ParamMeta)
+ }
+
+ metaJSON, ok := v.(string)
+ if !ok {
+ return nil, errors.Errorf("invalid parameter: %s", ParamMeta)
+ }
+
+ meta := &lwp.Metadata{}
+ if err := meta.FromJSON(metaJSON); err != nil {
+ return nil, errors.Wrap(err, "parse retention policy from JSON")
+ }
+
+ return meta, nil
+}
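+
+// Parameter assembly sketch (illustrative): the launcher passes the
+// repository and the lightweight policy as JSON strings, matching the
+// getters above.
+//
+//    params := job.Parameters{
+//        ParamRepo:   repoJSON, // res.Repository serialized via ToJSON()
+//        ParamMeta:   metaJSON, // lwp.Metadata serialized via ToJSON()
+//        ParamDryRun: true,
+//    }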
diff --git a/src/pkg/retention/job_test.go b/src/pkg/retention/job_test.go
new file mode 100644
index 000000000..cd9c137f1
--- /dev/null
+++ b/src/pkg/retention/job_test.go
@@ -0,0 +1,224 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package retention
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/jobservice/job"
+ "github.com/goharbor/harbor/src/jobservice/logger"
+ "github.com/goharbor/harbor/src/pkg/retention/dep"
+ "github.com/goharbor/harbor/src/pkg/retention/policy"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/lwp"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestps"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar"
+ "github.com/goharbor/harbor/src/pkg/retention/res/selectors/label"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+// JobTestSuite is test suite for testing job
+type JobTestSuite struct {
+ suite.Suite
+
+ oldClient dep.Client
+}
+
+// TestJob is entry of running JobTestSuite
+func TestJob(t *testing.T) {
+ suite.Run(t, new(JobTestSuite))
+}
+
+// SetupSuite ...
+func (suite *JobTestSuite) SetupSuite() {
+ suite.oldClient = dep.DefaultClient
+ dep.DefaultClient = &fakeRetentionClient{}
+}
+
+// TearDownSuite ...
+func (suite *JobTestSuite) TearDownSuite() {
+ dep.DefaultClient = suite.oldClient
+}
+
+func (suite *JobTestSuite) TestRunSuccess() {
+ params := make(job.Parameters)
+ params[ParamDryRun] = false
+ repository := &res.Repository{
+ Namespace: "library",
+ Name: "harbor",
+ Kind: res.Image,
+ }
+ repoJSON, err := repository.ToJSON()
+ require.Nil(suite.T(), err)
+ params[ParamRepo] = repoJSON
+
+ scopeSelectors := make(map[string][]*rule.Selector)
+ scopeSelectors["project"] = []*rule.Selector{{
+ Kind: doublestar.Kind,
+ Decoration: doublestar.RepoMatches,
+ Pattern: "{harbor}",
+ }}
+
+ ruleParams := make(rule.Parameters)
+ ruleParams[latestps.ParameterK] = 10
+
+ meta := &lwp.Metadata{
+ Algorithm: policy.AlgorithmOR,
+ Rules: []*rule.Metadata{
+ {
+ ID: 1,
+ Priority: 999,
+ Action: action.Retain,
+ Template: latestps.TemplateID,
+ Parameters: ruleParams,
+ TagSelectors: []*rule.Selector{{
+ Kind: label.Kind,
+ Decoration: label.With,
+ Pattern: "L3",
+ }, {
+ Kind: doublestar.Kind,
+ Decoration: doublestar.Matches,
+ Pattern: "**",
+ }},
+ ScopeSelectors: scopeSelectors,
+ },
+ },
+ }
+ metaJSON, err := meta.ToJSON()
+ require.Nil(suite.T(), err)
+ params[ParamMeta] = metaJSON
+
+ j := &Job{}
+ err = j.Validate(params)
+ require.NoError(suite.T(), err)
+
+ err = j.Run(&fakeJobContext{}, params)
+ require.NoError(suite.T(), err)
+}
+
+type fakeRetentionClient struct{}
+
+// GetCandidates ...
+func (frc *fakeRetentionClient) GetCandidates(repo *res.Repository) ([]*res.Candidate, error) {
+ return []*res.Candidate{
+ {
+ Namespace: "library",
+ Repository: "harbor",
+ Kind: "image",
+ Tag: "latest",
+ PushedTime: time.Now().Unix() - 11,
+ PulledTime: time.Now().Unix() - 2,
+ CreationTime: time.Now().Unix() - 10,
+ Labels: []string{"L1", "L2"},
+ },
+ {
+ Namespace: "library",
+ Repository: "harbor",
+ Kind: "image",
+ Tag: "dev",
+ PushedTime: time.Now().Unix() - 10,
+ PulledTime: time.Now().Unix() - 3,
+ CreationTime: time.Now().Unix() - 20,
+ Labels: []string{"L3"},
+ },
+ }, nil
+}
+
+// Delete ...
+func (frc *fakeRetentionClient) Delete(candidate *res.Candidate) error {
+ return nil
+}
+
+// DeleteRepository ...
+func (frc *fakeRetentionClient) DeleteRepository(repo *res.Repository) error {
+ return nil
+}
+
+type fakeLogger struct{}
+
+// For debugging
+func (l *fakeLogger) Debug(v ...interface{}) {}
+
+// For debugging with format
+func (l *fakeLogger) Debugf(format string, v ...interface{}) {}
+
+// For logging info
+func (l *fakeLogger) Info(v ...interface{}) {
+ fmt.Println(v...)
+}
+
+// For logging info with format
+func (l *fakeLogger) Infof(format string, v ...interface{}) {
+ fmt.Printf(format+"\n", v...)
+}
+
+// For warning
+func (l *fakeLogger) Warning(v ...interface{}) {}
+
+// For warning with format
+func (l *fakeLogger) Warningf(format string, v ...interface{}) {}
+
+// For logging error
+func (l *fakeLogger) Error(v ...interface{}) {
+ fmt.Println(v...)
+}
+
+// For logging error with format
+func (l *fakeLogger) Errorf(format string, v ...interface{}) {
+}
+
+// For fatal error
+func (l *fakeLogger) Fatal(v ...interface{}) {}
+
+// For fatal error with format
+func (l *fakeLogger) Fatalf(format string, v ...interface{}) {}
+
+type fakeJobContext struct{}
+
+func (c *fakeJobContext) Build(tracker job.Tracker) (job.Context, error) {
+ return nil, nil
+}
+
+func (c *fakeJobContext) Get(prop string) (interface{}, bool) {
+ return nil, false
+}
+
+func (c *fakeJobContext) SystemContext() context.Context {
+ return context.TODO()
+}
+
+func (c *fakeJobContext) Checkin(status string) error {
+ fmt.Printf("Check in: %s\n", status)
+
+ return nil
+}
+
+func (c *fakeJobContext) OPCommand() (job.OPCommand, bool) {
+ return "", false
+}
+
+func (c *fakeJobContext) GetLogger() logger.Interface {
+ return &fakeLogger{}
+}
+
+func (c *fakeJobContext) Tracker() job.Tracker {
+ return nil
+}
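
Note on the fixture pattern above: dep.DefaultClient is a package-level variable, so SetupSuite swaps in the fake and TearDownSuite restores the original; the job under test never reaches a real registry. Any other test can reuse the same seam. A minimal sketch with a hypothetical test name:

    func TestWithFakeClient(t *testing.T) {
        // Swap the package-level client for the fake, restore on exit.
        old := dep.DefaultClient
        dep.DefaultClient = &fakeRetentionClient{}
        defer func() { dep.DefaultClient = old }()

        // Code under test now hits fakeRetentionClient.GetCandidates/Delete.
    }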
diff --git a/src/pkg/retention/launcher.go b/src/pkg/retention/launcher.go
new file mode 100644
index 000000000..8bdf91421
--- /dev/null
+++ b/src/pkg/retention/launcher.go
@@ -0,0 +1,379 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package retention
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/goharbor/harbor/src/jobservice/job"
+ "github.com/goharbor/harbor/src/pkg/retention/res/selectors/index"
+
+ cjob "github.com/goharbor/harbor/src/common/job"
+ "github.com/goharbor/harbor/src/common/job/models"
+ cmodels "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/pkg/project"
+ "github.com/goharbor/harbor/src/pkg/repository"
+ "github.com/goharbor/harbor/src/pkg/retention/policy"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/lwp"
+ "github.com/goharbor/harbor/src/pkg/retention/q"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/pkg/errors"
+)
+
+const (
+ // ParamRepo is the name of the job parameter holding the repository JSON
+ ParamRepo = "repository"
+ // ParamMeta is the name of the job parameter holding the lightweight policy JSON
+ ParamMeta = "liteMeta"
+ // ParamDryRun is the name of the job parameter indicating whether it is a dry run
+ ParamDryRun = "dryRun"
+)
+
+// Launcher provides a function to launch the async jobs that run retention based on the provided policy.
+type Launcher interface {
+ // Launch async jobs for the retention policy
+ // A separate job will be launched for each repository
+ //
+ // Arguments:
+ // policy *policy.Metadata: the policy info
+ // executionID int64 : the execution ID
+ // isDryRun bool : indicate if it is a dry run
+ //
+ // Returns:
+ // int64 : the count of tasks
+ // error : common error if any errors occurred
+ Launch(policy *policy.Metadata, executionID int64, isDryRun bool) (int64, error)
+ // Stop the jobs for one execution
+ //
+ // Arguments:
+ // executionID int64 : the execution ID
+ //
+ // Returns:
+ // error : common error if any errors occurred
+ Stop(executionID int64) error
+}
+
+// NewLauncher returns an instance of Launcher
+func NewLauncher(projectMgr project.Manager, repositoryMgr repository.Manager,
+ retentionMgr Manager) Launcher {
+ return &launcher{
+ projectMgr: projectMgr,
+ repositoryMgr: repositoryMgr,
+ retentionMgr: retentionMgr,
+ jobserviceClient: cjob.GlobalClient,
+ internalCoreURL: config.InternalCoreURL(),
+ chartServerEnabled: config.WithChartMuseum(),
+ }
+}
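
For orientation, a hedged sketch of the intended call pattern; the managers, policy metadata, and execution ID are assumed to come from the surrounding core code, and the helper name is hypothetical:

    // runRetention launches one async job per matched repository as a dry
    // run, then stops the whole execution (e.g. on user request).
    func runRetention(projectMgr project.Manager, repositoryMgr repository.Manager,
        retentionMgr Manager, ply *policy.Metadata, executionID int64) error {
        l := NewLauncher(projectMgr, repositoryMgr, retentionMgr)

        n, err := l.Launch(ply, executionID, true) // true = dry run
        if err != nil {
            return err
        }
        log.Infof("launched %d retention task(s)", n)

        return l.Stop(executionID)
    }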
+
+// jobData keeps the information needed to launch one retention job
+type jobData struct {
+ TaskID int64
+ Repository res.Repository
+ JobName string
+ JobParams map[string]interface{}
+}
+
+type launcher struct {
+ retentionMgr Manager
+ projectMgr project.Manager
+ repositoryMgr repository.Manager
+ jobserviceClient cjob.Client
+ internalCoreURL string
+ chartServerEnabled bool
+}
+
+func (l *launcher) Launch(ply *policy.Metadata, executionID int64, isDryRun bool) (int64, error) {
+ if ply == nil {
+ return 0, launcherError(fmt.Errorf("the policy is nil"))
+ }
+ // no rules, return directly
+ if len(ply.Rules) == 0 {
+ log.Debugf("no rules for policy %d, skip", ply.ID)
+ return 0, nil
+ }
+ scope := ply.Scope
+ if scope == nil {
+ return 0, launcherError(fmt.Errorf("the scope of policy is nil"))
+ }
+ repositoryRules := make(map[res.Repository]*lwp.Metadata, 0)
+ level := scope.Level
+ var allProjects []*res.Candidate
+ var err error
+ if level == "system" {
+ // get projects
+ allProjects, err = getProjects(l.projectMgr)
+ if err != nil {
+ return 0, launcherError(err)
+ }
+ }
+
+ for _, rule := range ply.Rules {
+ if rule.Disabled {
+ log.Infof("Policy %d rule %d %s is disabled", ply.ID, rule.ID, rule.Template)
+ continue
+ }
+ projectCandidates := allProjects
+ switch level {
+ case "system":
+ // filter projects according to the project selectors
+ for _, projectSelector := range rule.ScopeSelectors["project"] {
+ selector, err := index.Get(projectSelector.Kind, projectSelector.Decoration,
+ projectSelector.Pattern)
+ if err != nil {
+ return 0, launcherError(err)
+ }
+ projectCandidates, err = selector.Select(projectCandidates)
+ if err != nil {
+ return 0, launcherError(err)
+ }
+ }
+ case "project":
+ projectCandidates = append(projectCandidates, &res.Candidate{
+ NamespaceID: scope.Reference,
+ })
+ }
+
+ var repositoryCandidates []*res.Candidate
+ // get repositories of projects
+ for _, projectCandidate := range projectCandidates {
+ repositories, err := getRepositories(l.projectMgr, l.repositoryMgr, projectCandidate.NamespaceID, l.chartServerEnabled)
+ if err != nil {
+ return 0, launcherError(err)
+ }
+ repositoryCandidates = append(repositoryCandidates, repositories...)
+ }
+ // filter repositories according to the repository selectors
+ for _, repositorySelector := range rule.ScopeSelectors["repository"] {
+ selector, err := index.Get(repositorySelector.Kind, repositorySelector.Decoration,
+ repositorySelector.Pattern)
+ if err != nil {
+ return 0, launcherError(err)
+ }
+ repositoryCandidates, err = selector.Select(repositoryCandidates)
+ if err != nil {
+ return 0, launcherError(err)
+ }
+ }
+
+ for _, repositoryCandidate := range repositoryCandidates {
+ reposit := res.Repository{
+ Namespace: repositoryCandidate.Namespace,
+ Name: repositoryCandidate.Repository,
+ Kind: repositoryCandidate.Kind,
+ }
+ if repositoryRules[reposit] == nil {
+ repositoryRules[reposit] = &lwp.Metadata{
+ Algorithm: ply.Algorithm,
+ }
+ }
+ r := rule
+ repositoryRules[reposit].Rules = append(repositoryRules[reposit].Rules, &r)
+ }
+ }
+
+ // create job data list
+ jobDatas, err := createJobs(repositoryRules, isDryRun)
+ if err != nil {
+ return 0, launcherError(err)
+ }
+
+ // no jobs, return directly
+ if len(jobDatas) == 0 {
+ log.Debugf("no candidates for policy %d, skip", ply.ID)
+ return 0, nil
+ }
+
+ // create task records in database
+ if err = l.createTasks(executionID, jobDatas); err != nil {
+ return 0, launcherError(err)
+ }
+
+ // submit jobs to jobservice
+ if err = l.submitJobs(jobDatas); err != nil {
+ return 0, launcherError(err)
+ }
+
+ return int64(len(jobDatas)), nil
+}
+
+func createJobs(repositoryRules map[res.Repository]*lwp.Metadata, isDryRun bool) ([]*jobData, error) {
+ jobDatas := []*jobData{}
+ for repository, policy := range repositoryRules {
+ jobData := &jobData{
+ Repository: repository,
+ JobName: job.Retention,
+ JobParams: make(map[string]interface{}, 3),
+ }
+ // set dry run
+ jobData.JobParams[ParamDryRun] = isDryRun
+ // set repository
+ repoJSON, err := repository.ToJSON()
+ if err != nil {
+ return nil, err
+ }
+ jobData.JobParams[ParamRepo] = repoJSON
+ // set retention policy
+ policyJSON, err := policy.ToJSON()
+ if err != nil {
+ return nil, err
+ }
+ jobData.JobParams[ParamMeta] = policyJSON
+ jobDatas = append(jobDatas, jobData)
+ }
+ return jobDatas, nil
+}
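
Concretely, each matched repository yields one jobData carrying a boolean plus two JSON strings. The helper below is illustrative only; the JSON bodies are hand-written sketches of what repository.ToJSON and policy.ToJSON produce, not captured output:

    func exampleJobData() *jobData {
        return &jobData{
            Repository: res.Repository{Namespace: "library", Name: "harbor", Kind: res.Image},
            JobName:    job.Retention,
            JobParams: map[string]interface{}{
                ParamDryRun: true,
                ParamRepo:   `{"namespace":"library","name":"harbor","kind":"image"}`,
                ParamMeta:   `{"algorithm":"or","rules":[]}`, // per-repository rules go here
            },
        }
    }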
+
+// create task records in database
+func (l *launcher) createTasks(executionID int64, jobDatas []*jobData) error {
+ now := time.Now()
+ for _, jobData := range jobDatas {
+ taskID, err := l.retentionMgr.CreateTask(&Task{
+ ExecutionID: executionID,
+ Repository: jobData.Repository.Name,
+ StartTime: now,
+ })
+ if err != nil {
+ return err
+ }
+ jobData.TaskID = taskID
+ }
+ return nil
+}
+
+// submit the jobs to the jobservice and update the task records accordingly
+func (l *launcher) submitJobs(jobDatas []*jobData) error {
+ allFailed := true
+ for _, jobData := range jobDatas {
+ task := &Task{
+ ID: jobData.TaskID,
+ }
+ props := []string{"Status"}
+ j := &models.JobData{
+ Name: jobData.JobName,
+ Metadata: &models.JobMetadata{
+ JobKind: job.KindGeneric,
+ },
+ StatusHook: fmt.Sprintf("%s/service/notifications/jobs/retention/task/%d", l.internalCoreURL, jobData.TaskID),
+ Parameters: jobData.JobParams,
+ }
+ // Submit job
+ jobID, err := l.jobserviceClient.SubmitJob(j)
+ if err != nil {
+ log.Error(launcherError(fmt.Errorf("failed to submit task %d: %v", jobData.TaskID, err)))
+ task.Status = cmodels.JobError
+ task.EndTime = time.Now()
+ props = append(props, "EndTime")
+ } else {
+ allFailed = false
+ task.JobID = jobID
+ task.Status = cmodels.JobPending
+ props = append(props, "JobID")
+ }
+ if err = l.retentionMgr.UpdateTask(task, props...); err != nil {
+ log.Errorf("failed to update the status of task %d: %v", task.ID, err)
+ }
+ }
+ if allFailed {
+ return launcherError(fmt.Errorf("all tasks failed"))
+ }
+ return nil
+}
+
+func (l *launcher) Stop(executionID int64) error {
+ if executionID <= 0 {
+ return launcherError(fmt.Errorf("invalid execution ID: %d", executionID))
+ }
+ tasks, err := l.retentionMgr.ListTasks(&q.TaskQuery{
+ ExecutionID: executionID,
+ })
+ if err != nil {
+ return err
+ }
+ for _, task := range tasks {
+ if err = l.jobserviceClient.PostAction(task.JobID, cjob.JobActionStop); err != nil {
+ log.Errorf("failed to stop task %d, job ID: %s : %v", task.ID, task.JobID, err)
+ continue
+ }
+ }
+ return nil
+}
+
+func launcherError(err error) error {
+ return errors.Wrap(err, "launcher")
+}
+
+func getProjects(projectMgr project.Manager) ([]*res.Candidate, error) {
+ projects, err := projectMgr.List()
+ if err != nil {
+ return nil, err
+ }
+ var candidates []*res.Candidate
+ for _, pro := range projects {
+ candidates = append(candidates, &res.Candidate{
+ NamespaceID: pro.ProjectID,
+ Namespace: pro.Name,
+ })
+ }
+ return candidates, nil
+}
+
+func getRepositories(projectMgr project.Manager, repositoryMgr repository.Manager,
+ projectID int64, chartServerEnabled bool) ([]*res.Candidate, error) {
+ var candidates []*res.Candidate
+ /*
+ pro, err := projectMgr.Get(projectID)
+ if err != nil {
+ return nil, err
+ }
+ */
+ // get image repositories
+ imageRepositories, err := repositoryMgr.ListImageRepositories(projectID)
+ if err != nil {
+ return nil, err
+ }
+ for _, r := range imageRepositories {
+ namespace, repo := utils.ParseRepository(r.Name)
+ candidates = append(candidates, &res.Candidate{
+ Namespace: namespace,
+ Repository: repo,
+ Kind: "image",
+ })
+ }
+ // currently, retention for charts is not supported
+ /*
+ if chartServerEnabled {
+ // get chart repositories when chart server is enabled
+ chartRepositories, err := repositoryMgr.ListChartRepositories(projectID)
+ if err != nil {
+ return nil, err
+ }
+ for _, r := range chartRepositories {
+ candidates = append(candidates, &res.Candidate{
+ Namespace: pro.Name,
+ Repository: r.Name,
+ Kind: "chart",
+ })
+ }
+ }
+ */
+
+ return candidates, nil
+}
diff --git a/src/pkg/retention/launcher_test.go b/src/pkg/retention/launcher_test.go
new file mode 100644
index 000000000..27ce757b6
--- /dev/null
+++ b/src/pkg/retention/launcher_test.go
@@ -0,0 +1,301 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package retention
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/goharbor/harbor/src/chartserver"
+ "github.com/goharbor/harbor/src/common/job"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/pkg/project"
+ "github.com/goharbor/harbor/src/pkg/repository"
+ "github.com/goharbor/harbor/src/pkg/retention/policy"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/q"
+ _ "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar"
+ hjob "github.com/goharbor/harbor/src/testing/job"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+type fakeProjectManager struct {
+ projects []*models.Project
+}
+
+func (f *fakeProjectManager) List(...*models.ProjectQueryParam) ([]*models.Project, error) {
+ return f.projects, nil
+}
+func (f *fakeProjectManager) Get(idOrName interface{}) (*models.Project, error) {
+ id, ok := idOrName.(int64)
+ if ok {
+ for _, pro := range f.projects {
+ if pro.ProjectID == id {
+ return pro, nil
+ }
+ }
+ return nil, nil
+ }
+ name, ok := idOrName.(string)
+ if ok {
+ for _, pro := range f.projects {
+ if pro.Name == name {
+ return pro, nil
+ }
+ }
+ return nil, nil
+ }
+ return nil, fmt.Errorf("invalid parameter: %v, should be ID(int64) or name(string)", idOrName)
+}
+
+type fakeRepositoryManager struct {
+ imageRepositories []*models.RepoRecord
+ chartRepositories []*chartserver.ChartInfo
+}
+
+func (f *fakeRepositoryManager) ListImageRepositories(projectID int64) ([]*models.RepoRecord, error) {
+ return f.imageRepositories, nil
+}
+func (f *fakeRepositoryManager) ListChartRepositories(projectID int64) ([]*chartserver.ChartInfo, error) {
+ return f.chartRepositories, nil
+}
+
+type fakeRetentionManager struct{}
+
+func (f *fakeRetentionManager) GetTotalOfRetentionExecs(policyID int64) (int64, error) {
+ return 0, nil
+}
+
+func (f *fakeRetentionManager) GetTotalOfTasks(executionID int64) (int64, error) {
+ return 0, nil
+}
+
+func (f *fakeRetentionManager) CreatePolicy(p *policy.Metadata) (int64, error) {
+ return 0, nil
+}
+func (f *fakeRetentionManager) UpdatePolicy(p *policy.Metadata) error {
+ return nil
+}
+func (f *fakeRetentionManager) DeletePolicyAndExec(ID int64) error {
+ return nil
+}
+func (f *fakeRetentionManager) GetPolicy(ID int64) (*policy.Metadata, error) {
+ return nil, nil
+}
+func (f *fakeRetentionManager) CreateExecution(execution *Execution) (int64, error) {
+ return 0, nil
+}
+func (f *fakeRetentionManager) UpdateExecution(execution *Execution) error {
+ return nil
+}
+func (f *fakeRetentionManager) GetExecution(eid int64) (*Execution, error) {
+ return nil, nil
+}
+func (f *fakeRetentionManager) DeleteExecution(eid int64) error {
+ return nil
+}
+func (f *fakeRetentionManager) ListTasks(query ...*q.TaskQuery) ([]*Task, error) {
+ return []*Task{
+ {
+ ID: 1,
+ ExecutionID: 1,
+ JobID: "1",
+ },
+ }, nil
+}
+func (f *fakeRetentionManager) GetTask(taskID int64) (*Task, error) {
+ return nil, nil
+}
+func (f *fakeRetentionManager) CreateTask(task *Task) (int64, error) {
+ return 0, nil
+}
+func (f *fakeRetentionManager) UpdateTask(task *Task, cols ...string) error {
+ return nil
+}
+func (f *fakeRetentionManager) UpdateTaskStatus(int64, string) error {
+ return nil
+}
+func (f *fakeRetentionManager) GetTaskLog(taskID int64) ([]byte, error) {
+ return nil, nil
+}
+func (f *fakeRetentionManager) ListExecutions(policyID int64, query *q.Query) ([]*Execution, error) {
+ return nil, nil
+}
+func (f *fakeRetentionManager) AppendHistory(history *History) (int64, error) {
+ return 0, nil
+}
+func (f *fakeRetentionManager) ListHistories(executionID int64, query *q.Query) ([]*History, error) {
+ return nil, nil
+}
+
+type launchTestSuite struct {
+ suite.Suite
+ projectMgr project.Manager
+ repositoryMgr repository.Manager
+ retentionMgr Manager
+ jobserviceClient job.Client
+}
+
+func (l *launchTestSuite) SetupTest() {
+ pro1 := &models.Project{
+ ProjectID: 1,
+ Name: "library",
+ }
+ pro2 := &models.Project{
+ ProjectID: 2,
+ Name: "test",
+ }
+ l.projectMgr = &fakeProjectManager{
+ projects: []*models.Project{
+ pro1, pro2,
+ }}
+ l.repositoryMgr = &fakeRepositoryManager{
+ imageRepositories: []*models.RepoRecord{
+ {
+ Name: "library/image",
+ },
+ {
+ Name: "test/image",
+ },
+ },
+ chartRepositories: []*chartserver.ChartInfo{
+ {
+ Name: "chart",
+ },
+ },
+ }
+ l.retentionMgr = &fakeRetentionManager{}
+ l.jobserviceClient = &hjob.MockJobClient{
+ JobUUID: []string{"1"},
+ }
+}
+
+func (l *launchTestSuite) TestGetProjects() {
+ projects, err := getProjects(l.projectMgr)
+ require.Nil(l.T(), err)
+ assert.Equal(l.T(), 2, len(projects))
+ assert.Equal(l.T(), int64(1), projects[0].NamespaceID)
+ assert.Equal(l.T(), "library", projects[0].Namespace)
+}
+
+func (l *launchTestSuite) TestGetRepositories() {
+ repositories, err := getRepositories(l.projectMgr, l.repositoryMgr, 1, true)
+ require.Nil(l.T(), err)
+ assert.Equal(l.T(), 2, len(repositories))
+ assert.Equal(l.T(), "library", repositories[0].Namespace)
+ assert.Equal(l.T(), "image", repositories[0].Repository)
+ assert.Equal(l.T(), "image", repositories[0].Kind)
+}
+
+func (l *launchTestSuite) TestLaunch() {
+ launcher := &launcher{
+ projectMgr: l.projectMgr,
+ repositoryMgr: l.repositoryMgr,
+ retentionMgr: l.retentionMgr,
+ jobserviceClient: l.jobserviceClient,
+ chartServerEnabled: true,
+ }
+
+ var ply *policy.Metadata
+ // nil policy
+ n, err := launcher.Launch(ply, 1, false)
+ require.NotNil(l.T(), err)
+
+ // nil rules
+ ply = &policy.Metadata{}
+ n, err = launcher.Launch(ply, 1, false)
+ require.Nil(l.T(), err)
+ assert.Equal(l.T(), int64(0), n)
+
+ // nil scope
+ ply = &policy.Metadata{
+ Rules: []rule.Metadata{
+ {},
+ },
+ }
+ _, err = launcher.Launch(ply, 1, false)
+ require.NotNil(l.T(), err)
+
+ // system scope
+ ply = &policy.Metadata{
+ Scope: &policy.Scope{
+ Level: "system",
+ },
+ Rules: []rule.Metadata{
+ {
+ ScopeSelectors: map[string][]*rule.Selector{
+ "project": {
+ {
+ Kind: "doublestar",
+ Decoration: "nsMatches",
+ Pattern: "library",
+ },
+ },
+ "repository": {
+ {
+ Kind: "doublestar",
+ Decoration: "repoMatches",
+ Pattern: "**",
+ },
+ },
+ },
+ },
+ {
+ Disabled: true,
+ ScopeSelectors: map[string][]*rule.Selector{
+ "project": {
+ {
+ Kind: "doublestar",
+ Decoration: "nsMatches",
+ Pattern: "library1",
+ },
+ },
+ "repository": {
+ {
+ Kind: "doublestar",
+ Decoration: "repoMatches",
+ Pattern: "**",
+ },
+ },
+ },
+ },
+ },
+ }
+ n, err = launcher.Launch(ply, 1, false)
+ require.Nil(l.T(), err)
+ assert.Equal(l.T(), int64(2), n)
+}
+
+func (l *launchTestSuite) TestStop() {
+ t := l.T()
+ launcher := &launcher{
+ projectMgr: l.projectMgr,
+ repositoryMgr: l.repositoryMgr,
+ retentionMgr: l.retentionMgr,
+ jobserviceClient: l.jobserviceClient,
+ }
+ // invalid execution ID
+ err := launcher.Stop(0)
+ require.NotNil(t, err)
+
+ err = launcher.Stop(1)
+ require.Nil(t, err)
+}
+
+func TestLaunchTestSuite(t *testing.T) {
+ suite.Run(t, new(launchTestSuite))
+}
diff --git a/src/pkg/retention/manager.go b/src/pkg/retention/manager.go
new file mode 100644
index 000000000..c93f92256
--- /dev/null
+++ b/src/pkg/retention/manager.go
@@ -0,0 +1,310 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package retention
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/astaxie/beego/orm"
+ cjob "github.com/goharbor/harbor/src/common/job"
+ "github.com/goharbor/harbor/src/jobservice/job"
+ "github.com/goharbor/harbor/src/pkg/retention/dao"
+ "github.com/goharbor/harbor/src/pkg/retention/dao/models"
+ "github.com/goharbor/harbor/src/pkg/retention/policy"
+ "github.com/goharbor/harbor/src/pkg/retention/q"
+)
+
+// Manager defines operations of managing policy
+type Manager interface {
+ // Create new policy and return ID
+ CreatePolicy(p *policy.Metadata) (int64, error)
+ // Update the existing policy
+ // Full update
+ UpdatePolicy(p *policy.Metadata) error
+ // Delete the specified policy
+ // No actual use so far
+ DeletePolicyAndExec(ID int64) error
+ // Get the specified policy
+ GetPolicy(ID int64) (*policy.Metadata, error)
+ // Create a new retention execution
+ CreateExecution(execution *Execution) (int64, error)
+ // Delete the specified retention execution
+ DeleteExecution(int64) error
+ // Get the specified execution
+ GetExecution(eid int64) (*Execution, error)
+ // List executions
+ ListExecutions(policyID int64, query *q.Query) ([]*Execution, error)
+ // GetTotalOfRetentionExecs Count Retention Executions
+ GetTotalOfRetentionExecs(policyID int64) (int64, error)
+ // List tasks according to the query
+ ListTasks(query ...*q.TaskQuery) ([]*Task, error)
+ // GetTotalOfTasks Count Tasks
+ GetTotalOfTasks(executionID int64) (int64, error)
+ // Create a new retention task
+ CreateTask(task *Task) (int64, error)
+ // Update the specified task
+ UpdateTask(task *Task, cols ...string) error
+ // Update the status of the specified task
+ // The status is updated only when it is behind the one stored
+ // in the database.
+ // e.g. if the incoming status is "running" but the status stored
+ // in the database is "failed", the update doesn't take effect
+ UpdateTaskStatus(taskID int64, status string) error
+ // Get the task specified by the task ID
+ GetTask(taskID int64) (*Task, error)
+ // Get the log of the specified task
+ GetTaskLog(taskID int64) ([]byte, error)
+}
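
A hedged sketch of the typical write path through this interface; the helper name and all values are illustrative:

    func recordDryRun(m Manager, policyID int64) error {
        // One execution record per policy run...
        eid, err := m.CreateExecution(&Execution{
            PolicyID:  policyID,
            StartTime: time.Now(),
            Trigger:   ExecutionTriggerManual,
            DryRun:    true,
        })
        if err != nil {
            return err
        }
        // ...then one task record per launched repository job.
        tid, err := m.CreateTask(&Task{
            ExecutionID: eid,
            Repository:  "library/harbor",
            StartTime:   time.Now(),
        })
        if err != nil {
            return err
        }
        // Status updates are monotonic: if a final status is already
        // stored, this late "Running" update is silently ignored.
        return m.UpdateTaskStatus(tid, job.RunningStatus.String())
    }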
+
+// DefaultManager is the default implementation of Manager based on the database DAO
+type DefaultManager struct {
+}
+
+// CreatePolicy Create Policy
+func (d *DefaultManager) CreatePolicy(p *policy.Metadata) (int64, error) {
+ p1 := &models.RetentionPolicy{}
+ p1.ScopeLevel = p.Scope.Level
+ p1.ScopeReference = p.Scope.Reference
+ p1.TriggerKind = p.Trigger.Kind
+ data, _ := json.Marshal(p)
+ p1.Data = string(data)
+ p1.CreateTime = time.Now()
+ p1.UpdateTime = p1.CreateTime
+ return dao.CreatePolicy(p1)
+}
+
+// UpdatePolicy Update Policy
+func (d *DefaultManager) UpdatePolicy(p *policy.Metadata) error {
+ p1 := &models.RetentionPolicy{}
+ p1.ID = p.ID
+ p1.ScopeLevel = p.Scope.Level
+ p1.ScopeReference = p.Scope.Reference
+ p1.TriggerKind = p.Trigger.Kind
+ p.ID = 0
+ data, _ := json.Marshal(p)
+ p.ID = p1.ID
+ p1.Data = string(data)
+ p1.UpdateTime = time.Now()
+ return dao.UpdatePolicy(p1, "scope_level", "trigger_kind", "data", "update_time")
+}
+
+// DeletePolicyAndExec Delete Policy
+func (d *DefaultManager) DeletePolicyAndExec(id int64) error {
+ return dao.DeletePolicyAndExec(id)
+}
+
+// GetPolicy Get Policy
+func (d *DefaultManager) GetPolicy(id int64) (*policy.Metadata, error) {
+ p1, err := dao.GetPolicy(id)
+ if err != nil {
+ if err == orm.ErrNoRows {
+ return nil, nil
+ }
+ return nil, err
+ }
+ p := &policy.Metadata{}
+ if err = json.Unmarshal([]byte(p1.Data), p); err != nil {
+ return nil, err
+ }
+ p.ID = id
+ if p.Trigger.Settings != nil {
+ if _, ok := p.Trigger.References[policy.TriggerReferencesJobid]; ok {
+ p.Trigger.References[policy.TriggerReferencesJobid] = int64(p.Trigger.References[policy.TriggerReferencesJobid].(float64))
+ }
+ }
+ return p, nil
+}
+
+// CreateExecution Create Execution
+func (d *DefaultManager) CreateExecution(execution *Execution) (int64, error) {
+ exec := &models.RetentionExecution{}
+ exec.PolicyID = execution.PolicyID
+ exec.StartTime = time.Now()
+ exec.DryRun = execution.DryRun
+ exec.Trigger = execution.Trigger
+ return dao.CreateExecution(exec)
+}
+
+// DeleteExecution Delete Execution
+func (d *DefaultManager) DeleteExecution(eid int64) error {
+ return dao.DeleteExecution(eid)
+}
+
+// ListExecutions List Executions
+func (d *DefaultManager) ListExecutions(policyID int64, query *q.Query) ([]*Execution, error) {
+ execs, err := dao.ListExecutions(policyID, query)
+ if err != nil {
+ if err == orm.ErrNoRows {
+ return nil, nil
+ }
+ return nil, err
+ }
+ var execs1 []*Execution
+ for _, e := range execs {
+ e1 := &Execution{}
+ e1.ID = e.ID
+ e1.PolicyID = e.PolicyID
+ e1.Status = e.Status
+ e1.StartTime = e.StartTime
+ e1.EndTime = e.EndTime
+ e1.DryRun = e.DryRun
+ execs1 = append(execs1, e1)
+ }
+ return execs1, nil
+}
+
+// GetTotalOfRetentionExecs Count Executions
+func (d *DefaultManager) GetTotalOfRetentionExecs(policyID int64) (int64, error) {
+ return dao.GetTotalOfRetentionExecs(policyID)
+}
+
+// GetExecution Get Execution
+func (d *DefaultManager) GetExecution(eid int64) (*Execution, error) {
+ e, err := dao.GetExecution(eid)
+ if err != nil {
+ return nil, err
+ }
+ e1 := &Execution{}
+ e1.ID = e.ID
+ e1.PolicyID = e.PolicyID
+ e1.Status = e.Status
+ e1.StartTime = e.StartTime
+ e1.EndTime = e.EndTime
+ e1.DryRun = e.DryRun
+ return e1, nil
+}
+
+// CreateTask creates task record
+func (d *DefaultManager) CreateTask(task *Task) (int64, error) {
+ if task == nil {
+ return 0, errors.New("nil task")
+ }
+ t := &models.RetentionTask{
+ ExecutionID: task.ExecutionID,
+ Repository: task.Repository,
+ JobID: task.JobID,
+ Status: task.Status,
+ StartTime: task.StartTime,
+ EndTime: task.EndTime,
+ Total: task.Total,
+ Retained: task.Retained,
+ }
+ return dao.CreateTask(t)
+}
+
+// ListTasks lists tasks according to the query
+func (d *DefaultManager) ListTasks(query ...*q.TaskQuery) ([]*Task, error) {
+ ts, err := dao.ListTask(query...)
+ if err != nil {
+ if err == orm.ErrNoRows {
+ return nil, nil
+ }
+ return nil, err
+ }
+ tasks := make([]*Task, 0)
+ for _, t := range ts {
+ tasks = append(tasks, &Task{
+ ID: t.ID,
+ ExecutionID: t.ExecutionID,
+ Repository: t.Repository,
+ JobID: t.JobID,
+ Status: t.Status,
+ StatusCode: t.StatusCode,
+ StartTime: t.StartTime,
+ EndTime: t.EndTime,
+ Total: t.Total,
+ Retained: t.Retained,
+ })
+ }
+ return tasks, nil
+}
+
+// GetTotalOfTasks Count tasks
+func (d *DefaultManager) GetTotalOfTasks(executionID int64) (int64, error) {
+ return dao.GetTotalOfTasks(executionID)
+}
+
+// UpdateTask updates the task
+func (d *DefaultManager) UpdateTask(task *Task, cols ...string) error {
+ if task == nil {
+ return errors.New("nil task")
+ }
+ if task.ID <= 0 {
+ return fmt.Errorf("invalid task ID: %d", task.ID)
+ }
+ return dao.UpdateTask(&models.RetentionTask{
+ ID: task.ID,
+ ExecutionID: task.ExecutionID,
+ Repository: task.Repository,
+ JobID: task.JobID,
+ Status: task.Status,
+ StartTime: task.StartTime,
+ EndTime: task.EndTime,
+ Total: task.Total,
+ Retained: task.Retained,
+ }, cols...)
+}
+
+// UpdateTaskStatus updates the status of the specified task
+func (d *DefaultManager) UpdateTaskStatus(taskID int64, status string) error {
+ if taskID <= 0 {
+ return fmt.Errorf("invalid task ID: %d", taskID)
+ }
+ st := job.Status(status)
+ return dao.UpdateTaskStatus(taskID, status, st.Code())
+}
+
+// GetTask returns the task specified by task ID
+func (d *DefaultManager) GetTask(taskID int64) (*Task, error) {
+ if taskID <= 0 {
+ return nil, fmt.Errorf("invalid task ID: %d", taskID)
+ }
+ task, err := dao.GetTask(taskID)
+ if err != nil {
+ return nil, err
+ }
+ return &Task{
+ ID: task.ID,
+ ExecutionID: task.ExecutionID,
+ Repository: task.Repository,
+ JobID: task.JobID,
+ Status: task.Status,
+ StatusCode: task.StatusCode,
+ StartTime: task.StartTime,
+ EndTime: task.EndTime,
+ Total: task.Total,
+ Retained: task.Retained,
+ }, nil
+}
+
+// GetTaskLog gets the logs of task
+func (d *DefaultManager) GetTaskLog(taskID int64) ([]byte, error) {
+ task, err := d.GetTask(taskID)
+ if err != nil {
+ return nil, err
+ }
+ if task == nil {
+ return nil, fmt.Errorf("task %d not found", taskID)
+ }
+ return cjob.GlobalClient.GetJobLog(task.JobID)
+}
+
+// NewManager ...
+func NewManager() Manager {
+ return &DefaultManager{}
+}
diff --git a/src/pkg/retention/manager_test.go b/src/pkg/retention/manager_test.go
new file mode 100644
index 000000000..cbcbf5f10
--- /dev/null
+++ b/src/pkg/retention/manager_test.go
@@ -0,0 +1,221 @@
+package retention
+
+import (
+ "os"
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/job"
+ jjob "github.com/goharbor/harbor/src/jobservice/job"
+ "github.com/goharbor/harbor/src/pkg/retention/policy"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/q"
+ tjob "github.com/goharbor/harbor/src/testing/job"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMain(m *testing.M) {
+ dao.PrepareTestForPostgresSQL()
+ os.Exit(m.Run())
+}
+
+func TestPolicy(t *testing.T) {
+ m := NewManager()
+ p1 := &policy.Metadata{
+ Algorithm: "or",
+ Rules: []rule.Metadata{
+ {
+ ID: 1,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: ".+",
+ },
+ },
+ },
+ },
+ },
+ Trigger: &policy.Trigger{
+ Kind: "Schedule",
+ Settings: map[string]interface{}{
+ "cron": "* 22 11 * * *",
+ },
+ },
+ Scope: &policy.Scope{
+ Level: "project",
+ Reference: 1,
+ },
+ }
+
+ id, err := m.CreatePolicy(p1)
+ assert.Nil(t, err)
+ assert.True(t, id > 0)
+
+ p1, err = m.GetPolicy(id)
+ assert.Nil(t, err)
+ assert.EqualValues(t, "project", p1.Scope.Level)
+ assert.True(t, p1.ID > 0)
+
+ p1.Scope.Level = "test"
+ err = m.UpdatePolicy(p1)
+ assert.Nil(t, err)
+ p1, err = m.GetPolicy(id)
+ assert.Nil(t, err)
+ assert.EqualValues(t, "test", p1.Scope.Level)
+
+ err = m.DeletePolicyAndExec(id)
+ assert.Nil(t, err)
+
+ p1, err = m.GetPolicy(id)
+ assert.Nil(t, err)
+ assert.Nil(t, p1)
+}
+
+func TestExecution(t *testing.T) {
+ m := NewManager()
+ p1 := &policy.Metadata{
+ Algorithm: "or",
+ Rules: []rule.Metadata{
+ {
+ ID: 1,
+ Priority: 1,
+ Template: "recentXdays",
+ Parameters: rule.Parameters{
+ "num": 10,
+ },
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: "label",
+ Decoration: "with",
+ Pattern: "latest",
+ },
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: "release-[\\d\\.]+",
+ },
+ },
+ ScopeSelectors: map[string][]*rule.Selector{
+ "repository": {
+ {
+ Kind: "regularExpression",
+ Decoration: "matches",
+ Pattern: ".+",
+ },
+ },
+ },
+ },
+ },
+ Trigger: &policy.Trigger{
+ Kind: "Schedule",
+ Settings: map[string]interface{}{
+ "cron": "* 22 11 * * *",
+ },
+ },
+ Scope: &policy.Scope{
+ Level: "project",
+ Reference: 1,
+ },
+ }
+
+ policyID, err := m.CreatePolicy(p1)
+ assert.Nil(t, err)
+ assert.True(t, policyID > 0)
+
+ e1 := &Execution{
+ PolicyID: policyID,
+ StartTime: time.Now(),
+ Trigger: ExecutionTriggerManual,
+ DryRun: false,
+ }
+ id, err := m.CreateExecution(e1)
+ assert.Nil(t, err)
+ assert.True(t, id > 0)
+
+ e1, err = m.GetExecution(id)
+ assert.Nil(t, err)
+ assert.NotNil(t, e1)
+ assert.EqualValues(t, id, e1.ID)
+
+ es, err := m.ListExecutions(policyID, nil)
+ assert.Nil(t, err)
+ assert.EqualValues(t, 1, len(es))
+
+ err = m.DeleteExecution(id)
+ assert.Nil(t, err)
+}
+
+func TestTask(t *testing.T) {
+ m := NewManager()
+ task := &Task{
+ ExecutionID: 1,
+ JobID: "1",
+ Status: jjob.PendingStatus.String(),
+ StatusCode: jjob.PendingStatus.Code(),
+ Total: 0,
+ StartTime: time.Now(),
+ }
+ // create
+ id, err := m.CreateTask(task)
+ require.Nil(t, err)
+
+ // get
+ tk, err := m.GetTask(id)
+ require.Nil(t, err)
+ assert.EqualValues(t, 1, tk.ExecutionID)
+
+ // update
+ task.ID = id
+ task.Total = 1
+ err = m.UpdateTask(task, "Total")
+ require.Nil(t, err)
+
+ // update status to success which is a final status
+ err = m.UpdateTaskStatus(id, jjob.SuccessStatus.String())
+ require.Nil(t, err)
+
+ // try to update status to running, as the status has already
+ // been updated to a final status, this updating shouldn't take effect
+ err = m.UpdateTaskStatus(id, jjob.RunningStatus.String())
+ require.Nil(t, err)
+
+ // list
+ tasks, err := m.ListTasks(&q.TaskQuery{
+ ExecutionID: 1,
+ })
+ require.Nil(t, err)
+ require.Equal(t, 1, len(tasks))
+ assert.Equal(t, int64(1), tasks[0].ExecutionID)
+ assert.Equal(t, 1, tasks[0].Total)
+ assert.Equal(t, jjob.SuccessStatus.String(), tasks[0].Status)
+ assert.Equal(t, jjob.SuccessStatus.Code(), tasks[0].StatusCode)
+
+ // get task log
+ job.GlobalClient = &tjob.MockJobClient{
+ JobUUID: []string{"1"},
+ }
+ data, err := m.GetTaskLog(task.ID)
+ require.Nil(t, err)
+ assert.Equal(t, "some log", string(data))
+}
diff --git a/src/pkg/retention/models.go b/src/pkg/retention/models.go
new file mode 100644
index 000000000..1e4219937
--- /dev/null
+++ b/src/pkg/retention/models.go
@@ -0,0 +1,69 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package retention
+
+import "time"
+
+// const definitions
+const (
+ ExecutionStatusInProgress string = "InProgress"
+ ExecutionStatusSucceed string = "Succeed"
+ ExecutionStatusFailed string = "Failed"
+ ExecutionStatusStopped string = "Stopped"
+
+ CandidateKindImage string = "image"
+ CandidateKindChart string = "chart"
+
+ ExecutionTriggerManual string = "Manual"
+ ExecutionTriggerSchedule string = "Schedule"
+)
+
+// Execution of retention
+type Execution struct {
+ ID int64 `json:"id"`
+ PolicyID int64 `json:"policy_id"`
+ StartTime time.Time `json:"start_time"`
+ EndTime time.Time `json:"end_time,omitempty"`
+ Status string `json:"status"`
+ Trigger string `json:"trigger"`
+ DryRun bool `json:"dry_run"`
+}
+
+// Task of retention
+type Task struct {
+ ID int64 `json:"id"`
+ ExecutionID int64 `json:"execution_id"`
+ Repository string `json:"repository"`
+ JobID string `json:"job_id"`
+ Status string `json:"status"`
+ StatusCode int `json:"status_code"`
+ StartTime time.Time `json:"start_time"`
+ EndTime time.Time `json:"end_time"`
+ Total int `json:"total"`
+ Retained int `json:"retained"`
+}
+
+// History of retention
+type History struct {
+ ID int64 `json:"id,omitempty"`
+ ExecutionID int64 `json:"execution_id"`
+ Rule struct {
+ ID int `json:"id"`
+ DisplayText string `json:"display_text"`
+ } `json:"rule_id"`
+ // full path: :ns/:repo:tag
+ Artifact string `json:"tag"`
+ Timestamp time.Time `json:"timestamp"`
+}
diff --git a/src/pkg/retention/policy/action/index/index.go b/src/pkg/retention/policy/action/index/index.go
new file mode 100644
index 000000000..3b371d5df
--- /dev/null
+++ b/src/pkg/retention/policy/action/index/index.go
@@ -0,0 +1,59 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package index
+
+import (
+ "sync"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/pkg/errors"
+)
+
+// index for keeping the mapping between action and its performer
+var index sync.Map
+
+func init() {
+ // Register retain action
+ Register(action.Retain, action.NewRetainAction)
+}
+
+// Register the performer with the corresponding action
+func Register(action string, factory action.PerformerFactory) {
+ if len(action) == 0 || factory == nil {
+ // do nothing
+ return
+ }
+
+ index.Store(action, factory)
+}
+
+// Get performer with the provided action
+func Get(act string, params interface{}, isDryRun bool) (action.Performer, error) {
+ if len(act) == 0 {
+ return nil, errors.New("empty action")
+ }
+
+ v, ok := index.Load(act)
+ if !ok {
+ return nil, errors.Errorf("action %s is not registered", act)
+ }
+
+ factory, ok := v.(action.PerformerFactory)
+ if !ok {
+ return nil, errors.Errorf("invalid action performer registered for action %s", act)
+ }
+
+ return factory(params, isDryRun), nil
+}
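
Callers resolve a performer the same way init registers one. A minimal hedged sketch, where allArtifacts (the repository's full candidate list) and kept are assumed inputs and the res import is implied:

    // retainOnly resolves the built-in retain performer in dry-run mode;
    // everything in allArtifacts that is not in kept shows up in the results.
    func retainOnly(allArtifacts, kept []*res.Candidate) ([]*res.Result, error) {
        p, err := Get(action.Retain, allArtifacts, true)
        if err != nil {
            return nil, err // not registered, or a wrong factory type
        }
        return p.Perform(kept)
    }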
diff --git a/src/pkg/retention/policy/action/index/index_test.go b/src/pkg/retention/policy/action/index/index_test.go
new file mode 100644
index 000000000..f9d4f57e5
--- /dev/null
+++ b/src/pkg/retention/policy/action/index/index_test.go
@@ -0,0 +1,95 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package index
+
+import (
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+// IndexTestSuite tests the action index
+type IndexTestSuite struct {
+ suite.Suite
+
+ candidates []*res.Candidate
+}
+
+// TestIndexEntry is entry of IndexTestSuite
+func TestIndexEntry(t *testing.T) {
+ suite.Run(t, new(IndexTestSuite))
+}
+
+// SetupSuite ...
+func (suite *IndexTestSuite) SetupSuite() {
+ Register("fakeAction", newFakePerformer)
+
+ suite.candidates = []*res.Candidate{{
+ Namespace: "library",
+ Repository: "harbor",
+ Kind: "image",
+ Tag: "latest",
+ PushedTime: time.Now().Unix(),
+ Labels: []string{"L1", "L2"},
+ }}
+}
+
+// TestGet tests Get
+func (suite *IndexTestSuite) TestGet() {
+ p, err := Get("fakeAction", nil, false)
+ require.NoError(suite.T(), err)
+ require.NotNil(suite.T(), p)
+
+ results, err := p.Perform(suite.candidates)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 1, len(results))
+ assert.Condition(suite.T(), func() (success bool) {
+ r := results[0]
+ success = r.Target != nil &&
+ r.Error == nil &&
+ r.Target.Repository == "harbor" &&
+ r.Target.Tag == "latest"
+
+ return
+ })
+}
+
+type fakePerformer struct {
+ parameters interface{}
+ isDryRun bool
+}
+
+// Perform the action on the given candidates
+func (p *fakePerformer) Perform(candidates []*res.Candidate) (results []*res.Result, err error) {
+ for _, c := range candidates {
+ results = append(results, &res.Result{
+ Target: c,
+ })
+ }
+
+ return
+}
+
+func newFakePerformer(params interface{}, isDryRun bool) action.Performer {
+ return &fakePerformer{
+ parameters: params,
+ isDryRun: isDryRun,
+ }
+}
diff --git a/src/pkg/retention/policy/action/performer.go b/src/pkg/retention/policy/action/performer.go
new file mode 100644
index 000000000..72d34d612
--- /dev/null
+++ b/src/pkg/retention/policy/action/performer.go
@@ -0,0 +1,94 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package action
+
+import (
+ "github.com/goharbor/harbor/src/pkg/retention/dep"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+const (
+ // Retain artifacts
+ Retain = "retain"
+)
+
+// Performer performs the related actions targeting the candidates
+type Performer interface {
+ // Perform the action
+ //
+ // Arguments:
+ // candidates []*res.Candidate : the targets to perform
+ //
+ // Returns:
+ // []*res.Result : result infos
+ // error : common error if any errors occurred
+ Perform(candidates []*res.Candidate) ([]*res.Result, error)
+}
+
+// PerformerFactory is factory method for creating Performer
+type PerformerFactory func(params interface{}, isDryRun bool) Performer
+
+// retainAction makes sure all the candidates will be retained and the others will be cleared,
+// e.g. if all=[latest, dev] and candidates=[latest], "dev" is deleted (or only reported in dry-run mode)
+type retainAction struct {
+ all []*res.Candidate
+ // Indicate if it is a dry run
+ isDryRun bool
+}
+
+// Perform the action
+func (ra *retainAction) Perform(candidates []*res.Candidate) (results []*res.Result, err error) {
+ retained := make(map[string]bool)
+ for _, c := range candidates {
+ retained[c.Hash()] = true
+ }
+
+ // start to delete
+ if len(ra.all) > 0 {
+ for _, art := range ra.all {
+ if _, ok := retained[art.Hash()]; !ok {
+ result := &res.Result{
+ Target: art,
+ }
+
+ if !ra.isDryRun {
+ if err := dep.DefaultClient.Delete(art); err != nil {
+ result.Error = err
+ }
+ }
+
+ results = append(results, result)
+ }
+ }
+ }
+
+ return
+}
+
+// NewRetainAction is factory method for RetainAction
+func NewRetainAction(params interface{}, isDryRun bool) Performer {
+ if params != nil {
+ if all, ok := params.([]*res.Candidate); ok {
+ return &retainAction{
+ all: all,
+ isDryRun: isDryRun,
+ }
+ }
+ }
+
+ return &retainAction{
+ all: make([]*res.Candidate, 0),
+ isDryRun: isDryRun,
+ }
+}
diff --git a/src/pkg/retention/policy/action/performer_test.go b/src/pkg/retention/policy/action/performer_test.go
new file mode 100644
index 000000000..2f6a6be15
--- /dev/null
+++ b/src/pkg/retention/policy/action/performer_test.go
@@ -0,0 +1,112 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package action
+
+import (
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/pkg/retention/dep"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+// TestPerformerSuite tests the performer-related functions
+type TestPerformerSuite struct {
+ suite.Suite
+
+ oldClient dep.Client
+ all []*res.Candidate
+}
+
+// TestPerformer is the entry point of TestPerformerSuite
+func TestPerformer(t *testing.T) {
+ suite.Run(t, new(TestPerformerSuite))
+}
+
+// SetupSuite ...
+func (suite *TestPerformerSuite) SetupSuite() {
+ suite.all = []*res.Candidate{
+ {
+ Namespace: "library",
+ Repository: "harbor",
+ Kind: "image",
+ Tag: "latest",
+ PushedTime: time.Now().Unix(),
+ Labels: []string{"L1", "L2"},
+ },
+ {
+ Namespace: "library",
+ Repository: "harbor",
+ Kind: "image",
+ Tag: "dev",
+ PushedTime: time.Now().Unix(),
+ Labels: []string{"L3"},
+ },
+ }
+
+ suite.oldClient = dep.DefaultClient
+ dep.DefaultClient = &fakeRetentionClient{}
+}
+
+// TearDownSuite ...
+func (suite *TestPerformerSuite) TearDownSuite() {
+ dep.DefaultClient = suite.oldClient
+}
+
+// TestPerform tests Perform action
+func (suite *TestPerformerSuite) TestPerform() {
+ p := &retainAction{
+ all: suite.all,
+ }
+
+ candidates := []*res.Candidate{
+ {
+ Namespace: "library",
+ Repository: "harbor",
+ Kind: "image",
+ Tag: "latest",
+ PushedTime: time.Now().Unix(),
+ Labels: []string{"L1", "L2"},
+ },
+ }
+
+ results, err := p.Perform(candidates)
+ require.NoError(suite.T(), err)
+ require.Equal(suite.T(), 1, len(results))
+ require.NotNil(suite.T(), results[0].Target)
+ assert.NoError(suite.T(), results[0].Error)
+ assert.Equal(suite.T(), "dev", results[0].Target.Tag)
+}
+
+type fakeRetentionClient struct{}
+
+// GetCandidates ...
+func (frc *fakeRetentionClient) GetCandidates(repo *res.Repository) ([]*res.Candidate, error) {
+ return nil, errors.New("not implemented")
+}
+
+// Delete ...
+func (frc *fakeRetentionClient) Delete(candidate *res.Candidate) error {
+ return nil
+}
+
+// DeleteRepository ...
+func (frc *fakeRetentionClient) DeleteRepository(repo *res.Repository) error {
+ panic("implement me")
+}
diff --git a/src/pkg/retention/policy/alg/index/index.go b/src/pkg/retention/policy/alg/index/index.go
new file mode 100644
index 000000000..ad5a6a7f2
--- /dev/null
+++ b/src/pkg/retention/policy/alg/index/index.go
@@ -0,0 +1,57 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package index
+
+import (
+ "sync"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/alg"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/alg/or"
+ "github.com/pkg/errors"
+)
+
+const (
+ // AlgorithmOR for || algorithm
+ AlgorithmOR = "or"
+)
+
+// index for keeping the mapping between algorithm and its processor
+var index sync.Map
+
+func init() {
+ // Register or
+ Register(AlgorithmOR, or.New)
+}
+
+// Register processor with the algorithm
+func Register(algorithm string, processor alg.Factory) {
+ if len(algorithm) > 0 && processor != nil {
+ index.Store(algorithm, processor)
+ }
+}
+
+// Get Processor
+func Get(algorithm string, params []*alg.Parameter) (alg.Processor, error) {
+ v, ok := index.Load(algorithm)
+ if !ok {
+ return nil, errors.Errorf("no processor registered with algorithm: %s", algorithm)
+ }
+
+ if fac, ok := v.(alg.Factory); ok {
+ return fac(params), nil
+ }
+
+ return nil, errors.Errorf("no valid processor registered for algorithm: %s", algorithm)
+}
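
Resolution mirrors the action index. A short hedged sketch; the parameter list is built from the policy rules elsewhere, and the res import is implied:

    func processWithOR(params []*alg.Parameter, artifacts []*res.Candidate) ([]*res.Result, error) {
        p, err := Get(AlgorithmOR, params)
        if err != nil {
            return nil, err
        }
        return p.Process(artifacts)
    }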
diff --git a/src/pkg/retention/policy/alg/or/processor.go b/src/pkg/retention/policy/alg/or/processor.go
new file mode 100644
index 000000000..623e4f050
--- /dev/null
+++ b/src/pkg/retention/policy/alg/or/processor.go
@@ -0,0 +1,216 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package or
+
+import (
+ "sync"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/alg"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/pkg/errors"
+)
+
+// processor handles the rules combined with the OR (||) mapping
+type processor struct {
+ // keep each evaluator and its related selectors if any
+ // note: the selectors can be empty/nil, which means match all ("**")
+ evaluators map[*rule.Evaluator][]res.Selector
+ // action performer
+ performers map[string]action.Performer
+}
+
+// New processor
+func New(parameters []*alg.Parameter) alg.Processor {
+ p := &processor{
+ evaluators: make(map[*rule.Evaluator][]res.Selector),
+ performers: make(map[string]action.Performer),
+ }
+
+ if len(parameters) > 0 {
+ for _, param := range parameters {
+ if param.Evaluator != nil {
+ if len(param.Selectors) > 0 {
+ p.evaluators[&param.Evaluator] = param.Selectors
+ }
+
+ if param.Performer != nil {
+ p.performers[param.Evaluator.Action()] = param.Performer
+ }
+ }
+ }
+ }
+
+ return p
+}
+
+// Process the candidates with the rules
+func (p *processor) Process(artifacts []*res.Candidate) ([]*res.Result, error) {
+ if len(artifacts) == 0 {
+ log.Debug("no artifacts to retention")
+ return make([]*res.Result, 0), nil
+ }
+
+ var (
+ // collect errors by wrapping
+ err error
+ // collect processed candidates
+ processedCandidates = make(map[string]cHash)
+ )
+
+ // for sync
+ type chanItem struct {
+ action string
+ processed []*res.Candidate
+ }
+
+ resChan := make(chan *chanItem, 1)
+ // handle error
+ errChan := make(chan error, 1)
+ // control chan
+ done := make(chan bool, 1)
+
+ // goroutine for receiving results/errors
+ go func() {
+ defer func() {
+ // done
+ done <- true
+ }()
+
+ for {
+ select {
+ case result := <-resChan:
+ if result == nil {
+ // chan is closed
+ return
+ }
+
+ if _, ok := processedCandidates[result.action]; !ok {
+ processedCandidates[result.action] = make(cHash)
+ }
+
+ listByAction := processedCandidates[result.action]
+ for _, rp := range result.processed {
+ // remove duplicated ones
+ listByAction[rp.Hash()] = rp
+ }
+ case e := <-errChan:
+ if err == nil {
+ err = errors.Wrap(e, "artifact processing error")
+ } else {
+ err = errors.Wrap(e, err.Error())
+ }
+ }
+ }
+ }()
+
+ wg := new(sync.WaitGroup)
+ wg.Add(len(p.evaluators))
+
+ for eva, selectors := range p.evaluators {
+ var evaluator = *eva
+
+ go func(evaluator rule.Evaluator, selectors []res.Selector) {
+ var (
+ processed []*res.Candidate
+ err error
+ )
+
+ defer func() {
+ wg.Done()
+ }()
+
+ // start from a copy of the artifacts so the
+ // selectors don't mutate the shared slice
+ processed = append(processed, artifacts...)
+
+ if len(selectors) > 0 {
+ // apply the selectors one by one
+ // (`&&` semantics: a candidate must pass all of them)
+ for _, s := range selectors {
+ if processed, err = s.Select(processed); err != nil {
+ errChan <- err
+ return
+ }
+ }
+ }
+
+ if processed, err = evaluator.Process(processed); err != nil {
+ errChan <- err
+ return
+ }
+
+ // Pass to the outside
+ resChan <- &chanItem{
+ action: evaluator.Action(),
+ processed: processed,
+ }
+ }(evaluator, selectors)
+ }
+
+ // wait until all the rules are evaluated
+ wg.Wait()
+ // close result chan
+ close(resChan)
+ // wait until the receiving goroutine exits
+ <-done
+
+ if err != nil {
+ return nil, err
+ }
+
+ results := make([]*res.Result, 0)
+ // Perform actions
+ for act, hash := range processedCandidates {
+ var attachedErr error
+
+ cl := hash.toList()
+
+ if pf, ok := p.performers[act]; ok {
+ if theRes, err := pf.Perform(cl); err != nil {
+ attachedErr = err
+ } else {
+ results = append(results, theRes...)
+ }
+ } else {
+ attachedErr = errors.Errorf("no performer added for action %s in OR processor", act)
+ }
+
+ if attachedErr != nil {
+ for _, c := range cl {
+ results = append(results, &res.Result{
+ Target: c,
+ Error: attachedErr,
+ })
+ }
+ }
+ }
+
+ return results, nil
+}
+
+type cHash map[string]*res.Candidate
+
+func (ch cHash) toList() []*res.Candidate {
+ l := make([]*res.Candidate, 0)
+
+ for _, v := range ch {
+ l = append(l, v)
+ }
+
+ return l
+}
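
The Process implementation above is a fan-out/fan-in: one goroutine per evaluator produces into a buffered channel, a single collector goroutine drains it while folding errors, and the close-then-done handshake guarantees the collector has seen every result before they are read. A self-contained miniature of the same shape (simplified: no error channel, and range instead of select):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        resCh := make(chan int, 1)
        done := make(chan bool, 1)
        sum := 0

        // Single collector goroutine, playing the role of the result loop above.
        go func() {
            defer func() { done <- true }()
            for v := range resCh {
                sum += v
            }
        }()

        // Fan out: one producer per "evaluator".
        wg := new(sync.WaitGroup)
        wg.Add(3)
        for i := 1; i <= 3; i++ {
            go func(n int) {
                defer wg.Done()
                resCh <- n * n
            }(i)
        }

        wg.Wait()        // all producers finished
        close(resCh)     // ends the collector's range loop
        <-done           // the collector has drained everything
        fmt.Println(sum) // 14
    }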
diff --git a/src/pkg/retention/policy/alg/or/processor_test.go b/src/pkg/retention/policy/alg/or/processor_test.go
new file mode 100644
index 000000000..d37a602c9
--- /dev/null
+++ b/src/pkg/retention/policy/alg/or/processor_test.go
@@ -0,0 +1,176 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package or
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/pkg/retention/dep"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/alg"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/always"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/lastx"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestps"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar"
+ "github.com/goharbor/harbor/src/pkg/retention/res/selectors/label"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+// ProcessorTestSuite is suite for testing processor
+type ProcessorTestSuite struct {
+ suite.Suite
+
+ all []*res.Candidate
+
+ oldClient dep.Client
+}
+
+// TestProcessor is the entry point for ProcessorTestSuite
+func TestProcessor(t *testing.T) {
+ suite.Run(t, new(ProcessorTestSuite))
+}
+
+// SetupSuite ...
+func (suite *ProcessorTestSuite) SetupSuite() {
+ suite.all = []*res.Candidate{
+ {
+ Namespace: "library",
+ Repository: "harbor",
+ Kind: "image",
+ Tag: "latest",
+ PushedTime: time.Now().Unix(),
+ Labels: []string{"L1", "L2"},
+ },
+ {
+ Namespace: "library",
+ Repository: "harbor",
+ Kind: "image",
+ Tag: "dev",
+ PushedTime: time.Now().Unix(),
+ Labels: []string{"L3"},
+ },
+ }
+
+ suite.oldClient = dep.DefaultClient
+ dep.DefaultClient = &fakeRetentionClient{}
+}
+
+// TearDownSuite ...
+func (suite *ProcessorTestSuite) TearDownSuite() {
+ dep.DefaultClient = suite.oldClient
+}
+
+// TestProcess tests process method
+func (suite *ProcessorTestSuite) TestProcess() {
+
+ perf := action.NewRetainAction(suite.all, false)
+
+ params := make([]*alg.Parameter, 0)
+ lastxParams := make(map[string]rule.Parameter)
+ lastxParams[lastx.ParameterX] = 10
+ params = append(params, &alg.Parameter{
+ Evaluator: lastx.New(lastxParams),
+ Selectors: []res.Selector{
+ doublestar.New(doublestar.Matches, "*dev*"),
+ label.New(label.With, "L1,L2"),
+ },
+ Performer: perf,
+ })
+
+ latestKParams := make(map[string]rule.Parameter)
+ latestKParams[latestps.ParameterK] = 10
+ params = append(params, &alg.Parameter{
+ Evaluator: latestps.New(latestKParams),
+ Selectors: []res.Selector{
+ label.New(label.With, "L3"),
+ },
+ Performer: perf,
+ })
+
+ p := New(params)
+
+ results, err := p.Process(suite.all)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 1, len(results))
+ assert.Condition(suite.T(), func() bool {
+ for _, r := range results {
+ if r.Error != nil {
+ return false
+ }
+ }
+
+ return true
+ }, "no errors in the returned result list")
+}
+
+// TestProcess2 ...
+func (suite *ProcessorTestSuite) TestProcess2() {
+ perf := action.NewRetainAction(suite.all, false)
+
+ params := make([]*alg.Parameter, 0)
+ alwaysParams := make(map[string]rule.Parameter)
+ params = append(params, &alg.Parameter{
+ Evaluator: always.New(alwaysParams),
+ Selectors: []res.Selector{
+ doublestar.New(doublestar.Matches, "latest"),
+ label.New(label.With, ""),
+ },
+ Performer: perf,
+ })
+
+ p := New(params)
+
+ results, err := p.Process(suite.all)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 1, len(results))
+ assert.Condition(suite.T(), func() bool {
+ found := false
+ for _, r := range results {
+ if r.Error != nil {
+ return false
+ }
+
+ if r.Target.Tag == "dev" {
+ found = true
+ }
+ }
+
+ return found
+ }, "no errors in the returned result list")
+}
+
+type fakeRetentionClient struct{}
+
+// GetCandidates ...
+func (frc *fakeRetentionClient) GetCandidates(repo *res.Repository) ([]*res.Candidate, error) {
+ return nil, errors.New("not implemented")
+}
+
+// Delete ...
+func (frc *fakeRetentionClient) Delete(candidate *res.Candidate) error {
+ return nil
+}
+
+// DeleteRepository ...
+func (frc *fakeRetentionClient) DeleteRepository(repo *res.Repository) error {
+ panic("implement me")
+}
diff --git a/src/pkg/retention/policy/alg/processor.go b/src/pkg/retention/policy/alg/processor.go
new file mode 100644
index 000000000..4f7103a5f
--- /dev/null
+++ b/src/pkg/retention/policy/alg/processor.go
@@ -0,0 +1,52 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package alg
+
+import (
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+// Processor processes the whole policy against the candidates of a repository.
+// Its methods reflect the standard structure of the policy:
+// a list of rules with corresponding selectors plus an action performer.
+type Processor interface {
+ // Process the artifact candidates
+ //
+ // Arguments:
+ //  artifacts []*res.Candidate : the retention candidates to process
+ //
+ // Returns:
+ // []*res.Result : the processed results
+ // error : common error object if any errors occurred
+ Process(artifacts []*res.Candidate) ([]*res.Result, error)
+}
+
+// Parameter for constructing a processor
+// Represents one rule
+type Parameter struct {
+ // Evaluator for the rule
+ Evaluator rule.Evaluator
+
+ // Selectors for the rule
+ Selectors []res.Selector
+
+ // Performer for the rule evaluator
+ Performer action.Performer
+}
+
+// Factory for creating processor
+type Factory func([]*Parameter) Processor
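+
+// Illustrative usage sketch (a minimal example; ev, sels, perf and
+// candidates are placeholders, and "or".New below in this package tree
+// satisfies Factory):
+//
+//   params := []*alg.Parameter{
+//     {Evaluator: ev, Selectors: sels, Performer: perf},
+//   }
+//   var factory alg.Factory = or.New
+//   p := factory(params)
+//   results, err := p.Process(candidates)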
diff --git a/src/pkg/retention/policy/builder.go b/src/pkg/retention/policy/builder.go
new file mode 100644
index 000000000..88443fb6b
--- /dev/null
+++ b/src/pkg/retention/policy/builder.go
@@ -0,0 +1,102 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package policy
+
+import (
+ "fmt"
+
+ index4 "github.com/goharbor/harbor/src/pkg/retention/policy/action/index"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/alg"
+ index3 "github.com/goharbor/harbor/src/pkg/retention/policy/alg/index"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/lwp"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/index"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ index2 "github.com/goharbor/harbor/src/pkg/retention/res/selectors/index"
+ "github.com/pkg/errors"
+)
+
+// Builder builds the runnable processor from the raw policy
+type Builder interface {
+ // Build builds a runnable processor from the raw policy
+ //
+ // Arguments:
+ // policy *Metadata : the simple metadata of retention policy
+ //  isDryRun bool : indicates whether to build a processor for a dry run
+ //
+ // Returns:
+ // Processor : a processor implementation to process the candidates
+ // error : common error object if any errors occurred
+ Build(policy *lwp.Metadata, isDryRun bool) (alg.Processor, error)
+}
+
+// NewBuilder creates a basic builder
+func NewBuilder(all []*res.Candidate) Builder {
+ return &basicBuilder{
+ allCandidates: all,
+ }
+}
+
+// basicBuilder is default implementation of Builder interface
+type basicBuilder struct {
+ allCandidates []*res.Candidate
+}
+
+// Build policy processor from the raw policy
+func (bb *basicBuilder) Build(policy *lwp.Metadata, isDryRun bool) (alg.Processor, error) {
+ if policy == nil {
+ return nil, errors.New("nil policy to build processor")
+ }
+
+ params := make([]*alg.Parameter, 0)
+
+ for _, r := range policy.Rules {
+ evaluator, err := index.Get(r.Template, r.Parameters)
+ if err != nil {
+ return nil, err
+ }
+
+ perf, err := index4.Get(r.Action, bb.allCandidates, isDryRun)
+ if err != nil {
+ return nil, errors.Wrap(err, "get action performer by metadata")
+ }
+
+ sl := make([]res.Selector, 0)
+ for _, s := range r.TagSelectors {
+ sel, err := index2.Get(s.Kind, s.Decoration, s.Pattern)
+ if err != nil {
+ return nil, errors.Wrap(err, "get selector by metadata")
+ }
+
+ sl = append(sl, sel)
+ }
+
+ params = append(params, &alg.Parameter{
+ Evaluator: evaluator,
+ Selectors: sl,
+ Performer: perf,
+ })
+ }
+
+ p, err := index3.Get(policy.Algorithm, params)
+ if err != nil {
+ return nil, errors.Wrap(err, fmt.Sprintf("get processor for algorithm: %s", policy.Algorithm))
+ }
+
+ return p, nil
+}
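+
+// Typical use, as exercised by the test file for this package (a minimal
+// sketch; meta and candidates are placeholders):
+//
+//   b := policy.NewBuilder(candidates)
+//   p, err := b.Build(meta, false) // meta is a *lwp.Metadata
+//   if err != nil {
+//     return err
+//   }
+//   results, err := p.Process(candidates)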
diff --git a/src/pkg/retention/policy/builder_test.go b/src/pkg/retention/policy/builder_test.go
new file mode 100644
index 000000000..fb1f4271a
--- /dev/null
+++ b/src/pkg/retention/policy/builder_test.go
@@ -0,0 +1,181 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package policy
+
+import (
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/pkg/retention/dep"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ index3 "github.com/goharbor/harbor/src/pkg/retention/policy/action/index"
+ index2 "github.com/goharbor/harbor/src/pkg/retention/policy/alg/index"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/alg/or"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/lwp"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestps"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar"
+ "github.com/goharbor/harbor/src/pkg/retention/res/selectors/index"
+ "github.com/goharbor/harbor/src/pkg/retention/res/selectors/label"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+// TestBuilderSuite is the suite to test builder
+type TestBuilderSuite struct {
+ suite.Suite
+
+ all []*res.Candidate
+ oldClient dep.Client
+}
+
+// TestBuilder is the entry point of TestBuilderSuite
+func TestBuilder(t *testing.T) {
+ suite.Run(t, new(TestBuilderSuite))
+}
+
+// SetupSuite prepares the testing content if needed
+func (suite *TestBuilderSuite) SetupSuite() {
+ suite.all = []*res.Candidate{
+ {
+ NamespaceID: 1,
+ Namespace: "library",
+ Repository: "harbor",
+ Kind: "image",
+ Tag: "latest",
+ PushedTime: time.Now().Unix(),
+ Labels: []string{"L1", "L2"},
+ },
+ {
+ NamespaceID: 1,
+ Namespace: "library",
+ Repository: "harbor",
+ Kind: "image",
+ Tag: "dev",
+ PushedTime: time.Now().Unix(),
+ Labels: []string{"L3"},
+ },
+ }
+
+ index2.Register(index2.AlgorithmOR, or.New)
+ index.Register(doublestar.Kind, []string{
+ doublestar.Matches,
+ doublestar.Excludes,
+ doublestar.RepoMatches,
+ doublestar.RepoExcludes,
+ doublestar.NSMatches,
+ doublestar.NSExcludes,
+ }, doublestar.New)
+ index.Register(label.Kind, []string{label.With, label.Without}, label.New)
+ index3.Register(action.Retain, action.NewRetainAction)
+
+ suite.oldClient = dep.DefaultClient
+ dep.DefaultClient = &fakeRetentionClient{}
+}
+
+// TearDownSuite ...
+func (suite *TestBuilderSuite) TearDownSuite() {
+ dep.DefaultClient = suite.oldClient
+}
+
+// TestBuild tests the Build function
+func (suite *TestBuilderSuite) TestBuild() {
+ b := &basicBuilder{suite.all}
+
+ params := make(rule.Parameters)
+ params[latestps.ParameterK] = 10
+
+ scopeSelectors := make(map[string][]*rule.Selector, 1)
+ scopeSelectors["repository"] = []*rule.Selector{{
+ Kind: doublestar.Kind,
+ Decoration: doublestar.RepoMatches,
+ Pattern: "**",
+ }}
+
+ lm := &lwp.Metadata{
+ Algorithm: AlgorithmOR,
+ Rules: []*rule.Metadata{{
+ ID: 1,
+ Priority: 999,
+ Action: action.Retain,
+ Template: latestps.TemplateID,
+ Parameters: params,
+ ScopeSelectors: scopeSelectors,
+ TagSelectors: []*rule.Selector{
+ {
+ Kind: doublestar.Kind,
+ Decoration: doublestar.Matches,
+ Pattern: "latest",
+ },
+ },
+ }},
+ }
+
+ p, err := b.Build(lm, false)
+ require.NoError(suite.T(), err)
+ require.NotNil(suite.T(), p)
+
+ results, err := p.Process(suite.all)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 1, len(results))
+ assert.Condition(suite.T(), func() (success bool) {
+ art := results[0]
+ success = art.Error == nil &&
+ art.Target != nil &&
+ art.Target.Repository == "harbor" &&
+ art.Target.Tag == "dev"
+
+ return
+ })
+}
+
+type fakeRetentionClient struct{}
+
+func (frc *fakeRetentionClient) DeleteRepository(repo *res.Repository) error {
+ panic("implement me")
+}
+
+// GetCandidates ...
+func (frc *fakeRetentionClient) GetCandidates(repo *res.Repository) ([]*res.Candidate, error) {
+ return nil, errors.New("not implemented")
+}
+
+// Delete ...
+func (frc *fakeRetentionClient) Delete(candidate *res.Candidate) error {
+ return nil
+}
+
+// SubmitTask ...
+func (frc *fakeRetentionClient) SubmitTask(taskID int64, repository *res.Repository, meta *lwp.Metadata) (string, error) {
+ return "", errors.New("not implemented")
+}
diff --git a/src/pkg/retention/policy/lwp/models.go b/src/pkg/retention/policy/lwp/models.go
new file mode 100644
index 000000000..61d48efe5
--- /dev/null
+++ b/src/pkg/retention/policy/lwp/models.go
@@ -0,0 +1,53 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package lwp provides a lightweight version of the retention policy model ("lwp" = lightweight policy)
+package lwp
+
+import (
+ "encoding/json"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/pkg/errors"
+)
+
+// Metadata contains partial metadata of policy
+// It's a lightweight version of policy.Metadata
+type Metadata struct {
+ // Algorithm applied to the rules
+ // "OR" / "AND"
+ Algorithm string `json:"algorithm"`
+
+ // Rule collection
+ Rules []*rule.Metadata `json:"rules"`
+}
+
+// ToJSON marshals metadata to JSON string
+func (m *Metadata) ToJSON() (string, error) {
+ jsonData, err := json.Marshal(m)
+ if err != nil {
+ return "", errors.Wrap(err, "marshal repository")
+ }
+
+ return string(jsonData), nil
+}
+
+// FromJSON constructs the metadata from json data
+func (m *Metadata) FromJSON(jsonData string) error {
+ if len(jsonData) == 0 {
+ return errors.New("empty json data to construct repository")
+ }
+
+ return json.Unmarshal([]byte(jsonData), m)
+}
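+
+// Round-trip sketch (m, rules and clone are placeholders):
+//
+//   m := &lwp.Metadata{Algorithm: "or", Rules: rules}
+//   s, err := m.ToJSON()
+//   ...
+//   clone := &lwp.Metadata{}
+//   err = clone.FromJSON(s)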
diff --git a/src/pkg/retention/policy/models.go b/src/pkg/retention/policy/models.go
new file mode 100644
index 000000000..7fd48c205
--- /dev/null
+++ b/src/pkg/retention/policy/models.go
@@ -0,0 +1,98 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package policy
+
+import (
+ "github.com/astaxie/beego/validation"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+)
+
+const (
+ // AlgorithmOR for OR algorithm
+ AlgorithmOR = "or"
+
+ // TriggerKindSchedule Schedule
+ TriggerKindSchedule = "Schedule"
+
+ // TriggerReferencesJobid job_id
+ TriggerReferencesJobid = "job_id"
+ // TriggerSettingsCron cron
+ TriggerSettingsCron = "cron"
+
+ // ScopeLevelProject project
+ ScopeLevelProject = "project"
+)
+
+// Metadata of policy
+type Metadata struct {
+ // ID of the policy
+ ID int64 `json:"id"`
+
+ // Algorithm applied to the rules
+ // "OR" / "AND"
+ Algorithm string `json:"algorithm" valid:"Required;Match(or)"`
+
+ // Rule collection
+ Rules []rule.Metadata `json:"rules"`
+
+ // Trigger about how to launch the policy
+ Trigger *Trigger `json:"trigger" valid:"Required"`
+
+ // Which scope the policy will be applied to
+ Scope *Scope `json:"scope" valid:"Required"`
+
+ // The max number of rules in a policy
+ Capacity int `json:"cap"`
+}
+
+// Valid implements the custom validation of the policy metadata
+func (m *Metadata) Valid(v *validation.Validation) {
+ if m.Trigger != nil && m.Trigger.Kind == TriggerKindSchedule {
+ if m.Trigger.Settings == nil {
+ _ = v.SetError("Trigger.Settings", "Trigger.Settings is required")
+ } else {
+ if _, ok := m.Trigger.Settings[TriggerSettingsCron]; !ok {
+ _ = v.SetError("Trigger.Settings", "cron in Trigger.Settings is required")
+ }
+ }
+ }
+}
+
+// Trigger of the policy
+type Trigger struct {
+ // Const string to declare the trigger type
+ // 'Schedule'
+ Kind string `json:"kind" valid:"Required"`
+
+ // Settings for the specified trigger
+ // '[cron]="* 22 11 * * *"' for the 'Schedule'
+ Settings map[string]interface{} `json:"settings" valid:"Required"`
+
+ // References of the trigger
+ // e.g: schedule job ID
+ References map[string]interface{} `json:"references"`
+}
+
+// Scope definition
+type Scope struct {
+ // Scope level declaration
+ // 'system', 'project' and 'repository'
+ Level string `json:"level" valid:"Required;Match(/^(project)$/)"`
+
+ // The reference identity for the specified level
+ // 0 for 'system', project ID for 'project' and repo ID for 'repository'
+ Reference int64 `json:"ref" valid:"Required"`
+}
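+
+// For reference, a policy document matching the JSON tags above could look
+// like this (values are illustrative only):
+//
+//   {
+//     "id": 1,
+//     "algorithm": "or",
+//     "rules": [],
+//     "trigger": {
+//       "kind": "Schedule",
+//       "settings": {"cron": "* 22 11 * * *"}
+//     },
+//     "scope": {"level": "project", "ref": 1},
+//     "cap": 10
+//   }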
diff --git a/src/pkg/retention/policy/rule/always/evaluator.go b/src/pkg/retention/policy/rule/always/evaluator.go
new file mode 100644
index 000000000..1cd4f4eb4
--- /dev/null
+++ b/src/pkg/retention/policy/rule/always/evaluator.go
@@ -0,0 +1,42 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package always
+
+import (
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+const (
+ // TemplateID of the always retain rule
+ TemplateID = "always"
+)
+
+type evaluator struct{}
+
+// Process for the "always" Evaluator simply returns the input with no error
+func (e *evaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) {
+ return artifacts, nil
+}
+
+func (e *evaluator) Action() string {
+ return action.Retain
+}
+
+// New returns an "always" Evaluator. It requires no parameters.
+func New(_ rule.Parameters) rule.Evaluator {
+ return &evaluator{}
+}
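+
+// Because the parameters are ignored, New(nil) is also safe (candidates is
+// a placeholder):
+//
+//   ev := always.New(nil)
+//   kept, _ := ev.Process(candidates) // kept is candidates, unchanged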
diff --git a/src/pkg/retention/policy/rule/always/evaluator_test.go b/src/pkg/retention/policy/rule/always/evaluator_test.go
new file mode 100644
index 000000000..9e7c53b77
--- /dev/null
+++ b/src/pkg/retention/policy/rule/always/evaluator_test.go
@@ -0,0 +1,49 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package always
+
+import (
+ "testing"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+type EvaluatorTestSuite struct {
+ suite.Suite
+}
+
+func (e *EvaluatorTestSuite) TestNew() {
+ sut := New(rule.Parameters{})
+
+ require.NotNil(e.T(), sut)
+ require.IsType(e.T(), &evaluator{}, sut)
+}
+
+func (e *EvaluatorTestSuite) TestProcess() {
+ sut := New(rule.Parameters{})
+ input := []*res.Candidate{{PushedTime: 0}, {PushedTime: 1}, {PushedTime: 2}, {PushedTime: 3}}
+
+ result, err := sut.Process(input)
+
+ require.NoError(e.T(), err)
+ require.Len(e.T(), result, len(input))
+}
+
+func TestEvaluatorSuite(t *testing.T) {
+ suite.Run(t, &EvaluatorTestSuite{})
+}
diff --git a/src/pkg/retention/policy/rule/dayspl/evaluator.go b/src/pkg/retention/policy/rule/dayspl/evaluator.go
new file mode 100644
index 000000000..c0fd76256
--- /dev/null
+++ b/src/pkg/retention/policy/rule/dayspl/evaluator.go
@@ -0,0 +1,70 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dayspl
+
+import (
+ "time"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+const (
+ // TemplateID of the rule
+ TemplateID = "nDaysSinceLastPull"
+
+ // ParameterN is the name of the metadata parameter for the N value
+ ParameterN = TemplateID
+
+ // DefaultN is the default number of days within which an artifact
+ // must have been pulled in order to be retained
+ DefaultN = 30
+)
+
+type evaluator struct {
+ n int
+}
+
+func (e *evaluator) Process(artifacts []*res.Candidate) (result []*res.Candidate, err error) {
+ minPullTime := time.Now().UTC().Add(time.Duration(-1*24*e.n) * time.Hour).Unix()
+ for _, a := range artifacts {
+ if a.PulledTime >= minPullTime {
+ result = append(result, a)
+ }
+ }
+
+ return
+}
+
+func (e *evaluator) Action() string {
+ return action.Retain
+}
+
+// New constructs a new 'Days Since Last Pull' evaluator
+func New(params rule.Parameters) rule.Evaluator {
+ if params != nil {
+ if p, ok := params[ParameterN]; ok {
+ if v, ok := p.(float64); ok && v >= 0 {
+ return &evaluator{n: int(v)}
+ }
+ }
+ }
+
+ log.Warningf("default parameter %d used for rule %s", DefaultN, TemplateID)
+
+ return &evaluator{n: DefaultN}
+}
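+
+// Note: rule parameters decoded from JSON arrive as float64 (the default
+// for encoding/json numbers), hence the float64 assertion in New above.
+// Illustrative call (candidates is a placeholder):
+//
+//   ev := dayspl.New(rule.Parameters{dayspl.ParameterN: float64(14)})
+//   kept, _ := ev.Process(candidates) // those pulled within the last 14 days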
diff --git a/src/pkg/retention/policy/rule/dayspl/evaluator_test.go b/src/pkg/retention/policy/rule/dayspl/evaluator_test.go
new file mode 100644
index 000000000..a8587ccd8
--- /dev/null
+++ b/src/pkg/retention/policy/rule/dayspl/evaluator_test.go
@@ -0,0 +1,104 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dayspl
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+type EvaluatorTestSuite struct {
+ suite.Suite
+}
+
+func (e *EvaluatorTestSuite) TestNew() {
+ tests := []struct {
+ Name string
+ args rule.Parameters
+ expectedN int
+ }{
+ {Name: "Valid", args: map[string]rule.Parameter{ParameterN: float64(5)}, expectedN: 5},
+ {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: float64(-1)}, expectedN: DefaultN},
+ {Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedN: DefaultN},
+ {Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterN: "foo"}, expectedN: DefaultN},
+ }
+
+ for _, tt := range tests {
+ e.T().Run(tt.Name, func(t *testing.T) {
+ e := New(tt.args).(*evaluator)
+
+ require.Equal(t, tt.expectedN, e.n)
+ })
+ }
+}
+
+func (e *EvaluatorTestSuite) TestProcess() {
+ now := time.Now().UTC()
+ data := []*res.Candidate{
+ {PulledTime: daysAgo(now, 1)},
+ {PulledTime: daysAgo(now, 2)},
+ {PulledTime: daysAgo(now, 3)},
+ {PulledTime: daysAgo(now, 4)},
+ {PulledTime: daysAgo(now, 5)},
+ {PulledTime: daysAgo(now, 10)},
+ {PulledTime: daysAgo(now, 20)},
+ {PulledTime: daysAgo(now, 30)},
+ }
+
+ tests := []struct {
+ n float64
+ expected int
+ minPullTime int64
+ }{
+ {n: 0, expected: 0, minPullTime: 0},
+ {n: 1, expected: 1, minPullTime: daysAgo(now, 1)},
+ {n: 2, expected: 2, minPullTime: daysAgo(now, 2)},
+ {n: 3, expected: 3, minPullTime: daysAgo(now, 3)},
+ {n: 4, expected: 4, minPullTime: daysAgo(now, 4)},
+ {n: 5, expected: 5, minPullTime: daysAgo(now, 5)},
+ {n: 15, expected: 6, minPullTime: daysAgo(now, 10)},
+ {n: 90, expected: 8, minPullTime: daysAgo(now, 30)},
+ }
+
+ for _, tt := range tests {
+ e.T().Run(fmt.Sprintf("%v", tt.n), func(t *testing.T) {
+ sut := New(map[string]rule.Parameter{ParameterN: tt.n})
+
+ result, err := sut.Process(data)
+
+ require.NoError(t, err)
+ require.Len(t, result, tt.expected)
+
+ for _, v := range result {
+ assert.False(t, v.PulledTime < tt.minPullTime)
+ }
+ })
+ }
+}
+
+func TestEvaluatorSuite(t *testing.T) {
+ suite.Run(t, &EvaluatorTestSuite{})
+}
+
+func daysAgo(from time.Time, n int) int64 {
+ return from.Add(time.Duration(-1*24*n) * time.Hour).Unix()
+}
diff --git a/src/pkg/retention/policy/rule/daysps/evaluator.go b/src/pkg/retention/policy/rule/daysps/evaluator.go
new file mode 100644
index 000000000..ee4dd436d
--- /dev/null
+++ b/src/pkg/retention/policy/rule/daysps/evaluator.go
@@ -0,0 +1,70 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package daysps
+
+import (
+ "time"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+const (
+ // TemplateID of the rule
+ TemplateID = "nDaysSinceLastPush"
+
+ // ParameterN is the name of the metadata parameter for the N value
+ ParameterN = TemplateID
+
+ // DefaultN is the default number of days within which an artifact
+ // must have been pushed in order to be retained
+ DefaultN = 30
+)
+
+type evaluator struct {
+ n int
+}
+
+func (e *evaluator) Process(artifacts []*res.Candidate) (result []*res.Candidate, err error) {
+ minPushTime := time.Now().UTC().Add(time.Duration(-1*24*e.n) * time.Hour).Unix()
+ for _, a := range artifacts {
+ if a.PushedTime >= minPushTime {
+ result = append(result, a)
+ }
+ }
+
+ return
+}
+
+func (e *evaluator) Action() string {
+ return action.Retain
+}
+
+// New constructs a new 'Days Since Last Push' evaluator
+func New(params rule.Parameters) rule.Evaluator {
+ if params != nil {
+ if p, ok := params[ParameterN]; ok {
+ if v, ok := p.(float64); ok && v >= 0 {
+ return &evaluator{n: int(v)}
+ }
+ }
+ }
+
+ log.Warningf("default parameter %d used for rule %s", DefaultN, TemplateID)
+
+ return &evaluator{n: DefaultN}
+}
diff --git a/src/pkg/retention/policy/rule/daysps/evaluator_test.go b/src/pkg/retention/policy/rule/daysps/evaluator_test.go
new file mode 100644
index 000000000..75287ce4f
--- /dev/null
+++ b/src/pkg/retention/policy/rule/daysps/evaluator_test.go
@@ -0,0 +1,104 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package daysps
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+type EvaluatorTestSuite struct {
+ suite.Suite
+}
+
+func (e *EvaluatorTestSuite) TestNew() {
+ tests := []struct {
+ Name string
+ args rule.Parameters
+ expectedN int
+ }{
+ {Name: "Valid", args: map[string]rule.Parameter{ParameterN: float64(5)}, expectedN: 5},
+ {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: float64(-1)}, expectedN: DefaultN},
+ {Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedN: DefaultN},
+ {Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterN: "foo"}, expectedN: DefaultN},
+ }
+
+ for _, tt := range tests {
+ e.T().Run(tt.Name, func(t *testing.T) {
+ e := New(tt.args).(*evaluator)
+
+ require.Equal(t, tt.expectedN, e.n)
+ })
+ }
+}
+
+func (e *EvaluatorTestSuite) TestProcess() {
+ now := time.Now().UTC()
+ data := []*res.Candidate{
+ {PushedTime: daysAgo(now, 1)},
+ {PushedTime: daysAgo(now, 2)},
+ {PushedTime: daysAgo(now, 3)},
+ {PushedTime: daysAgo(now, 4)},
+ {PushedTime: daysAgo(now, 5)},
+ {PushedTime: daysAgo(now, 10)},
+ {PushedTime: daysAgo(now, 20)},
+ {PushedTime: daysAgo(now, 30)},
+ }
+
+ tests := []struct {
+ n float64
+ expected int
+ minPushTime int64
+ }{
+ {n: 0, expected: 0, minPushTime: 0},
+ {n: 1, expected: 1, minPushTime: daysAgo(now, 1)},
+ {n: 2, expected: 2, minPushTime: daysAgo(now, 2)},
+ {n: 3, expected: 3, minPushTime: daysAgo(now, 3)},
+ {n: 4, expected: 4, minPushTime: daysAgo(now, 4)},
+ {n: 5, expected: 5, minPushTime: daysAgo(now, 5)},
+ {n: 15, expected: 6, minPushTime: daysAgo(now, 10)},
+ {n: 90, expected: 8, minPushTime: daysAgo(now, 30)},
+ }
+
+ for _, tt := range tests {
+ e.T().Run(fmt.Sprintf("%v", tt.n), func(t *testing.T) {
+ sut := New(map[string]rule.Parameter{ParameterN: tt.n})
+
+ result, err := sut.Process(data)
+
+ require.NoError(t, err)
+ require.Len(t, result, tt.expected)
+
+ for _, v := range result {
+ assert.False(t, v.PushedTime < tt.minPushTime)
+ }
+ })
+ }
+}
+
+func TestEvaluatorSuite(t *testing.T) {
+ suite.Run(t, &EvaluatorTestSuite{})
+}
+
+func daysAgo(from time.Time, n int) int64 {
+ return from.Add(time.Duration(-1*24*n) * time.Hour).Unix()
+}
diff --git a/src/pkg/retention/policy/rule/evaluator.go b/src/pkg/retention/policy/rule/evaluator.go
new file mode 100644
index 000000000..91ec27913
--- /dev/null
+++ b/src/pkg/retention/policy/rule/evaluator.go
@@ -0,0 +1,36 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rule
+
+import "github.com/goharbor/harbor/src/pkg/retention/res"
+
+// Evaluator defines the methods for executing a rule
+type Evaluator interface {
+ // Filter the inputs and return the filtered outputs
+ //
+ // Arguments:
+ // artifacts []*res.Candidate : candidates for processing
+ //
+ // Returns:
+ // []*res.Candidate : matched candidates for next stage
+ // error : common error object if any errors occurred
+ Process(artifacts []*res.Candidate) ([]*res.Candidate, error)
+
+ // Specify what action is performed on the candidates processed by this evaluator
+ Action() string
+}
+
+// Factory defines a factory method for creating a rule evaluator
+type Factory func(parameters Parameters) Evaluator
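+
+// A minimal Evaluator implementation sketch ("keepAll" is illustrative
+// only; see the rule sub-packages for real ones):
+//
+//   type keepAll struct{}
+//
+//   func (k *keepAll) Process(in []*res.Candidate) ([]*res.Candidate, error) {
+//     return in, nil
+//   }
+//
+//   func (k *keepAll) Action() string { return "retain" }
+//
+//   var _ Factory = func(_ Parameters) Evaluator { return &keepAll{} }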
diff --git a/src/pkg/retention/policy/rule/index/index.go b/src/pkg/retention/policy/rule/index/index.go
new file mode 100644
index 000000000..40a4cccc0
--- /dev/null
+++ b/src/pkg/retention/policy/rule/index/index.go
@@ -0,0 +1,227 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package index
+
+import (
+ "sync"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/always"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/dayspl"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/daysps"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/lastx"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestk"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestpl"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestps"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule/nothing"
+ "github.com/pkg/errors"
+)
+
+// index for keeping the mapping between template ID and evaluator
+var index sync.Map
+
+// Metadata defines metadata for rule registration
+type Metadata struct {
+ TemplateID string `json:"rule_template"`
+
+ // Action that the rule performs
+ // "retain"
+ Action string `json:"action"`
+
+ Parameters []*IndexedParam `json:"params"`
+}
+
+// IndexedParam declares the param info
+type IndexedParam struct {
+ Name string `json:"name"`
+
+ // Type of the param
+ // "int", "string" or "[]string"
+ Type string `json:"type"`
+
+ Unit string `json:"unit"`
+
+ Required bool `json:"required"`
+}
+
+// indexedItem is the item saved in the sync map
+type indexedItem struct {
+ Meta *Metadata
+
+ Factory rule.Factory
+}
+
+func init() {
+ // Register latest pushed
+ Register(&Metadata{
+ TemplateID: latestps.TemplateID,
+ Action: action.Retain,
+ Parameters: []*IndexedParam{
+ {
+ Name: latestps.ParameterK,
+ Type: "int",
+ Unit: "count",
+ Required: true,
+ },
+ },
+ }, latestps.New)
+
+ // Register latest pulled
+ Register(&Metadata{
+ TemplateID: latestpl.TemplateID,
+ Action: action.Retain,
+ Parameters: []*IndexedParam{
+ {
+ Name: latestpl.ParameterN,
+ Type: "int",
+ Unit: "count",
+ Required: true,
+ },
+ },
+ }, latestpl.New)
+
+ // Register latest active
+ Register(&Metadata{
+ TemplateID: latestk.TemplateID,
+ Action: action.Retain,
+ Parameters: []*IndexedParam{
+ {
+ Name: latestk.ParameterK,
+ Type: "int",
+ Unit: "count",
+ Required: true,
+ },
+ },
+ }, latestk.New)
+
+ // Register lastx
+ Register(&Metadata{
+ TemplateID: lastx.TemplateID,
+ Action: action.Retain,
+ Parameters: []*IndexedParam{
+ {
+ Name: lastx.ParameterX,
+ Type: "int",
+ Unit: "days",
+ Required: true,
+ },
+ },
+ }, lastx.New)
+
+ // Register nothing
+ Register(&Metadata{
+ TemplateID: nothing.TemplateID,
+ Action: action.Retain,
+ Parameters: []*IndexedParam{},
+ }, nothing.New)
+
+ // Register always
+ Register(&Metadata{
+ TemplateID: always.TemplateID,
+ Action: action.Retain,
+ Parameters: []*IndexedParam{},
+ }, always.New)
+
+ // Register dayspl
+ Register(&Metadata{
+ TemplateID: dayspl.TemplateID,
+ Action: action.Retain,
+ Parameters: []*IndexedParam{
+ {
+ Name: dayspl.ParameterN,
+ Type: "int",
+ Unit: "days",
+ Required: true,
+ },
+ },
+ }, dayspl.New)
+
+ // Register daysps
+ Register(&Metadata{
+ TemplateID: daysps.TemplateID,
+ Action: action.Retain,
+ Parameters: []*IndexedParam{
+ {
+ Name: daysps.ParameterN,
+ Type: "int",
+ Unit: "days",
+ Required: true,
+ },
+ },
+ }, daysps.New)
+}
+
+// Register the rule evaluator with the corresponding rule template
+func Register(meta *Metadata, factory rule.Factory) {
+ if meta == nil || factory == nil || len(meta.TemplateID) == 0 {
+ // do nothing
+ return
+ }
+
+ index.Store(meta.TemplateID, &indexedItem{
+ Meta: meta,
+ Factory: factory,
+ })
+}
+
+// Get rule evaluator with the provided template ID
+func Get(templateID string, parameters rule.Parameters) (rule.Evaluator, error) {
+ if len(templateID) == 0 {
+ return nil, errors.New("empty rule template ID")
+ }
+
+ v, ok := index.Load(templateID)
+ if !ok {
+ return nil, errors.Errorf("rule evaluator %s is not registered", templateID)
+ }
+
+ item := v.(*indexedItem)
+
+ // More checks can be added here in the future
+ if len(item.Meta.Parameters) > 0 {
+ for _, p := range item.Meta.Parameters {
+ if p.Required {
+ exists := parameters != nil
+ if exists {
+ _, exists = parameters[p.Name]
+ }
+
+ if !exists {
+ return nil, errors.Errorf("missing required parameter %s for rule %s", p.Name, templateID)
+ }
+ }
+ }
+ }
+ factory := item.Factory
+
+ return factory(parameters), nil
+}
+
+// Index returns all the metadata of the registered rules
+func Index() []*Metadata {
+ res := make([]*Metadata, 0)
+
+ index.Range(func(k, v interface{}) bool {
+ if item, ok := v.(*indexedItem); ok {
+ res = append(res, item.Meta)
+ return true
+ }
+
+ return false
+ })
+
+ return res
+}
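+
+// Registration and lookup sketch ("myRule" and myFactory are placeholders;
+// the built-in templates are registered in init above):
+//
+//   Register(&Metadata{TemplateID: "myRule", Action: action.Retain}, myFactory)
+//   ev, err := Get("myRule", rule.Parameters{})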
diff --git a/src/pkg/retention/policy/rule/index/index_test.go b/src/pkg/retention/policy/rule/index/index_test.go
new file mode 100644
index 000000000..b55d29f79
--- /dev/null
+++ b/src/pkg/retention/policy/rule/index/index_test.go
@@ -0,0 +1,122 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package index
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+
+ "github.com/stretchr/testify/suite"
+)
+
+// IndexTestSuite tests the rule index
+type IndexTestSuite struct {
+ suite.Suite
+}
+
+// TestIndexEntry is entry of IndexTestSuite
+func TestIndexEntry(t *testing.T) {
+ suite.Run(t, new(IndexTestSuite))
+}
+
+// SetupSuite ...
+func (suite *IndexTestSuite) SetupSuite() {
+ Register(&Metadata{
+ TemplateID: "fakeEvaluator",
+ Action: "retain",
+ Parameters: []*IndexedParam{
+ {
+ Name: "fakeParam",
+ Type: "int",
+ Unit: "count",
+ Required: true,
+ },
+ },
+ }, newFakeEvaluator)
+}
+
+// TestGet tests Get
+func (suite *IndexTestSuite) TestGet() {
+
+ params := make(rule.Parameters)
+ params["fakeParam"] = 99
+ evaluator, err := Get("fakeEvaluator", params)
+ require.NoError(suite.T(), err)
+ require.NotNil(suite.T(), evaluator)
+
+ candidates := []*res.Candidate{{
+ Namespace: "library",
+ Repository: "harbor",
+ Kind: "image",
+ Tag: "latest",
+ PushedTime: time.Now().Unix(),
+ Labels: []string{"L1", "L2"},
+ }}
+
+ results, err := evaluator.Process(candidates)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 1, len(results))
+ assert.Condition(suite.T(), func() bool {
+ c := results[0]
+ return c.Repository == "harbor" && c.Tag == "latest"
+ })
+}
+
+// TestIndex tests Index
+func (suite *IndexTestSuite) TestIndex() {
+ metas := Index()
+ require.Equal(suite.T(), 9, len(metas))
+ assert.Condition(suite.T(), func() bool {
+ for _, m := range metas {
+ if m.TemplateID == "fakeEvaluator" &&
+ m.Action == "retain" &&
+ len(m.Parameters) > 0 {
+ return true
+ }
+ }
+ return false
+ }, "check fake evaluator in index")
+}
+
+type fakeEvaluator struct {
+ i int
+}
+
+// Process rule
+func (e *fakeEvaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) {
+ return artifacts, nil
+}
+
+// Action of the rule
+func (e *fakeEvaluator) Action() string {
+ return "retain"
+}
+
+// newFakeEvaluator is the factory of fakeEvaluator
+func newFakeEvaluator(parameters rule.Parameters) rule.Evaluator {
+ i := 10
+ if v, ok := parameters["fakeParam"]; ok {
+ i = v.(int)
+ }
+
+ return &fakeEvaluator{i}
+}
diff --git a/src/pkg/retention/policy/rule/lastx/evaluator.go b/src/pkg/retention/policy/rule/lastx/evaluator.go
new file mode 100644
index 000000000..b466f5eda
--- /dev/null
+++ b/src/pkg/retention/policy/rule/lastx/evaluator.go
@@ -0,0 +1,75 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lastx
+
+import (
+ "time"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+const (
+ // TemplateID of last x days rule
+ TemplateID = "lastXDays"
+ // ParameterX ...
+ ParameterX = TemplateID
+ // DefaultX defines the default X
+ DefaultX = 10
+)
+
+// evaluator for evaluating last x days
+type evaluator struct {
+ // last x days
+ x int
+}
+
+// Process the candidates based on the rule definition
+func (e *evaluator) Process(artifacts []*res.Candidate) (retain []*res.Candidate, err error) {
+ cutoff := time.Now().Add(time.Duration(e.x*-24) * time.Hour)
+ for _, a := range artifacts {
+ if time.Unix(a.PushedTime, 0).UTC().After(cutoff) {
+ retain = append(retain, a)
+ }
+ }
+
+ return
+}
+
+// Specify what action is performed to the candidates processed by this evaluator
+func (e *evaluator) Action() string {
+ return action.Retain
+}
+
+// New constructs a 'Last X Days' evaluator
+func New(params rule.Parameters) rule.Evaluator {
+ if params != nil {
+ if param, ok := params[ParameterX]; ok {
+ if v, ok := param.(float64); ok && v >= 0 {
+ return &evaluator{
+ x: int(v),
+ }
+ }
+ }
+ }
+
+ log.Warningf("default parameter %d used for rule %s", DefaultX, TemplateID)
+
+ return &evaluator{
+ x: DefaultX,
+ }
+}
diff --git a/src/pkg/retention/policy/rule/lastx/evaluator_test.go b/src/pkg/retention/policy/rule/lastx/evaluator_test.go
new file mode 100644
index 000000000..becd79234
--- /dev/null
+++ b/src/pkg/retention/policy/rule/lastx/evaluator_test.go
@@ -0,0 +1,78 @@
+package lastx
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+type EvaluatorTestSuite struct {
+ suite.Suite
+}
+
+func (e *EvaluatorTestSuite) TestNew() {
+ tests := []struct {
+ Name string
+ args rule.Parameters
+ expectedX int
+ }{
+ {Name: "Valid", args: map[string]rule.Parameter{ParameterX: float64(3)}, expectedX: 3},
+ {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterX: float64(-3)}, expectedX: DefaultX},
+ {Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedX: DefaultX},
+ {Name: "Default If Wrong Type", args: map[string]rule.Parameter{}, expectedX: DefaultX},
+ }
+
+ for _, tt := range tests {
+ e.T().Run(tt.Name, func(t *testing.T) {
+ e := New(tt.args).(*evaluator)
+
+ require.Equal(t, tt.expectedX, e.x)
+ })
+ }
+}
+
+func (e *EvaluatorTestSuite) TestProcess() {
+ now := time.Now().UTC()
+ data := []*res.Candidate{
+ {PushedTime: now.Add(time.Duration(1*-24) * time.Hour).Unix()},
+ {PushedTime: now.Add(time.Duration(2*-24) * time.Hour).Unix()},
+ {PushedTime: now.Add(time.Duration(3*-24) * time.Hour).Unix()},
+ {PushedTime: now.Add(time.Duration(4*-24) * time.Hour).Unix()},
+ {PushedTime: now.Add(time.Duration(5*-24) * time.Hour).Unix()},
+ {PushedTime: now.Add(time.Duration(99*-24) * time.Hour).Unix()},
+ }
+
+ tests := []struct {
+ days float64
+ expected int
+ }{
+ {days: 0, expected: 0},
+ {days: 1, expected: 0},
+ {days: 2, expected: 1},
+ {days: 3, expected: 2},
+ {days: 4, expected: 3},
+ {days: 5, expected: 4},
+ {days: 6, expected: 5},
+ {days: 7, expected: 5},
+ }
+
+ for _, tt := range tests {
+ e.T().Run(fmt.Sprintf("%v days - should keep %d", tt.days, tt.expected), func(t *testing.T) {
+ e := New(rule.Parameters{ParameterX: tt.days})
+
+ result, err := e.Process(data)
+
+ require.NoError(t, err)
+ require.Len(t, result, tt.expected)
+ })
+ }
+}
+
+func TestEvaluatorSuite(t *testing.T) {
+ suite.Run(t, &EvaluatorTestSuite{})
+}
diff --git a/src/pkg/retention/policy/rule/latestk/evaluator.go b/src/pkg/retention/policy/rule/latestk/evaluator.go
new file mode 100644
index 000000000..f6d73599a
--- /dev/null
+++ b/src/pkg/retention/policy/rule/latestk/evaluator.go
@@ -0,0 +1,89 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package latestk
+
+import (
+ "sort"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+const (
+ // TemplateID of latest active k rule
+ TemplateID = "latestActiveK"
+ // ParameterK ...
+ ParameterK = TemplateID
+ // DefaultK defines the default K
+ DefaultK = 10
+)
+
+// evaluator for evaluating latest active k images
+type evaluator struct {
+ // latest k
+ k int
+}
+
+// Process the candidates based on the rule definition
+func (e *evaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) {
+ // Sort artifacts by their "active time"
+ //
+ // Active time is defined as the selection of c.PulledTime or c.PushedTime,
+ // whichever is larger, i.e. more recent.
+ sort.Slice(artifacts, func(i, j int) bool {
+ return activeTime(artifacts[i]) > activeTime(artifacts[j])
+ })
+
+ i := e.k
+ if i > len(artifacts) {
+ i = len(artifacts)
+ }
+
+ return artifacts[:i], nil
+}
+
+ // Specify what action is performed on the candidates processed by this evaluator
+func (e *evaluator) Action() string {
+ return action.Retain
+}
+
+// New constructs a 'Latest Active K' evaluator
+func New(params rule.Parameters) rule.Evaluator {
+ if params != nil {
+ if param, ok := params[ParameterK]; ok {
+ if v, ok := param.(float64); ok && v >= 0 {
+ return &evaluator{
+ k: int(v),
+ }
+ }
+ }
+ }
+
+ log.Debugf("default parameter %d used for rule %s", DefaultK, TemplateID)
+
+ return &evaluator{
+ k: DefaultK,
+ }
+}
+
+func activeTime(c *res.Candidate) int64 {
+ if c.PulledTime > c.PushedTime {
+ return c.PulledTime
+ }
+
+ return c.PushedTime
+}
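+
+// For example, a candidate with PushedTime=7 and PulledTime=8 has an
+// active time of 8, while one that has never been pulled (PulledTime=0)
+// falls back to its push time.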
diff --git a/src/pkg/retention/policy/rule/latestk/evaluator_test.go b/src/pkg/retention/policy/rule/latestk/evaluator_test.go
new file mode 100644
index 000000000..24b04fb9e
--- /dev/null
+++ b/src/pkg/retention/policy/rule/latestk/evaluator_test.go
@@ -0,0 +1,99 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package latestk
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/stretchr/testify/suite"
+)
+
+type EvaluatorTestSuite struct {
+ suite.Suite
+
+ artifacts []*res.Candidate
+}
+
+func (e *EvaluatorTestSuite) SetupSuite() {
+ e.artifacts = []*res.Candidate{
+ {PulledTime: 1, PushedTime: 2},
+ {PulledTime: 3, PushedTime: 4},
+ {PulledTime: 6, PushedTime: 5},
+ {PulledTime: 8, PushedTime: 7},
+ {PulledTime: 9, PushedTime: 9},
+ {PulledTime: 10, PushedTime: 10},
+ {PulledTime: 0, PushedTime: 11},
+ }
+}
+
+func (e *EvaluatorTestSuite) TestProcess() {
+ tests := []struct {
+ k int
+ expected int
+ minActiveTime int64
+ }{
+ {k: 0, expected: 0},
+ {k: 1, expected: 1, minActiveTime: 11},
+ {k: 2, expected: 2, minActiveTime: 10},
+ {k: 5, expected: 5, minActiveTime: 6},
+ {k: 6, expected: 6, minActiveTime: 3},
+ {k: 99, expected: len(e.artifacts)},
+ }
+ for _, tt := range tests {
+ e.T().Run(fmt.Sprintf("%v", tt.k), func(t *testing.T) {
+ sut := &evaluator{k: tt.k}
+
+ result, err := sut.Process(e.artifacts)
+
+ require.NoError(t, err)
+ require.Len(t, result, tt.expected)
+
+ for _, v := range result {
+ assert.True(t, activeTime(v) >= tt.minActiveTime)
+ }
+ })
+ }
+}
+
+func (e *EvaluatorTestSuite) TestNew() {
+ tests := []struct {
+ name string
+ params rule.Parameters
+ expectedK int
+ }{
+ {name: "Valid", params: rule.Parameters{ParameterK: float64(5)}, expectedK: 5},
+ {name: "Default If Negative", params: rule.Parameters{ParameterK: float64(-5)}, expectedK: DefaultK},
+ {name: "Default If Wrong Type", params: rule.Parameters{ParameterK: "5"}, expectedK: DefaultK},
+ {name: "Default If Wrong Key", params: rule.Parameters{"n": 5}, expectedK: DefaultK},
+ {name: "Default If Empty", params: rule.Parameters{}, expectedK: DefaultK},
+ }
+ for _, tt := range tests {
+ e.T().Run(tt.name, func(t *testing.T) {
+ sut := New(tt.params).(*evaluator)
+
+ require.Equal(t, tt.expectedK, sut.k)
+ })
+ }
+}
+
+func TestEvaluatorSuite(t *testing.T) {
+ suite.Run(t, &EvaluatorTestSuite{})
+}
diff --git a/src/pkg/retention/policy/rule/latestpl/evaluator.go b/src/pkg/retention/policy/rule/latestpl/evaluator.go
new file mode 100644
index 000000000..bed7b6e4e
--- /dev/null
+++ b/src/pkg/retention/policy/rule/latestpl/evaluator.go
@@ -0,0 +1,71 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package latestpl
+
+import (
+ "sort"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+const (
+ // TemplateID of the rule
+ TemplateID = "latestPulledN"
+
+ // ParameterN is the name of the metadata parameter for the N value
+ ParameterN = TemplateID
+
+ // DefaultN is the default number of tags to retain
+ DefaultN = 10
+)
+
+type evaluator struct {
+ n int
+}
+
+func (e *evaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) {
+ sort.Slice(artifacts, func(i, j int) bool {
+ return artifacts[i].PulledTime > artifacts[j].PulledTime
+ })
+
+ i := e.n
+ if i > len(artifacts) {
+ i = len(artifacts)
+ }
+
+ return artifacts[:i], nil
+}
+
+func (e *evaluator) Action() string {
+ return action.Retain
+}
+
+// New constructs an evaluator with the given parameters
+func New(params rule.Parameters) rule.Evaluator {
+ if params != nil {
+ if p, ok := params[ParameterN]; ok {
+ if v, ok := p.(float64); ok && v >= 0 {
+ return &evaluator{n: int(v)}
+ }
+ }
+ }
+
+ log.Warningf("default parameter %d used for rule %s", DefaultN, TemplateID)
+
+ return &evaluator{n: DefaultN}
+}
diff --git a/src/pkg/retention/policy/rule/latestpl/evaluator_test.go b/src/pkg/retention/policy/rule/latestpl/evaluator_test.go
new file mode 100644
index 000000000..69b0605f5
--- /dev/null
+++ b/src/pkg/retention/policy/rule/latestpl/evaluator_test.go
@@ -0,0 +1,89 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package latestpl
+
+import (
+ "fmt"
+ "math/rand"
+ "testing"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+type EvaluatorTestSuite struct {
+ suite.Suite
+}
+
+func (e *EvaluatorTestSuite) TestNew() {
+ tests := []struct {
+ Name string
+ args rule.Parameters
+ expectedK int
+ }{
+ {Name: "Valid", args: map[string]rule.Parameter{ParameterN: float64(5)}, expectedK: 5},
+ {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterN: float64(-1)}, expectedK: DefaultN},
+ {Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedK: DefaultN},
+ {Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterN: "foo"}, expectedK: DefaultN},
+ }
+
+ for _, tt := range tests {
+ e.T().Run(tt.Name, func(t *testing.T) {
+ e := New(tt.args).(*evaluator)
+
+ require.Equal(t, tt.expectedK, e.n)
+ })
+ }
+}
+
+func (e *EvaluatorTestSuite) TestProcess() {
+ data := []*res.Candidate{{PulledTime: 0}, {PulledTime: 1}, {PulledTime: 2}, {PulledTime: 3}, {PulledTime: 4}}
+ rand.Shuffle(len(data), func(i, j int) {
+ data[i], data[j] = data[j], data[i]
+ })
+
+ tests := []struct {
+ n float64
+ expected int
+ minPullTime int64
+ }{
+ {n: 0, expected: 0, minPullTime: 0},
+ {n: 1, expected: 1, minPullTime: 4},
+ {n: 3, expected: 3, minPullTime: 2},
+ {n: 5, expected: 5, minPullTime: 0},
+ {n: 6, expected: 5, minPullTime: 0},
+ }
+
+ for _, tt := range tests {
+ e.T().Run(fmt.Sprintf("%v", tt.n), func(t *testing.T) {
+ ev := New(map[string]rule.Parameter{ParameterN: tt.n})
+
+ result, err := ev.Process(data)
+
+ require.NoError(t, err)
+ require.Len(t, result, tt.expected)
+
+ for _, v := range result {
+ require.False(t, v.PulledTime < tt.minPullTime)
+ }
+ })
+ }
+}
+
+func TestEvaluatorSuite(t *testing.T) {
+ suite.Run(t, &EvaluatorTestSuite{})
+}
diff --git a/src/pkg/retention/policy/rule/latestps/evaluator.go b/src/pkg/retention/policy/rule/latestps/evaluator.go
new file mode 100644
index 000000000..ac000a302
--- /dev/null
+++ b/src/pkg/retention/policy/rule/latestps/evaluator.go
@@ -0,0 +1,78 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package latestps
+
+import (
+ "sort"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+const (
+ // TemplateID of latest k rule
+ TemplateID = "latestPushedK"
+ // ParameterK ...
+ ParameterK = TemplateID
+ // DefaultK defines the default K
+ DefaultK = 10
+)
+
+// evaluator for evaluating the most recently pushed k tags
+type evaluator struct {
+ // latest k
+ k int
+}
+
+// Process the candidates based on the rule definition
+func (e *evaluator) Process(artifacts []*res.Candidate) ([]*res.Candidate, error) {
+	// The order of the provided artifacts is not guaranteed, so sort them by
+	// pushed time, most recently pushed first, before taking the top k
+	sort.Slice(artifacts, func(i, j int) bool {
+		return artifacts[i].PushedTime > artifacts[j].PushedTime
+ })
+
+ i := e.k
+ if i > len(artifacts) {
+ i = len(artifacts)
+ }
+
+ return artifacts[:i], nil
+}
+
+// Specify what action is performed to the candidates processed by this evaluator
+func (e *evaluator) Action() string {
+ return action.Retain
+}
+
+// New a Evaluator
+func New(params rule.Parameters) rule.Evaluator {
+ if params != nil {
+ if param, ok := params[ParameterK]; ok {
+ if v, ok := param.(float64); ok && v >= 0 {
+ return &evaluator{
+ k: int(v),
+ }
+ }
+ }
+ }
+
+ log.Warningf("default parameter %d used for rule %s", DefaultK, TemplateID)
+
+ return &evaluator{
+ k: DefaultK,
+ }
+}
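
For context on how this evaluator is meant to be driven, a minimal, hypothetical sketch (the candidate data is invented; the parameter is a float64 because rule parameters arrive as decoded JSON):

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/pkg/retention/policy/rule"
	"github.com/goharbor/harbor/src/pkg/retention/policy/rule/latestps"
	"github.com/goharbor/harbor/src/pkg/retention/res"
)

func main() {
	// Keep the 2 most recently pushed candidates
	ev := latestps.New(rule.Parameters{latestps.ParameterK: float64(2)})

	candidates := []*res.Candidate{
		{Repository: "redis", Tag: "4.0", PushedTime: 100},
		{Repository: "redis", Tag: "4.1", PushedTime: 200},
		{Repository: "redis", Tag: "4.2", PushedTime: 300},
	}

	retained, err := ev.Process(candidates)
	if err != nil {
		panic(err)
	}
	for _, c := range retained {
		// Prints redis:4.2 and redis:4.1, the two most recently pushed
		fmt.Printf("retain %s:%s\n", c.Repository, c.Tag)
	}
}
```
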
diff --git a/src/pkg/retention/policy/rule/latestps/evaluator_test.go b/src/pkg/retention/policy/rule/latestps/evaluator_test.go
new file mode 100644
index 000000000..6e303c3c4
--- /dev/null
+++ b/src/pkg/retention/policy/rule/latestps/evaluator_test.go
@@ -0,0 +1,71 @@
+package latestps
+
+import (
+	"fmt"
+	"math/rand"
+	"testing"
+
+	"github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+	"github.com/goharbor/harbor/src/pkg/retention/res"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+)
+
+type EvaluatorTestSuite struct {
+ suite.Suite
+}
+
+func (e *EvaluatorTestSuite) TestNew() {
+ tests := []struct {
+ Name string
+ args rule.Parameters
+ expectedK int
+ }{
+ {Name: "Valid", args: map[string]rule.Parameter{ParameterK: float64(5)}, expectedK: 5},
+ {Name: "Default If Negative", args: map[string]rule.Parameter{ParameterK: float64(-1)}, expectedK: DefaultK},
+ {Name: "Default If Not Set", args: map[string]rule.Parameter{}, expectedK: DefaultK},
+ {Name: "Default If Wrong Type", args: map[string]rule.Parameter{ParameterK: "foo"}, expectedK: DefaultK},
+ }
+
+ for _, tt := range tests {
+ e.T().Run(tt.Name, func(t *testing.T) {
+ e := New(tt.args).(*evaluator)
+
+ require.Equal(t, tt.expectedK, e.k)
+ })
+ }
+}
+
+func (e *EvaluatorTestSuite) TestProcess() {
+ data := []*res.Candidate{{PushedTime: 0}, {PushedTime: 1}, {PushedTime: 2}, {PushedTime: 3}, {PushedTime: 4}}
+ rand.Shuffle(len(data), func(i, j int) {
+ data[i], data[j] = data[j], data[i]
+ })
+
+ tests := []struct {
+ k float64
+ expected int
+ }{
+ {k: 0, expected: 0},
+ {k: 1, expected: 1},
+ {k: 3, expected: 3},
+ {k: 5, expected: 5},
+ {k: 6, expected: 5},
+ }
+
+ for _, tt := range tests {
+ e.T().Run(fmt.Sprintf("%v", tt.k), func(t *testing.T) {
+ e := New(map[string]rule.Parameter{ParameterK: tt.k})
+
+ result, err := e.Process(data)
+
+ require.NoError(t, err)
+ require.Len(t, result, tt.expected)
+ })
+ }
+}
+
+func TestEvaluator(t *testing.T) {
+ suite.Run(t, &EvaluatorTestSuite{})
+}
diff --git a/src/pkg/retention/policy/rule/models.go b/src/pkg/retention/policy/rule/models.go
new file mode 100644
index 000000000..448b10183
--- /dev/null
+++ b/src/pkg/retention/policy/rule/models.go
@@ -0,0 +1,64 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rule
+
+// Metadata of the retention rule
+type Metadata struct {
+	// ID of the rule
+	ID int `json:"id"`
+
+	// Priority of the rule during calculation
+	Priority int `json:"priority" valid:"Required"`
+
+ // Disabled rule
+ Disabled bool `json:"disabled"`
+
+	// Action the rule performs
+ // "retain"
+ Action string `json:"action" valid:"Required"`
+
+ // Template ID
+ Template string `json:"template" valid:"Required"`
+
+ // The parameters of this rule
+ Parameters Parameters `json:"params"`
+
+	// Selectors attached to the rule for filtering tags
+	TagSelectors []*Selector `json:"tag_selectors" valid:"Required"`
+
+	// Selectors attached to the rule for filtering the scope (e.g: repositories or namespaces)
+	ScopeSelectors map[string][]*Selector `json:"scope_selectors" valid:"Required"`
+}
+
+// Selector to narrow down the list
+type Selector struct {
+ // Kind of the selector
+	// "doublestar" or "label"
+ Kind string `json:"kind" valid:"Required"`
+
+	// Decoration of the selector
+ // for "regularExpression" : "matches" and "excludes"
+ // for "label" : "with" and "without"
+ Decoration string `json:"decoration" valid:"Required"`
+
+ // Param for the selector
+ Pattern string `json:"pattern" valid:"Required"`
+}
+
+// Parameters of rule, indexed by the key
+type Parameters map[string]Parameter
+
+// Parameter of rule
+type Parameter interface{}
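
A sketch of how these models fit together, with all values illustrative (in particular, the "repository" scope key is an assumption for the example):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/goharbor/harbor/src/pkg/retention/policy/rule"
)

func main() {
	meta := &rule.Metadata{
		ID:       1,
		Priority: 999,
		Action:   "retain",
		Template: "latestPushedK",
		Parameters: rule.Parameters{
			"latestPushedK": float64(10),
		},
		TagSelectors: []*rule.Selector{
			{Kind: "doublestar", Decoration: "matches", Pattern: "**"},
		},
		ScopeSelectors: map[string][]*rule.Selector{
			"repository": {
				{Kind: "doublestar", Decoration: "repoMatches", Pattern: "**"},
			},
		},
	}

	// The JSON tags above produce the wire format consumed by the API layer
	b, err := json.MarshalIndent(meta, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```
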
diff --git a/src/pkg/retention/policy/rule/nothing/evaluator.go b/src/pkg/retention/policy/rule/nothing/evaluator.go
new file mode 100644
index 000000000..8bc4b9063
--- /dev/null
+++ b/src/pkg/retention/policy/rule/nothing/evaluator.go
@@ -0,0 +1,42 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nothing
+
+import (
+ "github.com/goharbor/harbor/src/pkg/retention/policy/action"
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+const (
+	// TemplateID of the rule that retains nothing
+ TemplateID = "nothing"
+)
+
+type evaluator struct{}
+
+// Process for the "nothing" Evaluator returns an empty result with no error,
+// so none of the candidates are retained by this rule
+func (e *evaluator) Process(artifacts []*res.Candidate) (processed []*res.Candidate, err error) {
+ return processed, err
+}
+
+func (e *evaluator) Action() string {
+ return action.Retain
+}
+
+// New returns a "nothing" Evaluator. It requires no parameters.
+func New(_ rule.Parameters) rule.Evaluator {
+ return &evaluator{}
+}
diff --git a/src/pkg/retention/policy/rule/nothing/evaluator_test.go b/src/pkg/retention/policy/rule/nothing/evaluator_test.go
new file mode 100644
index 000000000..1432db651
--- /dev/null
+++ b/src/pkg/retention/policy/rule/nothing/evaluator_test.go
@@ -0,0 +1,49 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nothing
+
+import (
+ "testing"
+
+ "github.com/goharbor/harbor/src/pkg/retention/policy/rule"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+type EvaluatorTestSuite struct {
+ suite.Suite
+}
+
+func (e *EvaluatorTestSuite) TestNew() {
+ sut := New(rule.Parameters{})
+
+ require.NotNil(e.T(), sut)
+ require.IsType(e.T(), &evaluator{}, sut)
+}
+
+func (e *EvaluatorTestSuite) TestProcess() {
+ sut := New(rule.Parameters{})
+ input := []*res.Candidate{{PushedTime: 0}, {PushedTime: 1}, {PushedTime: 2}, {PushedTime: 3}}
+
+ result, err := sut.Process(input)
+
+ require.NoError(e.T(), err)
+ require.Len(e.T(), result, 0)
+}
+
+func TestEvaluatorSuite(t *testing.T) {
+ suite.Run(t, &EvaluatorTestSuite{})
+}
diff --git a/src/pkg/retention/q/query.go b/src/pkg/retention/q/query.go
new file mode 100644
index 000000000..bb2ba75d5
--- /dev/null
+++ b/src/pkg/retention/q/query.go
@@ -0,0 +1,29 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package q
+
+// Query parameters
+type Query struct {
+ PageNumber int64
+ PageSize int64
+}
+
+// TaskQuery parameters
+type TaskQuery struct {
+ ExecutionID int64
+ Status string
+ PageNumber int64
+ PageSize int64
+}
diff --git a/src/pkg/retention/res/candidate.go b/src/pkg/retention/res/candidate.go
new file mode 100644
index 000000000..ff8c8d145
--- /dev/null
+++ b/src/pkg/retention/res/candidate.go
@@ -0,0 +1,90 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package res
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+
+ "github.com/pkg/errors"
+)
+
+const (
+ // Image kind
+ Image = "image"
+ // Chart kind
+ Chart = "chart"
+)
+
+// Repository of candidate
+type Repository struct {
+ // Namespace
+ Namespace string `json:"namespace"`
+ // Repository name
+ Name string `json:"name"`
+	// So far we need the kind of the repository to retrieve candidates via different APIs
+ // TODO: REMOVE IT IN THE FUTURE IF WE SUPPORT UNIFIED ARTIFACT MODEL
+ Kind string `json:"kind"`
+}
+
+// ToJSON marshals repository to JSON string
+func (r *Repository) ToJSON() (string, error) {
+ jsonData, err := json.Marshal(r)
+ if err != nil {
+		return "", errors.Wrap(err, "marshal repository")
+ }
+
+ return string(jsonData), nil
+}
+
+// FromJSON constructs the repository from json data
+func (r *Repository) FromJSON(jsonData string) error {
+ if len(jsonData) == 0 {
+ return errors.New("empty json data to construct repository")
+ }
+
+ return json.Unmarshal([]byte(jsonData), r)
+}
+
+// Candidate for retention processor to match
+type Candidate struct {
+ // Namespace(project) ID
+ NamespaceID int64
+ // Namespace
+ Namespace string
+ // Repository name
+ Repository string
+ // Kind of the candidate
+ // "image" or "chart"
+ Kind string
+ // Tag info
+ Tag string
+ // Pushed time in seconds
+ PushedTime int64
+ // Pulled time in seconds
+ PulledTime int64
+ // Created time in seconds
+ CreationTime int64
+ // Labels attached with the candidate
+ Labels []string
+}
+
+// Hash code based on the candidate info for differentiation
+func (c *Candidate) Hash() string {
+ raw := fmt.Sprintf("%s:%s/%s:%s", c.Kind, c.Namespace, c.Repository, c.Tag)
+
+ return base64.StdEncoding.EncodeToString([]byte(raw))
+}
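
A short sketch of the Repository JSON round trip and the Hash format (values invented):

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/pkg/retention/res"
)

func main() {
	repo := &res.Repository{Namespace: "library", Name: "harbor", Kind: res.Image}

	s, err := repo.ToJSON()
	if err != nil {
		panic(err)
	}

	restored := &res.Repository{}
	if err := restored.FromJSON(s); err != nil {
		panic(err)
	}

	c := &res.Candidate{Kind: res.Image, Namespace: "library", Repository: "harbor", Tag: "latest"}
	// Prints the base64 encoding of "image:library/harbor:latest"
	fmt.Println(c.Hash())
}
```
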
diff --git a/src/pkg/retention/res/result.go b/src/pkg/retention/res/result.go
new file mode 100644
index 000000000..be91be04a
--- /dev/null
+++ b/src/pkg/retention/res/result.go
@@ -0,0 +1,22 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package res
+
+// Result keeps the action result
+type Result struct {
+ Target *Candidate `json:"target"`
+ // nil error means success
+ Error error `json:"error"`
+}
diff --git a/src/pkg/retention/res/selector.go b/src/pkg/retention/res/selector.go
new file mode 100644
index 000000000..de0d34836
--- /dev/null
+++ b/src/pkg/retention/res/selector.go
@@ -0,0 +1,30 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package res
+
+// Selector is used to filter the input candidate list
+type Selector interface {
+ // Select the matched ones
+ //
+ // Arguments:
+ // artifacts []*Candidate : candidates for matching
+ //
+ // Returns:
+ // []*Candidate : matched candidates
+ Select(artifacts []*Candidate) ([]*Candidate, error)
+}
+
+// SelectorFactory is a factory method that returns a selector implementation
+type SelectorFactory func(decoration string, pattern string) Selector
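
Any type with a matching Select method satisfies the interface; a hypothetical custom selector, just to illustrate the contract:

```go
package custom

import "github.com/goharbor/harbor/src/pkg/retention/res"

// labeledOnly keeps only candidates that carry at least one label
type labeledOnly struct{}

// Select implements res.Selector
func (l *labeledOnly) Select(artifacts []*res.Candidate) ([]*res.Candidate, error) {
	var out []*res.Candidate
	for _, a := range artifacts {
		if len(a.Labels) > 0 {
			out = append(out, a)
		}
	}
	return out, nil
}

// NewLabeledOnly has the res.SelectorFactory shape; this selector
// ignores the decoration and pattern arguments
func NewLabeledOnly(decoration string, pattern string) res.Selector {
	return &labeledOnly{}
}

// Compile-time check that the factory matches the expected signature
var _ res.SelectorFactory = NewLabeledOnly
```
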
diff --git a/src/pkg/retention/res/selectors/doublestar/selector.go b/src/pkg/retention/res/selectors/doublestar/selector.go
new file mode 100644
index 000000000..fcbb628b9
--- /dev/null
+++ b/src/pkg/retention/res/selectors/doublestar/selector.go
@@ -0,0 +1,102 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package doublestar
+
+import (
+ "github.com/bmatcuk/doublestar"
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+const (
+ // Kind ...
+ Kind = "doublestar"
+ // Matches [pattern] for tag (default)
+ Matches = "matches"
+ // Excludes [pattern] for tag (default)
+ Excludes = "excludes"
+ // RepoMatches represents repository matches [pattern]
+ RepoMatches = "repoMatches"
+ // RepoExcludes represents repository excludes [pattern]
+ RepoExcludes = "repoExcludes"
+ // NSMatches represents namespace matches [pattern]
+ NSMatches = "nsMatches"
+ // NSExcludes represents namespace excludes [pattern]
+ NSExcludes = "nsExcludes"
+)
+
+// selector for doublestar pattern matching
+type selector struct {
+	// Predefined pattern declarator
+	// "matches", "excludes", "repoMatches", "repoExcludes", "nsMatches" or "nsExcludes"
+ decoration string
+ // The pattern expression
+ pattern string
+}
+
+// Select candidates by doublestar patterns
+func (s *selector) Select(artifacts []*res.Candidate) (selected []*res.Candidate, err error) {
+ value := ""
+ excludes := false
+
+ for _, art := range artifacts {
+ switch s.decoration {
+ case Matches:
+ value = art.Tag
+ case Excludes:
+ value = art.Tag
+ excludes = true
+ case RepoMatches:
+ value = art.Repository
+ case RepoExcludes:
+ value = art.Repository
+ excludes = true
+ case NSMatches:
+ value = art.Namespace
+ case NSExcludes:
+ value = art.Namespace
+ excludes = true
+ }
+
+ if len(value) > 0 {
+ matched, err := match(s.pattern, value)
+ if err != nil {
+ // if error occurred, directly throw it out
+ return nil, err
+ }
+
+ if (matched && !excludes) || (!matched && excludes) {
+ selected = append(selected, art)
+ }
+ }
+ }
+
+ return selected, nil
+}
+
+// New is a factory method for the doublestar selector
+func New(decoration string, pattern string) res.Selector {
+ return &selector{
+ decoration: decoration,
+ pattern: pattern,
+ }
+}
+
+// match returns whether the str matches the pattern
+func match(pattern, str string) (bool, error) {
+ if len(pattern) == 0 {
+ return true, nil
+ }
+ return doublestar.Match(pattern, str)
+}
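
A minimal usage sketch, reusing a pattern from the tests below (candidate data invented):

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/pkg/retention/res"
	"github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar"
)

func main() {
	sel := doublestar.New(doublestar.Matches, "{latest,4.*}")

	selected, err := sel.Select([]*res.Candidate{
		{Repository: "harbor", Tag: "latest"},
		{Repository: "redis", Tag: "4.0"},
		{Repository: "redis", Tag: "stable"},
	})
	if err != nil {
		panic(err)
	}
	for _, c := range selected {
		// Prints harbor:latest and redis:4.0; redis:stable matches neither alternative
		fmt.Printf("%s:%s\n", c.Repository, c.Tag)
	}
}
```
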
diff --git a/src/pkg/retention/res/selectors/doublestar/selector_test.go b/src/pkg/retention/res/selectors/doublestar/selector_test.go
new file mode 100644
index 000000000..23c8dd377
--- /dev/null
+++ b/src/pkg/retention/res/selectors/doublestar/selector_test.go
@@ -0,0 +1,252 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package doublestar
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/goharbor/harbor/src/pkg/retention/res"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+)
+
+// RegExpSelectorTestSuite is a suite for testing the doublestar selector
+type RegExpSelectorTestSuite struct {
+ suite.Suite
+
+ artifacts []*res.Candidate
+}
+
+// TestRegExpSelector is the entry point for RegExpSelectorTestSuite
+func TestRegExpSelector(t *testing.T) {
+ suite.Run(t, new(RegExpSelectorTestSuite))
+}
+
+// SetupSuite to do preparation work
+func (suite *RegExpSelectorTestSuite) SetupSuite() {
+ suite.artifacts = []*res.Candidate{
+ {
+ NamespaceID: 1,
+ Namespace: "library",
+ Repository: "harbor",
+ Tag: "latest",
+ Kind: res.Image,
+ PushedTime: time.Now().Unix() - 3600,
+ PulledTime: time.Now().Unix(),
+ CreationTime: time.Now().Unix() - 7200,
+ Labels: []string{"label1", "label2", "label3"},
+ },
+ {
+ NamespaceID: 2,
+ Namespace: "retention",
+ Repository: "redis",
+ Tag: "4.0",
+ Kind: res.Image,
+ PushedTime: time.Now().Unix() - 3600,
+ PulledTime: time.Now().Unix(),
+ CreationTime: time.Now().Unix() - 7200,
+ Labels: []string{"label1", "label4", "label5"},
+ },
+ {
+ NamespaceID: 2,
+ Namespace: "retention",
+ Repository: "redis",
+ Tag: "4.1",
+ Kind: res.Image,
+ PushedTime: time.Now().Unix() - 3600,
+ PulledTime: time.Now().Unix(),
+ CreationTime: time.Now().Unix() - 7200,
+ Labels: []string{"label1", "label4", "label5"},
+ },
+ }
+}
+
+// TestTagMatches tests the tag `matches` case
+func (suite *RegExpSelectorTestSuite) TestTagMatches() {
+ tagMatches := &selector{
+ decoration: Matches,
+ pattern: "{latest,4.*}",
+ }
+
+ selected, err := tagMatches.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 3, len(selected))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"harbor:latest", "redis:4.0", "redis:4.1"}, selected)
+ })
+
+ tagMatches2 := &selector{
+ decoration: Matches,
+ pattern: "4.*",
+ }
+
+ selected, err = tagMatches2.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 2, len(selected))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"redis:4.0", "redis:4.1"}, selected)
+ })
+}
+
+// TestTagExcludes tests the tag `excludes` case
+func (suite *RegExpSelectorTestSuite) TestTagExcludes() {
+ tagExcludes := &selector{
+ decoration: Excludes,
+ pattern: "{latest,4.*}",
+ }
+
+ selected, err := tagExcludes.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 0, len(selected))
+
+ tagExcludes2 := &selector{
+ decoration: Excludes,
+ pattern: "4.*",
+ }
+
+ selected, err = tagExcludes2.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 1, len(selected))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"harbor:latest"}, selected)
+ })
+}
+
+// TestRepoMatches tests the repository `matches` case
+func (suite *RegExpSelectorTestSuite) TestRepoMatches() {
+ repoMatches := &selector{
+ decoration: RepoMatches,
+ pattern: "{redis}",
+ }
+
+ selected, err := repoMatches.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 2, len(selected))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"redis:4.0", "redis:4.1"}, selected)
+ })
+
+ repoMatches2 := &selector{
+ decoration: RepoMatches,
+ pattern: "har*",
+ }
+
+ selected, err = repoMatches2.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 1, len(selected))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"harbor:latest"}, selected)
+ })
+}
+
+// TestRepoExcludes tests the repository `excludes` case
+func (suite *RegExpSelectorTestSuite) TestRepoExcludes() {
+ repoExcludes := &selector{
+ decoration: RepoExcludes,
+ pattern: "{redis}",
+ }
+
+ selected, err := repoExcludes.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 1, len(selected))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"harbor:latest"}, selected)
+ })
+
+ repoExcludes2 := &selector{
+ decoration: RepoExcludes,
+ pattern: "har*",
+ }
+
+ selected, err = repoExcludes2.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 2, len(selected))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"redis:4.0", "redis:4.1"}, selected)
+ })
+}
+
+// TestNSMatches tests the namespace `matches` case
+func (suite *RegExpSelectorTestSuite) TestNSMatches() {
+ repoMatches := &selector{
+ decoration: NSMatches,
+ pattern: "{library}",
+ }
+
+ selected, err := repoMatches.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 1, len(selected))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"harbor:latest"}, selected)
+ })
+
+ repoMatches2 := &selector{
+		decoration: NSMatches,
+ pattern: "re*",
+ }
+
+ selected, err = repoMatches2.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 2, len(selected))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"redis:4.0", "redis:4.1"}, selected)
+ })
+}
+
+// TestNSExcludes tests the namespace `excludes` case
+func (suite *RegExpSelectorTestSuite) TestNSExcludes() {
+ repoExcludes := &selector{
+ decoration: NSExcludes,
+ pattern: "{library}",
+ }
+
+ selected, err := repoExcludes.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 2, len(selected))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"redis:4.0", "redis:4.1"}, selected)
+ })
+
+ repoExcludes2 := &selector{
+ decoration: NSExcludes,
+ pattern: "re*",
+ }
+
+ selected, err = repoExcludes2.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 1, len(selected))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"harbor:latest"}, selected)
+ })
+}
+
+// Check whether the returned result matched the expected ones (only check repo:tag)
+func expect(expected []string, candidates []*res.Candidate) bool {
+ hash := make(map[string]bool)
+
+ for _, art := range candidates {
+ hash[fmt.Sprintf("%s:%s", art.Repository, art.Tag)] = true
+ }
+
+ for _, exp := range expected {
+ if _, ok := hash[exp]; !ok {
+			return false
+ }
+ }
+
+ return true
+}
diff --git a/src/pkg/retention/res/selectors/index/index.go b/src/pkg/retention/res/selectors/index/index.go
new file mode 100644
index 000000000..fe00c4f4b
--- /dev/null
+++ b/src/pkg/retention/res/selectors/index/index.go
@@ -0,0 +1,109 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package index
+
+import (
+	"sync"
+
+	"github.com/goharbor/harbor/src/pkg/retention/res"
+	"github.com/goharbor/harbor/src/pkg/retention/res/selectors/doublestar"
+	"github.com/goharbor/harbor/src/pkg/retention/res/selectors/label"
+	"github.com/pkg/errors"
+)
+
+func init() {
+ // Register doublestar selector
+ Register(doublestar.Kind, []string{
+ doublestar.Matches,
+ doublestar.Excludes,
+ doublestar.RepoMatches,
+ doublestar.RepoExcludes,
+ doublestar.NSMatches,
+ doublestar.NSExcludes,
+ }, doublestar.New)
+
+ // Register label selector
+ Register(label.Kind, []string{label.With, label.Without}, label.New)
+}
+
+// index for keeping the mapping between selector meta and its implementation
+var index sync.Map
+
+// IndexedMeta describes the indexed selector
+type IndexedMeta struct {
+ Kind string `json:"kind"`
+ Decorations []string `json:"decorations"`
+}
+
+// indexedItem defines the item kept in the index
+type indexedItem struct {
+ Meta *IndexedMeta
+ Factory res.SelectorFactory
+}
+
+// Register the selector with the corresponding selector kind and decoration
+func Register(kind string, decorations []string, factory res.SelectorFactory) {
+ if len(kind) == 0 || factory == nil {
+ // do nothing
+ return
+ }
+
+ index.Store(kind, &indexedItem{
+ Meta: &IndexedMeta{
+ Kind: kind,
+ Decorations: decorations,
+ },
+ Factory: factory,
+ })
+}
+
+// Get selector with the provided kind and decoration
+func Get(kind, decoration, pattern string) (res.Selector, error) {
+ if len(kind) == 0 || len(decoration) == 0 {
+ return nil, errors.New("empty selector kind or decoration")
+ }
+
+ v, ok := index.Load(kind)
+ if !ok {
+ return nil, errors.Errorf("selector %s is not registered", kind)
+ }
+
+ item := v.(*indexedItem)
+ for _, dec := range item.Meta.Decorations {
+ if dec == decoration {
+ factory := item.Factory
+ return factory(decoration, pattern), nil
+ }
+ }
+
+ return nil, errors.Errorf("decoration %s of selector %s is not supported", decoration, kind)
+}
+
+// Index returns all the declarative selectors
+func Index() []*IndexedMeta {
+ all := make([]*IndexedMeta, 0)
+
+ index.Range(func(k, v interface{}) bool {
+ if item, ok := v.(*indexedItem); ok {
+ all = append(all, item.Meta)
+ return true
+ }
+
+ return false
+ })
+
+ return all
+}
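
A sketch of resolving a selector through the index rather than constructing it directly (the init function above registers both kinds):

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/pkg/retention/res"
	"github.com/goharbor/harbor/src/pkg/retention/res/selectors/index"
)

func main() {
	sel, err := index.Get("doublestar", "repoMatches", "redis*")
	if err != nil {
		panic(err)
	}

	selected, err := sel.Select([]*res.Candidate{
		{Repository: "redis", Tag: "4.0"},
		{Repository: "harbor", Tag: "latest"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(selected)) // 1: only the redis repository matches
}
```
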
diff --git a/src/pkg/retention/res/selectors/label/selector.go b/src/pkg/retention/res/selectors/label/selector.go
new file mode 100644
index 000000000..2fa788a5a
--- /dev/null
+++ b/src/pkg/retention/res/selectors/label/selector.go
@@ -0,0 +1,86 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package label
+
+import (
+ "strings"
+
+ "github.com/goharbor/harbor/src/pkg/retention/res"
+)
+
+const (
+ // Kind ...
+ Kind = "label"
+ // With labels
+ With = "withLabels"
+ // Without labels
+ Without = "withoutLabels"
+)
+
+// selector for label matching
+type selector struct {
+	// Predefined pattern decoration
+	// "withLabels" or "withoutLabels"
+ decoration string
+ // Label list
+ labels []string
+}
+
+// Select candidates by the labels
+func (s *selector) Select(artifacts []*res.Candidate) (selected []*res.Candidate, err error) {
+ for _, art := range artifacts {
+ if isMatched(s.labels, art.Labels, s.decoration) {
+ selected = append(selected, art)
+ }
+ }
+
+ return selected, nil
+}
+
+// New is a factory method for the label selector
+func New(decoration string, pattern string) res.Selector {
+ labels := make([]string, 0)
+ if len(pattern) > 0 {
+ labels = append(labels, strings.Split(pattern, ",")...)
+ }
+
+ return &selector{
+ decoration: decoration,
+ labels: labels,
+ }
+}
+
+// Check if the resource labels match the pattern labels
+func isMatched(patternLbls []string, resLbls []string, decoration string) bool {
+ hash := make(map[string]bool)
+
+ for _, lbl := range resLbls {
+ hash[lbl] = true
+ }
+
+ for _, lbl := range patternLbls {
+ _, exists := hash[lbl]
+
+ if decoration == Without && exists {
+ return false
+ }
+
+ if decoration == With && !exists {
+ return false
+ }
+ }
+
+ return true
+}
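
A usage sketch; the pattern is a comma-separated label list, and with the "withLabels" decoration a candidate must carry every listed label:

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/pkg/retention/res"
	"github.com/goharbor/harbor/src/pkg/retention/res/selectors/label"
)

func main() {
	sel := label.New(label.With, "label1,label2")

	selected, err := sel.Select([]*res.Candidate{
		{Tag: "1.9", Labels: []string{"label1", "label2", "label3"}},
		{Tag: "dev", Labels: []string{"label1"}},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(selected)) // 1: only the candidate carrying both labels
}
```
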
diff --git a/src/pkg/retention/res/selectors/label/selector_test.go b/src/pkg/retention/res/selectors/label/selector_test.go
new file mode 100644
index 000000000..6bf58118a
--- /dev/null
+++ b/src/pkg/retention/res/selectors/label/selector_test.go
@@ -0,0 +1,148 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package label
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/goharbor/harbor/src/pkg/retention/res"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+)
+
+// LabelSelectorTestSuite is a suite for testing the label selector
+type LabelSelectorTestSuite struct {
+ suite.Suite
+
+ artifacts []*res.Candidate
+}
+
+// TestLabelSelector is the entry point for LabelSelectorTestSuite
+func TestLabelSelector(t *testing.T) {
+ suite.Run(t, new(LabelSelectorTestSuite))
+}
+
+// SetupSuite to do preparation work
+func (suite *LabelSelectorTestSuite) SetupSuite() {
+ suite.artifacts = []*res.Candidate{
+ {
+ NamespaceID: 1,
+ Namespace: "library",
+ Repository: "harbor",
+ Tag: "1.9",
+ Kind: res.Image,
+ PushedTime: time.Now().Unix() - 3600,
+ PulledTime: time.Now().Unix(),
+ CreationTime: time.Now().Unix() - 7200,
+ Labels: []string{"label1", "label2", "label3"},
+ },
+ {
+ NamespaceID: 1,
+ Namespace: "library",
+ Repository: "harbor",
+ Tag: "dev",
+ Kind: res.Image,
+ PushedTime: time.Now().Unix() - 3600,
+ PulledTime: time.Now().Unix(),
+ CreationTime: time.Now().Unix() - 7200,
+ Labels: []string{"label1", "label4", "label5"},
+ },
+ }
+}
+
+// TestWithLabelsUnMatched tests the selector of `with` labels but nothing matched
+func (suite *LabelSelectorTestSuite) TestWithLabelsUnMatched() {
+ withNothing := &selector{
+ decoration: With,
+ labels: []string{"label6"},
+ }
+
+ selected, err := withNothing.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 0, len(selected))
+}
+
+// TestWithLabelsMatched tests the selector of `with` labels and matched something
+func (suite *LabelSelectorTestSuite) TestWithLabelsMatched() {
+ with1 := &selector{
+ decoration: With,
+ labels: []string{"label2"},
+ }
+
+ selected, err := with1.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 1, len(selected))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"harbor:1.9"}, selected)
+ })
+
+ with2 := &selector{
+ decoration: With,
+ labels: []string{"label1"},
+ }
+
+ selected2, err := with2.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 2, len(selected2))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"harbor:1.9", "harbor:dev"}, selected2)
+ })
+}
+
+// TestWithoutExistingLabels tests the selector of `without` existing labels
+func (suite *LabelSelectorTestSuite) TestWithoutExistingLabels() {
+ withoutExisting := &selector{
+ decoration: Without,
+ labels: []string{"label1"},
+ }
+
+ selected, err := withoutExisting.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 0, len(selected))
+}
+
+// TestWithoutNoneExistingLabels tests the selector of `without` non-existing labels
+func (suite *LabelSelectorTestSuite) TestWithoutNoneExistingLabels() {
+ withoutNonExisting := &selector{
+ decoration: Without,
+ labels: []string{"label6"},
+ }
+
+ selected, err := withoutNonExisting.Select(suite.artifacts)
+ require.NoError(suite.T(), err)
+ assert.Equal(suite.T(), 2, len(selected))
+ assert.Condition(suite.T(), func() bool {
+ return expect([]string{"harbor:1.9", "harbor:dev"}, selected)
+ })
+}
+
+// Check whether the returned result matched the expected ones (only check repo:tag)
+func expect(expected []string, candidates []*res.Candidate) bool {
+ hash := make(map[string]bool)
+
+ for _, art := range candidates {
+ hash[fmt.Sprintf("%s:%s", art.Repository, art.Tag)] = true
+ }
+
+ for _, exp := range expected {
+ if _, ok := hash[exp]; !ok {
+			return false
+ }
+ }
+
+ return true
+}
diff --git a/src/pkg/scan/vuln.go b/src/pkg/scan/vuln.go
new file mode 100644
index 000000000..a4ccac027
--- /dev/null
+++ b/src/pkg/scan/vuln.go
@@ -0,0 +1,136 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scan
+
+import (
+	"fmt"
+
+	"github.com/goharbor/harbor/src/common/dao"
+	"github.com/goharbor/harbor/src/common/models"
+	"github.com/goharbor/harbor/src/common/utils/clair"
+	"github.com/goharbor/harbor/src/common/utils/log"
+	"github.com/goharbor/harbor/src/core/config"
+)
+
+// VulnerabilityItem represents a vulnerability reported by scanner
+type VulnerabilityItem struct {
+ ID string `json:"id"`
+ Severity models.Severity `json:"severity"`
+ Pkg string `json:"package"`
+ Version string `json:"version"`
+ Description string `json:"description"`
+ Link string `json:"link"`
+ Fixed string `json:"fixedVersion,omitempty"`
+}
+
+// VulnerabilityList is a list of vulnerabilities, which should be scanner-agnostic
+type VulnerabilityList []VulnerabilityItem
+
+// ApplyWhitelist filters out the CVEs defined in the given whitelist.
+// It returns the filtered-out items so the caller can track or log them.
+func (vl *VulnerabilityList) ApplyWhitelist(whitelist models.CVEWhitelist) VulnerabilityList {
+ filtered := VulnerabilityList{}
+ if whitelist.IsExpired() {
+ log.Info("The input whitelist is expired, skip filtering")
+ return filtered
+ }
+ s := whitelist.CVESet()
+ r := (*vl)[:0]
+ for _, v := range *vl {
+ if _, ok := s[v.ID]; ok {
+ log.Debugf("Filtered Vulnerability in whitelist, CVE ID: %s, severity: %s", v.ID, v.Severity)
+ filtered = append(filtered, v)
+ } else {
+ r = append(r, v)
+ }
+ }
+	// shrink the receiver in place so it keeps only the remaining items
+	*vl = r
+ return filtered
+}
+
+// Severity returns the highest severity of the vulnerabilities in the list
+func (vl *VulnerabilityList) Severity() models.Severity {
+ s := models.SevNone
+ for _, v := range *vl {
+ if v.Severity > s {
+ s = v.Severity
+ }
+ }
+ return s
+}
+
+// HasCVE returns whether the vulnerability list contains the vulnerability with the given CVE ID
+func (vl *VulnerabilityList) HasCVE(id string) bool {
+ for _, v := range *vl {
+ if v.ID == id {
+ return true
+ }
+ }
+ return false
+}
+
+// VulnListFromClairResult transforms the returned value of Clair API to a VulnerabilityList
+func VulnListFromClairResult(layerWithVuln *models.ClairLayerEnvelope) VulnerabilityList {
+ res := VulnerabilityList{}
+ if layerWithVuln == nil {
+ return res
+ }
+ l := layerWithVuln.Layer
+ if l == nil {
+ return res
+ }
+ features := l.Features
+ if features == nil {
+ return res
+ }
+ for _, f := range features {
+ vulnerabilities := f.Vulnerabilities
+ if vulnerabilities == nil {
+ continue
+ }
+ for _, v := range vulnerabilities {
+ vItem := VulnerabilityItem{
+ ID: v.Name,
+ Pkg: f.Name,
+ Version: f.Version,
+ Severity: clair.ParseClairSev(v.Severity),
+ Fixed: v.FixedBy,
+ Link: v.Link,
+ Description: v.Description,
+ }
+ res = append(res, vItem)
+ }
+ }
+ return res
+}
+
+// VulnListByDigest returns the VulnerabilityList based on the scan result of the artifact with the given digest
+func VulnListByDigest(digest string) (VulnerabilityList, error) {
+ var res VulnerabilityList
+ overview, err := dao.GetImgScanOverview(digest)
+ if err != nil {
+ return res, err
+ }
+ if overview == nil || len(overview.DetailsKey) == 0 {
+ return res, fmt.Errorf("unable to get the scan result for digest: %s, the artifact is not scanned", digest)
+ }
+ c := clair.NewClient(config.ClairEndpoint(), nil)
+ clairRes, err := c.GetResult(overview.DetailsKey)
+ if err != nil {
+ return res, fmt.Errorf("failed to get scan result from Clair, error: %v", err)
+ }
+ return VulnListFromClairResult(clairRes), nil
+}
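
A sketch of the whitelist flow on an in-memory list (values invented; note that ApplyWhitelist mutates the receiver):

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/models"
	"github.com/goharbor/harbor/src/pkg/scan"
)

func main() {
	vl := scan.VulnerabilityList{
		{ID: "CVE-2018-10754", Severity: models.SevLow},
		{ID: "CVE-2018-6485", Severity: models.SevHigh},
	}
	wl := models.CVEWhitelist{
		Items: []models.CVEWhitelistItem{{CVEID: "CVE-2018-6485"}},
	}

	filtered := vl.ApplyWhitelist(wl)

	// 1 item was filtered out; the remaining highest severity drops to low
	fmt.Println(len(filtered), vl.Severity())
}
```
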
diff --git a/src/pkg/scan/vuln_test.go b/src/pkg/scan/vuln_test.go
new file mode 100644
index 000000000..fce594cd9
--- /dev/null
+++ b/src/pkg/scan/vuln_test.go
@@ -0,0 +1,178 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scan
+
+import (
+	"os"
+	"testing"
+
+	"github.com/goharbor/harbor/src/common/dao"
+	"github.com/goharbor/harbor/src/common/models"
+	"github.com/stretchr/testify/assert"
+)
+
+var (
+ past = int64(1561967574)
+ vulnList1 = VulnerabilityList{}
+ vulnList2 = VulnerabilityList{
+ {ID: "CVE-2018-10754",
+ Severity: models.SevLow,
+ Pkg: "ncurses",
+ Version: "6.0+20161126-1+deb9u2",
+ },
+ {
+ ID: "CVE-2018-6485",
+ Severity: models.SevHigh,
+ Pkg: "glibc",
+ Version: "2.24-11+deb9u4",
+ },
+ }
+ whiteList1 = models.CVEWhitelist{
+ ExpiresAt: &past,
+ Items: []models.CVEWhitelistItem{
+ {CVEID: "CVE-2018-6485"},
+ },
+ }
+ whiteList2 = models.CVEWhitelist{
+ Items: []models.CVEWhitelistItem{
+ {CVEID: "CVE-2018-6485"},
+ },
+ }
+ whiteList3 = models.CVEWhitelist{
+ Items: []models.CVEWhitelistItem{
+ {CVEID: "CVE-2018-6485"},
+ {CVEID: "CVE-2018-10754"},
+ {CVEID: "CVE-2019-12817"},
+ },
+ }
+)
+
+func TestMain(m *testing.M) {
+ dao.PrepareTestForPostgresSQL()
+ os.Exit(m.Run())
+}
+
+func TestVulnerabilityList_HasCVE(t *testing.T) {
+ cases := []struct {
+ input VulnerabilityList
+ cve string
+ result bool
+ }{
+ {
+ input: vulnList1,
+ cve: "CVE-2018-10754",
+ result: false,
+ },
+ {
+ input: vulnList2,
+ cve: "CVE-2018-10754",
+ result: true,
+ },
+ }
+ for _, c := range cases {
+ assert.Equal(t, c.result, c.input.HasCVE(c.cve))
+ }
+}
+
+func TestVulnerabilityList_Severity(t *testing.T) {
+ cases := []struct {
+ input VulnerabilityList
+ expect models.Severity
+ }{
+ {
+ input: vulnList1,
+ expect: models.SevNone,
+ }, {
+ input: vulnList2,
+ expect: models.SevHigh,
+ },
+ }
+ for _, c := range cases {
+ assert.Equal(t, c.expect, c.input.Severity())
+ }
+}
+
+func TestVulnerabilityList_ApplyWhitelist(t *testing.T) {
+ cases := []struct {
+ vl VulnerabilityList
+ wl models.CVEWhitelist
+ expectFiltered VulnerabilityList
+ expectSev models.Severity
+ }{
+ {
+ vl: vulnList2,
+ wl: whiteList1,
+ expectFiltered: VulnerabilityList{},
+ expectSev: models.SevHigh,
+ },
+ {
+ vl: vulnList2,
+ wl: whiteList2,
+ expectFiltered: VulnerabilityList{
+ {
+ ID: "CVE-2018-6485",
+ Severity: models.SevHigh,
+ Pkg: "glibc",
+ Version: "2.24-11+deb9u4",
+ },
+ },
+ expectSev: models.SevLow,
+ },
+ {
+ vl: vulnList1,
+ wl: whiteList3,
+ expectFiltered: VulnerabilityList{},
+ expectSev: models.SevNone,
+ },
+ {
+ vl: vulnList2,
+ wl: whiteList3,
+ expectFiltered: VulnerabilityList{
+ {ID: "CVE-2018-10754",
+ Severity: models.SevLow,
+ Pkg: "ncurses",
+ Version: "6.0+20161126-1+deb9u2",
+ },
+ {
+ ID: "CVE-2018-6485",
+ Severity: models.SevHigh,
+ Pkg: "glibc",
+ Version: "2.24-11+deb9u4",
+ },
+ },
+ expectSev: models.SevNone,
+ },
+ }
+ for _, c := range cases {
+ filtered := c.vl.ApplyWhitelist(c.wl)
+ assert.Equal(t, c.expectFiltered, filtered)
+		assert.Equal(t, c.expectSev, c.vl.Severity())
+ }
+}
+
+func TestVulnListByDigest(t *testing.T) {
+ _, err := VulnListByDigest("notexist")
+ assert.NotNil(t, err)
+}
+
+func TestVulnListFromClairResult(t *testing.T) {
+ l := VulnListFromClairResult(nil)
+ assert.Equal(t, VulnerabilityList{}, l)
+ lv := &models.ClairLayerEnvelope{
+ Layer: nil,
+ Error: nil,
+ }
+ l2 := VulnListFromClairResult(lv)
+ assert.Equal(t, VulnerabilityList{}, l2)
+}
diff --git a/src/pkg/scan/whitelist/manager.go b/src/pkg/scan/whitelist/manager.go
new file mode 100644
index 000000000..d582e3f10
--- /dev/null
+++ b/src/pkg/scan/whitelist/manager.go
@@ -0,0 +1,85 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package whitelist
+
+import (
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/jobservice/logger"
+)
+
+// Manager defines the interface of the CVE whitelist manager; it supports both system-level and project-level whitelists
+type Manager interface {
+ // CreateEmpty creates empty whitelist for given project
+ CreateEmpty(projectID int64) error
+ // Set sets the whitelist for given project (create or update)
+ Set(projectID int64, list models.CVEWhitelist) error
+ // Get gets the whitelist for given project
+ Get(projectID int64) (*models.CVEWhitelist, error)
+ // SetSys sets system level whitelist
+ SetSys(list models.CVEWhitelist) error
+ // GetSys gets system level whitelist
+ GetSys() (*models.CVEWhitelist, error)
+}
+
+type defaultManager struct{}
+
+// CreateEmpty creates empty whitelist for given project
+func (d *defaultManager) CreateEmpty(projectID int64) error {
+ l := models.CVEWhitelist{
+ ProjectID: projectID,
+ }
+ _, err := dao.CreateCVEWhitelist(l)
+ if err != nil {
+ logger.Errorf("Failed to create empty CVE whitelist for project: %d, error: %v", projectID, err)
+ }
+ return err
+}
+
+// Set sets the whitelist for given project (create or update)
+func (d *defaultManager) Set(projectID int64, list models.CVEWhitelist) error {
+ list.ProjectID = projectID
+ if err := Validate(list); err != nil {
+ return err
+ }
+ _, err := dao.UpdateCVEWhitelist(list)
+ return err
+}
+
+// Get gets the whitelist for given project
+func (d *defaultManager) Get(projectID int64) (*models.CVEWhitelist, error) {
+ wl, err := dao.GetCVEWhitelist(projectID)
+ if wl == nil && err == nil {
+ log.Debugf("No CVE whitelist found for project %d, returning empty list.", projectID)
+ return &models.CVEWhitelist{ProjectID: projectID, Items: []models.CVEWhitelistItem{}}, nil
+ }
+ return wl, err
+}
+
+// SetSys sets the system level whitelist
+func (d *defaultManager) SetSys(list models.CVEWhitelist) error {
+ return d.Set(0, list)
+}
+
+// GetSys gets the system level whitelist
+func (d *defaultManager) GetSys() (*models.CVEWhitelist, error) {
+ return d.Get(0)
+}
+
+// NewDefaultManager returns a new instance of defaultManager
+func NewDefaultManager() Manager {
+ return &defaultManager{}
+}
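
A sketch of the manager in use; this assumes an initialized database connection, since the calls go straight to the DAO layer:

```go
package main

import (
	"github.com/goharbor/harbor/src/common/models"
	"github.com/goharbor/harbor/src/pkg/scan/whitelist"
)

func main() {
	mgr := whitelist.NewDefaultManager()

	// Project-level list: Set validates the items before persisting
	err := mgr.Set(1, models.CVEWhitelist{
		Items: []models.CVEWhitelistItem{{CVEID: "CVE-2019-12817"}},
	})
	if err != nil {
		panic(err)
	}

	// The system-level list lives under the reserved project ID 0
	if _, err := mgr.GetSys(); err != nil {
		panic(err)
	}
}
```
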
diff --git a/src/pkg/scan/whitelist/manager_test.go b/src/pkg/scan/whitelist/manager_test.go
new file mode 100644
index 000000000..8dbf6da37
--- /dev/null
+++ b/src/pkg/scan/whitelist/manager_test.go
@@ -0,0 +1,46 @@
+package whitelist
+
+import (
+	"os"
+	"testing"
+
+	"github.com/goharbor/harbor/src/common/dao"
+	"github.com/goharbor/harbor/src/common/utils/log"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestMain(m *testing.M) {
+
+ // databases := []string{"mysql", "sqlite"}
+ databases := []string{"postgresql"}
+ for _, database := range databases {
+ log.Infof("run test cases for database: %s", database)
+
+ result := 1
+ switch database {
+ case "postgresql":
+ dao.PrepareTestForPostgresSQL()
+ default:
+ log.Fatalf("invalid database: %s", database)
+ }
+
+ result = m.Run()
+
+ if result != 0 {
+ os.Exit(result)
+ }
+ }
+}
+
+func TestDefaultManager_CreateEmpty(t *testing.T) {
+ dm := NewDefaultManager()
+ assert.NoError(t, dm.CreateEmpty(99))
+ assert.Error(t, dm.CreateEmpty(99))
+}
+
+func TestDefaultManager_Get(t *testing.T) {
+ dm := NewDefaultManager()
+ // return empty list
+ l, err := dm.Get(1234)
+ assert.Nil(t, err)
+ assert.Empty(t, l.Items)
+}
diff --git a/src/pkg/scan/whitelist/validator.go b/src/pkg/scan/whitelist/validator.go
new file mode 100644
index 000000000..cef2a17df
--- /dev/null
+++ b/src/pkg/scan/whitelist/validator.go
@@ -0,0 +1,60 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package whitelist
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/goharbor/harbor/src/common/models"
+)
+
+type invalidErr struct {
+ msg string
+}
+
+func (ie *invalidErr) Error() string {
+ return ie.msg
+}
+
+// NewInvalidErr ...
+func NewInvalidErr(s string) error {
+ return &invalidErr{
+ msg: s,
+ }
+}
+
+// IsInvalidErr checks if the error is an invalidErr
+func IsInvalidErr(err error) bool {
+ _, ok := err.(*invalidErr)
+ return ok
+}
+
+const cveIDPattern = `^CVE-\d{4}-\d+$`
+
+// Validate validates the CVE whitelist, ensuring each CVE ID is valid and there is no duplication
+func Validate(wl models.CVEWhitelist) error {
+ m := map[string]struct{}{}
+ re := regexp.MustCompile(cveIDPattern)
+ for _, it := range wl.Items {
+ if !re.MatchString(it.CVEID) {
+ return &invalidErr{fmt.Sprintf("invalid CVE ID: %s", it.CVEID)}
+ }
+ if _, ok := m[it.CVEID]; ok {
+ return &invalidErr{fmt.Sprintf("duplicate CVE ID in whitelist: %s", it.CVEID)}
+ }
+ m[it.CVEID] = struct{}{}
+ }
+ return nil
+}
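
A quick sketch of the validator's behavior on a list with one malformed ID:

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/common/models"
	"github.com/goharbor/harbor/src/pkg/scan/whitelist"
)

func main() {
	err := whitelist.Validate(models.CVEWhitelist{
		Items: []models.CVEWhitelistItem{
			{CVEID: "CVE-2019-12817"},
			{CVEID: "not-a-cve"},
		},
	})

	// Prints: true invalid CVE ID: not-a-cve
	fmt.Println(whitelist.IsInvalidErr(err), err)
}
```
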
diff --git a/src/pkg/scan/whitelist/validator_test.go b/src/pkg/scan/whitelist/validator_test.go
new file mode 100644
index 000000000..e147d2364
--- /dev/null
+++ b/src/pkg/scan/whitelist/validator_test.go
@@ -0,0 +1,102 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package whitelist
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/goharbor/harbor/src/common/models"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestIsInvalidErr(t *testing.T) {
+ cases := []struct {
+ instance error
+ expect bool
+ }{
+ {
+ instance: nil,
+ expect: false,
+ },
+ {
+ instance: fmt.Errorf("whatever"),
+ expect: false,
+ },
+ {
+ instance: NewInvalidErr("This is true"),
+ expect: true,
+ },
+ }
+
+ for n, c := range cases {
+ t.Logf("Executing TestIsInvalidErr case: %d\n", n)
+ assert.Equal(t, c.expect, IsInvalidErr(c.instance))
+ }
+}
+
+func TestValidate(t *testing.T) {
+ cases := []struct {
+ l models.CVEWhitelist
+ noError bool
+ }{
+ {
+ l: models.CVEWhitelist{
+ Items: nil,
+ },
+ noError: true,
+ },
+ {
+ l: models.CVEWhitelist{
+ Items: []models.CVEWhitelistItem{},
+ },
+ noError: true,
+ },
+ {
+ l: models.CVEWhitelist{
+ Items: []models.CVEWhitelistItem{
+ {CVEID: "breakit"},
+ },
+ },
+ noError: false,
+ },
+ {
+ l: models.CVEWhitelist{
+ Items: []models.CVEWhitelistItem{
+ {CVEID: "CVE-2014-456132"},
+ {CVEID: "CVE-2014-7654321"},
+ },
+ },
+ noError: true,
+ },
+ {
+ l: models.CVEWhitelist{
+ Items: []models.CVEWhitelistItem{
+ {CVEID: "CVE-2014-456132"},
+ {CVEID: "CVE-2014-456132"},
+ {CVEID: "CVE-2014-7654321"},
+ },
+ },
+ noError: false,
+ },
+ }
+ for n, c := range cases {
+ t.Logf("Executing TestValidate case: %d\n", n)
+ e := Validate(c.l)
+ assert.Equal(t, c.noError, e == nil)
+ if e != nil {
+ assert.True(t, IsInvalidErr(e))
+ }
+ }
+}
diff --git a/src/pkg/scheduler/dao/schedule.go b/src/pkg/scheduler/dao/schedule.go
new file mode 100644
index 000000000..1728556e4
--- /dev/null
+++ b/src/pkg/scheduler/dao/schedule.go
@@ -0,0 +1,99 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/astaxie/beego/orm"
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/pkg/scheduler/model"
+)
+
+// ScheduleDao defines the methods that a schedule data access model should implement
+type ScheduleDao interface {
+ Create(*model.Schedule) (int64, error)
+ Update(*model.Schedule, ...string) error
+ Delete(int64) error
+ Get(int64) (*model.Schedule, error)
+ List(...*model.ScheduleQuery) ([]*model.Schedule, error)
+}
+
+// New returns an instance of the default schedule data access model implementation
+func New() ScheduleDao {
+ return &scheduleDao{}
+}
+
+type scheduleDao struct{}
+
+func (s *scheduleDao) Create(schedule *model.Schedule) (int64, error) {
+ if schedule == nil {
+ return 0, errors.New("nil schedule")
+ }
+ now := time.Now()
+ schedule.CreationTime = &now
+ schedule.UpdateTime = &now
+ return dao.GetOrmer().Insert(schedule)
+}
+
+func (s *scheduleDao) Update(schedule *model.Schedule, cols ...string) error {
+ if schedule == nil {
+ return errors.New("nil schedule")
+ }
+ if schedule.ID <= 0 {
+ return fmt.Errorf("invalid ID: %d", schedule.ID)
+ }
+ now := time.Now()
+ schedule.UpdateTime = &now
+ _, err := dao.GetOrmer().Update(schedule, cols...)
+ return err
+}
+
+func (s *scheduleDao) Delete(id int64) error {
+ _, err := dao.GetOrmer().Delete(&model.Schedule{
+ ID: id,
+ })
+ return err
+}
+
+func (s *scheduleDao) Get(id int64) (*model.Schedule, error) {
+ schedule := &model.Schedule{
+ ID: id,
+ }
+ if err := dao.GetOrmer().Read(schedule); err != nil {
+ if err == orm.ErrNoRows {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return schedule, nil
+}
+
+func (s *scheduleDao) List(query ...*model.ScheduleQuery) ([]*model.Schedule, error) {
+ qs := dao.GetOrmer().QueryTable(&model.Schedule{})
+ if len(query) > 0 && query[0] != nil {
+ if len(query[0].JobID) > 0 {
+ qs = qs.Filter("JobID", query[0].JobID)
+ }
+ }
+ schedules := []*model.Schedule{}
+ _, err := qs.All(&schedules)
+ if err != nil {
+ return nil, err
+ }
+ return schedules, nil
+}
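
A sketch of the DAO life cycle, assuming an initialized ormer and database (it mirrors the tests below):

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/pkg/scheduler/dao"
	"github.com/goharbor/harbor/src/pkg/scheduler/model"
)

func main() {
	d := dao.New()

	id, err := d.Create(&model.Schedule{JobID: "1", Status: "pending"})
	if err != nil {
		panic(err)
	}

	// Update only the named columns; UpdateTime is refreshed by the DAO
	if err := d.Update(&model.Schedule{ID: id, Status: "running"}, "Status", "UpdateTime"); err != nil {
		panic(err)
	}

	schedules, err := d.List(&model.ScheduleQuery{JobID: "1"})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(schedules))
}
```
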
diff --git a/src/pkg/scheduler/dao/schedule_test.go b/src/pkg/scheduler/dao/schedule_test.go
new file mode 100644
index 000000000..60acf4f23
--- /dev/null
+++ b/src/pkg/scheduler/dao/schedule_test.go
@@ -0,0 +1,122 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dao
+
+import (
+	"testing"
+
+	"github.com/goharbor/harbor/src/common/dao"
+	"github.com/goharbor/harbor/src/pkg/scheduler/model"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/stretchr/testify/suite"
+)
+
+var schDao = &scheduleDao{}
+
+type scheduleTestSuite struct {
+ suite.Suite
+ scheduleID int64
+}
+
+func (s *scheduleTestSuite) SetupSuite() {
+ dao.PrepareTestForPostgresSQL()
+}
+
+func (s *scheduleTestSuite) SetupTest() {
+ t := s.T()
+ id, err := schDao.Create(&model.Schedule{
+ JobID: "1",
+ Status: "pending",
+ })
+ require.Nil(t, err)
+ s.scheduleID = id
+}
+func (s *scheduleTestSuite) TearDownTest() {
+ // clear
+ dao.GetOrmer().Raw("delete from schedule").Exec()
+}
+
+func (s *scheduleTestSuite) TestCreate() {
+ t := s.T()
+ // nil schedule
+ _, err := schDao.Create(nil)
+ require.NotNil(t, err)
+
+ // pass
+ _, err = schDao.Create(&model.Schedule{
+ JobID: "1",
+ })
+ require.Nil(t, err)
+}
+
+func (s *scheduleTestSuite) TestUpdate() {
+ t := s.T()
+ // nil schedule
+ err := schDao.Update(nil)
+ require.NotNil(t, err)
+
+ // invalid ID
+ err = schDao.Update(&model.Schedule{})
+ require.NotNil(t, err)
+
+ // pass
+ err = schDao.Update(&model.Schedule{
+ ID: s.scheduleID,
+ Status: "running",
+ })
+ require.Nil(t, err)
+ schedule, err := schDao.Get(s.scheduleID)
+ require.Nil(t, err)
+ assert.Equal(t, "running", schedule.Status)
+}
+
+func (s *scheduleTestSuite) TestDelete() {
+ t := s.T()
+ err := schDao.Delete(s.scheduleID)
+ require.Nil(t, err)
+ schedule, err := schDao.Get(s.scheduleID)
+ require.Nil(t, err)
+ assert.Nil(t, schedule)
+}
+
+func (s *scheduleTestSuite) TestGet() {
+ t := s.T()
+ schedule, err := schDao.Get(s.scheduleID)
+ require.Nil(t, err)
+ assert.Equal(t, "pending", schedule.Status)
+}
+
+func (s *scheduleTestSuite) TestList() {
+ t := s.T()
+ // nil query
+ schedules, err := schDao.List()
+ require.Nil(t, err)
+ require.Equal(t, 1, len(schedules))
+ assert.Equal(t, s.scheduleID, schedules[0].ID)
+
+ // query by job ID
+ schedules, err = schDao.List(&model.ScheduleQuery{
+ JobID: "1",
+ })
+ require.Nil(t, err)
+ require.Equal(t, 1, len(schedules))
+ assert.Equal(t, s.scheduleID, schedules[0].ID)
+}
+
+func TestScheduleDao(t *testing.T) {
+ suite.Run(t, &scheduleTestSuite{})
+}
diff --git a/src/pkg/scheduler/hook/handler.go b/src/pkg/scheduler/hook/handler.go
new file mode 100644
index 000000000..f176850fa
--- /dev/null
+++ b/src/pkg/scheduler/hook/handler.go
@@ -0,0 +1,59 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package hook
+
+import (
+ "time"
+
+ "github.com/goharbor/harbor/src/pkg/scheduler"
+ "github.com/goharbor/harbor/src/pkg/scheduler/model"
+)
+
+// GlobalController is an instance of the default controller that can be used globally
+var GlobalController = NewController()
+
+// Controller updates the scheduler job status or runs the callback function
+type Controller interface {
+ UpdateStatus(scheduleID int64, status string) error
+ Run(callbackFuncName string, params interface{}) error
+}
+
+// NewController returns an instance of the default controller
+func NewController() Controller {
+ return &controller{
+ manager: scheduler.GlobalManager,
+ }
+}
+
+type controller struct {
+ manager scheduler.Manager
+}
+
+func (c *controller) UpdateStatus(scheduleID int64, status string) error {
+ now := time.Now()
+ return c.manager.Update(&model.Schedule{
+ ID: scheduleID,
+ Status: status,
+ UpdateTime: &now,
+ }, "Status", "UpdateTime")
+}
+
+func (c *controller) Run(callbackFuncName string, params interface{}) error {
+ f, err := scheduler.GetCallbackFunc(callbackFuncName)
+ if err != nil {
+ return err
+ }
+ return f(params)
+}
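
The controller bridges Jobservice's web hooks and the scheduler package: a plain status change is persisted through the manager, while a check-in from the periodic job triggers the registered callback. Below is a hypothetical sketch of a core-side handler driving it; the event struct is an assumption made for illustration, not the actual payload of Harbor's notification endpoint.

```go
package notification

import "github.com/goharbor/harbor/src/pkg/scheduler/hook"

// event is a made-up shape standing in for the decoded web hook payload
type event struct {
	ScheduleID       int64
	Status           string
	CallbackFuncName string      // non-empty when the periodic job checked in
	Params           interface{} // decoded from the checked-in JSON
}

func handle(e *event) error {
	if e.CallbackFuncName != "" {
		// run the callback that was registered under this name
		return hook.GlobalController.Run(e.CallbackFuncName, e.Params)
	}
	// otherwise just record the new status of the schedule
	return hook.GlobalController.UpdateStatus(e.ScheduleID, e.Status)
}
```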
diff --git a/src/pkg/scheduler/hook/handler_test.go b/src/pkg/scheduler/hook/handler_test.go
new file mode 100644
index 000000000..99875ae5f
--- /dev/null
+++ b/src/pkg/scheduler/hook/handler_test.go
@@ -0,0 +1,56 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package hook
+
+import (
+ "testing"
+
+ "github.com/goharbor/harbor/src/pkg/scheduler"
+ "github.com/goharbor/harbor/src/pkg/scheduler/model"
+ htesting "github.com/goharbor/harbor/src/testing"
+ "github.com/stretchr/testify/require"
+)
+
+var h = &controller{
+ manager: &htesting.FakeSchedulerManager{},
+}
+
+func TestUpdateStatus(t *testing.T) {
+ // task not exist
+ err := h.UpdateStatus(1, "running")
+ require.NotNil(t, err)
+
+ // pass
+ h.manager.(*htesting.FakeSchedulerManager).Schedules = []*model.Schedule{
+ {
+ ID: 1,
+ Status: "",
+ },
+ }
+ err = h.UpdateStatus(1, "running")
+ require.Nil(t, err)
+}
+
+func TestRun(t *testing.T) {
+ // callback function not exist
+ err := h.Run("not-exist", nil)
+ require.NotNil(t, err)
+
+ // pass
+ err = scheduler.Register("callback", func(interface{}) error { return nil })
+ require.Nil(t, err)
+ err = h.Run("callback", nil)
+ require.Nil(t, err)
+}
diff --git a/src/pkg/scheduler/manager.go b/src/pkg/scheduler/manager.go
new file mode 100644
index 000000000..735e89873
--- /dev/null
+++ b/src/pkg/scheduler/manager.go
@@ -0,0 +1,66 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scheduler
+
+import (
+ "github.com/goharbor/harbor/src/pkg/scheduler/dao"
+ "github.com/goharbor/harbor/src/pkg/scheduler/model"
+)
+
+var (
+ // GlobalManager is an instance of the default manager that
+ // can be used globally
+ GlobalManager = NewManager()
+)
+
+// Manager manages the schedule records of the scheduler
+type Manager interface {
+ Create(*model.Schedule) (int64, error)
+ Update(*model.Schedule, ...string) error
+ Delete(int64) error
+ Get(int64) (*model.Schedule, error)
+ List(...*model.ScheduleQuery) ([]*model.Schedule, error)
+}
+
+// NewManager returns an instance of the default manager
+func NewManager() Manager {
+ return &manager{
+ scheduleDao: dao.New(),
+ }
+}
+
+type manager struct {
+ scheduleDao dao.ScheduleDao
+}
+
+func (m *manager) Create(schedule *model.Schedule) (int64, error) {
+ return m.scheduleDao.Create(schedule)
+}
+
+func (m *manager) Update(schedule *model.Schedule, props ...string) error {
+ return m.scheduleDao.Update(schedule, props...)
+}
+
+func (m *manager) Delete(id int64) error {
+ return m.scheduleDao.Delete(id)
+}
+
+func (m *manager) List(query ...*model.ScheduleQuery) ([]*model.Schedule, error) {
+ return m.scheduleDao.List(query...)
+}
+
+func (m *manager) Get(id int64) (*model.Schedule, error) {
+ return m.scheduleDao.Get(id)
+}
diff --git a/src/pkg/scheduler/manager_test.go b/src/pkg/scheduler/manager_test.go
new file mode 100644
index 000000000..b9f59b358
--- /dev/null
+++ b/src/pkg/scheduler/manager_test.go
@@ -0,0 +1,110 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scheduler
+
+import (
+ "testing"
+
+ "github.com/goharbor/harbor/src/pkg/scheduler/model"
+ "github.com/stretchr/testify/mock"
+ "github.com/stretchr/testify/suite"
+)
+
+var mgr *manager
+
+type fakeScheduleDao struct {
+ schedules []*model.Schedule
+ mock.Mock
+}
+
+func (f *fakeScheduleDao) Create(*model.Schedule) (int64, error) {
+ f.Called()
+ return 1, nil
+}
+func (f *fakeScheduleDao) Update(*model.Schedule, ...string) error {
+ f.Called()
+ return nil
+}
+func (f *fakeScheduleDao) Delete(int64) error {
+ f.Called()
+ return nil
+}
+func (f *fakeScheduleDao) Get(int64) (*model.Schedule, error) {
+ f.Called()
+ return nil, nil
+}
+func (f *fakeScheduleDao) List(query ...*model.ScheduleQuery) ([]*model.Schedule, error) {
+ f.Called()
+ if len(query) == 0 || query[0] == nil {
+ return f.schedules, nil
+ }
+ result := []*model.Schedule{}
+ for _, sch := range f.schedules {
+ if sch.JobID == query[0].JobID {
+ result = append(result, sch)
+ }
+ }
+ return result, nil
+}
+
+type managerTestSuite struct {
+ suite.Suite
+}
+
+func (m *managerTestSuite) SetupTest() {
+ // recreate schedule manager
+ mgr = &manager{
+ scheduleDao: &fakeScheduleDao{},
+ }
+}
+
+func (m *managerTestSuite) TestCreate() {
+ t := m.T()
+ mgr.scheduleDao.(*fakeScheduleDao).On("Create", mock.Anything)
+ mgr.Create(nil)
+ mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "Create")
+}
+
+func (m *managerTestSuite) TestUpdate() {
+ t := m.T()
+ mgr.scheduleDao.(*fakeScheduleDao).On("Update", mock.Anything)
+ mgr.Update(nil)
+ mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "Update")
+}
+
+func (m *managerTestSuite) TestDelete() {
+ t := m.T()
+ mgr.scheduleDao.(*fakeScheduleDao).On("Delete", mock.Anything)
+ mgr.Delete(1)
+ mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "Delete")
+}
+
+func (m *managerTestSuite) TestGet() {
+ t := m.T()
+ mgr.scheduleDao.(*fakeScheduleDao).On("Get", mock.Anything)
+ mgr.Get(1)
+ mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "Get")
+}
+
+func (m *managerTestSuite) TestList() {
+ t := m.T()
+ mgr.scheduleDao.(*fakeScheduleDao).On("List", mock.Anything)
+ mgr.List(nil)
+ mgr.scheduleDao.(*fakeScheduleDao).AssertCalled(t, "List")
+}
+
+func TestManager(t *testing.T) {
+ suite.Run(t, &managerTestSuite{})
+}
diff --git a/src/pkg/scheduler/model/schedule.go b/src/pkg/scheduler/model/schedule.go
new file mode 100644
index 000000000..3cdbe0a68
--- /dev/null
+++ b/src/pkg/scheduler/model/schedule.go
@@ -0,0 +1,40 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "time"
+
+ "github.com/astaxie/beego/orm"
+)
+
+func init() {
+ orm.RegisterModel(
+ new(Schedule))
+}
+
+// Schedule is a record for a scheduler job
+type Schedule struct {
+ ID int64 `orm:"pk;auto;column(id)" json:"id"`
+ JobID string `orm:"column(job_id)" json:"job_id"`
+ Status string `orm:"column(status)" json:"status"`
+ CreationTime *time.Time `orm:"column(creation_time)" json:"creation_time"`
+ UpdateTime *time.Time `orm:"column(update_time)" json:"update_time"`
+}
+
+// ScheduleQuery is the query used when listing schedules
+type ScheduleQuery struct {
+ JobID string
+}
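
The orm tags bind the struct to the schedule table, and the json tags define how a record appears on the wire. A small sketch of the resulting JSON (field values are arbitrary):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/goharbor/harbor/src/pkg/scheduler/model"
)

func main() {
	now := time.Date(2019, time.June, 1, 0, 0, 0, 0, time.UTC)
	b, err := json.Marshal(&model.Schedule{
		ID:           1,
		JobID:        "abc123",
		Status:       "running",
		CreationTime: &now,
		UpdateTime:   &now,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"id":1,"job_id":"abc123","status":"running","creation_time":"2019-06-01T00:00:00Z","update_time":"2019-06-01T00:00:00Z"}
}
```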
diff --git a/src/pkg/scheduler/periodic_job.go b/src/pkg/scheduler/periodic_job.go
new file mode 100644
index 000000000..f52f47af5
--- /dev/null
+++ b/src/pkg/scheduler/periodic_job.go
@@ -0,0 +1,54 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scheduler
+
+import (
+ "encoding/json"
+
+ "github.com/goharbor/harbor/src/jobservice/job"
+)
+
+// const definitions
+const (
+ // the job name used to register to Jobservice
+ JobNameScheduler = "SCHEDULER"
+)
+
+// PeriodicJob is designed to generate hook events periodically
+type PeriodicJob struct{}
+
+// MaxFails of the job
+func (pj *PeriodicJob) MaxFails() uint {
+ return 3
+}
+
+// ShouldRetry indicates whether the job can be retried if it fails
+func (pj *PeriodicJob) ShouldRetry() bool {
+ return true
+}
+
+// Validate the parameters
+func (pj *PeriodicJob) Validate(params job.Parameters) error {
+ return nil
+}
+
+// Run the job
+func (pj *PeriodicJob) Run(ctx job.Context, params job.Parameters) error {
+ data, err := json.Marshal(params)
+ if err != nil {
+ return err
+ }
+ return ctx.Checkin(string(data))
+}
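
Run simply re-marshals the parameters it was given and checks them in, so what reaches the status hook is the JSON form of whatever the scheduler submitted: the callback name plus its already-encoded params. A sketch of that round trip, with "gc" as a made-up callback name:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// what the scheduler puts into the periodic job's parameters
	params := map[string]interface{}{
		"callback_func": "gc",                // JobParamCallbackFunc
		"params":        `{"dry_run":false}`, // JobParamCallbackFuncParams: already JSON-encoded
	}
	// what PeriodicJob.Run checks in on every tick
	data, err := json.Marshal(params)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	// {"callback_func":"gc","params":"{\"dry_run\":false}"}
}
```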
diff --git a/src/pkg/scheduler/scheduler.go b/src/pkg/scheduler/scheduler.go
new file mode 100644
index 000000000..6fb7d7e87
--- /dev/null
+++ b/src/pkg/scheduler/scheduler.go
@@ -0,0 +1,208 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scheduler
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "sync"
+ "time"
+
+ chttp "github.com/goharbor/harbor/src/common/http"
+ "github.com/goharbor/harbor/src/common/job"
+ "github.com/goharbor/harbor/src/common/job/models"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/pkg/scheduler/model"
+ "github.com/pkg/errors"
+)
+
+// const definitions
+const (
+ JobParamCallbackFunc = "callback_func"
+ JobParamCallbackFuncParams = "params"
+)
+
+var (
+ // GlobalScheduler is an instance of the default scheduler that
+ // can be used globally. Call Init() to initialize it first
+ GlobalScheduler Scheduler
+ registry = make(map[string]CallbackFunc)
+)
+
+// CallbackFunc defines the function that the scheduler calls when triggered
+type CallbackFunc func(interface{}) error
+
+// Scheduler provides the capability to run a periodic task; a callback
+// function needs to be registered before the scheduler is used.
+// The "params" argument is passed to the callback function specified by
+// "callbackFuncName" as an encoded JSON string, so the callback function
+// must decode it before use
+type Scheduler interface {
+ Schedule(cron string, callbackFuncName string, params interface{}) (int64, error)
+ UnSchedule(id int64) error
+}
+
+// Register the callback function under the given name; the function will
+// be called when the scheduler job is triggered
+func Register(name string, callbackFunc CallbackFunc) error {
+ if len(name) == 0 {
+ return errors.New("empty name")
+ }
+ if callbackFunc == nil {
+ return errors.New("callback function is nil")
+ }
+
+ _, exist := registry[name]
+ if exist {
+ return fmt.Errorf("callback function %s already exists", name)
+ }
+ registry[name] = callbackFunc
+
+ return nil
+}
+
+// GetCallbackFunc returns the registered callback function specified by the name
+func GetCallbackFunc(name string) (CallbackFunc, error) {
+ f, exist := registry[name]
+ if !exist {
+ return nil, fmt.Errorf("callback function %s not found", name)
+ }
+ return f, nil
+}
+
+func callbackFuncExist(name string) bool {
+ _, exist := registry[name]
+ return exist
+}
+
+// Init the GlobalScheduler
+func Init() {
+ GlobalScheduler = New(config.InternalCoreURL())
+}
+
+// New returns an instance of the default scheduler
+func New(internalCoreURL string) Scheduler {
+ return &scheduler{
+ internalCoreURL: internalCoreURL,
+ jobserviceClient: job.GlobalClient,
+ manager: GlobalManager,
+ }
+}
+
+type scheduler struct {
+ sync.RWMutex
+ internalCoreURL string
+ manager Manager
+ jobserviceClient job.Client
+}
+
+func (s *scheduler) Schedule(cron string, callbackFuncName string, params interface{}) (int64, error) {
+ if !callbackFuncExist(callbackFuncName) {
+ return 0, fmt.Errorf("callback function %s not found", callbackFuncName)
+ }
+
+ // create schedule record
+ now := time.Now()
+ scheduleID, err := s.manager.Create(&model.Schedule{
+ CreationTime: &now,
+ UpdateTime: &now,
+ })
+ if err != nil {
+ return 0, err
+ }
+ // if an error occurs in the following steps, delete the schedule record from the database
+ defer func() {
+ if err != nil {
+ e := s.manager.Delete(scheduleID)
+ if e != nil {
+ log.Errorf("failed to delete the schedule %d: %v", scheduleID, e)
+ }
+ }
+ }()
+ log.Debugf("the schedule record %d created", scheduleID)
+
+ // submit scheduler job to Jobservice
+ statusHookURL := fmt.Sprintf("%s/service/notifications/schedules/%d", s.internalCoreURL, scheduleID)
+ jd := &models.JobData{
+ Name: JobNameScheduler,
+ Parameters: map[string]interface{}{
+ JobParamCallbackFunc: callbackFuncName,
+ },
+ Metadata: &models.JobMetadata{
+ JobKind: job.JobKindPeriodic,
+ Cron: cron,
+ },
+ StatusHook: statusHookURL,
+ }
+ if params != nil {
+ var paramsData []byte
+ paramsData, err = json.Marshal(params)
+ if err != nil {
+ return 0, err
+ }
+ jd.Parameters[JobParamCallbackFuncParams] = string(paramsData)
+ }
+ jobID, err := s.jobserviceClient.SubmitJob(jd)
+ if err != nil {
+ return 0, err
+ }
+ // if an error occurs in the following steps, stop the scheduler job
+ defer func() {
+ if err != nil {
+ if e := s.jobserviceClient.PostAction(jobID, job.JobActionStop); e != nil {
+ log.Errorf("failed to stop the scheduler job %s: %v", jobID, e)
+ }
+ }
+ }()
+ log.Debugf("the scheduler job submitted to Jobservice, job ID: %s", jobID)
+
+ // populate the job ID for the schedule
+ err = s.manager.Update(&model.Schedule{
+ ID: scheduleID,
+ JobID: jobID,
+ }, "JobID")
+ if err != nil {
+ return 0, err
+ }
+
+ return scheduleID, nil
+}
+
+func (s *scheduler) UnSchedule(id int64) error {
+ schedule, err := s.manager.Get(id)
+ if err != nil {
+ return err
+ }
+ if schedule == nil {
+ log.Warningf("the schedule record %d not found", id)
+ return nil
+ }
+ if err = s.jobserviceClient.PostAction(schedule.JobID, job.JobActionStop); err != nil {
+ herr, ok := err.(*chttp.Error)
+ // if the job specified by jobID is not found in Jobservice, just delete
+ // the schedule record
+ if !ok || herr.Code != http.StatusNotFound {
+ return err
+ }
+ }
+ log.Debugf("the stop action for job %s submitted to the Jobservice", schedule.JobID)
+ if err = s.manager.Delete(schedule.ID); err != nil {
+ return err
+ }
+ log.Debugf("the schedule record %d deleted", schedule.ID)
+
+ return nil
+}
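
Putting the pieces together: a consumer registers a callback once at startup and then drives everything through the Scheduler interface, which takes care of the schedule record, the Jobservice submission, and the status hook URL. A minimal usage sketch, assuming Init() has already been called and using "gc" as an example callback name:

```go
package main

import (
	"github.com/goharbor/harbor/src/common/utils/log"
	"github.com/goharbor/harbor/src/pkg/scheduler"
)

func main() {
	// register once at startup; duplicate names are rejected
	if err := scheduler.Register("gc", func(params interface{}) error {
		log.Info("gc triggered")
		return nil
	}); err != nil {
		log.Errorf("failed to register the callback: %v", err)
		return
	}

	// six-field cron spec, as used in the tests: fire at second 0 of every minute
	id, err := scheduler.GlobalScheduler.Schedule("0 * * * * *", "gc", nil)
	if err != nil {
		log.Errorf("failed to schedule: %v", err)
		return
	}

	// stop the periodic job and delete the schedule record
	if err := scheduler.GlobalScheduler.UnSchedule(id); err != nil {
		log.Errorf("failed to unschedule: %v", err)
	}
}
```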
diff --git a/src/pkg/scheduler/scheduler_test.go b/src/pkg/scheduler/scheduler_test.go
new file mode 100644
index 000000000..de4bb2a2a
--- /dev/null
+++ b/src/pkg/scheduler/scheduler_test.go
@@ -0,0 +1,115 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package scheduler
+
+import (
+ "testing"
+
+ htesting "github.com/goharbor/harbor/src/testing"
+ "github.com/goharbor/harbor/src/testing/job"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+var sch *scheduler
+
+type schedulerTestSuite struct {
+ suite.Suite
+}
+
+func (s *schedulerTestSuite) SetupTest() {
+ t := s.T()
+ // empty callback function registry before running every test case
+ // and register a new callback function named "callback"
+ registry = make(map[string]CallbackFunc)
+ err := Register("callback", func(interface{}) error { return nil })
+ require.Nil(t, err)
+
+ // recreate the scheduler object
+ sch = &scheduler{
+ jobserviceClient: &job.MockJobClient{},
+ manager: &htesting.FakeSchedulerManager{},
+ }
+}
+
+func (s *schedulerTestSuite) TestRegister() {
+ t := s.T()
+ var name string
+ var callbackFun CallbackFunc
+
+ // empty name
+ err := Register(name, callbackFun)
+ require.NotNil(t, err)
+
+ // nil callback function
+ name = "test"
+ err = Register(name, callbackFun)
+ require.NotNil(t, err)
+
+ // pass
+ callbackFun = func(interface{}) error { return nil }
+ err = Register(name, callbackFun)
+ require.Nil(t, err)
+
+ // duplicate name
+ err = Register(name, callbackFun)
+ require.NotNil(t, err)
+}
+
+func (s *schedulerTestSuite) TestGetCallbackFunc() {
+ t := s.T()
+ // not exist
+ _, err := GetCallbackFunc("not-exist")
+ require.NotNil(t, err)
+
+ // pass
+ f, err := GetCallbackFunc("callback")
+ require.Nil(t, err)
+ assert.NotNil(t, f)
+}
+
+func (s *schedulerTestSuite) TestSchedule() {
+ t := s.T()
+
+ // callback function not exist
+ _, err := sch.Schedule("0 * * * * *", "not-exist", nil)
+ require.NotNil(t, err)
+
+ // pass
+ id, err := sch.Schedule("0 * * * * *", "callback", nil)
+ require.Nil(t, err)
+ assert.Equal(t, int64(1), id)
+}
+
+func (s *schedulerTestSuite) TestUnSchedule() {
+ t := s.T()
+ // schedule not exist
+ err := sch.UnSchedule(1)
+ require.NotNil(t, err)
+
+ // schedule exist
+ id, err := sch.Schedule("0 * * * * *", "callback", nil)
+ require.Nil(t, err)
+ assert.Equal(t, int64(1), id)
+
+ err = sch.UnSchedule(id)
+ require.Nil(t, err)
+}
+
+func TestScheduler(t *testing.T) {
+ s := &schedulerTestSuite{}
+ suite.Run(t, s)
+}
diff --git a/src/pkg/types/format.go b/src/pkg/types/format.go
new file mode 100644
index 000000000..cc97f0764
--- /dev/null
+++ b/src/pkg/types/format.go
@@ -0,0 +1,40 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "fmt"
+)
+
+var (
+ resourceValueFormats = map[ResourceName]func(int64) string{
+ ResourceStorage: byteCountToDisplaySize,
+ }
+)
+
+func byteCountToDisplaySize(value int64) string {
+ const unit = 1024
+ if value < unit {
+ return fmt.Sprintf("%d B", value)
+ }
+
+ div, exp := int64(unit), 0
+ for n := value / unit; n >= unit; n /= unit {
+ div *= unit
+ exp++
+ }
+
+ return fmt.Sprintf("%.1f %ciB", float64(value)/float64(div), "KMGTPE"[exp])
+}
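
The loop divides down by multiples of 1024 until the quotient drops below one unit, then picks the matching binary prefix. Traced for 1536 bytes: 1536 >= 1024, but 1536/1024 = 1 is already below 1024, so div stays 1024 and exp stays 0, giving "1.5 KiB". The function is unexported, but ResourceName.FormatValue (defined in resources.go below) dispatches to it for storage values:

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/pkg/types"
)

func main() {
	// storage values route through byteCountToDisplaySize...
	fmt.Println(types.ResourceStorage.FormatValue(1536))    // 1.5 KiB
	fmt.Println(types.ResourceStorage.FormatValue(3 << 30)) // 3.0 GiB
	// ...while counts have no formatter registered and print as-is
	fmt.Println(types.ResourceCount.FormatValue(42)) // 42
}
```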
diff --git a/src/pkg/types/format_test.go b/src/pkg/types/format_test.go
new file mode 100644
index 000000000..19f6607eb
--- /dev/null
+++ b/src/pkg/types/format_test.go
@@ -0,0 +1,45 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import "testing"
+
+func Test_byteCountToDisplaySize(t *testing.T) {
+ type args struct {
+ value int64
+ }
+ tests := []struct {
+ name string
+ args args
+ want string
+ }{
+ {"100 B", args{100}, "100 B"},
+ {"1.0 KiB", args{1024}, "1.0 KiB"},
+ {"1.5 KiB", args{1024 * 3 / 2}, "1.5 KiB"},
+ {"1.0 MiB", args{1024 * 1024}, "1.0 MiB"},
+ {"1.5 MiB", args{1024 * 1024 * 3 / 2}, "1.5 MiB"},
+ {"1.0 GiB", args{1024 * 1024 * 1024}, "1.0 GiB"},
+ {"1.5 GiB", args{1024 * 1024 * 1024 * 3 / 2}, "1.5 GiB"},
+ {"1.0 TiB", args{1024 * 1024 * 1024 * 1024}, "1.0 TiB"},
+ {"1.5 TiB", args{1024 * 1024 * 1024 * 1024 * 3 / 2}, "1.5 TiB"},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := byteCountToDisplaySize(tt.args.value); got != tt.want {
+ t.Errorf("byteCountToDisplaySize() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/src/pkg/types/resources.go b/src/pkg/types/resources.go
new file mode 100644
index 000000000..95a98fdff
--- /dev/null
+++ b/src/pkg/types/resources.go
@@ -0,0 +1,137 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+const (
+ // UNLIMITED is the value representing an unlimited resource
+ UNLIMITED = -1
+
+ // ResourceCount is a count of items, expressed as a plain number
+ ResourceCount ResourceName = "count"
+ // ResourceStorage is a storage size, in bytes
+ ResourceStorage ResourceName = "storage"
+)
+
+// ResourceName is the name identifying various resources in a ResourceList.
+type ResourceName string
+
+// FormatValue returns string for the resource value
+func (resource ResourceName) FormatValue(value int64) string {
+ format, ok := resourceValueFormats[resource]
+ if ok {
+ return format(value)
+ }
+
+ return strconv.FormatInt(value, 10)
+}
+
+// ResourceList is a set of (resource name, value) pairs.
+type ResourceList map[ResourceName]int64
+
+func (resources ResourceList) String() string {
+ bytes, _ := json.Marshal(resources)
+ return string(bytes)
+}
+
+// NewResourceList returns resource list from string
+func NewResourceList(s string) (ResourceList, error) {
+ var resources ResourceList
+ if err := json.Unmarshal([]byte(s), &resources); err != nil {
+ return nil, err
+ }
+
+ return resources, nil
+}
+
+// Equals returns true if the two lists are equivalent
+func Equals(a ResourceList, b ResourceList) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ for key, value1 := range a {
+ value2, found := b[key]
+ if !found {
+ return false
+ }
+ if value1 != value2 {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Add returns the result of a + b for each named resource
+func Add(a ResourceList, b ResourceList) ResourceList {
+ result := ResourceList{}
+ for key, value := range a {
+ if other, found := b[key]; found {
+ value = value + other
+ }
+ result[key] = value
+ }
+
+ for key, value := range b {
+ if _, found := result[key]; !found {
+ result[key] = value
+ }
+ }
+ return result
+}
+
+// Subtract returns the result of a - b for each named resource
+func Subtract(a ResourceList, b ResourceList) ResourceList {
+ result := ResourceList{}
+ for key, value := range a {
+ if other, found := b[key]; found {
+ value = value - other
+ }
+ result[key] = value
+ }
+
+ for key, value := range b {
+ if _, found := result[key]; !found {
+ result[key] = -value
+ }
+ }
+
+ return result
+}
+
+// Zero returns the result of a - a for each named resource
+func Zero(a ResourceList) ResourceList {
+ result := ResourceList{}
+ for key := range a {
+ result[key] = 0
+ }
+ return result
+}
+
+// IsNegative returns the set of resource names that have a negative value.
+func IsNegative(a ResourceList) []ResourceName {
+ results := []ResourceName{}
+ for k, v := range a {
+ if v < 0 {
+ results = append(results, k)
+ }
+ }
+ return results
+}
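
These helpers are element-wise arithmetic over the resource maps, which is what quota enforcement reduces to: add a requested amount to the current usage, or subtract a release, then check that nothing crossed a limit. A short sketch of that pattern (values are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/goharbor/harbor/src/pkg/types"
)

func main() {
	used := types.ResourceList{types.ResourceCount: 3, types.ResourceStorage: 2048}
	request := types.ResourceList{types.ResourceCount: 1, types.ResourceStorage: 1024}

	// usage after accepting the request
	next := types.Add(used, request)
	fmt.Println(next) // {"count":4,"storage":3072}

	// releasing more than is used drives a value negative...
	release := types.ResourceList{types.ResourceStorage: 4096}
	leftover := types.Subtract(next, release)

	// ...which IsNegative surfaces so the caller can reject the operation
	fmt.Println(types.IsNegative(leftover)) // [storage]
}
```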
diff --git a/src/pkg/types/resources_test.go b/src/pkg/types/resources_test.go
new file mode 100644
index 000000000..c912c5f3e
--- /dev/null
+++ b/src/pkg/types/resources_test.go
@@ -0,0 +1,86 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/suite"
+)
+
+type ResourcesSuite struct {
+ suite.Suite
+}
+
+func (suite *ResourcesSuite) TestNewResourceList() {
+ res1, err1 := NewResourceList("")
+ suite.Error(err1)
+ suite.Nil(res1)
+ suite.Equal(0, len(res1))
+
+ res2, err2 := NewResourceList("{}")
+ suite.Nil(err2)
+ suite.NotNil(res2)
+}
+
+func (suite *ResourcesSuite) TestEquals() {
+ suite.True(Equals(ResourceList{}, ResourceList{}))
+ suite.True(Equals(ResourceList{ResourceStorage: 100}, ResourceList{ResourceStorage: 100}))
+ suite.False(Equals(ResourceList{ResourceStorage: 100}, ResourceList{ResourceStorage: 200}))
+ suite.False(Equals(ResourceList{ResourceStorage: 100}, ResourceList{ResourceStorage: 100, ResourceCount: 10}))
+ suite.False(Equals(ResourceList{ResourceStorage: 100, ResourceCount: 10}, ResourceList{ResourceStorage: 100}))
+}
+
+func (suite *ResourcesSuite) TestAdd() {
+ res1 := ResourceList{ResourceStorage: 100}
+ res2 := ResourceList{ResourceStorage: 100}
+ res3 := ResourceList{ResourceStorage: 100, ResourceCount: 10}
+ res4 := ResourceList{ResourceCount: 10}
+
+ suite.Equal(res1, Add(ResourceList{}, res1))
+ suite.Equal(ResourceList{ResourceStorage: 200}, Add(res1, res2))
+ suite.Equal(ResourceList{ResourceStorage: 200, ResourceCount: 10}, Add(res1, res3))
+ suite.Equal(ResourceList{ResourceStorage: 100, ResourceCount: 10}, Add(res1, res4))
+}
+
+func (suite *ResourcesSuite) TestSubtract() {
+ res1 := ResourceList{ResourceStorage: 100}
+ res2 := ResourceList{ResourceStorage: 100}
+ res3 := ResourceList{ResourceStorage: 100, ResourceCount: 10}
+ res4 := ResourceList{ResourceCount: 10}
+
+ suite.Equal(res1, Subtract(res1, ResourceList{}))
+ suite.Equal(ResourceList{ResourceStorage: 0}, Subtract(res1, res2))
+ suite.Equal(ResourceList{ResourceStorage: 0, ResourceCount: -10}, Subtract(res1, res3))
+ suite.Equal(ResourceList{ResourceStorage: 100, ResourceCount: -10}, Subtract(res1, res4))
+}
+
+func (suite *ResourcesSuite) TestZero() {
+ res1 := ResourceList{ResourceStorage: 100}
+ res2 := ResourceList{ResourceCount: 10, ResourceStorage: 100}
+
+ suite.Equal(ResourceList{}, Zero(ResourceList{}))
+ suite.Equal(ResourceList{ResourceStorage: 0}, Zero(res1))
+ suite.Equal(ResourceList{ResourceStorage: 0, ResourceCount: 0}, Zero(res2))
+}
+
+func (suite *ResourcesSuite) TestIsNegative() {
+ suite.EqualValues([]ResourceName{ResourceStorage}, IsNegative(ResourceList{ResourceStorage: -100, ResourceCount: 100}))
+ suite.EqualValues([]ResourceName{ResourceStorage, ResourceCount}, IsNegative(ResourceList{ResourceStorage: -100, ResourceCount: -100}))
+}
+
+func TestRunResourcesSuite(t *testing.T) {
+ suite.Run(t, new(ResourcesSuite))
+}
diff --git a/src/portal/lib/ng-package.json b/src/portal/lib/ng-package.json
index 053ada8db..89852ea86 100644
--- a/src/portal/lib/ng-package.json
+++ b/src/portal/lib/ng-package.json
@@ -8,6 +8,13 @@
"@ngx-translate/core": "ngx-translate-core",
"@ngx-translate/core/index": "ngx-translate-core",
"ngx-markdown": "ngx-markdown"
- }
+ },
+ "umdModuleIds": {
+ "@clr/angular" : "angular",
+ "ngx-markdown" : "ngxMarkdown",
+ "@ngx-translate/http-loader" : "httpLoader",
+ "ngx-cookie" : "ngxCookie",
+ "@ngx-translate/core" : "core$1"
+ }
}
}
\ No newline at end of file
diff --git a/src/portal/lib/ng-package.prod.json b/src/portal/lib/ng-package.prod.json
index 464fcabd4..85a87a50d 100644
--- a/src/portal/lib/ng-package.prod.json
+++ b/src/portal/lib/ng-package.prod.json
@@ -7,6 +7,13 @@
"@ngx-translate/core": "ngx-translate-core",
"@ngx-translate/core/index": "ngx-translate-core",
"ngx-markdown": "ngx-markdown"
- }
+ },
+ "umdModuleIds": {
+ "@clr/angular" : "angular",
+ "ngx-markdown" : "ngxMarkdown",
+ "@ngx-translate/http-loader" : "httpLoader",
+ "ngx-cookie" : "ngxCookie",
+ "@ngx-translate/core" : "core$1"
+ }
}
}
\ No newline at end of file
diff --git a/src/portal/lib/package.json b/src/portal/lib/package.json
index 4804668db..9c49c4207 100644
--- a/src/portal/lib/package.json
+++ b/src/portal/lib/package.json
@@ -1,6 +1,6 @@
{
"name": "@harbor/ui",
- "version": "1.8.0-rc2",
+ "version": "1.9.0",
"description": "Harbor shared UI components based on Clarity and Angular7",
"author": "CNCF",
"module": "index.js",
diff --git a/src/portal/lib/src/config/config.ts b/src/portal/lib/src/config/config.ts
index 2a376d71c..9505b11c5 100644
--- a/src/portal/lib/src/config/config.ts
+++ b/src/portal/lib/src/config/config.ts
@@ -87,16 +87,19 @@ export class Configuration {
token_expiration: NumberValueItem;
scan_all_policy: ComplexValueItem;
read_only: BoolValueItem;
+ notification_enable: BoolValueItem;
http_authproxy_endpoint?: StringValueItem;
http_authproxy_tokenreview_endpoint?: StringValueItem;
http_authproxy_verify_cert?: BoolValueItem;
- http_authproxy_always_onboard?: BoolValueItem;
+ http_authproxy_skip_search?: BoolValueItem;
oidc_name?: StringValueItem;
oidc_endpoint?: StringValueItem;
oidc_client_id?: StringValueItem;
oidc_client_secret?: StringValueItem;
oidc_verify_cert?: BoolValueItem;
oidc_scope?: StringValueItem;
+ count_per_project: NumberValueItem;
+ storage_per_project: NumberValueItem;
public constructor() {
this.auth_mode = new StringValueItem("db_auth", true);
this.project_creation_restriction = new StringValueItem("everyone", true);
@@ -138,15 +141,18 @@ export class Configuration {
}
}, true);
this.read_only = new BoolValueItem(false, true);
+ this.notification_enable = new BoolValueItem(false, true);
this.http_authproxy_endpoint = new StringValueItem("", true);
this.http_authproxy_tokenreview_endpoint = new StringValueItem("", true);
this.http_authproxy_verify_cert = new BoolValueItem(false, true);
- this.http_authproxy_always_onboard = new BoolValueItem(false, true);
+ this.http_authproxy_skip_search = new BoolValueItem(false, true);
this.oidc_name = new StringValueItem('', true);
this.oidc_endpoint = new StringValueItem('', true);
this.oidc_client_id = new StringValueItem('', true);
this.oidc_client_secret = new StringValueItem('', true);
this.oidc_verify_cert = new BoolValueItem(false, true);
this.oidc_scope = new StringValueItem('', true);
+ this.count_per_project = new NumberValueItem(-1, true);
+ this.storage_per_project = new NumberValueItem(-1, true);
}
}
diff --git a/src/portal/lib/src/config/gc/gc-history/gc-history.component.ts b/src/portal/lib/src/config/gc/gc-history/gc-history.component.ts
index dfcc0c4e8..2a8c55c18 100644
--- a/src/portal/lib/src/config/gc/gc-history/gc-history.component.ts
+++ b/src/portal/lib/src/config/gc/gc-history/gc-history.component.ts
@@ -4,6 +4,7 @@ import { GcJobViewModel } from "../gcLog";
import { GcViewModelFactory } from "../gc.viewmodel.factory";
import { ErrorHandler } from "../../../error-handler/index";
import { Subscription, timer } from "rxjs";
+import { REFRESH_TIME_DIFFERENCE } from '../../../shared/shared.const';
const JOB_STATUS = {
PENDING: "pending",
RUNNING: "running"
@@ -34,7 +35,7 @@ export class GcHistoryComponent implements OnInit, OnDestroy {
this.loading = false;
// to avoid some jobs not finished.
if (!this.timerDelay) {
- this.timerDelay = timer(3000, 3000).subscribe(() => {
+ this.timerDelay = timer(REFRESH_TIME_DIFFERENCE, REFRESH_TIME_DIFFERENCE).subscribe(() => {
let count: number = 0;
this.jobs.forEach(job => {
if (
diff --git a/src/portal/lib/src/config/index.ts b/src/portal/lib/src/config/index.ts
index 5ecae2c6e..a43adbce6 100644
--- a/src/portal/lib/src/config/index.ts
+++ b/src/portal/lib/src/config/index.ts
@@ -6,6 +6,8 @@ import { VulnerabilityConfigComponent } from './vulnerability/vulnerability-conf
import { RegistryConfigComponent } from './registry-config.component';
import { GcComponent } from './gc/gc.component';
import { GcHistoryComponent } from './gc/gc-history/gc-history.component';
+import { ProjectQuotasComponent } from './project-quotas/project-quotas.component';
+import { EditProjectQuotasComponent } from './project-quotas/edit-project-quotas/edit-project-quotas.component';
export * from './config';
export * from './replication/replication-config.component';
@@ -20,5 +22,7 @@ export const CONFIGURATION_DIRECTIVES: Type<any>[] = [
GcComponent,
SystemSettingsComponent,
VulnerabilityConfigComponent,
- RegistryConfigComponent
+ RegistryConfigComponent,
+ ProjectQuotasComponent,
+ EditProjectQuotasComponent
];
diff --git a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html
new file mode 100644
index 000000000..c9bd48440
--- /dev/null
+++ b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.html
@@ -0,0 +1,87 @@
+{{ defaultTextsObj.editQuota }}
\ No newline at end of file
diff --git a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.scss b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.scss
new file mode 100644
index 000000000..43f9bf3bc
--- /dev/null
+++ b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.scss
@@ -0,0 +1,84 @@
+::ng-deep .modal-dialog {
+ width: 25rem;
+}
+
+.modal-body {
+ padding-top: 0.8rem;
+ overflow-y: visible;
+ overflow-x: visible;
+
+ .clr-form-compact {
+ div.form-group {
+ padding-left: 8.5rem;
+
+ .mr-3px {
+ margin-right: 3px;
+ }
+
+ .quota-input {
+ width: 2rem;
+ padding-right: 0.8rem;
+ }
+
+ .select-div {
+ width: 2.5rem;
+
+ ::ng-deep .clr-form-control {
+ margin-top: 0.28rem;
+
+ select {
+ padding-right: 15px;
+ }
+ }
+ }
+ }
+ }
+
+ .clr-form-compact-common {
+ div.form-group {
+ padding-left: 6rem;
+
+ .select-div {
+ width: 1.6rem;
+ }
+ }
+ }
+}
+
+.progress-block {
+ width: 8rem;
+}
+
+.progress-div {
+ position: relative;
+ padding-right: 0.6rem;
+ width: 9rem;
+}
+
+::ng-deep {
+ .progress {
+ &.warning>progress {
+ color: orange;
+
+ &::-webkit-progress-value {
+ background-color: orange;
+ }
+
+ &::-moz-progress-bar {
+ background-color: orange;
+ }
+ }
+ }
+}
+
+.progress-label {
+ position: absolute;
+ right: -2.3rem;
+ top: 0;
+ width: 3.5rem;
+ font-weight: 100;
+ font-size: 10px;
+
+ overflow: hidden;
+ text-overflow: ellipsis;
+}
\ No newline at end of file
diff --git a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.spec.ts b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.spec.ts
new file mode 100644
index 000000000..595f1ab1b
--- /dev/null
+++ b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.spec.ts
@@ -0,0 +1,37 @@
+import { async, ComponentFixture, TestBed } from '@angular/core/testing';
+
+import { EditProjectQuotasComponent } from './edit-project-quotas.component';
+import { SharedModule } from '../../../shared/shared.module';
+import { InlineAlertComponent } from '../../../inline-alert/inline-alert.component';
+import { SERVICE_CONFIG, IServiceConfig } from '../../../service.config';
+import { RouterModule } from '@angular/router';
+
+describe('EditProjectQuotasComponent', () => {
+ let component: EditProjectQuotasComponent;
+ let fixture: ComponentFixture<EditProjectQuotasComponent>;
+ let config: IServiceConfig = {
+ quotaUrl: "/api/quotas/testing"
+ };
+ beforeEach(async(() => {
+ TestBed.configureTestingModule({
+ imports: [
+ SharedModule,
+ RouterModule.forRoot([])
+ ],
+ declarations: [ EditProjectQuotasComponent, InlineAlertComponent ],
+ providers: [
+ { provide: SERVICE_CONFIG, useValue: config },
+ ]
+ })
+ .compileComponents();
+ }));
+
+ beforeEach(() => {
+ fixture = TestBed.createComponent(EditProjectQuotasComponent);
+ component = fixture.componentInstance;
+ fixture.detectChanges();
+ });
+ it('should create', () => {
+ expect(component).toBeTruthy();
+ });
+});
diff --git a/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.ts b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.ts
new file mode 100644
index 000000000..ca3248b32
--- /dev/null
+++ b/src/portal/lib/src/config/project-quotas/edit-project-quotas/edit-project-quotas.component.ts
@@ -0,0 +1,154 @@
+import {
+ Component,
+ EventEmitter,
+ Output,
+ ViewChild,
+ OnInit,
+} from '@angular/core';
+import { NgForm, Validators } from '@angular/forms';
+
+import { InlineAlertComponent } from '../../../inline-alert/inline-alert.component';
+
+import { QuotaUnits, QuotaUnlimited, QUOTA_DANGER_COEFFICIENT, QUOTA_WARNING_COEFFICIENT } from "../../../shared/shared.const";
+
+import { clone, getSuitableUnit, getByte, GetIntegerAndUnit, validateCountLimit, validateLimit } from '../../../utils';
+import { EditQuotaQuotaInterface, QuotaHardLimitInterface } from '../../../service';
+import { distinctUntilChanged } from 'rxjs/operators';
+
+@Component({
+ selector: 'edit-project-quotas',
+ templateUrl: './edit-project-quotas.component.html',
+ styleUrls: ['./edit-project-quotas.component.scss']
+})
+export class EditProjectQuotasComponent implements OnInit {
+ openEditQuota: boolean;
+ defaultTextsObj: { editQuota: string; setQuota: string; countQuota: string; storageQuota: string; isSystemDefaultQuota: boolean } = {
+ editQuota: '',
+ setQuota: '',
+ countQuota: '',
+ storageQuota: '',
+ isSystemDefaultQuota: false,
+ };
+ quotaHardLimitValue: QuotaHardLimitInterface = {
+ storageLimit: -1
+ , storageUnit: ''
+ , countLimit: -1
+ };
+ quotaUnits = QuotaUnits;
+ staticBackdrop = true;
+ closable = false;
+ quotaForm: NgForm;
+ @ViewChild(InlineAlertComponent)
+ inlineAlert: InlineAlertComponent;
+
+ @ViewChild('quotaForm')
+ currentForm: NgForm;
+ @Output() confirmAction = new EventEmitter();
+ quotaDangerCoefficient: number = QUOTA_DANGER_COEFFICIENT;
+ quotaWarningCoefficient: number = QUOTA_WARNING_COEFFICIENT;
+ constructor() { }
+
+ ngOnInit() {
+ }
+
+ onSubmit(): void {
+ const emitData = {
+ formValue: this.currentForm.value,
+ isSystemDefaultQuota: this.defaultTextsObj.isSystemDefaultQuota,
+ id: this.quotaHardLimitValue.id
+ };
+ this.confirmAction.emit(emitData);
+ }
+ onCancel() {
+ this.openEditQuota = false;
+ }
+
+ openEditQuotaModal(defaultTextsObj: EditQuotaQuotaInterface): void {
+ this.defaultTextsObj = defaultTextsObj;
+ if (this.defaultTextsObj.isSystemDefaultQuota) {
+ this.quotaHardLimitValue = {
+ storageLimit: defaultTextsObj.quotaHardLimitValue.storageLimit === QuotaUnlimited ?
+ QuotaUnlimited : GetIntegerAndUnit(defaultTextsObj.quotaHardLimitValue.storageLimit
+ , clone(QuotaUnits), 0, clone(QuotaUnits)).partNumberHard
+ , storageUnit: defaultTextsObj.quotaHardLimitValue.storageLimit === QuotaUnlimited ?
+ QuotaUnits[3].UNIT : GetIntegerAndUnit(defaultTextsObj.quotaHardLimitValue.storageLimit
+ , clone(QuotaUnits), 0, clone(QuotaUnits)).partCharacterHard
+ , countLimit: defaultTextsObj.quotaHardLimitValue.countLimit
+ };
+ } else {
+ this.quotaHardLimitValue = {
+ storageLimit: defaultTextsObj.quotaHardLimitValue.hard.storage === QuotaUnlimited ?
+ QuotaUnlimited : GetIntegerAndUnit(defaultTextsObj.quotaHardLimitValue.hard.storage
+ , clone(QuotaUnits), defaultTextsObj.quotaHardLimitValue.used.storage, clone(QuotaUnits)).partNumberHard
+ , storageUnit: defaultTextsObj.quotaHardLimitValue.hard.storage === QuotaUnlimited ?
+ QuotaUnits[3].UNIT : GetIntegerAndUnit(defaultTextsObj.quotaHardLimitValue.hard.storage
+ , clone(QuotaUnits), defaultTextsObj.quotaHardLimitValue.used.storage, clone(QuotaUnits)).partCharacterHard
+ , countLimit: defaultTextsObj.quotaHardLimitValue.hard.count
+ , id: defaultTextsObj.quotaHardLimitValue.id
+ , countUsed: defaultTextsObj.quotaHardLimitValue.used.count
+ , storageUsed: defaultTextsObj.quotaHardLimitValue.used.storage
+ };
+ }
+ let defaultForm = {
+ count: this.quotaHardLimitValue.countLimit
+ , storage: this.quotaHardLimitValue.storageLimit
+ , storageUnit: this.quotaHardLimitValue.storageUnit
+ };
+ this.currentForm.resetForm(defaultForm);
+ this.openEditQuota = true;
+
+ this.currentForm.form.controls['storage'].setValidators(
+ [
+ Validators.required,
+ Validators.pattern('(^-1$)|(^([1-9]+)([0-9]+)*$)'),
+ validateLimit(this.currentForm.form.controls['storageUnit'])
+ ]);
+ this.currentForm.form.controls['count'].setValidators(
+ [
+ Validators.required,
+ Validators.pattern('(^-1$)|(^([1-9]+)([0-9]+)*$)'),
+ validateCountLimit()
+ ]);
+ this.currentForm.form.valueChanges
+ .pipe(distinctUntilChanged((a, b) => JSON.stringify(a) === JSON.stringify(b)))
+ .subscribe((data) => {
+ ['storage', 'storageUnit', 'count'].forEach(fieldName => {
+ if (this.currentForm.form.get(fieldName) && this.currentForm.form.get(fieldName).value !== null) {
+ this.currentForm.form.get(fieldName).updateValueAndValidity();
+ }
+ });
+ });
+ }
+
+ get isValid() {
+ return this.currentForm.valid && this.currentForm.dirty;
+ }
+ getSuitableUnit(value) {
+ const QuotaUnitsCopy = clone(QuotaUnits);
+ return getSuitableUnit(value, QuotaUnitsCopy);
+ }
+ getIntegerAndUnit(valueHard, valueUsed) {
+ return GetIntegerAndUnit(valueHard
+ , clone(QuotaUnits), valueUsed, clone(QuotaUnits));
+ }
+ getByte(count: number, unit: string) {
+ if (+count === +count) {
+ return getByte(+count, unit);
+ }
+ return 0;
+ }
+ isDangerColor(limit: number | string, used: number | string, unit?: string) {
+ if (unit) {
+ return limit !== QuotaUnlimited ? +used / getByte(+limit, unit) >= this.quotaDangerCoefficient : false;
+ }
+ return limit !== QuotaUnlimited ? +used / +limit >= this.quotaDangerCoefficient : false;
+ }
+ isWarningColor(limit: number | string, used: number | string, unit?: string) {
+ if (unit) {
+ return limit !== QuotaUnlimited ?
+ +used / getByte(+limit, unit) >= this.quotaWarningCoefficient && +used / getByte(+limit, unit) <= this.quotaDangerCoefficient : false;
+ }
+ return limit !== QuotaUnlimited ?
+ +used / +limit >= this.quotaWarningCoefficient && +used / +limit <= this.quotaDangerCoefficient : false;
+ }
+}
diff --git a/src/portal/lib/src/config/project-quotas/project-quotas.component.html b/src/portal/lib/src/config/project-quotas/project-quotas.component.html
new file mode 100644
index 000000000..22af1333d
--- /dev/null
+++ b/src/portal/lib/src/config/project-quotas/project-quotas.component.html
@@ -0,0 +1,79 @@
+
+
+
+
+
+ {{'QUOTA.PROJECT_QUOTA_DEFAULT_ARTIFACT' | translate}}{{ quotaHardLimitValue?.countLimit === -1? ('QUOTA.UNLIMITED'| translate): quotaHardLimitValue?.countLimit }}
+
+ {{'QUOTA.PROJECT_QUOTA_DEFAULT_DISK' | translate}}
+ {{ quotaHardLimitValue?.storageLimit === -1?('QUOTA.UNLIMITED' | translate): getIntegerAndUnit(quotaHardLimitValue?.storageLimit, 0).partNumberHard}}
+ {{ quotaHardLimitValue?.storageLimit === -1?'':quotaHardLimitValue?.storageUnit }}
+
+
+
+
+
+
+
+
+
+
+
+
+ {{'QUOTA.PROJECT' | translate}}
+ {{'QUOTA.OWNER' | translate}}
+ {{'QUOTA.COUNT' | translate }}
+ {{'QUOTA.STORAGE' | translate }}
+ {{'QUOTA.PLACEHOLDER' | translate }}
+
+
+
+
+
+ {{quota?.ref?.name}}
+ {{quota?.ref?.owner_name}}
+
+
+ quotaDangerCoefficient:false"
+ [class.warning]="quota.hard.count!==-1?quota.used.count/quota.hard.count<=quotaDangerCoefficient &"a.used.count/quota.hard.count>=quotaWarningCoefficient:false"
+ >
+
+
+
+
+
+
+
+ quotaDangerCoefficient:false"
+ [class.warning]="quota.hard.storage!==-1?quota.used.storage/quota.hard.storage>=quotaWarningCoefficient&"a.used.storage/quota.hard.storage<=quotaDangerCoefficient:false"
+ >
+
+
+
+
+
+
+
+ {{pagination.firstItem + 1}} - {{pagination.lastItem + 1}}
+ {{'DESTINATION.OF' | translate}}
+ {{totalCount}} {{'SUMMARY.QUOTAS' | translate}}
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/portal/lib/src/config/project-quotas/project-quotas.component.scss b/src/portal/lib/src/config/project-quotas/project-quotas.component.scss
new file mode 100644
index 000000000..eeb09db91
--- /dev/null
+++ b/src/portal/lib/src/config/project-quotas/project-quotas.component.scss
@@ -0,0 +1,42 @@
+.default-quota {
+ display: flex;
+
+ .default-quota-text {
+ display: flex;
+ justify-content: space-between;
+ min-width: 13rem;
+
+ .num-count {
+ display: inline-block;
+ min-width: 2rem;
+ }
+ }
+}
+
+.color-0 {
+ color: #000;
+}
+
+.progress-block {
+ label {
+ font-weight: 400 !important;
+ }
+}
+
+.default-quota-edit-button {
+ height: 1rem;
+}
+
+.min-label-width {
+ min-width: 120px;
+}
+
+.quota-top {
+ display: flex;
+ justify-content: space-between;
+}
+
+.refresh-div {
+ margin-top: auto;
+ cursor: pointer;
+}
\ No newline at end of file
diff --git a/src/portal/lib/src/config/project-quotas/project-quotas.component.spec.ts b/src/portal/lib/src/config/project-quotas/project-quotas.component.spec.ts
new file mode 100644
index 000000000..168685550
--- /dev/null
+++ b/src/portal/lib/src/config/project-quotas/project-quotas.component.spec.ts
@@ -0,0 +1,93 @@
+import { async, ComponentFixture, TestBed } from '@angular/core/testing';
+
+import { ProjectQuotasComponent } from './project-quotas.component';
+import { IServiceConfig, SERVICE_CONFIG } from '../../service.config';
+import { SharedModule } from '../../shared/shared.module';
+import { RouterModule } from '@angular/router';
+import { EditProjectQuotasComponent } from './edit-project-quotas/edit-project-quotas.component';
+import { InlineAlertComponent } from '../../inline-alert/inline-alert.component';
+import {
+ ConfigurationService, ConfigurationDefaultService, QuotaService
+ , QuotaDefaultService, Quota, RequestQueryParams
+} from '../../service';
+import { ErrorHandler } from '../../error-handler';
+import { of } from 'rxjs';
+import { delay } from 'rxjs/operators';
+import {APP_BASE_HREF} from '@angular/common';
+describe('ProjectQuotasComponent', () => {
+ let spy: jasmine.Spy;
+ let quotaService: QuotaService;
+
+ let component: ProjectQuotasComponent;
+ let fixture: ComponentFixture<ProjectQuotasComponent>;
+
+ let config: IServiceConfig = {
+ quotaUrl: "/api/quotas/testing"
+ };
+ let mockQuotaList: Quota[] = [{
+ id: 1111,
+ ref: {
+ id: 1111,
+ name: "project1",
+ owner_name: "project1"
+ },
+ creation_time: "12212112121",
+ update_time: "12212112121",
+ hard: {
+ count: -1,
+ storage: -1,
+ },
+ used: {
+ count: 1234,
+ storage: 1234
+ },
+ }
+ ];
+ beforeEach(async(() => {
+ TestBed.configureTestingModule({
+ imports: [
+ SharedModule,
+ RouterModule.forRoot([])
+ ],
+ declarations: [ProjectQuotasComponent, EditProjectQuotasComponent, InlineAlertComponent],
+ providers: [
+ ErrorHandler,
+ { provide: SERVICE_CONFIG, useValue: config },
+ { provide: ConfigurationService, useClass: ConfigurationDefaultService },
+ { provide: QuotaService, useClass: QuotaDefaultService },
+ { provide: APP_BASE_HREF, useValue : '/' }
+
+ ]
+ })
+ .compileComponents();
+ }));
+
+ beforeEach(async(() => {
+
+ fixture = TestBed.createComponent(ProjectQuotasComponent);
+ component = fixture.componentInstance;
+ component.quotaHardLimitValue = {
+ countLimit: 1111,
+ storageLimit: 23,
+ storageUnit: 'GB'
+ };
+ component.loading = true;
+ quotaService = fixture.debugElement.injector.get(QuotaService);
+ spy = spyOn(quotaService, 'getQuotaList')
+ .and.callFake(function (params: RequestQueryParams) {
+ let header = new Map();
+ header.set("X-Total-Count", 123);
+ const httpRes = {
+ headers: header,
+ body: mockQuotaList
+ };
+ return of(httpRes).pipe(delay(0));
+ });
+
+ fixture.detectChanges();
+ }));
+
+ it('should create', () => {
+ expect(component).toBeTruthy();
+ });
+});
diff --git a/src/portal/lib/src/config/project-quotas/project-quotas.component.ts b/src/portal/lib/src/config/project-quotas/project-quotas.component.ts
new file mode 100644
index 000000000..fa457b03e
--- /dev/null
+++ b/src/portal/lib/src/config/project-quotas/project-quotas.component.ts
@@ -0,0 +1,241 @@
+import { Component, Input, Output, EventEmitter, ViewChild, SimpleChanges, OnChanges } from '@angular/core';
+import { Configuration } from '../config';
+import {
+ Quota, State, Comparator, ClrDatagridComparatorInterface, QuotaHardLimitInterface, QuotaHard
+} from '../../service/interface';
+import {
+ clone, isEmpty, getChanges, getSuitableUnit, calculatePage, CustomComparator
+ , getByte, GetIntegerAndUnit
+} from '../../utils';
+import { ErrorHandler } from '../../error-handler/index';
+import { QuotaUnits, QuotaUnlimited, QUOTA_DANGER_COEFFICIENT, QUOTA_WARNING_COEFFICIENT } from '../../shared/shared.const';
+import { EditProjectQuotasComponent } from './edit-project-quotas/edit-project-quotas.component';
+import {
+ ConfigurationService
+} from '../../service/index';
+import { TranslateService } from '@ngx-translate/core';
+import { forkJoin } from 'rxjs';
+import { QuotaService } from "../../service/quota.service";
+import { Router } from '@angular/router';
+import { finalize } from 'rxjs/operators';
+const quotaSort = {
+ count: 'used.count',
+ storage: "used.storage",
+ sortType: 'string'
+};
+const QuotaType = 'project';
+
+@Component({
+ selector: 'project-quotas',
+ templateUrl: './project-quotas.component.html',
+ styleUrls: ['./project-quotas.component.scss']
+})
+export class ProjectQuotasComponent implements OnChanges {
+
+ config: Configuration = new Configuration();
+ @ViewChild('editProjectQuotas')
+ editQuotaDialog: EditProjectQuotasComponent;
+ loading = true;
+ quotaHardLimitValue: QuotaHardLimitInterface;
+ currentState: State;
+
+ @Output() configChange: EventEmitter<Configuration> = new EventEmitter<Configuration>();
+ @Output() refreshAllconfig: EventEmitter<any> = new EventEmitter<any>();
+ quotaList: Quota[] = [];
+ originalConfig: Configuration;
+ currentPage = 1;
+ totalCount = 0;
+ pageSize = 15;
+ quotaDangerCoefficient: number = QUOTA_DANGER_COEFFICIENT;
+ quotaWarningCoefficient: number = QUOTA_WARNING_COEFFICIENT;
+ @Input()
+ get allConfig(): Configuration {
+ return this.config;
+ }
+ set allConfig(cfg: Configuration) {
+ this.config = cfg;
+ this.configChange.emit(this.config);
+ }
+ countComparator: Comparator<Quota> = new CustomComparator<Quota>(quotaSort.count, quotaSort.sortType);
+ storageComparator: Comparator<Quota> = new CustomComparator<Quota>(quotaSort.storage, quotaSort.sortType);
+
+ constructor(
+ private configService: ConfigurationService,
+ private quotaService: QuotaService,
+ private translate: TranslateService,
+ private router: Router,
+ private errorHandler: ErrorHandler) { }
+
+ editQuota(quotaHardLimitValue: Quota) {
+ const defaultTexts = [this.translate.get('QUOTA.EDIT_PROJECT_QUOTAS')
+ , this.translate.get('QUOTA.SET_QUOTAS', { params: quotaHardLimitValue.ref.name })
+ , this.translate.get('QUOTA.COUNT_QUOTA'), this.translate.get('QUOTA.STORAGE_QUOTA')];
+ forkJoin(...defaultTexts).subscribe(res => {
+ const defaultTextsObj = {
+ editQuota: res[0],
+ setQuota: res[1],
+ countQuota: res[2],
+ storageQuota: res[3],
+ quotaHardLimitValue: quotaHardLimitValue,
+ isSystemDefaultQuota: false
+ };
+ this.editQuotaDialog.openEditQuotaModal(defaultTextsObj);
+ });
+ }
+
+ editDefaultQuota(quotaHardLimitValue: QuotaHardLimitInterface) {
+ const defaultTexts = [this.translate.get('QUOTA.EDIT_DEFAULT_PROJECT_QUOTAS'), this.translate.get('QUOTA.SET_DEFAULT_QUOTAS')
+ , this.translate.get('QUOTA.COUNT_DEFAULT_QUOTA'), this.translate.get('QUOTA.STORAGE_DEFAULT_QUOTA')];
+ forkJoin(...defaultTexts).subscribe(res => {
+ const defaultTextsObj = {
+ editQuota: res[0],
+ setQuota: res[1],
+ countQuota: res[2],
+ storageQuota: res[3],
+ quotaHardLimitValue: quotaHardLimitValue,
+ isSystemDefaultQuota: true
+ };
+ this.editQuotaDialog.openEditQuotaModal(defaultTextsObj);
+
+ });
+ }
+ public getChanges() {
+ let allChanges = getChanges(this.originalConfig, this.config);
+ if (allChanges) {
+ return this.getQuotaChanges(allChanges);
+ }
+ return null;
+ }
+
+ getQuotaChanges(allChanges) {
+ let changes = {};
+ for (let prop in allChanges) {
+ if (prop === 'storage_per_project'
+ || prop === 'count_per_project'
+ ) {
+ changes[prop] = allChanges[prop];
+ }
+ }
+ return changes;
+ }
+
+ public saveConfig(configQuota): void {
+ this.allConfig.count_per_project.value = configQuota.count;
+ this.allConfig.storage_per_project.value = +configQuota.storage === QuotaUnlimited ?
+ configQuota.storage : getByte(configQuota.storage, configQuota.storageUnit);
+ let changes = this.getChanges();
+ if (!isEmpty(changes)) {
+ this.loading = true;
+ this.configService.saveConfigurations(changes)
+ .pipe(finalize(() => {
+ this.loading = false;
+ this.editQuotaDialog.openEditQuota = false;
+ }))
+ .subscribe(response => {
+ this.refreshAllconfig.emit();
+ this.errorHandler.info('CONFIG.SAVE_SUCCESS');
+ }
+ , error => {
+ this.errorHandler.error(error);
+ });
+ } else {
+ // Unexpected state, should not reach here
+ this.translate.get('CONFIG.NO_CHANGE').subscribe(res => {
+ this.editQuotaDialog.inlineAlert.showInlineError(res);
+ });
+ }
+ }
+
+ confirmEdit(event) {
+ if (event.isSystemDefaultQuota) {
+ this.saveConfig(event.formValue);
+ } else {
+ this.saveCurrentQuota(event);
+ }
+ }
+ saveCurrentQuota(event) {
+ let count = +event.formValue.count;
+ let storage = +event.formValue.storage === QuotaUnlimited ?
+ +event.formValue.storage : getByte(+event.formValue.storage, event.formValue.storageUnit);
+ let rep: QuotaHard = { hard: { count, storage } };
+ this.loading = true;
+ this.quotaService.updateQuota(event.id, rep).subscribe(res => {
+ this.editQuotaDialog.openEditQuota = false;
+ this.getQuotaList(this.currentState);
+ this.errorHandler.info('QUOTA.SAVE_SUCCESS');
+ }, error => {
+ this.errorHandler.error(error);
+ this.loading = false;
+ });
+ }
+
+ getQuotaHardLimitValue() {
+ const storageNumberAndUnit = this.allConfig.storage_per_project ? this.allConfig.storage_per_project.value : QuotaUnlimited;
+ const storageLimit = storageNumberAndUnit;
+ const storageUnit = this.getIntegerAndUnit(storageNumberAndUnit, 0).partCharacterHard;
+ const countLimit = this.allConfig.count_per_project ? this.allConfig.count_per_project.value : QuotaUnlimited;
+ this.quotaHardLimitValue = { storageLimit, storageUnit, countLimit };
+ }
+ getQuotaList(state: State) {
+ if (!state || !state.page) {
+ return;
+ }
+ // Keep state for future filtering and sorting
+ this.currentState = state;
+
+ let pageNumber: number = calculatePage(state);
+ if (pageNumber <= 0) { pageNumber = 1; }
+ let sortBy: any = '';
+ if (state.sort) {
+ sortBy = state.sort.by as string | ClrDatagridComparatorInterface<any>;
+ sortBy = sortBy.fieldName ? sortBy.fieldName : sortBy;
+ sortBy = state.sort.reverse ? `-${sortBy}` : sortBy;
+ }
+ this.loading = true;
+
+ this.quotaService.getQuotaList(QuotaType, pageNumber, this.pageSize, sortBy).pipe(finalize(() => {
+ this.loading = false;
+ })).subscribe(res => {
+ if (res.headers) {
+ let xHeader: string = res.headers.get("X-Total-Count");
+ if (xHeader) {
+ this.totalCount = parseInt(xHeader, 10);
+ }
+ }
+ this.quotaList = res.body.filter((quota) => {
+ return quota.ref !== null;
+ }) as Quota[];
+ }, error => {
+ this.errorHandler.error(error);
+ });
+ }
+ ngOnChanges(changes: SimpleChanges): void {
+ if (changes && changes["allConfig"]) {
+ this.originalConfig = clone(this.config);
+ this.getQuotaHardLimitValue();
+ }
+ }
+ getSuitableUnit(value) {
+ const QuotaUnitsCopy = clone(QuotaUnits);
+ return getSuitableUnit(value, QuotaUnitsCopy);
+ }
+ getIntegerAndUnit(valueHard, valueUsed) {
+ return GetIntegerAndUnit(valueHard
+ , clone(QuotaUnits), valueUsed, clone(QuotaUnits));
+ }
+
+ goToLink(proId) {
+ let linkUrl = ["harbor", "projects", proId, "summary"];
+ this.router.navigate(linkUrl);
+ }
+ refresh() {
+ const state: State = {
+ page: {
+ from: 0,
+ to: 14,
+ size: 15
+ },
+ };
+ this.getQuotaList(state);
+ }
+}
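The datagrid paging above follows Clarity's State contract: the sort field gains a `-` prefix for descending order and the paginator total comes from the `X-Total-Count` header. A minimal sketch of the two conversions distilled from getQuotaList() — `buildSortParam` and `readTotal` are illustrative helpers, not part of this patch:

    function buildSortParam(field: string, reverse: boolean): string {
      return reverse ? `-${field}` : field;   // '-used.storage' sorts descending
    }
    function readTotal(headers: { get(name: string): string | null }): number {
      const raw = headers.get('X-Total-Count');
      return raw ? parseInt(raw, 10) : 0;     // radix 10: the header is a decimal count
    }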
diff --git a/src/portal/lib/src/config/registry-config.component.spec.ts b/src/portal/lib/src/config/registry-config.component.spec.ts
index d938115da..9aef842d4 100644
--- a/src/portal/lib/src/config/registry-config.component.spec.ts
+++ b/src/portal/lib/src/config/registry-config.component.spec.ts
@@ -19,7 +19,7 @@ import {
ScanningResultDefaultService,
SystemInfoService,
SystemInfoDefaultService,
- SystemInfo
+ SystemInfo, SystemCVEWhitelist
} from '../service/index';
import { Configuration } from './config';
import { of } from 'rxjs';
@@ -56,7 +56,12 @@ describe('RegistryConfigComponent (inline template)', () => {
"harbor_version": "v1.1.1-rc1-160-g565110d",
"next_scan_all": 0
};
-
+ let mockSystemWhitelist: SystemCVEWhitelist = {
+ "expires_at": 1561996800,
+ "id": 1,
+ "items": [],
+ "project_id": 0
+ };
beforeEach(async(() => {
TestBed.configureTestingModule({
imports: [
@@ -90,7 +95,7 @@ describe('RegistryConfigComponent (inline template)', () => {
systemInfoService = fixture.debugElement.injector.get(SystemInfoService);
spy = spyOn(cfgService, 'getConfigurations').and.returnValue(of(mockConfig));
spySystemInfo = spyOn(systemInfoService, 'getSystemInfo').and.returnValue(of(mockSystemInfo));
-
+ spySystemInfo = spyOn(systemInfoService, 'getSystemWhitelist').and.returnValue(of(mockSystemWhitelist));
fixture.detectChanges();
});
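Both spies are assigned to the same `spySystemInfo` variable here; if a later assertion ever needs the `getSystemInfo` spy, keeping separate handles is the usual pattern — a sketch under the same Jasmine setup:

    spySystemInfo = spyOn(systemInfoService, 'getSystemInfo').and.returnValue(of(mockSystemInfo));
    const spyWhitelist = spyOn(systemInfoService, 'getSystemWhitelist').and.returnValue(of(mockSystemWhitelist));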
diff --git a/src/portal/lib/src/config/registry-config.component.ts b/src/portal/lib/src/config/registry-config.component.ts
index 66e2f3d3e..28a9252e8 100644
--- a/src/portal/lib/src/config/registry-config.component.ts
+++ b/src/portal/lib/src/config/registry-config.component.ts
@@ -12,9 +12,10 @@ import {
clone
} from '../utils';
import { ErrorHandler } from '../error-handler/index';
-import { SystemSettingsComponent, VulnerabilityConfigComponent, GcComponent} from './index';
import { Configuration } from './config';
-import { map, catchError } from "rxjs/operators";
+import { VulnerabilityConfigComponent } from "./vulnerability/vulnerability-config.component";
+import { GcComponent } from "./gc";
+import { SystemSettingsComponent } from "./system/system-settings.component";
@Component({
selector: 'hbr-registry-config',
diff --git a/src/portal/lib/src/config/system/system-settings.component.html b/src/portal/lib/src/config/system/system-settings.component.html
index e5bef3026..72b9458f6 100644
--- a/src/portal/lib/src/config/system/system-settings.component.html
+++ b/src/portal/lib/src/config/system/system-settings.component.html
@@ -4,23 +4,27 @@
-
{{'ROBOT_ACCOUNT.TOKEN_EXPIRATION' | translate}}
-
-
+
+
{{'ROBOT_ACCOUNT.NUMBER_REQUIRED' | translate}}
@@ -56,12 +63,96 @@
{{'CONFIG.REPO_READ_ONLY' | translate}}
-
+
+
+
+
+ {{'CONFIG.TOOLTIP.REPO_TOOLTIP' | translate}}
+
+
+
+
+
+ {{'CVE_WHITELIST.DEPLOYMENT_SECURITY'|translate}}
+
+
+
+ {{'CVE_WHITELIST.CVE_WHITELIST'|translate}}
+
+
+ {{'CVE_WHITELIST.SYS_WHITELIST_EXPLAIN'|translate}}
+
+
+ {{'CVE_WHITELIST.ADD_SYS'|translate}}
+
+
+ {{'CVE_WHITELIST.WARNING_SYS'|translate}}
+
+
+
+
+
+
+
+
+
+
+
+ {{'CVE_WHITELIST.ENTER'|translate}}
+
+ {{'CVE_WHITELIST.HELP'|translate}}
+
+
+
+
+
+
+
+ - {{'CVE_WHITELIST.NONE'|translate}}
+ -
+ {{item.cve_id}}
+
+
+
+
+
+
+ {{'CVE_WHITELIST.EXPIRES_AT'|translate}}
+
+
+
+
+
+
+
+
+ {{'CVE_WHITELIST.NEVER_EXPIRES'|translate}}
+
+
+
+
+
+
+
+
+ {{'CONFIG.WEBHOOK_NOTIFICATION_ENABLED' | translate}}
+
+
- {{'CONFIG.TOOLTIP.REPO_TOOLTIP' | translate}}
+ {{'CONFIG.TOOLTIP.WEBHOOK_TOOLTIP' | translate}}
@@ -69,9 +160,11 @@
-
\ No newline at end of file
diff --git a/src/portal/lib/src/config/system/system-settings.component.scss b/src/portal/lib/src/config/system/system-settings.component.scss
index 96d0e31e1..5b708737f 100644
--- a/src/portal/lib/src/config/system/system-settings.component.scss
+++ b/src/portal/lib/src/config/system/system-settings.component.scss
@@ -1,12 +1,78 @@
.subtitle {
- font-size: 14px;
- font-weight: 600;
+ font-size: 14px;
+ font-weight: 600;
}
.create-tooltip {
- top: -1;
+ top: -1;
}
.read-tooltip {
- top: -7px;
+ top: -7px;
+}
+
+.title {
+ font-weight: bold;
+}
+
+.margin-top-4 {
+ margin-top: 4px;
+}
+
+.whitelist-window {
+ border: 1px solid #ccc;
+ border-radius: 3px;
+ padding: 12px;
+ height: 224px;
+ width: 222px;
+ color: #0079bb;
+ overflow-y: auto;
+ li {
+ height: 24px;
+ line-height: 24px;
+ list-style-type: none;
+ }
+}
+
+.width-70per {
+ width: 70%;
+}
+
+.none {
+ color: #ccc;
+}
+
+.underline {
+ border-bottom: 1px solid;
+}
+
+.color-0079bb {
+ color: #0079bb;
+}
+
+.padding-top-8 {
+ padding-top: 8px;
+}
+
+.padding-left-80 {
+ padding-left: 80px;
+}
+
+.add-modal {
+ position: absolute;
+ padding: 0 8px;
+ background-color: rgb(238, 238, 238);
+
+ input {
+ width: 100%;
+ border: 1px solid;
+ }
+
+ button {
+ float: right;
+ }
+}
+.hand {
+ cursor: pointer;
+ margin: 0;
}
\ No newline at end of file
diff --git a/src/portal/lib/src/config/system/system-settings.component.ts b/src/portal/lib/src/config/system/system-settings.component.ts
index a160af87b..f87779109 100644
--- a/src/portal/lib/src/config/system/system-settings.component.ts
+++ b/src/portal/lib/src/config/system/system-settings.component.ts
@@ -1,20 +1,36 @@
-import { Component, Input, OnInit, Output, EventEmitter, ViewChild, Inject, OnChanges, SimpleChanges } from '@angular/core';
-import { NgForm } from '@angular/forms';
-import { Configuration, StringValueItem } from '../config';
-import { SERVICE_CONFIG, IServiceConfig } from '../../service.config';
-import { clone, isEmpty, getChanges } from '../../utils';
-import { ErrorHandler } from '../../error-handler/index';
-import { ConfirmationMessage } from '../../confirmation-dialog/confirmation-message';
-import { ConfirmationDialogComponent } from '../../confirmation-dialog/confirmation-dialog.component';
-import { ConfirmationState, ConfirmationTargets } from '../../shared/shared.const';
-import { ConfirmationAcknowledgement } from '../../confirmation-dialog/confirmation-state-message';
import {
- ConfigurationService
+ Component,
+ Input,
+ OnInit,
+ Output,
+ EventEmitter,
+ ViewChild,
+ Inject,
+ OnChanges,
+ SimpleChanges,
+ ElementRef
+} from '@angular/core';
+import {NgForm} from '@angular/forms';
+import {Configuration, StringValueItem} from '../config';
+import {SERVICE_CONFIG, IServiceConfig} from '../../service.config';
+import {clone, isEmpty, getChanges, compareValue} from '../../utils';
+import {ErrorHandler} from '../../error-handler/index';
+import {ConfirmationMessage} from '../../confirmation-dialog/confirmation-message';
+import {ConfirmationDialogComponent} from '../../confirmation-dialog/confirmation-dialog.component';
+import {ConfirmationState, ConfirmationTargets} from '../../shared/shared.const';
+import {ConfirmationAcknowledgement} from '../../confirmation-dialog/confirmation-state-message';
+import {
+ ConfigurationService, SystemCVEWhitelist, SystemInfo, SystemInfoService, VulnerabilityItem
} from '../../service/index';
-import { from } from 'rxjs';
+import {forkJoin} from "rxjs";
+
const fakePass = 'aWpLOSYkIzJTTU4wMDkx';
const ONE_HOUR_MINUTES: number = 60;
const ONE_DAY_MINUTES: number = 24 * ONE_HOUR_MINUTES;
+const ONE_THOUSAND: number = 1000;
+const CVE_DETAIL_PRE_URL = `https://nvd.nist.gov/vuln/detail/`;
+const TARGET_BLANK = "_blank";
+
@Component({
selector: 'system-settings',
templateUrl: './system-settings.component.html',
@@ -26,6 +42,11 @@ export class SystemSettingsComponent implements OnChanges, OnInit {
private originalConfig: Configuration;
downloadLink: string;
robotTokenExpiration: string;
+ systemWhitelist: SystemCVEWhitelist;
+ systemWhitelistOrigin: SystemCVEWhitelist;
+ cveIds: string;
+ showAddModal: boolean = false;
+ systemInfo: SystemInfo;
 @Output() configChange: EventEmitter<Configuration> = new EventEmitter<Configuration>();
 @Output() readOnlyChange: EventEmitter<any> = new EventEmitter<any>();
 @Output() reloadSystemConfig: EventEmitter<any> = new EventEmitter<any>();
@@ -34,6 +55,7 @@ export class SystemSettingsComponent implements OnChanges, OnInit {
get systemSettings(): Configuration {
return this.config;
}
+
set systemSettings(cfg: Configuration) {
this.config = cfg;
this.configChange.emit(this.config);
@@ -46,6 +68,7 @@ export class SystemSettingsComponent implements OnChanges, OnInit {
@ViewChild("systemConfigFrom") systemSettingsForm: NgForm;
@ViewChild("cfgConfirmationDialog") confirmationDlg: ConfirmationDialogComponent;
+ @ViewChild('dateInput') dateInput: ElementRef;
get editable(): boolean {
return this.systemSettings &&
@@ -85,7 +108,7 @@ export class SystemSettingsComponent implements OnChanges, OnInit {
let changes = {};
for (let prop in allChanges) {
if (prop === 'token_expiration' || prop === 'read_only' || prop === 'project_creation_restriction'
- || prop === 'robot_token_duration') {
+ || prop === 'robot_token_duration' || prop === 'notification_enable') {
changes[prop] = allChanges[prop];
}
}
@@ -96,6 +119,10 @@ export class SystemSettingsComponent implements OnChanges, OnInit {
this.systemSettings.read_only.value = $event;
}
+ setWebhookNotificationEnabledValue($event: any) {
+ this.systemSettings.notification_enable.value = $event;
+ }
+
disabled(prop: any): boolean {
return !(prop && prop.editable);
}
@@ -112,11 +139,18 @@ export class SystemSettingsComponent implements OnChanges, OnInit {
*/
public save(): void {
let changes = this.getChanges();
- if (!isEmpty(changes)) {
+ if (!isEmpty(changes) || !compareValue(this.systemWhitelistOrigin, this.systemWhitelist)) {
this.onGoing = true;
- this.configService.saveConfigurations(changes)
- .subscribe(response => {
- this.onGoing = false;
+ let observables = [];
+ if (!isEmpty(changes)) {
+ observables.push(this.configService.saveConfigurations(changes));
+ }
+ if (!compareValue(this.systemWhitelistOrigin, this.systemWhitelist)) {
+ observables.push(this.systemInfoService.updateSystemWhitelist(this.systemWhitelist));
+ }
+ forkJoin(observables).subscribe(result => {
+ this.onGoing = false;
+ if (!isEmpty(changes)) {
// API should return the updated configurations here
// Unfortunately API does not do that
// To refresh the view, we can clone the original data copy
@@ -128,12 +162,15 @@ export class SystemSettingsComponent implements OnChanges, OnInit {
}
this.reloadSystemConfig.emit();
- this.errorHandler.info('CONFIG.SAVE_SUCCESS');
}
- , error => {
- this.onGoing = false;
- this.errorHandler.error(error);
- });
+ if (!compareValue(this.systemWhitelistOrigin, this.systemWhitelist)) {
+ this.systemWhitelistOrigin = clone(this.systemWhitelist);
+ }
+ this.errorHandler.info('CONFIG.SAVE_SUCCESS');
+ }, error => {
+ this.onGoing = false;
+ this.errorHandler.error(error);
+ });
} else {
 // Unexpected state, should not reach here
 console.error('Save aborted because nothing changed');
@@ -175,6 +212,9 @@ export class SystemSettingsComponent implements OnChanges, OnInit {
let changes = this.getChanges();
this.reset(changes);
this.initRobotToken();
+ if (!compareValue(this.systemWhitelistOrigin, this.systemWhitelist)) {
+ this.systemWhitelist = clone(this.systemWhitelistOrigin);
+ }
}
}
@@ -191,7 +231,7 @@ export class SystemSettingsComponent implements OnChanges, OnInit {
*/
public cancel(): void {
let changes = this.getChanges();
- if (!isEmpty(changes)) {
+ if (!isEmpty(changes) || !compareValue(this.systemWhitelistOrigin, this.systemWhitelist)) {
let msg = new ConfirmationMessage(
'CONFIG.CONFIRM_TITLE',
'CONFIG.CONFIRM_SUMMARY',
@@ -207,23 +247,59 @@ export class SystemSettingsComponent implements OnChanges, OnInit {
}
constructor(@Inject(SERVICE_CONFIG) private configInfo: IServiceConfig,
- private configService: ConfigurationService,
- private errorHandler: ErrorHandler) {
+ private configService: ConfigurationService,
+ private errorHandler: ErrorHandler,
+ private systemInfoService: SystemInfoService) {
if (this.configInfo && this.configInfo.systemInfoEndpoint) {
this.downloadLink = this.configInfo.systemInfoEndpoint + "/getcert";
}
}
+
ngOnInit() {
this.initRobotToken();
+ this.getSystemWhitelist();
+ this.getSystemInfo();
}
- private initRobotToken (): void {
+ getSystemInfo() {
+ this.systemInfoService.getSystemInfo()
+ .subscribe(systemInfo => this.systemInfo = systemInfo
+ , error => this.errorHandler.error(error));
+ }
+
+ get withClair(): boolean {
+ return this.systemInfo ? this.systemInfo.with_clair : false;
+ }
+
+ getSystemWhitelist() {
+ this.onGoing = true;
+ this.systemInfoService.getSystemWhitelist()
+ .subscribe((systemWhitelist) => {
+ this.onGoing = false;
+ if (!systemWhitelist.items) {
+ systemWhitelist.items = [];
+ }
+ if (!systemWhitelist.expires_at) {
+ systemWhitelist.expires_at = null;
+ }
+ this.systemWhitelist = systemWhitelist;
+ this.systemWhitelistOrigin = clone(systemWhitelist);
+ }, error => {
+ this.onGoing = false;
+ console.error('An error occurred while fetching the system CVE whitelist');
+ // this.errorHandler.error(error);
+ }
+ );
+ }
+
+ private initRobotToken(): void {
if (this.config &&
- this.config.robot_token_duration ) {
+ this.config.robot_token_duration) {
let robotExpiration = this.config.robot_token_duration.value;
this.robotTokenExpiration = Math.floor(robotExpiration / ONE_DAY_MINUTES) + '';
}
}
+
changeToken(v: string) {
if (!v || v === "") {
return;
@@ -235,5 +311,83 @@ export class SystemSettingsComponent implements OnChanges, OnInit {
this.config.robot_token_duration.value = +v * ONE_DAY_MINUTES;
}
+ deleteItem(index: number) {
+ this.systemWhitelist.items.splice(index, 1);
+ }
+ addToSystemWhitelist() {
+ // de-duplicate the entered IDs against the existing whitelist before adding them
+ let map = {};
+ this.systemWhitelist.items.forEach(item => {
+ map[item.cve_id] = true;
+ });
+ this.cveIds.split(/[\n,]+/).forEach(id => {
+ let cveObj: any = {};
+ cveObj.cve_id = id.trim();
+ if (!map[cveObj.cve_id]) {
+ map[cveObj.cve_id] = true;
+ this.systemWhitelist.items.push(cveObj);
+ }
+ });
+ // clear modal and close modal
+ this.cveIds = null;
+ this.showAddModal = false;
+ }
+
+ get hasWhitelistChanged(): boolean {
+ return !compareValue(this.systemWhitelistOrigin, this.systemWhitelist);
+ }
+
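+ // validation gate for the add-CVE modal: stays true (disabled) until every entered ID matches CVE-<digits>-<digits>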
+ isDisabled(): boolean {
+ if (this.cveIds) {
+ let arr = this.cveIds.split(/[\n,]+/);
+ let flag = false;
+ for (let i = 0; i < arr.length; i++) {
+ let id = arr[i].trim();
+ if (!/^CVE-[\d]+-[\d]+$/.test(id)) {
+ flag = true;
+ break;
+ }
+ }
+ return flag;
+ }
+ return true;
+ }
+
+ get expiresDate() {
+ if (this.systemWhitelist && this.systemWhitelist.expires_at) {
+ return new Date(this.systemWhitelist.expires_at * ONE_THOUSAND);
+ }
+ return null;
+ }
+
+ set expiresDate(date) {
+ if (this.systemWhitelist && date) {
+ this.systemWhitelist.expires_at = Math.floor(date.getTime() / ONE_THOUSAND);
+ }
+ }
+
+ get neverExpires(): boolean {
+ return !(this.systemWhitelist && this.systemWhitelist.expires_at);
+ }
+
+ set neverExpires(flag) {
+ if (flag) {
+ this.systemWhitelist.expires_at = null;
+ this.systemInfoService.resetDateInput(this.dateInput);
+ } else {
+ this.systemWhitelist.expires_at = Math.floor(new Date().getTime() / ONE_THOUSAND);
+ }
+ }
+
+ get hasExpired(): boolean {
+ if (this.systemWhitelistOrigin && this.systemWhitelistOrigin.expires_at) {
+ return new Date().getTime() > this.systemWhitelistOrigin.expires_at * ONE_THOUSAND;
+ }
+ return false;
+ }
+
+ goToDetail(cveId) {
+ window.open(CVE_DETAIL_PRE_URL + cveId, TARGET_BLANK);
+ }
}
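The save path above now batches the configuration write and the whitelist write through forkJoin, so one subscription handles both outcomes. The same pattern in isolation — service and comparison names as in this patch, `onSaved`/`onError` are placeholder callbacks:

    import { forkJoin } from 'rxjs';

    const writes = [];
    if (!isEmpty(changes)) {
      writes.push(configService.saveConfigurations(changes));
    }
    if (!compareValue(whitelistOrigin, whitelist)) {
      writes.push(systemInfoService.updateSystemWhitelist(whitelist));
    }
    // forkJoin emits once, after every write completes; a single error callback
    // covers a failure in either call.
    forkJoin(writes).subscribe(() => onSaved(), err => onError(err));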
diff --git a/src/portal/lib/src/config/vulnerability/vulnerability-config.component.html b/src/portal/lib/src/config/vulnerability/vulnerability-config.component.html
index 7dc12abf4..c0023b470 100644
--- a/src/portal/lib/src/config/vulnerability/vulnerability-config.component.html
+++ b/src/portal/lib/src/config/vulnerability/vulnerability-config.component.html
@@ -11,17 +11,17 @@
- {{ updatedTimestamp | date:'MM/dd/y HH:mm:ss' }} AM
+ {{ updatedTimestamp | date:'short' }}
{{nt.namespace}}
- {{ convertToLocalTime(nt.last_update) | date:'MM/dd/y HH:mm:ss'}} AM
+ {{ convertToLocalTime(nt.last_update) | date:'short'}}
- {{ updatedTimestamp | date:'MM/dd/y HH:mm:ss' }} AM
+ {{ updatedTimestamp | date:'short' }}
\ No newline at end of file
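Switching the pipe to 'short' drops the hard-coded US layout and the literal "AM" suffix, letting Angular render the active locale's short date-time. The pipe delegates to formatDate from @angular/common; a quick sketch:

    import { formatDate } from '@angular/common';

    // 'short' adapts to the locale instead of forcing MM/dd/y HH:mm:ss plus a fixed meridiem
    formatDate(Date.now(), 'short', 'en-US');  // e.g. '6/15/19, 9:23 AM'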
diff --git a/src/portal/src/app/project/project.component.ts b/src/portal/src/app/project/project.component.ts
index 10ff031e1..948f3e602 100644
--- a/src/portal/src/app/project/project.component.ts
+++ b/src/portal/src/app/project/project.component.ts
@@ -15,6 +15,9 @@ import { Component, OnInit, ViewChild } from '@angular/core';
import { CreateProjectComponent } from './create-project/create-project.component';
import { ListProjectComponent } from './list-project/list-project.component';
import { ProjectTypes } from '../shared/shared.const';
+import { ConfigurationService } from '../config/config.service';
+import { Configuration, QuotaHardInterface } from '@harbor/ui';
+import { SessionService } from "../shared/session.service";
@Component({
selector: 'project',
@@ -23,7 +26,7 @@ import { ProjectTypes } from '../shared/shared.const';
})
export class ProjectComponent implements OnInit {
projectTypes = ProjectTypes;
-
+ quotaObj: QuotaHardInterface;
@ViewChild(CreateProjectComponent)
creationProject: CreateProjectComponent;
@@ -45,16 +48,33 @@ export class ProjectComponent implements OnInit {
}
}
- constructor() {
- }
+ constructor(
+ public configService: ConfigurationService,
+ private session: SessionService
+ ) { }
ngOnInit(): void {
if (window.sessionStorage && window.sessionStorage['projectTypeValue'] && window.sessionStorage['fromDetails']) {
this.currentFilteredType = +window.sessionStorage['projectTypeValue'];
window.sessionStorage.removeItem('fromDetails');
}
+ if (this.isSystemAdmin) {
+ this.getConfiguration();
+ }
+ }
+ getConfiguration() {
+ this.configService.getConfiguration()
+ .subscribe((configurations: Configuration) => {
+ this.quotaObj = {
+ count_per_project: configurations.count_per_project ? configurations.count_per_project.value : -1,
+ storage_per_project: configurations.storage_per_project ? configurations.storage_per_project.value : -1
+ };
+ });
+ }
+ public get isSystemAdmin(): boolean {
+ let account = this.session.getCurrentUser();
+ return account != null && account.has_admin_role;
}
-
openModal(): void {
this.creationProject.newProject();
}
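getConfiguration() above subscribes without an error branch, so a failed request dies silently. A hedged variant with the error callback wired to the console (whether log-and-continue is acceptable here is an assumption):

    this.configService.getConfiguration()
      .subscribe((configurations: Configuration) => {
        // map quota fields as above
      }, error => console.error('failed to load quota configuration', error));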
diff --git a/src/portal/src/app/project/project.module.ts b/src/portal/src/app/project/project.module.ts
index fb180988f..d29d255f0 100644
--- a/src/portal/src/app/project/project.module.ts
+++ b/src/portal/src/app/project/project.module.ts
@@ -17,6 +17,7 @@ import { RouterModule } from '@angular/router';
import { SharedModule } from '../shared/shared.module';
import { RepositoryModule } from '../repository/repository.module';
import { ReplicationModule } from '../replication/replication.module';
+import { SummaryModule } from './summary/summary.module';
import { LogModule } from '../log/log.module';
import { ProjectComponent } from './project.component';
@@ -38,6 +39,14 @@ import { ProjectLabelComponent } from "../project/project-label/project-label.co
import { HelmChartModule } from './helm-chart/helm-chart.module';
import { RobotAccountComponent } from './robot-account/robot-account.component';
import { AddRobotComponent } from './robot-account/add-robot/add-robot.component';
+import { AddHttpAuthGroupComponent } from './member/add-http-auth-group/add-http-auth-group.component';
+import { TagRetentionComponent } from "./tag-retention/tag-retention.component";
+import { AddRuleComponent } from "./tag-retention/add-rule/add-rule.component";
+import { TagRetentionService } from "./tag-retention/tag-retention.service";
+import { WebhookService } from './webhook/webhook.service';
+import { WebhookComponent } from './webhook/webhook.component';
+import { AddWebhookComponent } from './webhook/add-webhook/add-webhook.component';
+import { AddWebhookFormComponent } from './webhook/add-webhook-form/add-webhook-form.component';
@NgModule({
imports: [
@@ -46,7 +55,8 @@ import { AddRobotComponent } from './robot-account/add-robot/add-robot.component
ReplicationModule,
LogModule,
RouterModule,
- HelmChartModule
+ HelmChartModule,
+ SummaryModule
],
declarations: [
ProjectComponent,
@@ -59,10 +69,16 @@ import { AddRobotComponent } from './robot-account/add-robot/add-robot.component
ProjectLabelComponent,
AddGroupComponent,
RobotAccountComponent,
- AddRobotComponent
+ AddRobotComponent,
+ AddHttpAuthGroupComponent,
+ TagRetentionComponent,
+ AddRuleComponent,
+ WebhookComponent,
+ AddWebhookComponent,
+ AddWebhookFormComponent,
],
exports: [ProjectComponent, ListProjectComponent],
- providers: [ProjectRoutingResolver, MemberService, RobotService]
+ providers: [ProjectRoutingResolver, MemberService, RobotService, TagRetentionService, WebhookService]
})
export class ProjectModule {
diff --git a/src/portal/src/app/project/project.ts b/src/portal/src/app/project/project.ts
index 7a5df0f96..f6365f244 100644
--- a/src/portal/src/app/project/project.ts
+++ b/src/portal/src/app/project/project.ts
@@ -51,6 +51,7 @@ export class Project {
prevent_vul: string | boolean;
severity: string;
auto_scan: string | boolean;
+ retention_id: number;
};
constructor () {
this.metadata = {};
diff --git a/src/portal/src/app/project/robot-account/add-robot/add-robot.component.html b/src/portal/src/app/project/robot-account/add-robot/add-robot.component.html
index 661a4bd5e..4019db7f4 100644
--- a/src/portal/src/app/project/robot-account/add-robot/add-robot.component.html
+++ b/src/portal/src/app/project/robot-account/add-robot/add-robot.component.html
@@ -1,130 +1,134 @@
- {{'ROBOT_ACCOUNT.CREAT_ROBOT_ACCOUNT' | translate}}
-
-
-
+
+
-
-
-
- {{ createSuccess | translate}}
-
-
-
- {{'ROBOT_ACCOUNT.ALERT_TEXT' | translate}}
+ [clrModalStaticBackdrop]="staticBackdrop" [clrModalClosable]="closable">
+
+
+
+ {{ createSuccess | translate}}
+
+
+
+ {{'ROBOT_ACCOUNT.ALERT_TEXT' | translate}}
+
+
+
-
-
-
-
-
-
- {{'ROBOT_ACCOUNT.NAME'
- | translate}}
- {{robotAccount}}
-
-
- {{'ROBOT_ACCOUNT.TOKEN' |
- translate}}
-
-
-
-
+
+
+
+ {{'ROBOT_ACCOUNT.NAME'
+ | translate}}
+ {{robotAccount}}
+
+
+ {{'ROBOT_ACCOUNT.TOKEN' |
+ translate}}
+
+
+ {{'ROBOT_ACCOUNT.EXPORT_TO_FILE' | translate}}
+
+
\ No newline at end of file
diff --git a/src/portal/src/app/project/robot-account/add-robot/add-robot.component.scss b/src/portal/src/app/project/robot-account/add-robot/add-robot.component.scss
index 5073f3322..df3f83daa 100644
--- a/src/portal/src/app/project/robot-account/add-robot/add-robot.component.scss
+++ b/src/portal/src/app/project/robot-account/add-robot/add-robot.component.scss
@@ -3,7 +3,7 @@
}
.input-width {
- width: 200px;
+ width: 300px;
}
.copy-token {
@@ -35,3 +35,15 @@
.no-margin {
margin: 0;
}
+
+.permission{
+ padding-top: 5px;
+ color: #000000;
+}
+
+.padding-left-120{
+ padding-left: 126px;
+}
+.w-90{
+ width: 90%;
+}
\ No newline at end of file
diff --git a/src/portal/src/app/project/robot-account/add-robot/add-robot.component.ts b/src/portal/src/app/project/robot-account/add-robot/add-robot.component.ts
index 42c83ce32..eb007d9ea 100644
--- a/src/portal/src/app/project/robot-account/add-robot/add-robot.component.ts
+++ b/src/portal/src/app/project/robot-account/add-robot/add-robot.component.ts
@@ -17,6 +17,7 @@ import { TranslateService } from "@ngx-translate/core";
import { ErrorHandler } from "@harbor/ui";
import { MessageHandlerService } from "../../../shared/message-handler/message-handler.service";
import { InlineAlertComponent } from "../../../shared/inline-alert/inline-alert.component";
+import { DomSanitizer, SafeUrl } from '@angular/platform-browser';
@Component({
selector: "add-robot",
@@ -28,6 +29,8 @@ export class AddRobotComponent implements OnInit, OnDestroy {
copyToken: boolean;
robotToken: string;
robotAccount: string;
+ downLoadFileName: string = '';
+ downLoadHref: SafeUrl = '';
isSubmitOnGoing = false;
closable: boolean = false;
staticBackdrop: boolean = true;
@@ -38,17 +41,20 @@ export class AddRobotComponent implements OnInit, OnDestroy {
 robotNameChecker: Subject<string> = new Subject<string>();
nameTooltipText = "ROBOT_ACCOUNT.ROBOT_NAME";
robotForm: NgForm;
+ imagePermissionPush: boolean = true;
+ imagePermissionPull: boolean = true;
@Input() projectId: number;
@Input() projectName: string;
@Output() create = new EventEmitter();
@ViewChild("robotForm") currentForm: NgForm;
@ViewChild("copyAlert") copyAlert: InlineAlertComponent;
constructor(
- private robotService: RobotService,
- private translate: TranslateService,
- private errorHandler: ErrorHandler,
- private cdr: ChangeDetectorRef,
- private messageHandlerService: MessageHandlerService
+ private robotService: RobotService,
+ private translate: TranslateService,
+ private errorHandler: ErrorHandler,
+ private cdr: ChangeDetectorRef,
+ private messageHandlerService: MessageHandlerService,
+ private sanitizer: DomSanitizer
) {}
ngOnInit(): void {
@@ -59,31 +65,31 @@ export class AddRobotComponent implements OnInit, OnDestroy {
if (this.isRobotNameValid) {
this.checkOnGoing = true;
this.robotService
- .listRobotAccount(this.projectId)
- .pipe(
- finalize(() => {
- this.checkOnGoing = false;
- let hnd = setInterval(() => this.cdr.markForCheck(), 100);
- setTimeout(() => clearInterval(hnd), 2000);
- })
- )
- .subscribe(
- response => {
- if (response && response.length) {
- if (
- response.find(target => {
- return target.name === "robot$" + cont.value;
- })
- ) {
- this.isRobotNameValid = false;
- this.nameTooltipText = "ROBOT_ACCOUNT.ACCOUNT_EXISTING";
+ .listRobotAccount(this.projectId)
+ .pipe(
+ finalize(() => {
+ this.checkOnGoing = false;
+ let hnd = setInterval(() => this.cdr.markForCheck(), 100);
+ setTimeout(() => clearInterval(hnd), 2000);
+ })
+ )
+ .subscribe(
+ response => {
+ if (response && response.length) {
+ if (
+ response.find(target => {
+ return target.name === "robot$" + cont.value;
+ })
+ ) {
+ this.isRobotNameValid = false;
+ this.nameTooltipText = "ROBOT_ACCOUNT.ACCOUNT_EXISTING";
+ }
+ }
+ },
+ error => {
+ this.errorHandler.error(error);
}
- }
- },
- error => {
- this.errorHandler.error(error);
- }
- );
+ );
} else {
this.nameTooltipText = "ROBOT_ACCOUNT.ROBOT_NAME";
}
@@ -98,6 +104,8 @@ export class AddRobotComponent implements OnInit, OnDestroy {
this.robot.name = "";
this.robot.description = "";
this.addRobotOpened = true;
+ this.imagePermissionPush = true;
+ this.imagePermissionPull = true;
this.isRobotNameValid = true;
this.robot = new Robot();
this.nameTooltipText = "ROBOT_ACCOUNT.ROBOT_NAME";
@@ -116,49 +124,61 @@ export class AddRobotComponent implements OnInit, OnDestroy {
if (this.isSubmitOnGoing) {
return;
}
+ // derive robot.access.isPullImage and robot.access.isPushOrPullImage from the checkbox state on submit
+ if (this.imagePermissionPush && this.imagePermissionPull) {
+ this.robot.access.isPullImage = false;
+ this.robot.access.isPushOrPullImage = true;
+ } else {
+ this.robot.access.isPullImage = true;
+ this.robot.access.isPushOrPullImage = false;
+ }
this.isSubmitOnGoing = true;
this.robotService
- .addRobotAccount(
- this.projectId,
- this.robot,
- this.projectName
- )
- .subscribe(
- response => {
- this.isSubmitOnGoing = false;
- this.robotToken = response.token;
- this.robotAccount = response.name;
- this.copyToken = true;
- this.create.emit(true);
- this.translate
- .get("ROBOT_ACCOUNT.CREATED_SUCCESS", { param: this.robotAccount })
- .subscribe((res: string) => {
- this.createSuccess = res;
- });
- this.addRobotOpened = false;
- },
- error => {
- this.isSubmitOnGoing = false;
- this.copyAlert.showInlineError(error);
- }
- );
+ .addRobotAccount(
+ this.projectId,
+ this.robot,
+ this.projectName
+ )
+ .subscribe(
+ response => {
+ this.isSubmitOnGoing = false;
+ this.robotToken = response.token;
+ this.robotAccount = response.name;
+ this.copyToken = true;
+ this.create.emit(true);
+ this.translate
+ .get("ROBOT_ACCOUNT.CREATED_SUCCESS", { param: this.robotAccount })
+ .subscribe((res: string) => {
+ this.createSuccess = res;
+ });
+ this.addRobotOpened = false;
+ // offer the new credentials as a downloadable JSON file
+ const downLoadUrl = `data:text/json;charset=utf-8,${encodeURIComponent(JSON.stringify(response))}`;
+ this.downLoadHref = this.sanitizer.bypassSecurityTrustUrl(downLoadUrl);
+ this.downLoadFileName = `${response.name}.json`;
+ },
+ error => {
+ this.isSubmitOnGoing = false;
+ this.copyAlert.showInlineError(error);
+ }
+ );
}
isValid(): boolean {
return (
- this.currentForm &&
- this.currentForm.valid &&
- !this.isSubmitOnGoing &&
- this.isRobotNameValid &&
- !this.checkOnGoing
+ this.currentForm &&
+ this.currentForm.valid &&
+ !this.isSubmitOnGoing &&
+ this.isRobotNameValid &&
+ !this.checkOnGoing
);
}
get shouldDisable(): boolean {
if (this.robot && this.robot.access) {
return (
- !this.isValid() ||
- (!this.robot.access.isPushOrPullImage && !this.robot.access.isPullImage
- && !this.robot.access.isPullChart && !this.robot.access.isPushChart)
+ !this.isValid() ||
+ (!this.robot.access.isPushOrPullImage && !this.robot.access.isPullImage
+ && !this.robot.access.isPullChart && !this.robot.access.isPushChart)
);
}
}
@@ -180,9 +200,13 @@ export class AddRobotComponent implements OnInit, OnDestroy {
onCpSuccess($event: any): void {
this.copyToken = false;
this.translate
- .get("ROBOT_ACCOUNT.COPY_SUCCESS", { param: this.robotAccount })
- .subscribe((res: string) => {
- this.messageHandlerService.showSuccess(res);
- });
+ .get("ROBOT_ACCOUNT.COPY_SUCCESS", { param: this.robotAccount })
+ .subscribe((res: string) => {
+ this.messageHandlerService.showSuccess(res);
+ });
+ }
+
+ closeModal() {
+ this.copyToken = false;
}
}
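The token export builds a data: URL and passes it through DomSanitizer so Angular will accept it in an [href] binding. Distilled into a standalone helper — `makeJsonDownload` is illustrative, not part of this patch:

    import { DomSanitizer, SafeUrl } from '@angular/platform-browser';

    function makeJsonDownload(sanitizer: DomSanitizer, payload: object, name: string): { href: SafeUrl; download: string } {
      const url = `data:text/json;charset=utf-8,${encodeURIComponent(JSON.stringify(payload))}`;
      // bypassSecurityTrustUrl tells Angular this data: URL is intentional
      return { href: sanitizer.bypassSecurityTrustUrl(url), download: `${name}.json` };
    }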
diff --git a/src/portal/src/app/project/robot-account/robot.ts b/src/portal/src/app/project/robot-account/robot.ts
index 9e90ac83c..5e859430b 100644
--- a/src/portal/src/app/project/robot-account/robot.ts
+++ b/src/portal/src/app/project/robot-account/robot.ts
@@ -16,7 +16,7 @@ export class Robot {
constructor () {
this.access = {};
// this.access[0].action = true;
- this.access.isPullImage = true;
+ this.access.isPullImage = false;
this.access.isPushOrPullImage = true;
this.access.isPushChart = false;
this.access.isPullChart = false;
diff --git a/src/portal/src/app/project/summary/summary.component.html b/src/portal/src/app/project/summary/summary.component.html
new file mode 100644
index 000000000..70d640c1a
--- /dev/null
+++ b/src/portal/src/app/project/summary/summary.component.html
@@ -0,0 +1,74 @@
+
+
+
+ {{'SUMMARY.PROJECT_REPOSITORY' | translate}}
+
+ - {{summaryInformation?.repo_count}}
+
+
+
+ {{'SUMMARY.PROJECT_HELM_CHART' | translate}}
+
+ - {{summaryInformation?.chart_count}}
+
+
+
+ {{'SUMMARY.PROJECT_MEMBER' | translate}}
+
+ - {{ summaryInformation?.project_admin_count }} {{'SUMMARY.ADMIN' | translate}}
+ - {{ summaryInformation?.master_count }} {{'SUMMARY.MASTER' | translate}}
+ - {{ summaryInformation?.developer_count }} {{'SUMMARY.DEVELOPER' | translate}}
+ - {{ summaryInformation?.guest_count }} {{'SUMMARY.GUEST' | translate}}
+
+
+
+
+
+ {{'SUMMARY.PROJECT_QUOTAS' | translate}}
+
+
+ {{'SUMMARY.ARTIFACT_COUNT' | translate}}
+ {{ summaryInformation?.quota?.used?.count }} {{ 'QUOTA.OF' | translate }}
+ {{ summaryInformation?.quota?.hard?.count ===-1?('QUOTA.UNLIMITED' | translate): summaryInformation?.quota?.hard?.count }}
+
+
+
+
+
+ quotaDangerCoefficient:false"
+ [class.warning]="summaryInformation?.quota?.hard?.count!==-1?summaryInformation?.quota?.used?.count/summaryInformation?.quota?.hard?.count<=quotaDangerCoefficient&&summaryInformation?.quota?.used?.count/summaryInformation?.quota?.hard?.count>=quotaWarningCoefficient:false">
+
+
+
+
+
+ {{'SUMMARY.STORAGE_CONSUMPTION' | translate}}
+
+ {{ summaryInformation?.quota?.hard?.storage !== -1 ?(getIntegerAndUnit(summaryInformation?.quota?.hard?.storage, summaryInformation?.quota?.used?.storage).partNumberUsed
+ + getIntegerAndUnit(summaryInformation?.quota?.hard?.storage, summaryInformation?.quota?.used?.storage).partCharacterUsed) : getSuitableUnit(summaryInformation?.quota?.used?.storage)}}
+
+
+ {{ 'QUOTA.OF' | translate }}
+ {{ summaryInformation?.quota?.hard?.storage ===-1? ('QUOTA.UNLIMITED' | translate) : getIntegerAndUnit(summaryInformation?.quota?.hard?.storage, summaryInformation?.quota?.used?.storage).partNumberHard }}
+ {{ summaryInformation?.quota?.hard?.storage ===-1? '': getIntegerAndUnit(summaryInformation?.quota?.hard?.storage, summaryInformation?.quota?.used?.storage).partCharacterHard }}
+
+
+
+
+
+ quotaDangerCoefficient:false"
+ [class.warning]="summaryInformation?.quota?.hard?.storage!==-1?summaryInformation?.quota?.used?.storage/summaryInformation?.quota?.hard?.storage<=quotaDangerCoefficient&&summaryInformation?.quota?.used?.storage/summaryInformation?.quota?.hard?.storage>=quotaWarningCoefficient:false">
+
+
+
+
+
+
+
+
+
\ No newline at end of file
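The progress-bar class bindings above reduce to a used/hard ratio checked against two coefficients, with a hard limit of -1 meaning unlimited. The decision, extracted (coefficient values are whatever QUOTA_DANGER_COEFFICIENT and QUOTA_WARNING_COEFFICIENT resolve to in @harbor/ui):

    // 'danger' above the danger threshold, 'warning' between the two, otherwise no highlight
    function quotaLevel(used: number, hard: number, danger: number, warning: number): 'danger' | 'warning' | 'ok' {
      if (hard === -1) { return 'ok'; }           // unlimited quota is never highlighted
      const ratio = used / hard;
      if (ratio > danger) { return 'danger'; }
      if (ratio >= warning) { return 'warning'; }
      return 'ok';
    }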
diff --git a/src/portal/src/app/project/summary/summary.component.scss b/src/portal/src/app/project/summary/summary.component.scss
new file mode 100644
index 000000000..7f53fb84a
--- /dev/null
+++ b/src/portal/src/app/project/summary/summary.component.scss
@@ -0,0 +1,54 @@
+.summary {
+ color: #000;
+ padding-right: 0.3rem;
+ font-size: 13px;
+ .summary-left {
+ .project-detail {
+ width: 17rem;
+ min-height: 3rem;
+
+ ul {
+ width: 8rem;
+ }
+ }
+ }
+
+ h5 {
+ font-size: 13px;
+ font-weight: 700;
+ }
+
+ .summary-right {
+ .quotas-progress {
+ min-width: 10rem;
+ }
+ }
+}
+
+.display-flex {
+ display: flex;
+ justify-content: space-between;
+}
+
+.progress,
+.progress-static {
+ progress {
+ max-height: 0.48rem;
+ }
+}
+::ng-deep {
+ .progress {
+ &.warning>progress {
+ color: orange;
+
+ &::-webkit-progress-value {
+ background-color: orange;
+ }
+
+ &::-moz-progress-bar {
+ background-color: orange;
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/portal/src/app/project/summary/summary.component.spec.ts b/src/portal/src/app/project/summary/summary.component.spec.ts
new file mode 100644
index 000000000..4d0773533
--- /dev/null
+++ b/src/portal/src/app/project/summary/summary.component.spec.ts
@@ -0,0 +1,25 @@
+import { async, ComponentFixture, TestBed } from '@angular/core/testing';
+
+import { SummaryComponent } from './summary.component';
+
+describe('SummaryComponent', () => {
+ let component: SummaryComponent;
+ let fixture: ComponentFixture<SummaryComponent>;
+
+ beforeEach(async(() => {
+ TestBed.configureTestingModule({
+ declarations: [ SummaryComponent ]
+ })
+ .compileComponents();
+ }));
+
+ beforeEach(() => {
+ fixture = TestBed.createComponent(SummaryComponent);
+ component = fixture.componentInstance;
+ fixture.detectChanges();
+ });
+
+ it('should create', () => {
+ expect(component).toBeTruthy();
+ });
+});
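As generated, this spec gives the TestBed no providers, but SummaryComponent injects ProjectService, ErrorHandler, AppConfigService and ActivatedRoute, so stubs along these lines would be needed for the 'should create' test to pass (stub shapes are assumptions, not part of this patch):

    TestBed.configureTestingModule({
      declarations: [SummaryComponent],
      providers: [
        { provide: ProjectService, useValue: { getProjectSummary: () => of({}) } },
        { provide: ErrorHandler, useValue: { error: () => {} } },
        { provide: AppConfigService, useValue: { getConfig: () => ({ with_chartmuseum: false }) } },
        { provide: ActivatedRoute, useValue: { snapshot: { parent: { params: { id: 1 } } } } }
      ]
    }).compileComponents();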
diff --git a/src/portal/src/app/project/summary/summary.component.ts b/src/portal/src/app/project/summary/summary.component.ts
new file mode 100644
index 000000000..457d7fbb2
--- /dev/null
+++ b/src/portal/src/app/project/summary/summary.component.ts
@@ -0,0 +1,43 @@
+import { Component, OnInit } from '@angular/core';
+import { ProjectService, clone, QuotaUnits, getSuitableUnit, ErrorHandler, GetIntegerAndUnit
+ , QUOTA_DANGER_COEFFICIENT, QUOTA_WARNING_COEFFICIENT } from '@harbor/ui';
+import { ActivatedRoute } from '@angular/router';
+
+import { AppConfigService } from "../../app-config.service";
+@Component({
+ selector: 'summary',
+ templateUrl: './summary.component.html',
+ styleUrls: ['./summary.component.scss']
+})
+export class SummaryComponent implements OnInit {
+ projectId: number;
+ summaryInformation: any;
+ quotaDangerCoefficient: number = QUOTA_DANGER_COEFFICIENT;
+ quotaWarningCoefficient: number = QUOTA_WARNING_COEFFICIENT;
+ constructor(
+ private projectService: ProjectService,
+ private errorHandler: ErrorHandler,
+ private appConfigService: AppConfigService,
+ private route: ActivatedRoute
+ ) { }
+
+ ngOnInit() {
+ this.projectId = this.route.snapshot.parent.params['id'];
+ this.projectService.getProjectSummary(this.projectId).subscribe(res => {
+ this.summaryInformation = res;
+ }, error => {
+ this.errorHandler.error(error);
+ });
+ }
+ getSuitableUnit(value) {
+ const QuotaUnitsCopy = clone(QuotaUnits);
+ return getSuitableUnit(value, QuotaUnitsCopy);
+ }
+ getIntegerAndUnit(hardValue, usedValue) {
+ return GetIntegerAndUnit(hardValue, clone(QuotaUnits), usedValue, clone(QuotaUnits));
+ }
+ public get withHelmChart(): boolean {
+ return this.appConfigService.getConfig().with_chartmuseum;
+ }
+
+}
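GetIntegerAndUnit converts raw byte counts into number/unit pairs for both the limit and the consumption; usage inferred from the template bindings above (field names taken from the template, exact rounding behavior not verified here):

    const parts = GetIntegerAndUnit(hardBytes, clone(QuotaUnits), usedBytes, clone(QuotaUnits));
    // parts.partNumberHard + parts.partCharacterHard   -> e.g. '10' + 'GB' for the limit
    // parts.partNumberUsed + parts.partCharacterUsed   -> e.g. '512' + 'MB' for consumption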
diff --git a/src/portal/src/app/project/summary/summary.module.ts b/src/portal/src/app/project/summary/summary.module.ts
new file mode 100644
index 000000000..96ef04c8b
--- /dev/null
+++ b/src/portal/src/app/project/summary/summary.module.ts
@@ -0,0 +1,13 @@
+import { NgModule } from '@angular/core';
+import { CommonModule } from '@angular/common';
+import { SummaryComponent } from './summary.component';
+import { TranslateModule } from '@ngx-translate/core';
+
+@NgModule({
+ declarations: [SummaryComponent],
+ imports: [
+ CommonModule,
+ TranslateModule
+ ]
+})
+export class SummaryModule { }
diff --git a/src/portal/src/app/project/tag-retention/add-rule/add-rule.component.html b/src/portal/src/app/project/tag-retention/add-rule/add-rule.component.html
new file mode 100644
index 000000000..397883cab
--- /dev/null
+++ b/src/portal/src/app/project/tag-retention/add-rule/add-rule.component.html
@@ -0,0 +1,118 @@
+
+ {{'TAG_RETENTION.ADD_TITLE' | translate}}
+ {{'TAG_RETENTION.EDIT_TITLE' | translate}}
+
+ {{'TAG_RETENTION.ADD_SUBTITLE' | translate}}
+
+
+
+ {{'TAG_RETENTION.IN_REPOSITORIES' | translate}}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {{'TAG_RETENTION.REP_SEPARATOR' | translate}}
+
+
+
+
+
+
+
+ {{'TAG_RETENTION.BY_WHAT' | translate}}
+
+
+
+
+
+
+
+
+
+
+
+ {{getI18nKey(unit)|translate}}
+
+
+
+
+
+
+
+
+
+
+ {{'TAG_RETENTION.TAGS' | translate}}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {{'TAG_RETENTION.TAG_SEPARATOR' | translate}}
+
+
+
+
+
+
+ {{'TAG_RETENTION.LABELS' | translate}}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {{'TAG_RETENTION.REP_LABELS' | translate}}
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/portal/src/app/project/tag-retention/add-rule/add-rule.component.scss b/src/portal/src/app/project/tag-retention/add-rule/add-rule.component.scss
new file mode 100644
index 000000000..fa794f0f6
--- /dev/null
+++ b/src/portal/src/app/project/tag-retention/add-rule/add-rule.component.scss
@@ -0,0 +1,13 @@
+.color-97 {
+ color: #979797;
+}
+
+.over-line {
+ height: 15px;
+ line-height: 15px;
+ font-size: 10px;
+}
+
+.height-72 {
+ height: 72px;
+}
\ No newline at end of file
diff --git a/src/portal/src/app/project/tag-retention/add-rule/add-rule.component.ts b/src/portal/src/app/project/tag-retention/add-rule/add-rule.component.ts
new file mode 100644
index 000000000..271c8c1d0
--- /dev/null
+++ b/src/portal/src/app/project/tag-retention/add-rule/add-rule.component.ts
@@ -0,0 +1,187 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+import {
+ Component,
+ OnInit,
+ OnDestroy,
+ Output,
+ EventEmitter,
+} from "@angular/core";
+import { Rule, RuleMetadate } from "../retention";
+import { compareValue } from "@harbor/ui";
+import { TagRetentionService } from "../tag-retention.service";
+
+@Component({
+ selector: "add-rule",
+ templateUrl: "./add-rule.component.html",
+ styleUrls: ["./add-rule.component.scss"]
+})
+export class AddRuleComponent implements OnInit, OnDestroy {
+ addRuleOpened: boolean = false;
+ @Output() clickAdd = new EventEmitter();
+ metadata: RuleMetadate = new RuleMetadate();
+ rule: Rule = new Rule();
+ isAdd: boolean = true;
+ editRuleOrigin: Rule;
+
+ constructor(private tagRetentionService: TagRetentionService) {
+
+ }
+
+ ngOnInit(): void {
+ }
+
+ ngOnDestroy(): void {
+ }
+
+ set template(template) {
+ this.rule.template = template;
+ }
+
+ get template() {
+ return this.rule.template;
+ }
+
+ get unit(): string {
+ let str = "";
+ this.metadata.templates.forEach(t => {
+ if (t.rule_template === this.rule.template) {
+ str = t.params[0].unit;
+ }
+ });
+ return str;
+ }
+
+ get num() {
+ return this.rule.params[this.template];
+ }
+
+ set num(num) {
+ if (num) {
+ num = num.trim();
+ }
+ if (parseInt(num, 10) > 0) {
+ num = parseInt(num, 10);
+ }
+ this.rule.params[this.template] = num;
+ }
+
+ get repoSelect() {
+ return this.rule.scope_selectors.repository[0].decoration;
+ }
+
+ set repoSelect(repoSelect) {
+ this.rule.scope_selectors.repository[0].decoration = repoSelect;
+ }
+
+ set repositories(repositories) {
+ if (repositories.indexOf(",") !== -1) {
+ this.rule.scope_selectors.repository[0].pattern = "{" + repositories + "}";
+ } else {
+ this.rule.scope_selectors.repository[0].pattern = repositories;
+ }
+ }
+
+ get repositories() {
+ return this.rule.scope_selectors.repository[0].pattern.replace(/[{}]/g, "");
+ }
+
+ get tagsSelect() {
+ return this.rule.tag_selectors[0].decoration;
+ }
+
+ set tagsSelect(tagsSelect) {
+ this.rule.tag_selectors[0].decoration = tagsSelect;
+ }
+
+ set tagsInput(tagsInput) {
+ if (tagsInput.indexOf(",") !== -1) {
+ this.rule.tag_selectors[0].pattern = "{" + tagsInput + "}";
+ } else {
+ this.rule.tag_selectors[0].pattern = tagsInput;
+ }
+ }
+
+ get tagsInput() {
+ return this.rule.tag_selectors[0].pattern.replace(/[{}]/g, "");
+ }
+
+ get labelsSelect() {
+ return this.rule.tag_selectors[1].decoration;
+ }
+
+ set labelsSelect(labelsSelect) {
+ this.rule.tag_selectors[1].decoration = labelsSelect;
+ }
+
+ set labelsInput(labelsInput) {
+ this.rule.tag_selectors[1].pattern = labelsInput;
+ }
+
+ get labelsInput() {
+ return this.rule.tag_selectors[1].pattern;
+ }
+
+ canNotAdd(): boolean {
+ if (!this.isAdd && compareValue(this.editRuleOrigin, this.rule)) {
+ return true;
+ }
+ if (!this.hasParam()) {
+ return !(this.rule.template
+ && this.rule.scope_selectors.repository[0].pattern
+ && this.rule.tag_selectors[0].pattern);
+ } else {
+ return !(this.rule.template
+ && this.rule.params[this.template]
+ && parseInt(this.rule.params[this.template], 10) >= 0
+ && this.rule.scope_selectors.repository[0].pattern
+ && this.rule.tag_selectors[0].pattern);
+ }
+ }
+
+ open() {
+ this.addRuleOpened = true;
+ }
+
+ close() {
+ this.addRuleOpened = false;
+ }
+
+ cancel() {
+ this.close();
+ }
+
+ add() {
+ this.close();
+ this.clickAdd.emit(this.rule);
+ }
+
+ getI18nKey(str: string) {
+ return this.tagRetentionService.getI18nKey(str);
+ }
+ hasParam(): boolean {
+ if (this.metadata && this.metadata.templates) {
+ let flag: boolean = false;
+ this.metadata.templates.forEach(t => {
+ if (t.rule_template === this.template) {
+ if (t.params && t.params.length > 0) {
+ flag = true;
+ }
+ }
+ });
+ return flag;
+ }
+ return false;
+ }
+}
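The repositories/tags accessors above wrap comma-separated input in braces so the doublestar matcher reads it as an alternation, and strip the braces again for display. The round trip, isolated:

    function toPattern(input: string): string {
      return input.indexOf(',') !== -1 ? `{${input}}` : input;   // 'nginx,redis' -> '{nginx,redis}'
    }
    function fromPattern(pattern: string): string {
      return pattern.replace(/[{}]/g, '');                       // '{nginx,redis}' -> 'nginx,redis'
    }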
diff --git a/src/portal/src/app/project/tag-retention/retention.ts b/src/portal/src/app/project/tag-retention/retention.ts
new file mode 100644
index 000000000..749c59f00
--- /dev/null
+++ b/src/portal/src/app/project/tag-retention/retention.ts
@@ -0,0 +1,136 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+export class Retention {
+ algorithm: string;
+ rules: Array<Rule>;
+ trigger: {
+ kind: string;
+ references: object;
+ settings: {
+ cron: string;
+ }
+ };
+ scope: {
+ level: string,
+ ref: number;
+ };
+ cap: number;
+
+ constructor() {
+ this.rules = [];
+ this.algorithm = "or";
+ this.trigger = {
+ kind: "Schedule",
+ references: {},
+ settings: {
+ cron: "",
+ }
+ };
+ }
+}
+
+export class Rule {
+ disabled: boolean;
+ id: number;
+ priority: number;
+ action: string;
+ template: string;
+ params: object;
+ tag_selectors: Array<Selector>;
+ scope_selectors: {
+ repository: Array<Selector>;
+ };
+
+ constructor() {
+ this.disabled = false;
+ this.action = "retain";
+ this.params = {};
+ this.scope_selectors = {
+ repository: [
+ {
+ kind: 'doublestar',
+ decoration: 'repoMatches',
+ pattern: '**'
+ }
+ ]
+ };
+ this.tag_selectors = [
+ {
+ kind: 'doublestar',
+ decoration: 'matches',
+ pattern: '**'
+ },
+ {
+ kind: 'label',
+ decoration: "withLabels",
+ pattern: null
+ }
+ ];
+ }
+}
+
+export class Selector {
+ kind: string;
+ decoration: string;
+ pattern: string;
+}
+
+export class Param {
+ type: string;
+ unit: string;
+ required: boolean;
+}
+
+export class Template {
+ rule_template: string;
+ display_text: string;
+ action: "retain";
+ params: Array<Param>;
+}
+
+export class SelectorRuleMetadate {
+ display_text: string;
+ kind: string;
+ decorations: Array<string>;
+}
+
+export class RuleMetadate {
+ templates: Array<Template>;
+ scope_selectors: Array<SelectorRuleMetadate>;
+ tag_selectors: Array<SelectorRuleMetadate>;
+
+ constructor() {
+ this.templates = [];
+ this.scope_selectors = [
+ {
+ display_text: null,
+ kind: null,
+ decorations: []
+ }
+ ];
+ this.tag_selectors = [
+ {
+ display_text: null,
+ kind: null,
+ decorations: []
+ },
+ {
+ display_text: null,
+ kind: null,
+ decorations: []
+ }
+ ];
+ }
+}
+
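Assembled, a project-scoped retention payload built from these classes looks roughly like this (the template key and project id are illustrative; real template names come from the retention metadata endpoint):

    const retention = new Retention();
    retention.scope = { level: 'project', ref: 42 };  // 42: hypothetical project id
    const rule = new Rule();                          // defaults: retain, '**' repo and tag patterns
    rule.template = 'latestPushedK';                  // assumed key, for illustration only
    rule.params[rule.template] = 10;
    retention.rules.push(rule);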
diff --git a/src/portal/src/app/project/tag-retention/tag-retention.component.html b/src/portal/src/app/project/tag-retention/tag-retention.component.html
new file mode 100644
index 000000000..8c3f4e510
--- /dev/null
+++ b/src/portal/src/app/project/tag-retention/tag-retention.component.html
@@ -0,0 +1,218 @@
+
+
+ {{'TAG_RETENTION.RETENTION_RULES' | translate}} {{retention?.rules?.length ? retention?.rules?.length : 0}}/15
+ Loading...
+
+
+
+
+ 0" class="list-unstyled">
+ -
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {{'TAG_RETENTION.IN_REPOSITORIES' | translate}}
+ {{getI18nKey(rule?.scope_selectors?.repository[0]?.decoration)|translate}}
+ {{formatPattern(rule?.scope_selectors?.repository[0]?.pattern)}}
+ ,
+ {{getI18nKey(rule?.action)|translate}}
+ {{getI18nKey(rule?.template)|translate:{number: rule?.params[rule?.template] } }}
+ {{'TAG_RETENTION.WITH_CONDITION' | translate}}
+ {{'TAG_RETENTION.LOWER_TAGS' | translate}}
+ {{getI18nKey(rule?.tag_selectors[0]?.decoration)|translate}}
+ {{formatPattern(rule?.tag_selectors[0]?.pattern)}}
+
+ {{'TAG_RETENTION.AND' | translate}}
+ {{'TAG_RETENTION.LOWER_LABELS' | translate}}
+ {{getI18nKey(rule?.tag_selectors[1]?.decoration)|translate}}
+ {{rule?.tag_selectors[1]?.pattern}}
+
+
+
+
+
+
+
+
+ {{'TAG_RETENTION.ADD_RULE_HELP_1' | translate}}
+
+
+ = 15" class="btn btn-link" (click)="openAddRule()">{{'TAG_RETENTION.ADD_RULE' | translate}}
+
+
+
+
+
+ 0)" #cronScheduleComponent [labelCurrent]="label" [labelEdit]='label' [originCron]='originCron()' (inputvalue)="openConfirm($event)">
+
+
+ {{'TAG_RETENTION.RETENTION_RUNS' | translate}}
+
+
+ 0)" class="btn btn-outline"
+ (click)="isRetentionRunOpened=true">
+
+ {{'TAG_RETENTION.RUN_NOW' | translate}}
+ 0)" class="btn btn-outline"
+ (click)="whatIfRun()">{{'TAG_RETENTION.WHAT_IF_RUN' | translate}}
+
+
+ {{'TAG_RETENTION.ABORT' | translate}}
+
+
+
+
+
+
+ {{'TAG_RETENTION.SERIAL' | translate}}
+
+
+ {{'TAG_RETENTION.STATUS' | translate}}
+
+
+ {{'TAG_RETENTION.DRY_RUN' | translate}}
+
+
+ {{'TAG_RETENTION.START_TIME' | translate}}
+
+
+ {{'TAG_RETENTION.DURATION' | translate}}
+
+
+ {{'TAG_RETENTION.NO_EXECUTION' | translate}}
+
+
+
+
+ {{execution.id}}
+
+ {{execution.status}}
+ {{execution.dry_run ? 'YES' : 'NO'}}
+ {{execution.start_time|date:'short'}}
+ {{execution.duration}}
+
+
+ {{'TAG_RETENTION.REPOSITORY' | translate}}
+ {{'TAG_RETENTION.STATUS' | translate}}
+ {{'TAG_RETENTION.RETAINED' | translate}}/{{'TAG_RETENTION.TOTAL' | translate}}
+ {{'TAG_RETENTION.START_TIME' | translate}}
+ {{'TAG_RETENTION.DURATION' | translate}}
+ {{'TAG_RETENTION.LOG' | translate}}
+
+ {{'TAG_RETENTION.NO_HISTORY' | translate}}
+
+
+ {{task.repository}}
+ {{task.status}}
+ {{task.retained}}/{{task.total}}
+ {{task.start_time|date:'short'}}
+ {{task.duration}}
+ {{'TAG_RETENTION.LOG' | translate}}
+
+
+
+ {{innerPagination.firstItem + 1}}
+ -
+ {{innerPagination.lastItem + 1 }} {{'ROBOT_ACCOUNT.OF' |
+ translate}}
+ {{innerPagination.totalItems }} {{'ROBOT_ACCOUNT.ITEMS' | translate}}
+
+
+
+
+
+
+ {{pagination?.firstItem + 1}}
+ -
+ {{pagination?.lastItem + 1 }} {{'ROBOT_ACCOUNT.OF' |
+ translate}}
+ {{pagination.totalItems }} {{'ROBOT_ACCOUNT.ITEMS' | translate}}
+
+
+
+
+
+
+
+ {{'TAG_RETENTION.RETENTION_RUN' | translate}}
+
+
+
+
+
+ {{'TAG_RETENTION.RETENTION_RUN_EXPLAIN' | translate}}
+
+
+
+
+
+
+
+
+
+
+ {{'TAG_RETENTION.RETENTION_RUN_ABORTED' | translate}}
+
+ {{'TAG_RETENTION.RETENTION_RUN_ABORTED_EXPLAIN' | translate}}
+
+
+
+
+ {{'TAG_RETENTION.SCHEDULE' | translate}}
+
+
+
+
+
+ {{'TAG_RETENTION.SCHEDULE_WARNING' | translate}}
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/portal/src/app/project/tag-retention/tag-retention.component.scss b/src/portal/src/app/project/tag-retention/tag-retention.component.scss
new file mode 100644
index 000000000..1a0a4ce41
--- /dev/null
+++ b/src/portal/src/app/project/tag-retention/tag-retention.component.scss
@@ -0,0 +1,63 @@
+.color-97 {
+ color: #979797;
+}
+
+.rule {
+ height: 24px;
+ line-height: 24px;
+}
+
+.rule-name {
+ overflow: hidden;
+ white-space: nowrap;
+ text-overflow: ellipsis;
+ font-size: .541667rem;
+}
+
+.ml-5 {
+ margin-left: 5px;
+}
+
+.width-60 {
+ width: 60px;
+}
+
+.hand {
+ cursor: pointer;
+}
+
+.datagrid-container {
+ padding: 0;
+}
+
+.color-79b {
+ color: #0079b8;
+}
+
+.backdrop-transparent {
+ position: fixed;
+ top: 0;
+ bottom: 0;
+ right: 0;
+ left: 0;
+ opacity: 0;
+ z-index: 999;
+}
+.cron-selection {
+ margin-top: 20px;
+ display: flex;
+ align-items: center;
+}
+.label-left {
+ color: #000;
+}
+.v-center {
+ height: 48px;
+ line-height: 48px;
+}
+.font-size-54 {
+ font-size: .541667rem;
+}
+.padding-left-4 {
+ padding-left: 4px;
+}
\ No newline at end of file
diff --git a/src/portal/src/app/project/tag-retention/tag-retention.component.ts b/src/portal/src/app/project/tag-retention/tag-retention.component.ts
new file mode 100644
index 000000000..a39ec13c6
--- /dev/null
+++ b/src/portal/src/app/project/tag-retention/tag-retention.component.ts
@@ -0,0 +1,392 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+import { Component, OnInit, ViewChild } from '@angular/core';
+import { ActivatedRoute } from '@angular/router';
+import { AddRuleComponent } from "./add-rule/add-rule.component";
+import { ClrDatagridStringFilterInterface } from "@clr/angular";
+import { TagRetentionService } from "./tag-retention.service";
+import { Retention, Rule } from "./retention";
+import { Project } from "../project";
+import { clone, ErrorHandler } from "@harbor/ui";
+import { OriginCron } from "@harbor/ui";
+import { CronScheduleComponent } from "@harbor/ui";
+
+const MIN = 60000;
+const SEC = 1000;
+const MIN_STR = "min";
+const SEC_STR = "sec";
+const SCHEDULE_TYPE = {
+ NONE: "None",
+ DAILY: "Daily",
+ WEEKLY: "Weekly",
+ HOURLY: "Hourly",
+ CUSTOM: "Custom"
+};
+@Component({
+ selector: 'tag-retention',
+ templateUrl: './tag-retention.component.html',
+ styleUrls: ['./tag-retention.component.scss']
+})
+export class TagRetentionComponent implements OnInit {
+ serialFilter: ClrDatagridStringFilterInterface<any> = {
+ accepts(item: any, search: string): boolean {
+ return item.id.toString().indexOf(search) !== -1;
+ }
+ };
+ statusFilter: ClrDatagridStringFilterInterface<any> = {
+ accepts(item: any, search: string): boolean {
+ return item.status.toLowerCase().indexOf(search.toLowerCase()) !== -1;
+ }
+ };
+ dryRunFilter: ClrDatagridStringFilterInterface<any> = {
+ accepts(item: any, search: string): boolean {
+ let str = item.dry_run ? 'YES' : 'NO';
+ return str.indexOf(search) !== -1;
+ }
+ };
+ projectId: number;
+ isRetentionRunOpened: boolean = false;
+ isAbortedOpened: boolean = false;
+ isConfirmOpened: boolean = false;
+ cron: string;
+ selectedItem: any = null;
+ ruleIndex: number = -1;
+ index: number = -1;
+ retentionId: number;
+ retention: Retention = new Retention();
+ editIndex: number;
+ executionList = [];
+ historyList = [];
+ loadingExecutions: boolean = true;
+ loadingHistories: boolean = true;
+ label: string = 'TAG_RETENTION.TRIGGER';
+ loadingRule: boolean = false;
+ currentPage: number = 1;
+ pageSize: number = 10;
+ totalCount: number = 0;
+ @ViewChild('cronScheduleComponent')
+ cronScheduleComponent: CronScheduleComponent;
+ @ViewChild('addRule') addRuleComponent: AddRuleComponent;
+ constructor(
+ private route: ActivatedRoute,
+ private tagRetentionService: TagRetentionService,
+ private errorHandler: ErrorHandler,
+ ) {
+ }
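+ // Map the stored cron string back to a schedule type for the cron editor: the three
+ // fixed expressions below are the hourly/daily/weekly presets; anything else is Custom.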
+ originCron(): OriginCron {
+ let originCron: OriginCron = {
+ type: SCHEDULE_TYPE.NONE,
+ cron: ""
+ };
+ originCron.cron = this.retention.trigger.settings.cron;
+ if (originCron.cron === "") {
+ originCron.type = SCHEDULE_TYPE.NONE;
+ } else if (originCron.cron === "0 0 * * * *") {
+ originCron.type = SCHEDULE_TYPE.HOURLY;
+ } else if (originCron.cron === "0 0 0 * * *") {
+ originCron.type = SCHEDULE_TYPE.DAILY;
+ } else if (originCron.cron === "0 0 0 * * 0") {
+ originCron.type = SCHEDULE_TYPE.WEEKLY;
+ } else {
+ originCron.type = SCHEDULE_TYPE.CUSTOM;
+ }
+ return originCron;
+ }
+
+ ngOnInit() {
+ this.projectId = +this.route.snapshot.parent.params['id'];
+ this.retention.scope = {
+ level: "project",
+ ref: this.projectId
+ };
+ let resolverData = this.route.snapshot.parent.data;
+ if (resolverData) {
+ let project = resolverData["projectResolver"];
+ if (project.metadata && project.metadata.retention_id) {
+ this.retentionId = project.metadata.retention_id;
+ }
+ }
+ this.getRetention();
+ this.getMetadata();
+ }
+ openConfirm(cron: string) {
+ if (cron) {
+ this.isConfirmOpened = true;
+ this.cron = cron;
+ } else {
+ this.updateCron(cron);
+ }
+ }
+ closeConfirm() {
+ this.isConfirmOpened = false;
+ this.updateCron(this.cron);
+ }
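+ // The retention policy is created lazily: the first save POSTs a new policy,
+ // later saves PUT against the existing retention id.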
+ updateCron(cron: string) {
+ let retention: Retention = clone(this.retention);
+ retention.trigger.settings.cron = cron;
+ if (!this.retentionId) {
+ this.tagRetentionService.createRetention(retention).subscribe(
+ response => {
+ this.cronScheduleComponent.isEditMode = false;
+ this.refreshAfterCreatRetention();
+ }, error => {
+ this.errorHandler.error(error);
+ });
+ } else {
+ this.tagRetentionService.updateRetention(this.retentionId, retention).subscribe(
+ response => {
+ this.cronScheduleComponent.isEditMode = false;
+ this.getRetention();
+ }, error => {
+ this.errorHandler.error(error);
+ });
+ }
+ }
+ getMetadata() {
+ this.tagRetentionService.getRetentionMetadata().subscribe(
+ response => {
+ this.addRuleComponent.metadata = response;
+ }, error => {
+ this.errorHandler.error(error);
+ });
+ }
+
+ getRetention() {
+ if (this.retentionId) {
+ this.tagRetentionService.getRetention(this.retentionId).subscribe(
+ response => {
+ this.retention = response;
+ this.loadingRule = false;
+ }, error => {
+ this.errorHandler.error(error);
+ this.loadingRule = false;
+ });
+ }
+ }
+
+ editRuleByIndex(index) {
+ this.editIndex = index;
+ this.addRuleComponent.rule = clone(this.retention.rules[index]);
+ this.addRuleComponent.editRuleOrigin = clone(this.retention.rules[index]);
+ this.addRuleComponent.open();
+ this.addRuleComponent.isAdd = false;
+ this.ruleIndex = -1;
+ }
+ toggleDisable(index, isActionDisable) {
+ let retention: Retention = clone(this.retention);
+ retention.rules[index].disabled = isActionDisable;
+ this.ruleIndex = -1;
+ this.loadingRule = true;
+ this.tagRetentionService.updateRetention(this.retentionId, retention).subscribe(
+ response => {
+ this.getRetention();
+ }, error => {
+ this.loadingRule = false;
+ this.errorHandler.error(error);
+ });
+ }
+ deleteRule(index) {
+ let retention: Retention = clone(this.retention);
+ retention.rules.splice(index, 1);
+ // if rules is empty, clear schedule.
+ if (retention.rules && retention.rules.length === 0) {
+ retention.trigger.settings.cron = "";
+ }
+ this.ruleIndex = -1;
+ this.loadingRule = true;
+ this.tagRetentionService.updateRetention(this.retentionId, retention).subscribe(
+ response => {
+ this.getRetention();
+ }, error => {
+ this.loadingRule = false;
+ this.errorHandler.error(error);
+ });
+ }
+
+ openAddRule() {
+ this.addRuleComponent.open();
+ this.addRuleComponent.isAdd = true;
+ this.addRuleComponent.rule = new Rule();
+ }
+
+ runRetention() {
+ this.isRetentionRunOpened = false;
+ this.tagRetentionService.runNowTrigger(this.retentionId).subscribe(
+ response => {
+ this.refreshList();
+ }, error => {
+ this.errorHandler.error(error);
+ });
+ }
+
+ whatIfRun() {
+ this.tagRetentionService.whatIfRunTrigger(this.retentionId).subscribe(
+ response => {
+ this.refreshList();
+ }, error => {
+ this.errorHandler.error(error);
+ });
+ }
+
+ refreshList() {
+ this.index = -1;
+ this.selectedItem = null;
+ this.loadingExecutions = true;
+ if (this.retentionId) {
+ this.tagRetentionService.getRunNowList(this.retentionId, this.currentPage, this.pageSize).subscribe(
+ response => {
+ // Get total count
+ if (response.headers) {
+ let xHeader: string = response.headers.get("x-total-count");
+ if (xHeader) {
+ this.totalCount = parseInt(xHeader, 10);
+ }
+ }
+ this.executionList = response.body as Array<any>;
+ this.loadingExecutions = false;
+ TagRetentionComponent.calculateDuration(this.executionList);
+ }, error => {
+ this.loadingExecutions = false;
+ this.errorHandler.error(error);
+ });
+ } else {
+ this.loadingExecutions = false;
+ }
+ }
+
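+ // Format each execution's duration from its start/end timestamps, e.g. a span of
+ // 330500 ms renders as "5min30sec"; sub-second runs show "0", non-positive spans "N/A".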
+ static calculateDuration(arr: Array<any>) {
+ if (arr && arr.length > 0) {
+ for (let i = 0; i < arr.length; i++) {
+ let duration = new Date(arr[i].end_time).getTime() - new Date(arr[i].start_time).getTime();
+ let min = Math.floor(duration / MIN);
+ let sec = Math.floor((duration % MIN) / SEC);
+ arr[i]['duration'] = "";
+ if ((min || sec) && duration > 0) {
+ if (min) {
+ arr[i]['duration'] += '' + min + MIN_STR;
+ }
+ if (sec) {
+ arr[i]['duration'] += '' + sec + SEC_STR;
+ }
+ } else if (min === 0 && sec === 0 && duration > 0) {
+ arr[i]['duration'] = "0";
+ } else {
+ arr[i]['duration'] = "N/A";
+ }
+ }
+ }
+ }
+
+ abortRun() {
+ this.isAbortedOpened = true;
+ this.tagRetentionService.AbortRun(this.retentionId, this.selectedItem.id).subscribe(
+ res => {
+ this.refreshList();
+ }, error => {
+ this.errorHandler.error(error);
+ });
+ }
+
+ abortRetention() {
+ this.isAbortedOpened = false;
+ }
+
+ openEditor(index) {
+ if (this.ruleIndex !== index) {
+ this.ruleIndex = index;
+ } else {
+ this.ruleIndex = -1;
+ }
+ }
+
+ openDetail(index, executionId) {
+ if (this.index !== index) {
+ this.index = index;
+ this.historyList = [];
+ this.loadingHistories = true;
+ this.tagRetentionService.getExecutionHistory(this.retentionId, executionId).subscribe(
+ res => {
+ this.loadingHistories = false;
+ this.historyList = res;
+ TagRetentionComponent.calculateDuration(this.historyList);
+ }, error => {
+ this.loadingHistories = false;
+ this.errorHandler.error(error);
+ });
+ } else {
+ this.index = -1;
+ }
+ }
+
+ refreshAfterCreatRetention() {
+ this.tagRetentionService.getProjectInfo(this.projectId).subscribe(
+ response => {
+ this.retentionId = response.metadata.retention_id;
+ this.getRetention();
+ }, error => {
+ this.loadingRule = false;
+ this.errorHandler.error(error);
+ });
+ }
+
+ clickAdd(rule) {
+ this.loadingRule = true;
+ if (this.addRuleComponent.isAdd) {
+ let retention: Retention = clone(this.retention);
+ retention.rules.push(rule);
+ if (!this.retentionId) {
+ this.tagRetentionService.createRetention(retention).subscribe(
+ response => {
+ this.refreshAfterCreatRetention();
+ }, error => {
+ this.errorHandler.error(error);
+ this.loadingRule = false;
+ });
+ } else {
+ this.tagRetentionService.updateRetention(this.retentionId, retention).subscribe(
+ response => {
+ this.getRetention();
+ }, error => {
+ this.loadingRule = false;
+ this.errorHandler.error(error);
+ });
+ }
+ } else {
+ let retention: Retention = clone(this.retention);
+ retention.rules[this.editIndex] = rule;
+ this.tagRetentionService.updateRetention(this.retentionId, retention).subscribe(
+ response => {
+ this.getRetention();
+ }, error => {
+ this.errorHandler.error(error);
+ this.loadingRule = false;
+ });
+ }
+ }
+
+ seeLog(executionId, taskId) {
+ this.tagRetentionService.seeLog(this.retentionId, executionId, taskId);
+ }
+
+ formatPattern(pattern: string): string {
+ return pattern.replace(/[{}]/g, "");
+ }
+
+ getI18nKey(str: string) {
+ return this.tagRetentionService.getI18nKey(str);
+ }
+ clrLoad() {
+ this.refreshList();
+ }
+}
diff --git a/src/portal/src/app/project/tag-retention/tag-retention.service.ts b/src/portal/src/app/project/tag-retention/tag-retention.service.ts
new file mode 100644
index 000000000..76940c324
--- /dev/null
+++ b/src/portal/src/app/project/tag-retention/tag-retention.service.ts
@@ -0,0 +1,125 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+import { Injectable } from "@angular/core";
+import { HttpClient, HttpParams, HttpResponse } from "@angular/common/http";
+import { Retention, RuleMetadate } from "./retention";
+import { Observable, throwError as observableThrowError } from "rxjs";
+import { map, catchError } from "rxjs/operators";
+import { Project } from "../project";
+import { buildHttpRequestOptionsWithObserveResponse } from "@harbor/ui";
+
+@Injectable()
+export class TagRetentionService {
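+ // Maps rule names and template phrases returned by the retention API to
+ // TAG_RETENTION i18n keys; unknown strings fall through unchanged in getI18nKey().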
+ private I18nMap: object = {
+ "retain": "ACTION_RETAIN",
+ "lastXDays": "RULE_NAME_1",
+ "latestActiveK": "RULE_NAME_2",
+ "latestPushedK": "RULE_NAME_3",
+ "latestPulledN": "RULE_NAME_4",
+ "always": "RULE_NAME_5",
+ "nDaysSinceLastPull": "RULE_NAME_6",
+ "nDaysSinceLastPush": "RULE_NAME_7",
+ "the images from the last # days": "RULE_TEMPLATE_1",
+ "the most recent active # images": "RULE_TEMPLATE_2",
+ "the most recently pushed # images": "RULE_TEMPLATE_3",
+ "the most recently pulled # images": "RULE_TEMPLATE_4",
+ "pulled within the last # days": "RULE_TEMPLATE_6",
+ "pushed within the last # days": "RULE_TEMPLATE_7",
+ "repoMatches": "MAT",
+ "repoExcludes": "EXC",
+ "matches": "MAT",
+ "excludes": "EXC",
+ "withLabels": "WITH",
+ "withoutLabels": "WITHOUT",
+ "COUNT": "UNIT_COUNT",
+ "DAYS": "UNIT_DAY",
+ "none": "NONE",
+ "nothing": "NONE"
+ };
+
+ constructor(
+ private http: HttpClient,
+ ) {
+ }
+
+ getI18nKey(str: string): string {
+ if (this.I18nMap[str.trim()]) {
+ return "TAG_RETENTION." + this.I18nMap[str.trim()];
+ }
+ return str;
+ }
+
+ getRetentionMetadata(): Observable<RuleMetadate> {
+ return this.http.get(`/api/retentions/metadatas`)
+ .pipe(map(response => response as RuleMetadate))
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
+ getRetention(retentionId): Observable<Retention> {
+ return this.http.get(`/api/retentions/${retentionId}`)
+ .pipe(map(response => response as Retention))
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
+ createRetention(retention: Retention) {
+ return this.http.post(`/api/retentions`, retention)
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
+ updateRetention(retentionId, retention: Retention) {
+ return this.http.put(`/api/retentions/${retentionId}`, retention)
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
+ getProjectInfo(projectId) {
+ return this.http.get(`/api/projects/${projectId}`)
+ .pipe(map(response => response as Project))
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
+ runNowTrigger(retentionId) {
+ return this.http.post(`/api/retentions/${retentionId}/executions`, {dry_run: false})
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
+ whatIfRunTrigger(retentionId) {
+ return this.http.post(`/api/retentions/${retentionId}/executions`, {dry_run: true})
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
+ AbortRun(retentionId, executionId) {
+ return this.http.patch(`/api/retentions/${retentionId}/executions/${executionId}`, {action: 'stop'})
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
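+ // Executions are paged server-side; callers read the total from the
+ // x-total-count response header, hence the full HttpResponse is observed.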
+ getRunNowList(retentionId, page: number, pageSize: number) {
+ let params = new HttpParams();
+ if (page && pageSize) {
+ params = params.set('page', page + '').set('page_size', pageSize + '');
+ }
+ return this.http
+ .get<HttpResponse<Array<any>>>(`/api/retentions/${retentionId}/executions`, buildHttpRequestOptionsWithObserveResponse(params))
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
+ getExecutionHistory(retentionId, executionId) {
+ return this.http.get(`/api/retentions/${retentionId}/executions/${executionId}/tasks`)
+ .pipe(map(response => response as Array<any>))
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
+ seeLog(retentionId, executionId, taskId) {
+ window.open(`api/retentions/${retentionId}/executions/${executionId}/tasks/${taskId}`, '_blank');
+ }
+}
diff --git a/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.html b/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.html
new file mode 100644
index 000000000..29073e9be
--- /dev/null
+++ b/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.html
@@ -0,0 +1,46 @@
+
+
+
+
+
+ {{'WEBHOOK.ENDPOINT_URL' | translate}}
+
+
+
+ {{ 'WEBHOOK.URL_IS_REQUIRED' | translate }}
+
+
+
+
+
+ {{ 'WEBHOOK.AUTH_HEADER' |
+ translate }}
+
+
+
+
+ {{'WEBHOOK.VERIFY_REMOTE_CERT' | translate}}
+
+
+
+
+ {{'CONFIG.TOOLTIP.VERIFY_REMOTE_CERT' | translate}}
+
+
+
+
+
+
+ {{'BUTTON.CONTINUE' | translate}}
+ {{'WEBHOOK.TEST_ENDPOINT_BUTTON' | translate}}
+
+
+ {{'WEBHOOK.TEST_ENDPOINT_BUTTON' | translate}}
+ {{'BUTTON.CANCEL' | translate}}
+ {{'BUTTON.SAVE' | translate}}
+
+
\ No newline at end of file
diff --git a/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.scss b/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.scss
new file mode 100644
index 000000000..a91cb90b1
--- /dev/null
+++ b/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.scss
@@ -0,0 +1,12 @@
+.align-center {
+ text-align: center;
+}
+
+.webhook-section {
+ margin-left: calc(50% - 10rem);
+ text-align: left;
+}
+
+.icon-tooltip {
+ margin-top: 4px;
+}
\ No newline at end of file
diff --git a/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.ts b/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.ts
new file mode 100644
index 000000000..96d23ecb1
--- /dev/null
+++ b/src/portal/src/app/project/webhook/add-webhook-form/add-webhook-form.component.ts
@@ -0,0 +1,112 @@
+import {
+ Component,
+ OnInit,
+ OnChanges,
+ Input,
+ ViewChild,
+ Output,
+ EventEmitter,
+ SimpleChanges
+} from "@angular/core";
+import { Webhook, Target } from "../webhook";
+import { NgForm } from "@angular/forms";
+import {ClrLoadingState} from "@clr/angular";
+import { finalize } from "rxjs/operators";
+import { WebhookService } from "../webhook.service";
+import { WebhookEventTypes } from '../../../shared/shared.const';
+import { MessageHandlerService } from "../../../shared/message-handler/message-handler.service";
+
+@Component({
+ selector: 'add-webhook-form',
+ templateUrl: './add-webhook-form.component.html',
+ styleUrls: ['./add-webhook-form.component.scss']
+})
+export class AddWebhookFormComponent implements OnInit, OnChanges {
+ closable: boolean = true;
+ staticBackdrop: boolean = true;
+ checking: boolean = false;
+ checkBtnState: ClrLoadingState = ClrLoadingState.DEFAULT;
+ webhookForm: NgForm;
+ submitting: boolean = false;
+ webhookTarget: Target = new Target();
+
+ @Input() projectId: number;
+ @Input() webhook: Webhook;
+ @Input() isModify: boolean;
+ @Input() isOpen: boolean;
+ @Output() edit = new EventEmitter<boolean>();
+ @Output() close = new EventEmitter<boolean>();
+ @ViewChild("webhookForm") currentForm: NgForm;
+
+
+ constructor(
+ private webhookService: WebhookService,
+ private messageHandlerService: MessageHandlerService
+ ) { }
+
+ ngOnInit() {
+ }
+
+ ngOnChanges(changes: SimpleChanges) {
+ if (changes['isOpen'] && changes['isOpen'].currentValue) {
+ Object.assign(this.webhookTarget, this.webhook.targets[0]);
+ }
+ }
+
+ onTestEndpoint() {
+ this.checkBtnState = ClrLoadingState.LOADING;
+ this.checking = true;
+
+ this.webhookService
+ .testEndpoint(this.projectId, {
+ targets: [this.webhookTarget]
+ })
+ .pipe(finalize(() => (this.checking = false)))
+ .subscribe(
+ response => {
+ this.checkBtnState = ClrLoadingState.SUCCESS;
+ },
+ error => {
+ this.checkBtnState = ClrLoadingState.DEFAULT;
+ this.messageHandlerService.handleError(error);
+ }
+ );
+ }
+
+ onCancel() {
+ this.close.emit(false);
+ this.currentForm.reset();
+ }
+
+ onSubmit() {
+ // Guard against double submission until the request settles; finalize() below resets the flag.
+ this.submitting = true;
+ const rx = this.isModify
+ ? this.webhookService.editWebhook(this.projectId, this.webhook.id, Object.assign(this.webhook, { targets: [this.webhookTarget] }))
+ : this.webhookService.createWebhook(this.projectId, {
+ targets: [this.webhookTarget],
+ event_types: Object.keys(WebhookEventTypes).map(key => WebhookEventTypes[key]),
+ enabled: true,
+ });
+ rx.pipe(finalize(() => (this.submitting = false)))
+ .subscribe(
+ response => {
+ this.edit.emit(this.isModify);
+ },
+ error => {
+ this.messageHandlerService.handleError(error);
+ }
+ );
+ }
+
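+ // The checkbox models "verify remote certificate" while the API stores the
+ // inverse skip flag, hence the negation.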
+ setCertValue($event: any): void {
+ this.webhookTarget.skip_cert_verify = !$event;
+ }
+
+ public get isValid(): boolean {
+ return (
+ this.currentForm &&
+ this.currentForm.valid &&
+ !this.submitting &&
+ !this.checking
+ );
+ }
+}
diff --git a/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.html b/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.html
new file mode 100644
index 000000000..589376278
--- /dev/null
+++ b/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.html
@@ -0,0 +1,13 @@
+
+ {{'WEBHOOK.EDIT_WEBHOOK' | translate}}
+
+ {{'WEBHOOK.EDIT_WEBHOOK_DESC' | translate}}
+
+
+
\ No newline at end of file
diff --git a/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.scss b/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.scss
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.ts b/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.ts
new file mode 100644
index 000000000..4708d7010
--- /dev/null
+++ b/src/portal/src/app/project/webhook/add-webhook/add-webhook.component.ts
@@ -0,0 +1,49 @@
+import {
+ Component,
+ OnInit,
+ Input,
+ ViewChild,
+ Output,
+ EventEmitter,
+} from "@angular/core";
+import { Webhook } from "../webhook";
+import { AddWebhookFormComponent } from "../add-webhook-form/add-webhook-form.component";
+
+@Component({
+ selector: 'add-webhook',
+ templateUrl: './add-webhook.component.html',
+ styleUrls: ['./add-webhook.component.scss']
+})
+export class AddWebhookComponent implements OnInit {
+ isOpen: boolean = false;
+ closable: boolean = true;
+ staticBackdrop: boolean = true;
+
+ @Input() projectId: number;
+ @Input() webhook: Webhook;
+ @Output() modify = new EventEmitter<boolean>();
+ @ViewChild(AddWebhookFormComponent)
+ addWebhookFormComponent: AddWebhookFormComponent;
+
+
+ constructor() { }
+
+ ngOnInit() {
+ }
+
+ openAddWebhookModal() {
+ this.isOpen = true;
+ }
+
+ onCancel() {
+ this.isOpen = false;
+ }
+
+ closeModal(isModified: boolean): void {
+ if (isModified) {
+ this.modify.emit(true);
+ }
+ this.isOpen = false;
+ }
+
+}
diff --git a/src/portal/src/app/project/webhook/webhook.component.html b/src/portal/src/app/project/webhook/webhook.component.html
new file mode 100644
index 000000000..ff6d92769
--- /dev/null
+++ b/src/portal/src/app/project/webhook/webhook.component.html
@@ -0,0 +1,55 @@
+
+
+
+
+
+
+
+
+
+ Webhook endpoint: {{endpoint}}
+ {{'WEBHOOK.EDIT_BUTTON' | translate}}
+
+
+ {{'WEBHOOK.ENABLED_BUTTON' | translate}}
+ {{'WEBHOOK.DISABLED_BUTTON' | translate}}
+
+
+
+
+
+
+ {{'WEBHOOK.TYPE' | translate}}
+ {{'WEBHOOK.STATUS' | translate}}
+ {{'WEBHOOK.CREATED' | translate}}
+ {{'WEBHOOK.LAST_TRIGGERED' | translate}}
+
+ {{item.event_type}}
+
+
+
+ {{'WEBHOOK.ENABLED' | translate}}
+
+
+
+ {{'WEBHOOK.DISABLED' | translate}}
+
+
+ {{item.creation_time | date: 'short'}}
+ {{item.last_trigger_time | date: 'short'}}
+
+
+ 1 - {{lastTriggerCount}} {{'WEBHOOK.OF' | translate}} {{lastTriggerCount}} {{'WEBHOOK.ITEMS' | translate}}
+
+
+
+
+
+
+ {{'WEBHOOK.CREATE_WEBHOOK' | translate}}
+ {{'WEBHOOK.CREATE_WEBHOOK_DESC' | translate}}
+
+
+
+
+
\ No newline at end of file
diff --git a/src/portal/src/app/project/webhook/webhook.component.scss b/src/portal/src/app/project/webhook/webhook.component.scss
new file mode 100644
index 000000000..66520bd73
--- /dev/null
+++ b/src/portal/src/app/project/webhook/webhook.component.scss
@@ -0,0 +1,41 @@
+.label-top {
+ top: 12px;
+}
+
+.icon-wrap {
+ height: 14px;
+}
+
+.webhook-form-wrap {
+ width: 19rem;
+ margin: 0 auto;
+}
+
+.create-text {
+ margin: 0 auto;
+ width: 19rem;
+}
+
+.create-text-title {
+ margin-top: 1rem;
+}
+
+.endpoint-label {
+ font-weight: bold;
+}
+
+.disabled-btn {
+ color: #e12200;
+}
+
+.disabled-btn:hover {
+ color: #c92100;
+}
+
+.enabled-icon {
+ margin: -2px 5px 0 0;
+}
+.center {
+ justify-content: center;
+ align-items: center;
+}
diff --git a/src/portal/src/app/project/webhook/webhook.component.ts b/src/portal/src/app/project/webhook/webhook.component.ts
new file mode 100644
index 000000000..dddf7eb4a
--- /dev/null
+++ b/src/portal/src/app/project/webhook/webhook.component.ts
@@ -0,0 +1,156 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+import { finalize } from "rxjs/operators";
+import { TranslateService } from '@ngx-translate/core';
+import { Component, OnInit, ViewChild } from '@angular/core';
+import { AddWebhookComponent } from "./add-webhook/add-webhook.component";
+import { AddWebhookFormComponent } from "./add-webhook-form/add-webhook-form.component";
+import { ActivatedRoute } from '@angular/router';
+import { Webhook, LastTrigger } from './webhook';
+import { WebhookService } from './webhook.service';
+import { MessageHandlerService } from "../../shared/message-handler/message-handler.service";
+import { Project } from '../project';
+import {
+ ConfirmationTargets,
+ ConfirmationState,
+ ConfirmationButtons
+} from "../../shared/shared.const";
+
+import { ConfirmationMessage } from "../../shared/confirmation-dialog/confirmation-message";
+import { ConfirmationAcknowledgement } from "../../shared/confirmation-dialog/confirmation-state-message";
+import { ConfirmationDialogComponent } from "../../shared/confirmation-dialog/confirmation-dialog.component";
+
+@Component({
+ templateUrl: './webhook.component.html',
+ styleUrls: ['./webhook.component.scss'],
+ // changeDetection: ChangeDetectionStrategy.OnPush
+})
+export class WebhookComponent implements OnInit {
+ @ViewChild(AddWebhookComponent)
+ addWebhookComponent: AddWebhookComponent;
+ @ViewChild(AddWebhookFormComponent)
+ addWebhookFormComponent: AddWebhookFormComponent;
+ @ViewChild("confirmationDialogComponent")
+ confirmationDialogComponent: ConfirmationDialogComponent;
+ webhook: Webhook;
+ endpoint: string = '';
+ lastTriggers: LastTrigger[] = [];
+ lastTriggerCount: number = 0;
+ isEnabled: boolean;
+ loading: boolean = false;
+ showCreate: boolean = false;
+ loadingWebhook: boolean = true;
+ projectId: number;
+ projectName: string;
+ constructor(
+ private route: ActivatedRoute,
+ private translate: TranslateService,
+ private webhookService: WebhookService,
+ private messageHandlerService: MessageHandlerService) {}
+
+ ngOnInit() {
+ this.projectId = +this.route.snapshot.parent.params['id'];
+ let resolverData = this.route.snapshot.parent.data;
+ if (resolverData) {
+ let project = (resolverData["projectResolver"]);
+ this.projectName = project.name;
+ }
+ this.getData(this.projectId);
+ }
+
+ getData(projectId: number) {
+ this.getLastTriggers(projectId);
+ this.getWebhook(projectId);
+ }
+
+ getLastTriggers(projectId: number) {
+ this.loading = true;
+ this.webhookService
+ .listLastTrigger(projectId)
+ .pipe(finalize(() => (this.loading = false)))
+ .subscribe(
+ response => {
+ this.lastTriggers = response;
+ this.lastTriggerCount = response.length;
+ },
+ error => {
+ this.messageHandlerService.handleError(error);
+ }
+ );
+ }
+
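+ // The UI manages a single webhook policy per project, so only the first
+ // policy returned by the API is displayed.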
+ getWebhook(projectId: number) {
+ this.webhookService
+ .listWebhook(projectId)
+ .pipe(finalize(() => (this.loadingWebhook = false)))
+ .subscribe(
+ response => {
+ if (response.length) {
+ this.webhook = response[0];
+ this.endpoint = this.webhook.targets[0].address;
+ this.isEnabled = this.webhook.enabled;
+ this.showCreate = false;
+ } else {
+ this.showCreate = true;
+ }
+ },
+ error => {
+ this.messageHandlerService.handleError(error);
+ }
+ );
+ }
+
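+ // Only opens the confirmation dialog; the enable/disable request itself is
+ // sent from confirmSwitch once the user confirms.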
+ switchWebhookStatus(enabled = false) {
+ let content = '';
+ this.translate.get(
+ enabled
+ ? 'WEBHOOK.ENABLED_WEBHOOK_SUMMARY'
+ : 'WEBHOOK.DISABLED_WEBHOOK_SUMMARY'
+ ).subscribe((res) => content = res + this.projectName);
+ let message = new ConfirmationMessage(
+ enabled ? 'WEBHOOK.ENABLED_WEBHOOK_TITLE' : 'WEBHOOK.DISABLED_WEBHOOK_TITLE',
+ content,
+ '',
+ {},
+ ConfirmationTargets.WEBHOOK,
+ enabled ? ConfirmationButtons.ENABLE_CANCEL : ConfirmationButtons.DISABLE_CANCEL
+ );
+ this.confirmationDialogComponent.open(message);
+ }
+
+ confirmSwitch(message: ConfirmationAcknowledgement) {
+ if (message &&
+ message.source === ConfirmationTargets.WEBHOOK &&
+ message.state === ConfirmationState.CONFIRMED) {
+ this.webhookService
+ .editWebhook(this.projectId, this.webhook.id, Object.assign({}, this.webhook, { enabled: !this.isEnabled }))
+ .subscribe(
+ response => {
+ this.getData(this.projectId);
+ },
+ error => {
+ this.messageHandlerService.handleError(error);
+ }
+ );
+ }
+ }
+
+ editWebhook(isModify: boolean): void {
+ this.getData(this.projectId);
+ }
+
+ openAddWebhookModal(): void {
+ this.addWebhookComponent.openAddWebhookModal();
+ }
+}
diff --git a/src/portal/src/app/project/webhook/webhook.service.ts b/src/portal/src/app/project/webhook/webhook.service.ts
new file mode 100644
index 000000000..490942f25
--- /dev/null
+++ b/src/portal/src/app/project/webhook/webhook.service.ts
@@ -0,0 +1,56 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+import { throwError as observableThrowError, Observable } from "rxjs";
+import { map, catchError } from "rxjs/operators";
+import { Injectable } from "@angular/core";
+import { HttpClient } from "@angular/common/http";
+import { Webhook, LastTrigger } from "./webhook";
+
+@Injectable()
+export class WebhookService {
+ constructor(private http: HttpClient) { }
+
+ public listWebhook(projectId: number): Observable {
+ return this.http
+ .get(`/api/projects/${projectId}/webhook/policies`)
+ .pipe(map(response => response as Webhook[]))
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
+ public listLastTrigger(projectId: number): Observable {
+ return this.http
+ .get(`/api/projects/${projectId}/webhook/lasttrigger`)
+ .pipe(map(response => response as LastTrigger[]))
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
+ public editWebhook(projectId: number, policyId: number, data: any): Observable {
+ return this.http
+ .put(`/api/projects/${projectId}/webhook/policies/${policyId}`, data)
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
+ public createWebhook(projectId: number, data: any): Observable {
+ return this.http
+ .post(`/api/projects/${projectId}/webhook/policies`, data)
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+
+
+ public testEndpoint(projectId: number, param): Observable {
+ return this.http
+ .post(`/api/projects/${projectId}/webhook/policies/test`, param)
+ .pipe(catchError(error => observableThrowError(error)));
+ }
+}
diff --git a/src/portal/src/app/project/webhook/webhook.ts b/src/portal/src/app/project/webhook/webhook.ts
new file mode 100644
index 000000000..4d11a8c1c
--- /dev/null
+++ b/src/portal/src/app/project/webhook/webhook.ts
@@ -0,0 +1,35 @@
+import { WebhookEventTypes } from '../../shared/shared.const';
+
+export class Webhook {
+ id: number;
+ name: string;
+ project_id: number;
+ description: string;
+ targets: Target[];
+ event_types: WebhookEventTypes[];
+ creator: string;
+ creation_time: Date;
+ update_time: Date;
+ enabled: boolean;
+}
+
+export class Target {
+ type: string;
+ address: string;
+ attachment: string;
+ auth_header: string;
+ skip_cert_verify: boolean;
+
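+ // Defaults for a newly created endpoint target; certificate verification is
+ // skipped unless the user enables it in the form.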
+ constructor () {
+ this.type = 'http';
+ this.address = '';
+ this.skip_cert_verify = true;
+ }
+}
+
+export class LastTrigger {
+ enabled: boolean;
+ event_type: string;
+ creation_time: Date;
+ last_trigger_time: Date;
+}
diff --git a/src/portal/src/app/shared/about-dialog/about-dialog.component.html b/src/portal/src/app/shared/about-dialog/about-dialog.component.html
index 3734a76e0..75ec45695 100644
--- a/src/portal/src/app/shared/about-dialog/about-dialog.component.html
+++ b/src/portal/src/app/shared/about-dialog/about-dialog.component.html
@@ -11,7 +11,7 @@
{{'ABOUT.COPYRIGHT' | translate}}
- {{'ABOUT.OPEN_SOURCE_LICENSE' | translate}}
+ {{'ABOUT.OPEN_SOURCE_LICENSE' | translate}}
diff --git a/src/portal/src/app/shared/confirmation-dialog/confirmation-dialog.component.html b/src/portal/src/app/shared/confirmation-dialog/confirmation-dialog.component.html
index e6b6511a4..004d8acf7 100644
--- a/src/portal/src/app/shared/confirmation-dialog/confirmation-dialog.component.html
+++ b/src/portal/src/app/shared/confirmation-dialog/confirmation-dialog.component.html
@@ -22,7 +22,15 @@
{{'BUTTON.CLOSE' | translate}}
-
+
+ {{'BUTTON.CANCEL' | translate}}
+ {{'BUTTON.ENABLE' | translate}}
+
+
+ {{'BUTTON.CANCEL' | translate}}
+ {{'BUTTON.DISABLE' | translate}}
+
+
{{'BUTTON.CANCEL' | translate}}
{{'BUTTON.SWITCH' | translate}}
{{'BUTTON.CLOSE' | translate}}
diff --git a/src/portal/src/app/shared/route/member-guard-activate.service.ts b/src/portal/src/app/shared/route/member-guard-activate.service.ts
index 2d823b12f..324c424aa 100644
--- a/src/portal/src/app/shared/route/member-guard-activate.service.ts
+++ b/src/portal/src/app/shared/route/member-guard-activate.service.ts
@@ -41,8 +41,19 @@ export class MemberGuard implements CanActivate, CanActivateChild {
this.checkMemberStatus(state.url, projectId).subscribe((res) => observer.next(res));
}
, error => {
- this.router.navigate([CommonRoutes.HARBOR_DEFAULT]);
- observer.next(false);
+ // If it is a public project, allow activation; otherwise redirect to the default route.
+ this.projectService.getProject(projectId).subscribe(project => {
+ if (project.metadata.public) {
+ observer.next(true);
+ } else {
+ this.router.navigate([CommonRoutes.HARBOR_DEFAULT]);
+ observer.next(false);
+ }
+ }, err => {
+ this.router.navigate([CommonRoutes.HARBOR_DEFAULT]);
+ observer.next(false);
+ });
+
});
} else {
this.checkMemberStatus(state.url, projectId).subscribe((res) => observer.next(res));
diff --git a/src/portal/src/app/shared/shared.const.ts b/src/portal/src/app/shared/shared.const.ts
index 8e405383a..07ed9a844 100644
--- a/src/portal/src/app/shared/shared.const.ts
+++ b/src/portal/src/app/shared/shared.const.ts
@@ -40,7 +40,8 @@ export const enum ConfirmationTargets {
CONFIG_ROUTE,
CONFIG_TAB,
HELM_CHART,
- HELM_CHART_VERSION
+ HELM_CHART_VERSION,
+ WEBHOOK
}
export const enum ActionType {
@@ -54,7 +55,7 @@ export const enum ConfirmationState {
NA, CONFIRMED, CANCEL
}
export const enum ConfirmationButtons {
- CONFIRM_CANCEL, YES_NO, DELETE_CANCEL, CLOSE, SWITCH_CANCEL
+ CONFIRM_CANCEL, YES_NO, DELETE_CANCEL, CLOSE, ENABLE_CANCEL, DISABLE_CANCEL, SWITCH_CANCEL
}
export const ProjectTypes = { 0: 'PROJECT.ALL_PROJECTS', 1: 'PROJECT.PRIVATE_PROJECTS', 2: 'PROJECT.PUBLIC_PROJECTS' };
@@ -81,3 +82,14 @@ export enum ResourceType {
CHART_VERSION = 2,
REPOSITORY_TAG = 3,
}
+
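+// Event types a webhook policy can subscribe to; the string values are the
+// event_type identifiers expected by the API.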
+export enum WebhookEventTypes {
+ DOWNLOAD_CHART = "downloadChart",
+ DELETE_CHART = "deleteChart",
+ UPLOAD_CHART = "uploadChart",
+ DELETE_IMAGE = "deleteImage",
+ PULL_IMAGE = "pullImage",
+ PUSH_IMAGE = "pushImage",
+ SCANNING_FAILED = "scanningFailed",
+ SCANNING_COMPLETED = "scanningCompleted",
+}
diff --git a/src/portal/src/app/shared/shared.module.ts b/src/portal/src/app/shared/shared.module.ts
index 7e607b73e..9cb09394e 100644
--- a/src/portal/src/app/shared/shared.module.ts
+++ b/src/portal/src/app/shared/shared.module.ts
@@ -76,7 +76,8 @@ const uiLibConfig: IServiceConfig = {
helmChartEndpoint: "/api/chartrepo",
downloadChartEndpoint: "/chartrepo",
gcEndpoint: "/api/system/gc",
- ScanAllEndpoint: "/api/system/scanAll"
+ ScanAllEndpoint: "/api/system/scanAll",
+ quotaUrl: "/api/quotas"
};
@NgModule({
diff --git a/src/portal/src/app/user/user.component.html b/src/portal/src/app/user/user.component.html
index aeb2ca739..9e855c479 100644
--- a/src/portal/src/app/user/user.component.html
+++ b/src/portal/src/app/user/user.component.html
@@ -9,7 +9,7 @@
-
+
{{'USER.ADD_ACTION' | translate}}
{{ISADMNISTRATOR | translate}}
@@ -35,7 +35,7 @@
{{pagination.firstItem + 1}} - {{pagination.lastItem + 1}} {{'USER.OF' | translate }}
{{pagination.totalItems}} {{'USER.ITEMS' | translate }}
-
+
diff --git a/src/portal/src/app/user/user.component.ts b/src/portal/src/app/user/user.component.ts
index 91f3ec16c..e50455871 100644
--- a/src/portal/src/app/user/user.component.ts
+++ b/src/portal/src/app/user/user.component.ts
@@ -11,13 +11,19 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-import { Component, OnInit, ViewChild, OnDestroy, ChangeDetectionStrategy, ChangeDetectorRef } from '@angular/core';
+import { Component, OnInit, ViewChild, OnDestroy } from '@angular/core';
import { Subscription, Observable, forkJoin } from "rxjs";
import { TranslateService } from '@ngx-translate/core';
import { ConfirmationState, ConfirmationTargets, ConfirmationButtons } from '../shared/shared.const';
-import { operateChanges, OperateInfo, OperationService, OperationState, errorHandler as errorHandFn } from '@harbor/ui';
+import {
+ operateChanges,
+ OperateInfo,
+ OperationService,
+ OperationState,
+ errorHandler as errorHandFn,
+} from '@harbor/ui';
import { ConfirmationDialogService } from '../shared/confirmation-dialog/confirmation-dialog.service';
import { ConfirmationMessage } from '../shared/confirmation-dialog/confirmation-message';
import { MessageHandlerService } from '../shared/message-handler/message-handler.service';
@@ -41,348 +47,305 @@ import { throwError as observableThrowError } from "rxjs";
*/
@Component({
- selector: 'harbor-user',
- templateUrl: 'user.component.html',
- styleUrls: ['user.component.scss'],
- providers: [UserService],
- changeDetection: ChangeDetectionStrategy.OnPush
+ selector: 'harbor-user',
+ templateUrl: 'user.component.html',
+ styleUrls: ['user.component.scss'],
+ providers: [UserService],
})
export class UserComponent implements OnInit, OnDestroy {
- users: User[] = [];
- originalUsers: Observable<User[]>;
- selectedRow: User[] = [];
- ISADMNISTRATOR: string = "USER.ENABLE_ADMIN_ACTION";
+ users: User[] = [];
+ selectedRow: User[] = [];
+ ISADMNISTRATOR: string = "USER.ENABLE_ADMIN_ACTION";
- currentTerm: string;
- totalCount: number = 0;
- currentPage: number = 1;
- timerHandler: any;
+ currentTerm: string;
+ totalCount: number = 0;
+ currentPage: number = 1;
+ pageSize: number = 15;
+ timerHandler: any;
- private onGoing: boolean = true;
- private adminMenuText: string = "";
- private adminColumn: string = "";
- private deletionSubscription: Subscription;
- @ViewChild(NewUserModalComponent)
- newUserDialog: NewUserModalComponent;
- @ViewChild(ChangePasswordComponent)
- changePwdDialog: ChangePasswordComponent;
+ private onGoing: boolean = true;
+ private adminMenuText: string = "";
+ private adminColumn: string = "";
+ private deletionSubscription: Subscription;
+ @ViewChild(NewUserModalComponent)
+ newUserDialog: NewUserModalComponent;
+ @ViewChild(ChangePasswordComponent)
+ changePwdDialog: ChangePasswordComponent;
- constructor(
- private userService: UserService,
- private translate: TranslateService,
- private deletionDialogService: ConfirmationDialogService,
- private msgHandler: MessageHandlerService,
- private session: SessionService,
- private appConfigService: AppConfigService,
- private operationService: OperationService,
- private ref: ChangeDetectorRef) {
- this.deletionSubscription = deletionDialogService.confirmationConfirm$.subscribe(confirmed => {
- if (confirmed &&
- confirmed.source === ConfirmationTargets.USER &&
- confirmed.state === ConfirmationState.CONFIRMED) {
- this.delUser(confirmed.data);
- }
- });
- }
-
- isMySelf(uid: number): boolean {
- let currentUser = this.session.getCurrentUser();
- if (currentUser) {
- if (currentUser.user_id === uid) {
- return true;
- }
+ constructor(
+ private userService: UserService,
+ private translate: TranslateService,
+ private deletionDialogService: ConfirmationDialogService,
+ private msgHandler: MessageHandlerService,
+ private session: SessionService,
+ private appConfigService: AppConfigService,
+ private operationService: OperationService) {
+ this.deletionSubscription = deletionDialogService.confirmationConfirm$.subscribe(confirmed => {
+ if (confirmed &&
+ confirmed.source === ConfirmationTargets.USER &&
+ confirmed.state === ConfirmationState.CONFIRMED) {
+ this.delUser(confirmed.data);
+ }
+ });
}
- return false;
- }
+ isMySelf(uid: number): boolean {
+ let currentUser = this.session.getCurrentUser();
+ if (currentUser) {
+ if (currentUser.user_id === uid) {
+ return true;
+ }
+ }
- get onlySelf(): boolean {
- return this.selectedRow.length === 1 && this.isMySelf(this.selectedRow[0].user_id);
- }
-
- public get canCreateUser(): boolean {
- let appConfig = this.appConfigService.getConfig();
- if (appConfig) {
- return !(appConfig.auth_mode === 'ldap_auth' || appConfig.auth_mode === 'uaa_auth' || appConfig.auth_mode === 'oidc_auth');
- } else {
- return true;
- }
- }
-
- public get ifSameRole(): boolean {
- let usersRole: number[] = [];
- this.selectedRow.forEach(user => {
- if (user.user_id === 0 || this.isMySelf(user.user_id)) {
return false;
- }
- if (user.has_admin_role) {
- usersRole.push(1);
- } else {
- usersRole.push(0);
- }
- });
- if (usersRole.length && usersRole.every(num => num === 0)) {
- this.ISADMNISTRATOR = 'USER.ENABLE_ADMIN_ACTION';
- return true;
- }
- if (usersRole.length && usersRole.every(num => num === 1)) {
- this.ISADMNISTRATOR = 'USER.DISABLE_ADMIN_ACTION';
- return true;
- }
- return false;
- }
-
- isSystemAdmin(u: User): string {
- if (!u) {
- return "{{MISS}}";
- }
- let key: string = u.has_admin_role ? "USER.IS_ADMIN" : "USER.IS_NOT_ADMIN";
- this.translate.get(key).subscribe((res: string) => this.adminColumn = res);
- return this.adminColumn;
- }
-
- adminActions(u: User): string {
- if (!u) {
- return "{{MISS}}";
- }
- let key: string = u.has_admin_role ? "USER.DISABLE_ADMIN_ACTION" : "USER.ENABLE_ADMIN_ACTION";
- this.translate.get(key).subscribe((res: string) => this.adminMenuText = res);
- return this.adminMenuText;
- }
-
- public get inProgress(): boolean {
- return this.onGoing;
- }
-
- ngOnInit(): void { }
-
- ngOnDestroy(): void {
- if (this.deletionSubscription) {
- this.deletionSubscription.unsubscribe();
}
- if (this.timerHandler) {
- clearInterval(this.timerHandler);
- this.timerHandler = null;
- }
- }
-
- openChangePwdModal(): void {
- if (this.selectedRow.length === 1) {
- this.changePwdDialog.open(this.selectedRow[0].user_id);
+ get onlySelf(): boolean {
+ return this.selectedRow.length === 1 && this.isMySelf(this.selectedRow[0].user_id);
}
- }
+ public get canCreateUser(): boolean {
+ let appConfig = this.appConfigService.getConfig();
+ if (appConfig) {
+ return !(appConfig.auth_mode === 'ldap_auth' || appConfig.auth_mode === 'uaa_auth' || appConfig.auth_mode === 'oidc_auth');
+ } else {
+ return true;
+ }
+ }
- // Filter items by keywords
- doFilter(terms: string): void {
- this.selectedRow = [];
- this.currentTerm = terms;
- this.originalUsers.subscribe(users => {
- if (terms.trim() === "") {
- this.refreshUser((this.currentPage - 1) * 15, this.currentPage * 15);
- } else {
- let selectUsers = users.filter(user => {
- return this.isMatchFilterTerm(terms, user.username);
+ public get ifSameRole(): boolean {
+ let usersRole: number[] = [];
+ this.selectedRow.forEach(user => {
+ if (user.user_id === 0 || this.isMySelf(user.user_id)) {
+ return false;
+ }
+ if (user.has_admin_role) {
+ usersRole.push(1);
+ } else {
+ usersRole.push(0);
+ }
});
- this.totalCount = selectUsers.length;
- this.users = selectUsers.slice((this.currentPage - 1) * 15, this.currentPage * 15); // First page
-
- this.forceRefreshView(5000);
- }
- });
- }
-
- // Disable the admin role for the specified user
- changeAdminRole(): void {
- let observableLists: any[] = [];
- if (this.selectedRow.length) {
- if (this.ISADMNISTRATOR === 'USER.ENABLE_ADMIN_ACTION') {
- for (let i = 0; i < this.selectedRow.length; i++) {
- // Double confirm user is existing
- if (this.selectedRow[i].user_id === 0 || this.isMySelf(this.selectedRow[i].user_id)) {
- continue;
- }
- let updatedUser: User = new User();
- updatedUser.user_id = this.selectedRow[i].user_id;
-
- updatedUser.has_admin_role = true; // Set as admin
- observableLists.push(this.userService.updateUserRole(updatedUser));
+ if (usersRole.length && usersRole.every(num => num === 0)) {
+ this.ISADMNISTRATOR = 'USER.ENABLE_ADMIN_ACTION';
+ return true;
}
- }
- if (this.ISADMNISTRATOR === 'USER.DISABLE_ADMIN_ACTION') {
- for (let i = 0; i < this.selectedRow.length; i++) {
- // Double confirm user is existing
- if (this.selectedRow[i].user_id === 0 || this.isMySelf(this.selectedRow[i].user_id)) {
- continue;
- }
- let updatedUser: User = new User();
- updatedUser.user_id = this.selectedRow[i].user_id;
-
- updatedUser.has_admin_role = false; // Set as none admin
- observableLists.push(this.userService.updateUserRole(updatedUser));
+ if (usersRole.length && usersRole.every(num => num === 1)) {
+ this.ISADMNISTRATOR = 'USER.DISABLE_ADMIN_ACTION';
+ return true;
}
- }
-
- forkJoin(...observableLists).subscribe(() => {
- this.selectedRow = [];
- this.refresh();
- }, error => {
- this.selectedRow = [];
- this.msgHandler.handleError(error);
- });
- }
- }
-
- // Delete the specified user
- deleteUsers(users: User[]): void {
- let userArr: string[] = [];
- if (this.onlySelf) {
- return;
+ return false;
}
- if (users && users.length) {
- users.forEach(user => {
- userArr.push(user.username);
- });
+ isSystemAdmin(u: User): string {
+ if (!u) {
+ return "{{MISS}}";
+ }
+ let key: string = u.has_admin_role ? "USER.IS_ADMIN" : "USER.IS_NOT_ADMIN";
+ this.translate.get(key).subscribe((res: string) => this.adminColumn = res);
+ return this.adminColumn;
}
- // Confirm deletion
- let msg: ConfirmationMessage = new ConfirmationMessage(
- "USER.DELETION_TITLE",
- "USER.DELETION_SUMMARY",
- userArr.join(','),
- users,
- ConfirmationTargets.USER,
- ConfirmationButtons.DELETE_CANCEL
- );
- this.deletionDialogService.openComfirmDialog(msg);
- }
- delUser(users: User[]): void {
- let observableLists: any[] = [];
- if (users && users.length) {
- users.forEach(user => {
- observableLists.push(this.delOperate(user));
- });
+ adminActions(u: User): string {
+ if (!u) {
+ return "{{MISS}}";
+ }
+ let key: string = u.has_admin_role ? "USER.DISABLE_ADMIN_ACTION" : "USER.ENABLE_ADMIN_ACTION";
+ this.translate.get(key).subscribe((res: string) => this.adminMenuText = res);
+ return this.adminMenuText;
+ }
- forkJoin(...observableLists).subscribe((item) => {
+ public get inProgress(): boolean {
+ return this.onGoing;
+ }
+
+ ngOnInit(): void {
+ }
+
+ ngOnDestroy(): void {
+ if (this.deletionSubscription) {
+ this.deletionSubscription.unsubscribe();
+ }
+
+ if (this.timerHandler) {
+ clearInterval(this.timerHandler);
+ this.timerHandler = null;
+ }
+ }
+
+ openChangePwdModal(): void {
+ if (this.selectedRow.length === 1) {
+ this.changePwdDialog.open(this.selectedRow[0].user_id);
+ }
+ }
+
+ // Filter items by keywords
+ doFilter(terms: string): void {
this.selectedRow = [];
+ this.currentTerm = terms.trim();
+ this.currentPage = 1;
+ this.onGoing = true;
+ this.getUserListByPaging();
+ }
+
+ // Disable the admin role for the specified user
+ changeAdminRole(): void {
+ let observableLists: any[] = [];
+ if (this.selectedRow.length) {
+ if (this.ISADMNISTRATOR === 'USER.ENABLE_ADMIN_ACTION') {
+ for (let i = 0; i < this.selectedRow.length; i++) {
+ // Double confirm user is existing
+ if (this.selectedRow[i].user_id === 0 || this.isMySelf(this.selectedRow[i].user_id)) {
+ continue;
+ }
+ let updatedUser: User = new User();
+ updatedUser.user_id = this.selectedRow[i].user_id;
+
+ updatedUser.has_admin_role = true; // Set as admin
+ observableLists.push(this.userService.updateUserRole(updatedUser));
+ }
+ }
+ if (this.ISADMNISTRATOR === 'USER.DISABLE_ADMIN_ACTION') {
+ for (let i = 0; i < this.selectedRow.length; i++) {
+ // Double confirm user is existing
+ if (this.selectedRow[i].user_id === 0 || this.isMySelf(this.selectedRow[i].user_id)) {
+ continue;
+ }
+ let updatedUser: User = new User();
+ updatedUser.user_id = this.selectedRow[i].user_id;
+
+ updatedUser.has_admin_role = false; // Set as none admin
+ observableLists.push(this.userService.updateUserRole(updatedUser));
+ }
+ }
+
+ forkJoin(...observableLists).subscribe(() => {
+ this.selectedRow = [];
+ this.refresh();
+ }, error => {
+ this.selectedRow = [];
+ this.msgHandler.handleError(error);
+ });
+ }
+ }
+
+ // Delete the specified user
+ deleteUsers(users: User[]): void {
+ let userArr: string[] = [];
+ if (this.onlySelf) {
+ return;
+ }
+
+ if (users && users.length) {
+ users.forEach(user => {
+ userArr.push(user.username);
+ });
+ }
+ // Confirm deletion
+ let msg: ConfirmationMessage = new ConfirmationMessage(
+ "USER.DELETION_TITLE",
+ "USER.DELETION_SUMMARY",
+ userArr.join(','),
+ users,
+ ConfirmationTargets.USER,
+ ConfirmationButtons.DELETE_CANCEL
+ );
+ this.deletionDialogService.openComfirmDialog(msg);
+ }
+
+ delUser(users: User[]): void {
+ let observableLists: any[] = [];
+ if (users && users.length) {
+ users.forEach(user => {
+ observableLists.push(this.delOperate(user));
+ });
+
+ forkJoin(...observableLists).subscribe((item) => {
+ this.selectedRow = [];
+ this.currentTerm = '';
+ this.refresh();
+ });
+ }
+ }
+
+ delOperate(user: User): Observable<any> {
+ // init operation info
+ let operMessage = new OperateInfo();
+ operMessage.name = 'OPERATION.DELETE_USER';
+ operMessage.data.id = user.user_id;
+ operMessage.state = OperationState.progressing;
+ operMessage.data.name = user.username;
+ this.operationService.publishInfo(operMessage);
+
+ if (this.isMySelf(user.user_id)) {
+ return this.translate.get('BATCH.DELETED_FAILURE').pipe(map(res => {
+ operateChanges(operMessage, OperationState.failure, res);
+ }));
+ }
+
+ return this.userService.deleteUser(user.user_id).pipe(map(() => {
+ this.translate.get('BATCH.DELETED_SUCCESS').subscribe(res => {
+ operateChanges(operMessage, OperationState.success);
+ });
+ }), catchError(error => {
+ const message = errorHandFn(error);
+ this.translate.get(message).subscribe(res =>
+ operateChanges(operMessage, OperationState.failure, res)
+ );
+ return observableThrowError(message);
+ }));
+ }
+
+ // Refresh the user list
+ refreshUser(): void {
+ this.selectedRow = [];
+ // Start to get
this.currentTerm = '';
+ this.onGoing = true;
+ this.getUserListByPaging();
+ }
+
+ // Add new user
+ addNewUser(): void {
+ if (!this.canCreateUser) {
+ return; // No response to this hacking action
+ }
+ this.newUserDialog.open();
+ }
+
+ // Add user to the user list
+ addUserToList(user: User): void {
+ // Currently we can only add it by reloading all
this.refresh();
- });
- }
- }
-
- delOperate(user: User): Observable {
- // init operation info
- let operMessage = new OperateInfo();
- operMessage.name = 'OPERATION.DELETE_USER';
- operMessage.data.id = user.user_id;
- operMessage.state = OperationState.progressing;
- operMessage.data.name = user.username;
- this.operationService.publishInfo(operMessage);
-
- if (this.isMySelf(user.user_id)) {
- return this.translate.get('BATCH.DELETED_FAILURE').pipe(map(res => {
- operateChanges(operMessage, OperationState.failure, res);
- }));
}
- return this.userService.deleteUser(user.user_id).pipe(map(() => {
- this.translate.get('BATCH.DELETED_SUCCESS').subscribe(res => {
- operateChanges(operMessage, OperationState.success);
- });
- }), catchError(error => {
- const message = errorHandFn(error);
- this.translate.get(message).subscribe(res =>
- operateChanges(operMessage, OperationState.failure, res)
- );
- return observableThrowError(message);
- }));
- }
-
- // Refresh the user list
- refreshUser(from: number, to: number): void {
- this.selectedRow = [];
- // Start to get
- this.currentTerm = '';
- this.onGoing = true;
-
- this.originalUsers = this.userService.getUsers();
- this.originalUsers.subscribe(users => {
- this.onGoing = false;
-
- this.totalCount = users.length;
- this.users = users.slice(from, to); // First page
-
- this.forceRefreshView(5000);
-
- return users;
- }, error => {
- this.onGoing = false;
- this.msgHandler.handleError(error);
- this.forceRefreshView(5000);
- });
- }
-
- // Add new user
- addNewUser(): void {
- if (!this.canCreateUser) {
- return; // No response to this hacking action
+ // Data loading
+ load(state: any): void {
+ this.selectedRow = [];
+ this.onGoing = true;
+ this.getUserListByPaging();
}
- this.newUserDialog.open();
- }
- // Add user to the user list
- addUserToList(user: User): void {
- // Currently we can only add it by reloading all
- this.refresh();
- }
-
- // Data loading
- load(state: any): void {
- this.selectedRow = [];
- if (state && state.page) {
- if (this.originalUsers) {
- this.originalUsers.subscribe(users => {
- this.users = users.slice(state.page.from, state.page.to + 1);
- });
- this.forceRefreshView(5000);
- } else {
- this.refreshUser(state.page.from, state.page.to + 1);
- }
- } else {
- // Refresh
- this.refresh();
+ refresh(): void {
+ this.currentPage = 1; // Refresh pagination
+ this.refreshUser();
}
- }
- refresh(): void {
- this.currentPage = 1; // Refresh pagination
- this.refreshUser(0, 15);
- }
-
- SelectedChange(): void {
- this.forceRefreshView(5000);
- }
-
- forceRefreshView(duration: number): void {
- // Reset timer
- if (this.timerHandler) {
- clearInterval(this.timerHandler);
+ getUserListByPaging() {
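+ // Users are fetched page by page; the grid total comes from the
+ // X-Total-Count response header.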
+ this.userService.getUserListByPaging(this.currentPage, this.pageSize, this.currentTerm)
+ .subscribe(response => {
+ // Get total count
+ if (response.headers) {
+ let xHeader: string = response.headers.get("X-Total-Count");
+ if (xHeader) {
+ this.totalCount = parseInt(xHeader, 10);
+ }
+ }
+ this.users = response.body as User[];
+ this.onGoing = false;
+ }, error => {
+ this.msgHandler.handleError(error);
+ this.onGoing = false;
+ });
}
- this.timerHandler = setInterval(() => this.ref.markForCheck(), 100);
- setTimeout(() => {
- if (this.timerHandler) {
- clearInterval(this.timerHandler);
- this.timerHandler = null;
- }
- }, duration);
- }
-
- private isMatchFilterTerm(terms: string, testedItem: string): boolean {
- return testedItem.toLowerCase().indexOf(terms.toLowerCase()) !== -1;
- }
-
}
diff --git a/src/portal/src/app/user/user.service.ts b/src/portal/src/app/user/user.service.ts
index 90d032afc..717f1ea3d 100644
--- a/src/portal/src/app/user/user.service.ts
+++ b/src/portal/src/app/user/user.service.ts
@@ -12,14 +12,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
import { Injectable } from '@angular/core';
-import { HttpClient } from '@angular/common/http';
+import { HttpClient, HttpParams, HttpResponse } from '@angular/common/http';
import { map, catchError } from "rxjs/operators";
import { Observable, throwError as observableThrowError } from "rxjs";
-import {HTTP_JSON_OPTIONS, HTTP_GET_OPTIONS} from "@harbor/ui";
+import {HTTP_JSON_OPTIONS, HTTP_GET_OPTIONS, buildHttpRequestOptionsWithObserveResponse} from "@harbor/ui";
import { User, LDAPUser } from './user';
import LDAPUsertoUser from './user';
+
const userMgmtEndpoint = '/api/users';
const userListSearch = '/api/users/search?';
const ldapUserEndpoint = '/api/ldap/users';
@@ -34,7 +35,19 @@ const ldapUserEndpoint = '/api/ldap/users';
export class UserService {
constructor(private http: HttpClient) { }
-
+ // Get paging user list
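+ // Paging params are optional; when username is set the list is filtered server-side.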
+ getUserListByPaging(page: number, pageSize: number, username?: string) {
+ let params = new HttpParams();
+ if (page && pageSize) {
+ params = params.set('page', page + '').set('page_size', pageSize + '');
+ }
+ if (username) {
+ params = params.set('username', username);
+ }
+ return this.http
+ .get<HttpResponse<User[]>>(userMgmtEndpoint, buildHttpRequestOptionsWithObserveResponse(params)).pipe(
+ catchError(error => observableThrowError(error)));
+ }
// Handle the related exceptions
handleError(error: any): Observable<any> {
return observableThrowError(error.error || error);
@@ -47,9 +60,10 @@ export class UserService {
, catchError(error => this.handleError(error)));
}
getUsers(): Observable<User[]> {
- return this.http.get(userMgmtEndpoint, HTTP_GET_OPTIONS)
- .pipe(map(response => response as User[])
- , catchError(error => this.handleError(error)));
+ return this.http.get(userMgmtEndpoint)
+ .pipe(map((response: any) => response as User[])
+ , catchError(error => this.handleError(error)));
}
// Add new user
diff --git a/src/portal/src/i18n/lang/en-us-lang.json b/src/portal/src/i18n/lang/en-us-lang.json
index 74f99e0e9..53e369b58 100644
--- a/src/portal/src/i18n/lang/en-us-lang.json
+++ b/src/portal/src/i18n/lang/en-us-lang.json
@@ -31,6 +31,7 @@
"TEST_MAIL": "TEST MAIL SERVER",
"CLOSE": "CLOSE",
"TEST_LDAP": "TEST LDAP SERVER",
+ "TEST_OIDC": "TEST OIDC SERVER",
"MORE_INFO": "More info...",
"YES": "YES",
"NO": "NO",
@@ -42,7 +43,12 @@
"ACTIONS": "Actions",
"BROWSE": "Browse",
"UPLOAD": "Upload",
- "NO_FILE": "No file selected"
+ "NO_FILE": "No file selected",
+ "ADD": "ADD",
+ "RUN": "RUN",
+ "CONTINUE": "CONTINUE",
+ "ENABLE": "ENABLE",
+ "DISABLE": "DISABLE"
},
"BATCH": {
"DELETED_SUCCESS": "Deleted successfully",
@@ -58,6 +64,7 @@
"TOOLTIP": {
"NAME_FILTER": "Filter the name of the resource. Leave empty or use '**' to match all. 'library/**' only matches resources under 'library'. For more patterns, please refer to the user guide.",
"TAG_FILTER": "Filter the tag/version part of the resources. Leave empty or use '**' to match all. '1.0*' only matches the tags that starts with '1.0'. For more patterns, please refer to the user guide.",
+ "LABEL_FILTER": "Filter the resources according to labels.",
"RESOURCE_FILTER": "Filter the type of resources.",
"PUSH_BASED": "Push the resources from the local Harbor to the remote registry.",
"PULL_BASED": "Pull the resources from the remote registry to the local Harbor.",
@@ -216,9 +223,15 @@
"TOGGLED_SUCCESS": "Toggled project successfully.",
"FAILED_TO_DELETE_PROJECT": "Project contains repositories or replication rules or helm-charts cannot be deleted.",
"INLINE_HELP_PUBLIC": "When a project is set to public, anyone has read permission to the repositories under this project, and the user does not need to run \"docker login\" before pulling images under this project.",
- "OF": "of"
+ "OF": "of",
+ "COUNT_QUOTA": "Count quota",
+ "STORAGE_QUOTA": "Storage quota",
+ "COUNT_QUOTA_TIP": "Please enter an integer between '1' & '100,000,000', '-1' for unlimited.",
+ "STORAGE_QUOTA_TIP": "The upper limit of Storage Quota only takes integer values, capped at '1024TB'. Enter '-1' for unlimited quota",
+ "QUOTA_UNLIMIT_TIP": "For unlimited quota, please enter '-1'."
},
"PROJECT_DETAIL": {
+ "SUMMARY": "Summary",
"REPOSITORIES": "Repositories",
"REPLICATION": "Replication",
"USERS": "Members",
@@ -227,7 +240,8 @@
"PROJECTS": "Projects",
"CONFIG": "Configuration",
"HELMCHART": "Helm Charts",
- "ROBOT_ACCOUNTS": "Robot Accounts"
+ "ROBOT_ACCOUNTS": "Robot Accounts",
+ "WEBHOOKS": "Webhooks"
},
"PROJECT_CONFIG": {
"REGISTRY": "Project registry",
@@ -310,10 +324,10 @@
"ENABLE_ACCOUNT": "Enable Account",
"DELETE": "Delete",
"CREAT_ROBOT_ACCOUNT": "Creat Robot Account",
- "PULL_PERMISSION": "Image pull",
- "PULL_PUSH_PERMISSION": "Image pull / push",
- "PUSH_CHART_PERMISSION": "Helm chart push",
- "PULL_CHART_PERMISSION": "Helm chart pull",
+ "PERMISSIONS_IMAGE": "Image",
+ "PERMISSIONS_HELMCHART": "Helm Chart",
+ "PUSH": "Push",
+ "PULL": "Pull",
"FILTER_PLACEHOLDER": "Filter Robot Accounts",
"ROBOT_NAME": "Cannot contain special characters(~#$%) and maximum length should be 255 characters.",
"ACCOUNT_EXISTING": "Robot Account is already exists.",
@@ -321,12 +335,43 @@
"CREATED_SUCCESS": "Created '{{param}}' successfully.",
"COPY_SUCCESS": "Copy token successfully of '{{param}}'",
"DELETION_TITLE": "Confirm removal of robot accounts",
- "DELETION_SUMMARY": "Do you want to delete robot accounts {{param}}?"
+ "DELETION_SUMMARY": "Do you want to delete robot accounts {{param}}?",
+ "PULL_IS_MUST" : "Pull permission is checked by default and can not be modified.",
+ "EXPORT_TO_FILE" : "export to file"
+ },
+ "WEBHOOK": {
+ "EDIT_BUTTON": "EDIT",
+ "ENABLED_BUTTON": "ENABLE",
+ "DISABLED_BUTTON": "DISABLE",
+ "TYPE": "Webhook",
+ "STATUS": "Status",
+ "CREATED": "Created",
+ "ENABLED": "Enabled",
+ "DISABLED": "Disabled",
+ "OF": "of",
+ "ITEMS": "items",
+ "LAST_TRIGGERED": "Last Triggered",
+ "EDIT_WEBHOOK": "Webhook Endpoint",
+ "CREATE_WEBHOOK": "Getting started with webhooks",
+ "EDIT_WEBHOOK_DESC": "Specify the endpoint for receiving webhook notifications",
+ "CREATE_WEBHOOK_DESC": "To get started with webhooks, provide an endpoint and credentials to access the webhook server.",
+ "ENDPOINT_URL": "Endpoint URL",
+ "URL_IS_REQUIRED": "Endpoint URL is required.",
+ "AUTH_HEADER": "Auth Header",
+ "VERIFY_REMOTE_CERT": "Verify Remote Certificate",
+ "TEST_ENDPOINT_BUTTON": "TEST ENDPOINT",
+ "CANCEL_BUTTON": "CANCEL",
+ "SAVE_BUTTON": "SAVE",
+ "ENABLED_WEBHOOK_TITLE": "Enable Project Webhooks",
+ "ENABLED_WEBHOOK_SUMMARY": "Do you want to enable webhooks for project ",
+ "DISABLED_WEBHOOK_TITLE": "Disable Project Webhooks",
+ "DISABLED_WEBHOOK_SUMMARY": "Do you want to disable webhooks for project "
},
"GROUP": {
"GROUP": "Group",
"GROUPS": "Groups",
"IMPORT_LDAP_GROUP": "Import LDAP Group",
+ "IMPORT_HTTP_GROUP": "New HTTP Group",
"ADD": "New Group",
"EDIT": "Edit",
"DELETE": "Delete",
@@ -339,8 +384,17 @@
"ADD_GROUP_SUCCESS": "Add group success",
"EDIT_GROUP_SUCCESS": "Edit group success",
"LDAP_TYPE": "LDAP",
+ "HTTP_TYPE": "HTTP",
"OF": "of",
- "ITEMS": "items"
+ "ITEMS": "items",
+ "NEW_MEMBER": "New Group Member",
+ "NEW_USER_INFO": "Add a group to be a member of this project with specified role",
+ "ROLE": "Role",
+ "SYS_ADMIN": "System Admin",
+ "PROJECT_ADMIN": "Project Admin",
+ "PROJECT_MASTER": "Master",
+ "DEVELOPER": "Developer",
+ "GUEST": "Guest"
},
"AUDIT_LOG": {
"USERNAME": "Username",
@@ -553,6 +607,8 @@
"TAGS_COUNT": "Tags",
"PULL_COUNT": "Pulls",
"PULL_COMMAND": "Pull Command",
+ "PULL_TIME": "Pull Time",
+ "PUSH_TIME": "Push Time",
"MY_REPOSITORY": "My Repository",
"PUBLIC_REPOSITORY": "Public Repository",
"DELETION_TITLE_REPO": "Confirm Repository Deletion",
@@ -658,6 +714,19 @@
"ADD_LABEL_TO_CHART_VERSION": "Add labels to this chart version",
"STATUS": "Status"
},
+ "SUMMARY": {
+ "QUOTAS": "quotas",
+ "PROJECT_REPOSITORY": "Project repositories",
+ "PROJECT_HELM_CHART": "Project Helm Chart",
+ "PROJECT_MEMBER": "Project members",
+ "PROJECT_QUOTAS": "Project quotas",
+ "ARTIFACT_COUNT": "Artifact count",
+ "STORAGE_CONSUMPTION": "Storage consumption",
+ "ADMIN": "Admin(s)",
+ "MASTER": "Master(s)",
+ "DEVELOPER": "Developer(s)",
+ "GUEST": "Guest(s)"
+ },
"ALERT": {
"FORM_CHANGE_CONFIRMATION": "Some changes are not saved yet. Do you want to cancel?"
},
@@ -682,7 +751,9 @@
"LABEL": "Labels",
"REPOSITORY": "Repository",
"REPO_READ_ONLY": "Repository Read Only",
+ "WEBHOOK_NOTIFICATION_ENABLED": "Webhooks enabled",
"SYSTEM": "System Settings",
+ "PROJECT_QUOTAS": "Project Quotas",
"VULNERABILITY": "Vulnerability",
"GC": "Garbage Collection",
"CONFIRM_TITLE": "Confirm to cancel",
@@ -715,6 +786,7 @@
"ROOT_CERT": "Registry Root Certificate",
"ROOT_CERT_LINK": "Download",
"REGISTRY_CERTIFICATE": "Registry certificate",
+ "NO_CHANGE": "Save abort because nothing changed",
"TOOLTIP": {
"SELF_REGISTRATION_ENABLE": "Enable sign up.",
"SELF_REGISTRATION_DISABLE": "Disable sign up.",
@@ -725,13 +797,14 @@
"LDAP_UID": "The attribute used in a search to match a user. It could be uid, cn, email, sAMAccountName or other attributes depending on your LDAP/AD.",
"LDAP_SCOPE": "The scope to search for users.",
"TOKEN_EXPIRATION": "The expiration time (in minutes) of a token created by the token service. Default is 30 minutes.",
- "ROBOT_TOKEN_EXPIRATION": "The expiration time ( in days) of the token of the robot account, Default is 30 days. Show the number of days converted from minutes and rounds down",
+ "ROBOT_TOKEN_EXPIRATION": "The expiration time (in days) of the token of the robot account, Default is 30 days. Show the number of days converted from minutes and rounds down",
"PRO_CREATION_RESTRICTION": "The flag to define what users have permission to create projects. By default, everyone can create a project. Set to 'Admin Only' so that only an administrator can create a project.",
"ROOT_CERT_DOWNLOAD": "Download the root certificate of registry.",
"SCANNING_POLICY": "Set image scanning policy based on different requirements. 'None': No active policy; 'Daily At': Triggering scanning at the specified time everyday.",
"VERIFY_CERT": "Verify Cert from LDAP Server",
"READONLY_TOOLTIP": "In read-only mode, you can not delete repositories or tags or push images. ",
"REPO_TOOLTIP": "Users can not do any operations to the images in this mode.",
+ "WEBHOOK_TOOLTIP": "Enable webhooks to receive callbacks at your designated endpoints when certain actions such as image or chart being pushed, pulled, deleted, scanned are performed",
"HOURLY_CRON":"Run once an hour, beginning of hour. Equivalent to 0 0 * * * *.",
"WEEKLY_CRON":"Run once a week, midnight between Sat/Sun. Equivalent to 0 0 0 * * 0.",
"DAILY_CRON":"Run once a day, midnight. Equivalent to 0 0 0 * * *."
@@ -752,7 +825,7 @@
"LDAP_GROUP_GID": "LDAP Group GID",
"LDAP_GROUP_GID_INFO": "The attribute used in a search to match a user, it could be uid, cn or other attributes depending on your LDAP/AD. the group in Harbor is named with this attribute by default.",
"LDAP_GROUP_ADMIN_DN": "LDAP Group Admin DN",
- "LDAP_GROUP_ADMIN_DN_INFO": "Specify an LDAP group DN. all LDAP user in this group will have harbor admin privilege. Keep it blank if you do not want to.",
+ "LDAP_GROUP_ADMIN_DN_INFO": "Specify an LDAP group DN. All LDAP user in this group will have harbor admin privilege. Keep it blank if you do not want to.",
"LDAP_GROUP_MEMBERSHIP": "LDAP Group Membership",
"LDAP_GROUP_MEMBERSHIP_INFO": "The attribute indicates the membership of LDAP group, default value is memberof, in some LDAP server it could be \"ismemberof\"",
"GROUP_SCOPE": "LDAP Group Scope",
@@ -768,7 +841,7 @@
"HTTP_AUTH": {
"ENDPOINT": "Server Endpoint",
"TOKEN_REVIEW": "Token Review Endpoint",
- "ALWAYS_ONBOARD": "Always Onboard",
+ "SKIP_SEARCH": "Skip Search",
"VERIFY_CERT": "Verify Certificate"
},
"OIDC": {
@@ -801,7 +874,8 @@
"TEST_MAIL_FAILED": "Failed to verify mail server with error: {{param}}.",
"TEST_LDAP_FAILED": "Failed to verify LDAP server with error: {{param}}.",
"LEAVING_CONFIRMATION_TITLE": "Confirm to leave",
- "LEAVING_CONFIRMATION_SUMMARY": "Changes have not been saved yet. Do you want to leave current page?"
+ "LEAVING_CONFIRMATION_SUMMARY": "Changes have not been saved yet. Do you want to leave current page?",
+ "TEST_OIDC_SUCCESS": "Connection to OIDC server is verified."
},
"PAGE_NOT_FOUND": {
"MAIN_TITLE": "Page not found",
@@ -927,6 +1001,28 @@
"PLACEHOLDER": "We couldn't find any labels!",
"NAME_ALREADY_EXISTS": "Label name already exists."
},
+ "QUOTA": {
+ "PROJECT": "Project",
+ "OWNER": "Owner",
+ "COUNT": "Count",
+ "STORAGE": "Storage",
+ "EDIT": "Edit",
+ "DELETE": "Delete",
+ "OF": "of",
+ "PROJECT_QUOTA_DEFAULT_ARTIFACT": "Default artifact count per project",
+ "PROJECT_QUOTA_DEFAULT_DISK": "Default disk space per project",
+ "EDIT_PROJECT_QUOTAS": "Edit Project Quotas",
+ "EDIT_DEFAULT_PROJECT_QUOTAS": "Edit Default Project Quotas",
+ "SET_QUOTAS": "Set the project quotas for project '{{params}}'",
+ "SET_DEFAULT_QUOTAS": "Set the default project quotas when creating new projects",
+ "COUNT_QUOTA": "Artifact count",
+ "COUNT_DEFAULT_QUOTA": "Default artifact count",
+ "STORAGE_QUOTA": "Storage consumption",
+ "STORAGE_DEFAULT_QUOTA": "Default storage consumption",
+ "SAVE_SUCCESS": "Quota edit success",
+ "UNLIMITED": "unlimited",
+ "INVALID_INPUT": "invalid input"
+ },
"WEEKLY": {
"MONDAY": "Monday",
"TUESDAY": "Tuesday",
@@ -1013,6 +1109,106 @@
"MSG_SUCCESS": "Retag successfully",
"TIP_REPO": "A repository name is broken up into path components. A component of a repository name must be at least one lowercase, alpha-numeric characters, optionally separated by periods, dashes or underscores. More strictly, it must match the regular expression [a-z0-9]+(?:[._-][a-z0-9]+)*.If a repository name has two or more path components, they must be separated by a forward slash ('/').The total length of a repository name, including slashes, must be less the 256 characters.",
"TIP_TAG": "A tag is a label applied to a Docker image in a repository. Tags are how various images in a repository are distinguished from each other.It need to match Regex: (`[\\w][\\w.-]{0,127}`)"
+ },
+ "CVE_WHITELIST": {
+ "DEPLOYMENT_SECURITY": "Deployment security",
+ "CVE_WHITELIST": "CVE whitelist",
+ "SYS_WHITELIST_EXPLAIN": "System whitelist allows vulnerabilities in this list to be ignored when calculating the vulnerability of an image.",
+ "ADD_SYS": "Add CVE IDs to the system whitelist",
+ "WARNING_SYS": "The system CVE whitelist has expired. You can enable the whitelist by extending the expiration date.",
+ "WARNING_PRO": "The project CVE whitelist has expired. You can enable the whitelist by extending the expiration date.",
+ "ADD": "ADD",
+ "ENTER": "Enter CVE ID(s)",
+ "HELP": "Separator: commas or newline characters",
+ "NONE": "None",
+ "EXPIRES_AT": "Expires at",
+ "NEVER_EXPIRES": "Never expires",
+ "PRO_WHITELIST_EXPLAIN": "Project whitelist allows vulnerabilities in this list to be ignored in this project when pushing and pulling images.",
+ "PRO_OR_SYS": "You can either use the default whitelist configured at the system level or click on 'Project whitelist' to create a new whitelist",
+ "MERGE_INTO": "Add individual CVE IDs before clicking 'ADD SYSTEM' to add system whitelist as well.",
+ "SYS_WHITELIST": "System whitelist",
+ "PRO_WHITELIST": "Project whitelist",
+ "ADD_SYSTEM": "ADD SYSTEM"
+ },
+ "TAG_RETENTION": {
+ "TAG_RETENTION": "Tag Retention",
+ "RETENTION_RULES": "Retention rules",
+ "RULE_NAME_1": " the images from the last {{number}} days",
+ "RULE_NAME_2": " the most recent active {{number}} images",
+ "RULE_NAME_3": " the most recently pushed {{number}} images",
+ "RULE_NAME_4": " the most recently pulled {{number}} images",
+ "RULE_NAME_5": " always",
+ "ADD_RULE": "ADD RULE",
+ "ADD_RULE_HELP_1": "Click the ADD RULE button to add a rule.",
+ "ADD_RULE_HELP_2": "Tag retention polices run once a day.",
+ "RETENTION_RUNS": "Retention runs",
+ "RUN_NOW": "RUN NOW",
+ "WHAT_IF_RUN": "DRY RUN",
+ "ABORT": "ABORT",
+ "SERIAL": "ID",
+ "STATUS": "Status",
+ "DRY_RUN": "Dry Run",
+ "START_TIME": "Start Time",
+ "DURATION": "Duration",
+ "DETAILS": "Details",
+ "REPOSITORY": "Repository",
+ "EDIT": "Edit",
+ "DISABLE": "Disable",
+ "ENABLE": "Enable",
+ "DELETE": "Delete",
+ "ADD_TITLE": "Add Tag Retention Rule",
+ "ADD_SUBTITLE": "Specify a tag retention rule for this project. All tag retention rules are independently calculated and each rule can be applied to a selected list of repositories.",
+ "BY_WHAT": "By image count or number of days",
+ "RULE_TEMPLATE_1": " the images from the last # days",
+ "RULE_TEMPLATE_2": " the most recent active # images",
+ "RULE_TEMPLATE_3": " the most recently pushed # images",
+ "RULE_TEMPLATE_4": " the most recently pulled # images",
+ "RULE_TEMPLATE_5": " always",
+ "ACTION_RETAIN": " retain",
+ "UNIT_DAY": "DAYS",
+ "UNIT_COUNT": "COUNT",
+ "NUMBER": "NUMBER",
+ "IN_REPOSITORIES": "For the repositories",
+ "REP_SEPARATOR": "Enter multiple comma separated repos,repo*,or **",
+ "TAGS": "Tags",
+ "MATCHES_TAGS": "Matches tags",
+ "MATCHES_EXCEPT_TAGS": "Matches except tags",
+ "TAG_SEPARATOR": "Enter multiple comma separated tags,tag*,**,or regex",
+ "LABELS": "Labels",
+ "MATCHES_LABELS": "Matches Labels",
+ "MATCHES_EXCEPT_LABELS": "Matches except Labels",
+ "REP_LABELS": "Enter multiple comma separated labels",
+ "RETENTION_RUN": "Retention Run",
+ "RETENTION_RUN_EXPLAIN": "Executing the retention policy can have adverse effects to the images in this project and affected image tags will be deleted. Press CANCEL and use a DRY RUN to simulate the effect of this policy. Otherwise press RUN to proceed.",
+ "RETENTION_RUN_ABORTED": "Retention Run Aborted",
+ "RETENTION_RUN_ABORTED_EXPLAIN": "This retention run has been aborted. Images already deleted are irreversible. You can initiate another run to continue to delete images. In order to simulate a run, you can use the “DRY RUN”.",
+ "LOADING": "Loading...",
+ "NO_EXECUTION": "We couldn't find any executions!",
+ "NO_HISTORY": "We couldn't find any histories!",
+ "DELETION": "Deletions",
+ "EDIT_TITLE": "Edit Tag Retention Rule",
+ "LOG": "Log",
+ "EXCLUDES": "Excludes",
+ "MATCHES": "Matches",
+ "REPO": " repositories",
+ "EXC": " excluding ",
+ "MAT": " matching ",
+ "AND": " and",
+ "WITH": " with ",
+ "WITHOUT": " without ",
+ "LOWER_LABELS": " labels",
+ "WITH_CONDITION": " with",
+ "LOWER_TAGS": " tags",
+ "TRIGGER": "Schedule",
+ "RETAINED": "Retained",
+ "TOTAL": "Total",
+ "NONE": " none",
+ "RULE_NAME_6": " the images pulled within the last {{number}} days",
+ "RULE_NAME_7": " the images pushed within the last {{number}} days",
+ "RULE_TEMPLATE_6": " the images pulled within the last # days",
+ "RULE_TEMPLATE_7": " the images pushed within the last # days",
+ "SCHEDULE": "Schedule",
+ "SCHEDULE_WARNING": "Executing the retention policy can have adverse effects to the images in this project and affected image tags will be deleted."
}
}
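
(The keys above are duplicated into each language file that follows. A minimal sketch, assuming the portal resolves them with @ngx-translate — which this patch does not show — of how a component would look a key up; the component itself is hypothetical.)

    import { Component, OnInit } from '@angular/core';
    import { TranslateService } from '@ngx-translate/core';

    // Requires TranslateModule to be imported by the hosting NgModule.
    @Component({
      selector: 'webhook-banner-demo',
      template: `<span>{{ 'WEBHOOK.CREATE_WEBHOOK' | translate }}</span>`
    })
    export class WebhookBannerDemoComponent implements OnInit {
      constructor(private translate: TranslateService) {}

      ngOnInit(): void {
        // Keys are addressed by their JSON path; if a key is missing from the
        // active language file, ngx-translate falls back to the key string,
        // which is why each lang file below repeats the same block of keys.
        this.translate.get('TAG_RETENTION.ADD_RULE_HELP_1')
          .subscribe((text: string) => console.log(text));
      }
    }
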
diff --git a/src/portal/src/i18n/lang/es-es-lang.json b/src/portal/src/i18n/lang/es-es-lang.json
index b99e31c3b..78be3a7cb 100644
--- a/src/portal/src/i18n/lang/es-es-lang.json
+++ b/src/portal/src/i18n/lang/es-es-lang.json
@@ -31,6 +31,7 @@
"TEST_MAIL": "COMPROBAR SERVIDOR DE CORREO",
"CLOSE": "CERRAR",
"TEST_LDAP": "COMPROBAR SERVIDOR LDAP",
+ "TEST_OIDC": "TEST OIDC SERVER",
"MORE_INFO": "Más información...",
"YES": "SI",
"NO": "NO",
@@ -42,7 +43,12 @@
"ACTIONS": "Actions",
"BROWSE": "Browse",
"UPLOAD": "Upload",
- "NO_FILE": "No file selected"
+ "NO_FILE": "No file selected",
+ "ADD": "ADD",
+ "RUN": "RUN",
+ "CONTINUE": "CONTINUE",
+ "ENABLE": "ENABLE",
+ "DISABLE": "DISABLE"
},
"BATCH": {
"DELETED_SUCCESS": "Deleted successfully",
@@ -58,6 +64,7 @@
"TOOLTIP": {
"NAME_FILTER": "Filter the name of the resource. Leave empty or use '**' to match all. 'library/**' only matches resources under 'library'. For more patterns, please refer to the user guide.",
"TAG_FILTER": "Filter the tag/version part of the resources. Leave empty or use '**' to match all. '1.0*' only matches the tags that starts with '1.0'. For more patterns, please refer to the user guide.",
+ "LABEL_FILTER": "Filter the resources according to labels.",
"RESOURCE_FILTER": "Filter the type of resources.",
"PUSH_BASED": "Push the resources from the local Harbor to the remote registry.",
"PULL_BASED": "Pull the resources from the remote registry to the local Harbor.",
@@ -217,9 +224,15 @@
"TOGGLED_SUCCESS": "Proyecto alternado satisfactoriamente.",
"FAILED_TO_DELETE_PROJECT": "Project contains repositories or replication rules or helm-charts cannot be deleted.",
"INLINE_HELP_PUBLIC": "Cuando un proyecto se marca como público, todo el mundo tiene permisos de lectura sobre los repositorio de dicho proyecto, y no hace falta hacer \"docker login\" antes de subir imágenes a ellos.",
- "OF": "of"
+ "OF": "of",
+ "COUNT_QUOTA": "Count quota",
+ "STORAGE_QUOTA": "Storage quota",
+ "COUNT_QUOTA_TIP": "Please enter an integer between '1' & '100,000,000', '-1' for unlimited",
+ "STORAGE_QUOTA_TIP": "The upper limit of Storage Quota only takes integer values, capped at '1024TB'. Enter '-1' for unlimited quota",
+ "QUOTA_UNLIMIT_TIP": "For unlimited quota, please enter '-1'."
},
"PROJECT_DETAIL": {
+ "SUMMARY": "Summary",
"REPOSITORIES": "Repositorios",
"REPLICATION": "Replicación",
"USERS": "Miembros",
@@ -228,7 +241,8 @@
"PROJECTS": "Proyectos",
"CONFIG": "Configuración",
"HELMCHART": "Helm Charts",
- "ROBOT_ACCOUNTS": "Robot Accounts"
+ "ROBOT_ACCOUNTS": "Robot Accounts",
+ "WEBHOOKS": "Webhooks"
},
"PROJECT_CONFIG": {
"REGISTRY": "Registro de proyectos",
@@ -311,10 +325,10 @@
"ENABLE_ACCOUNT": "Enable Account",
"DELETE": "Delete",
"CREAT_ROBOT_ACCOUNT": "Creat Robot Account",
- "PULL_PERMISSION": "Image pull",
- "PULL_PUSH_PERMISSION": "Image pull / push",
- "PUSH_CHART_PERMISSION": "Helm chart push",
- "PULL_CHART_PERMISSION": "Helm chart pull",
+ "PERMISSIONS_IMAGE": "Image",
+ "PERMISSIONS_HELMCHART": "Helm Chart",
+ "PUSH": "Push",
+ "PULL": "Pull",
"FILTER_PLACEHOLDER": "Filter Robot Accounts",
"ROBOT_NAME": "Cannot contain special characters(~#$%) and maximum length should be 255 characters.",
"ACCOUNT_EXISTING": "Robot Account is already exists.",
@@ -322,12 +336,43 @@
"CREATED_SUCCESS": "Created '{{param}}' successfully.",
"COPY_SUCCESS": "Copy token successfully of '{{param}}'",
"DELETION_TITLE": "Confirm removal of robot accounts",
- "DELETION_SUMMARY": "Do you want to delete robot accounts {{param}}?"
+ "DELETION_SUMMARY": "Do you want to delete robot accounts {{param}}?",
+ "PULL_IS_MUST" : "Pull permission is checked by default and can not be modified.",
+ "EXPORT_TO_FILE" : "export to file"
+ },
+ "WEBHOOK": {
+ "EDIT_BUTTON": "EDIT",
+ "ENABLED_BUTTON": "ENABLE",
+ "DISABLED_BUTTON": "DISABLE",
+ "TYPE": "Webhook",
+ "STATUS": "Status",
+ "CREATED": "Created",
+ "ENABLED": "Enabled",
+ "DISABLED": "Disabled",
+ "OF": "of",
+ "ITEMS": "items",
+ "LAST_TRIGGERED": "Last Triggered",
+ "EDIT_WEBHOOK": "Webhook Endpoint",
+ "CREATE_WEBHOOK": "Getting started with webhooks",
+ "EDIT_WEBHOOK_DESC": "Specify the endpoint for receiving webhook notifications",
+ "CREATE_WEBHOOK_DESC": "To get started with webhooks, provide an endpoint and credentials to access the webhook server.",
+ "ENDPOINT_URL": "Endpoint URL",
+ "URL_IS_REQUIRED": "Endpoint URL is required.",
+ "AUTH_HEADER": "Auth Header",
+ "VERIFY_REMOTE_CERT": "Verify Remote Certificate",
+ "TEST_ENDPOINT_BUTTON": "TEST ENDPOINT",
+ "CANCEL_BUTTON": "CANCEL",
+ "SAVE_BUTTON": "SAVE",
+ "ENABLED_WEBHOOK_TITLE": "Enable Project Webhooks",
+ "ENABLED_WEBHOOK_SUMMARY": "Do you want to enable webhooks for project ",
+ "DISABLED_WEBHOOK_TITLE": "Disable Project Webhooks",
+ "DISABLED_WEBHOOK_SUMMARY": "Do you want to disable webhooks for project "
},
"GROUP": {
"GROUP": "Group",
"GROUPS": "Groups",
"IMPORT_LDAP_GROUP": "Import LDAP Group",
+ "IMPORT_HTTP_GROUP": "New HTTP Group",
"ADD": "Add",
"EDIT": "Edit",
"DELETE": "Delete",
@@ -339,8 +384,17 @@
"ADD_GROUP_SUCCESS": "Add group success",
"EDIT_GROUP_SUCCESS": "Edit group success",
"LDAP_TYPE": "LDAP",
+ "HTTP_TYPE": "HTTP",
"OF": "of",
- "ITEMS": "items"
+ "ITEMS": "items",
+ "NEW_MEMBER": "New Group Member",
+ "NEW_USER_INFO": "Add a group to be a member of this project with specified role",
+ "ROLE": "Role",
+ "SYS_ADMIN": "System Admin",
+ "PROJECT_ADMIN": "Project Admin",
+ "PROJECT_MASTER": "Master",
+ "DEVELOPER": "Developer",
+ "GUEST": "Guest"
},
"AUDIT_LOG": {
"USERNAME": "Nombre de usuario",
@@ -554,6 +608,8 @@
"TAGS_COUNT": "Etiquetas",
"PULL_COUNT": "Pulls",
"PULL_COMMAND": "Comando Pull",
+ "PULL_TIME": "Pull Time",
+ "PUSH_TIME": "Push Time",
"MY_REPOSITORY": "Mi Repositorio",
"PUBLIC_REPOSITORY": "Repositorio Público",
"DELETION_TITLE_REPO": "Confirmar Eliminación de Repositorio",
@@ -659,6 +715,19 @@
"ADD_LABEL_TO_CHART_VERSION": "Add labels to this chart version",
"STATUS": "Status"
},
+ "SUMMARY": {
+ "QUOTAS": "quotas",
+ "PROJECT_REPOSITORY": "Project repositories",
+ "PROJECT_HELM_CHART": "Project Helm Chart",
+ "PROJECT_MEMBER": "Project members",
+ "PROJECT_QUOTAS": "Project quotas",
+ "ARTIFACT_COUNT": "Artifact count",
+ "STORAGE_CONSUMPTION": "Storage consumption",
+ "ADMIN": "Admin(s)",
+ "MASTER": "Master(s)",
+ "DEVELOPER": "Developer(s)",
+ "GUEST": "Guest(s)"
+ },
"ALERT": {
"FORM_CHANGE_CONFIRMATION": "Algunos cambios no se han guardado aún. ¿Quiere cancelar?"
},
@@ -683,6 +752,7 @@
"REPOSITORY": "Repository",
"REPO_READ_ONLY": "Repository Read Only",
"SYSTEM": "Opciones del Sistema",
+ "PROJECT_QUOTAS": "Project Quotas",
"VULNERABILITY": "Vulnerability",
"GC": "Garbage Collection",
"CONFIRM_TITLE": "Confirma cancelación",
@@ -715,6 +785,7 @@
"ROOT_CERT": "Registro Certificado Raíz",
"ROOT_CERT_LINK": "Descargar",
"REGISTRY_CERTIFICATE": "Certificado de registro",
+ "NO_CHANGE": "Save abort because nothing changed",
"TOOLTIP": {
"SELF_REGISTRATION_ENABLE": "Activar registro.",
"SELF_REGISTRATION_DISABLE": "Disable sign up.",
@@ -732,6 +803,7 @@
"VERIFY_CERT": "Verify Cert from LDAP Server",
"READONLY_TOOLTIP": "In read-only mode, you can not delete repositories or tags or push images. ",
"GC_POLICY": "",
+ "WEBHOOK_TOOLTIP": "Enable webhooks to receive callbacks at your designated endpoints when certain actions such as image or chart being pushed, pulled, deleted, scanned are performed",
"HOURLY_CRON":"Run once an hour, beginning of hour. Equivalente a 0 0 * * * *.",
"WEEKLY_CRON":"Run once a week, midnight between Sat/Sun. Equivalente a 0 0 0 * * 0.",
"DAILY_CRON":"Run once a day, midnight. Equivalente a 0 0 0 * * *."
@@ -768,7 +840,7 @@
"HTTP_AUTH": {
"ENDPOINT": "Server Endpoint",
"TOKEN_REVIEW": "Review Endpoint De Token",
- "ALWAYS_ONBOARD": "Always Onboard",
+ "SKIP_SEARCH": "Skip Search",
"VERIFY_CERT": "Authentication Verify Cert"
},
"OIDC": {
@@ -801,7 +873,8 @@
"TEST_MAIL_FAILED": "Fallo al verificar el servidor de correo con el error: {{param}}.",
"TEST_LDAP_FAILED": "Fallo al verificar el servidor LDAP con el error: {{param}}.",
"LEAVING_CONFIRMATION_TITLE": "Confirme la salida",
- "LEAVING_CONFIRMATION_SUMMARY": "Los cambios no han sido guardados aún. ¿Quiere abandonar la página actual?"
+ "LEAVING_CONFIRMATION_SUMMARY": "Los cambios no han sido guardados aún. ¿Quiere abandonar la página actual?",
+ "TEST_OIDC_SUCCESS": "Connection to OIDC server is verified."
},
"PAGE_NOT_FOUND": {
"MAIN_TITLE": "Página no encontrada",
@@ -928,6 +1001,28 @@
"PLACEHOLDER": "We couldn't find any labels!",
"NAME_ALREADY_EXISTS": "Label name already exists."
},
+ "QUOTA": {
+ "PROJECT": "Project",
+ "OWNER": "Owner",
+ "COUNT": "Count",
+ "STORAGE": "Storage",
+ "EDIT": "Edit",
+ "DELETE": "Delete",
+ "OF": "of",
+ "PROJECT_QUOTA_DEFAULT_ARTIFACT": "Default artifact count per project",
+ "PROJECT_QUOTA_DEFAULT_DISK": "Default disk space per project",
+ "EDIT_PROJECT_QUOTAS": "Edit Project Quotas",
+ "EDIT_DEFAULT_PROJECT_QUOTAS": "Edit Default Project Quotas",
+ "SET_QUOTAS": "Set the project quotas for project '{{params}}'",
+ "SET_DEFAULT_QUOTAS": "Set the default project quotas when creating new projects",
+ "COUNT_QUOTA": "Count quota",
+ "COUNT_DEFAULT_QUOTA": "Default count quota",
+ "STORAGE_QUOTA": "Storage quota",
+ "STORAGE_DEFAULT_QUOTA": "Default storage quota",
+ "SAVE_SUCCESS": "Quota edit success",
+ "UNLIMITED": "unlimited",
+ "INVALID_INPUT": "invalid input"
+ },
"WEEKLY": {
"MONDAY": "Monday",
"TUESDAY": "Tuesday",
@@ -1011,6 +1106,106 @@
"MSG_SUCCESS": "Retag successfully",
"TIP_REPO": "A repository name is broken up into path components. A component of a repository name must be at least one lowercase, alpha-numeric characters, optionally separated by periods, dashes or underscores. More strictly, it must match the regular expression [a-z0-9]+(?:[._-][a-z0-9]+)*.If a repository name has two or more path components, they must be separated by a forward slash ('/').The total length of a repository name, including slashes, must be less the 256 characters.",
"TIP_TAG": "A tag is a label applied to a Docker image in a repository. Tags are how various images in a repository are distinguished from each other.It need to match Regex: (`[\\w][\\w.-]{0,127}`)"
+ },
+ "CVE_WHITELIST": {
+ "DEPLOYMENT_SECURITY": "Deployment security",
+ "CVE_WHITELIST": "CVE whitelist",
+ "SYS_WHITELIST_EXPLAIN": "System whitelist allows vulnerabilities in this list to be ignored when calculating the vulnerability of an image.",
+ "ADD_SYS": "Add CVE IDs to the system whitelist",
+ "WARNING_SYS": "The system CVE whitelist has expired. You can enable the whitelist by extending the expiration date.",
+ "WARNING_PRO": "The project CVE whitelist has expired. You can enable the whitelist by extending the expiration date.",
+ "ADD": "ADD",
+ "ENTER": "Enter CVE ID(s)",
+ "HELP": "Separator: commas or newline characters",
+ "NONE": "None",
+ "EXPIRES_AT": "Expires at",
+ "NEVER_EXPIRES": "Never expires",
+ "PRO_WHITELIST_EXPLAIN": "Project whitelist allows vulnerabilities in this list to be ignored in this project when pushing and pulling images.",
+ "PRO_OR_SYS": "You can either use the default whitelist configured at the system level or click on 'Project whitelist' to create a new whitelist",
+ "MERGE_INTO": "Add individual CVE IDs before clicking 'ADD SYSTEM' to add system whitelist as well.",
+ "SYS_WHITELIST": "System whitelist",
+ "PRO_WHITELIST": "Project whitelist",
+ "ADD_SYSTEM": "ADD SYSTEM"
+ },
+ "TAG_RETENTION": {
+ "TAG_RETENTION": "Tag Retention",
+ "RETENTION_RULES": "Retention rules",
+ "RULE_NAME_1": " the images from the last {{number}} days",
+ "RULE_NAME_2": " the most recent active {{number}} images",
+ "RULE_NAME_3": " the most recently pushed {{number}} images",
+ "RULE_NAME_4": " the most recently pulled {{number}} images",
+ "RULE_NAME_5": " always",
+ "ADD_RULE": "ADD RULE",
+ "ADD_RULE_HELP_1": "Click the ADD RULE button to add a rule.",
+ "ADD_RULE_HELP_2": "Tag retention polices run once a day.",
+ "RETENTION_RUNS": "Retention runs",
+ "RUN_NOW": "RUN NOW",
+ "WHAT_IF_RUN": "DRY RUN",
+ "ABORT": "ABORT",
+ "SERIAL": "ID",
+ "STATUS": "Status",
+ "DRY_RUN": "Dry Run",
+ "START_TIME": "Start Time",
+ "DURATION": "Duration",
+ "DETAILS": "Details",
+ "REPOSITORY": "Repository",
+ "EDIT": "Edit",
+ "DISABLE": "Disable",
+ "ENABLE": "Enable",
+ "DELETE": "Delete",
+ "ADD_TITLE": "Add Tag Retention Rule",
+ "ADD_SUBTITLE": "Specify a tag retention rule for this project. All tag retention rules are independently calculated and each rule can be applied to a selected list of repositories.",
+ "BY_WHAT": "By image count or number of days",
+ "RULE_TEMPLATE_1": "the images from the last # days",
+ "RULE_TEMPLATE_2": "the most recent active # images",
+ "RULE_TEMPLATE_3": "the most recently pushed # images",
+ "RULE_TEMPLATE_4": "the most recently pulled # images",
+ "RULE_TEMPLATE_5": "always",
+ "ACTION_RETAIN": " retain",
+ "UNIT_DAY": "DAYS",
+ "UNIT_COUNT": "COUNT",
+ "NUMBER": "NUMBER",
+ "IN_REPOSITORIES": "For the repositories",
+ "REP_SEPARATOR": "Enter multiple comma separated repos,repo*,or **",
+ "TAGS": "Tags",
+ "MATCHES_TAGS": "Matches tags",
+ "MATCHES_EXCEPT_TAGS": "Matches except tags",
+ "TAG_SEPARATOR": "Enter multiple comma separated tags,tag*,**,or regex",
+ "LABELS": "Labels",
+ "MATCHES_LABELS": "Matches Labels",
+ "MATCHES_EXCEPT_LABELS": "Matches except Labels",
+ "REP_LABELS": "Enter multiple comma separated labels",
+ "RETENTION_RUN": "Retention Run",
+ "RETENTION_RUN_EXPLAIN": "Executing the retention policy can have adverse effects to the images in this project and affected image tags will be deleted. Press CANCEL and use a DRY RUN to simulate the effect of this policy. Otherwise press RUN to proceed.",
+ "RETENTION_RUN_ABORTED": "Retention Run Aborted",
+ "RETENTION_RUN_ABORTED_EXPLAIN": "This retention run has been aborted. Images already deleted are irreversible. You can initiate another run to continue to delete images. In order to simulate a run, you can use the “DRY RUN”.",
+ "LOADING": "Loading...",
+ "NO_EXECUTION": "We couldn't find any executions!",
+ "NO_HISTORY": "We couldn't find any histories!",
+ "DELETION": "Deletions",
+ "EDIT_TITLE": "Edit Tag Retention Rule",
+ "LOG": "Log",
+ "EXCLUDES": "Excludes",
+ "MATCHES": "Matches",
+ "REPO": " repositories",
+ "EXC": " excluding ",
+ "MAT": " matching ",
+ "AND": " and",
+ "WITH": " with ",
+ "WITHOUT": " without ",
+ "LOWER_LABELS": " labels",
+ "WITH_CONDITION": " with",
+ "LOWER_TAGS": " tags",
+ "TRIGGER": "Schedule",
+ "RETAINED": "Retained",
+ "TOTAL": "Total",
+ "NONE": "none",
+ "RULE_NAME_6": " the images pulled within the last {{number}} days",
+ "RULE_NAME_7": " the images pushed within the last {{number}} days",
+ "RULE_TEMPLATE_6": " the images pulled within the last # days",
+ "RULE_TEMPLATE_7": " the images pushed within the last # days",
+ "SCHEDULE": "Schedule",
+ "SCHEDULE_WARNING": "Executing the retention policy can have adverse effects to the images in this project and affected image tags will be deleted."
}
}
diff --git a/src/portal/src/i18n/lang/fr-fr-lang.json b/src/portal/src/i18n/lang/fr-fr-lang.json
index ae649aebb..33a1ffb99 100644
--- a/src/portal/src/i18n/lang/fr-fr-lang.json
+++ b/src/portal/src/i18n/lang/fr-fr-lang.json
@@ -31,6 +31,7 @@
"TEST_MAIL": "TESTER LE SERVEUR MAIL",
"CLOSE": "FERMER",
"TEST_LDAP": "TESTER LE SERVEUR LDAP",
+ "TEST_OIDC": "TEST OIDC SERVER",
"MORE_INFO": "Plus d'informations...",
"YES": "OUI",
"NO": "NON",
@@ -39,7 +40,12 @@
"ACTIONS": "Actions",
"BROWSE": "Browse",
"UPLOAD": "Upload",
- "NO_FILE": "No file selected"
+ "NO_FILE": "No file selected",
+ "ADD": "ADD",
+ "RUN": "RUN",
+ "CONTINUE": "CONTINUE",
+ "ENABLE": "ENABLE",
+ "DISABLE": "DISABLE"
},
"BATCH": {
"DELETED_SUCCESS": "Deleted successfully",
@@ -55,6 +61,7 @@
"TOOLTIP": {
"NAME_FILTER": "Filter the name of the resource. Leave empty or use '**' to match all. 'library/**' only matches resources under 'library'. For more patterns, please refer to the user guide.",
"TAG_FILTER": "Filter the tag/version part of the resources. Leave empty or use '**' to match all. '1.0*' only matches the tags that starts with '1.0'. For more patterns, please refer to the user guide.",
+ "LABEL_FILTER": "Filter the resources according to labels.",
"RESOURCE_FILTER": "Filter the type of resources.",
"PUSH_BASED": "Push the resources from the local Harbor to the remote registry.",
"PULL_BASED": "Pull the resources from the remote registry to the local Harbor.",
@@ -210,9 +217,15 @@
"TOGGLED_SUCCESS": "Projet basculé avec succès.",
"FAILED_TO_DELETE_PROJECT": "Project contains repositories or replication rules or helm-charts cannot be deleted.",
"INLINE_HELP_PUBLIC": "Lorsqu'un projet est mis en public, n'importe qui a l'autorisation de lire les dépôts sous ce projet, et l'utilisateur n' a pas besoin d'exécuter \"docker login\" avant de prendre des images de ce projet.",
- "OF": "de"
+ "OF": "de",
+ "COUNT_QUOTA": "Count quota",
+ "STORAGE_QUOTA": "Storage quota",
+ "COUNT_QUOTA_TIP": "Please enter an integer between '1' & '100,000,000', '-1' for unlimited",
+ "STORAGE_QUOTA_TIP": "The upper limit of Storage Quota only takes integer values, capped at '1024TB'. Enter '-1' for unlimited quota",
+ "QUOTA_UNLIMIT_TIP": "For unlimited quota, please enter '-1'."
},
"PROJECT_DETAIL": {
+ "SUMMARY": "Summary",
"REPOSITORIES": "Dépôts",
"REPLICATION": "Réplication",
"USERS": "Membres",
@@ -221,7 +234,8 @@
"PROJECTS": "Projets",
"CONFIG": "Configuration",
"HELMCHART": "Helm Charts",
- "ROBOT_ACCOUNTS": "Robot Accounts"
+ "ROBOT_ACCOUNTS": "Robot Accounts",
+ "WEBHOOKS": "Webhooks"
},
"PROJECT_CONFIG": {
"REGISTRY": "Dépôt du Projet",
@@ -302,10 +316,11 @@
"ENABLE_ACCOUNT": "permettre à compte ",
"DELETE": "Supprimer",
"CREAT_ROBOT_ACCOUNT": "créat robot compte ",
- "PULL_PERMISSION": "Image pull",
- "PULL_PUSH_PERMISSION": "Image pull / push",
- "PUSH_CHART_PERMISSION": "Helm chart push",
- "PULL_CHART_PERMISSION": "Helm chart pull",
+ "PERMISSIONS_IMAGE": "Image",
+ "PERMISSIONS_HELMCHART": "Helm Chart",
+ "PUSH": "Push",
+ "PULL": "Pull",
+
"FILTER_PLACEHOLDER": "Filter Robot Accounts",
"ROBOT_NAME": "ne peut pas contenir de caractères spéciaux(~#$%) et la longueur maximale devrait être de 255 caractères.",
"ACCOUNT_EXISTING": "le robot est existe déjà.",
@@ -313,12 +328,43 @@
"CREATED_SUCCESS": "Created '{{param}}' successfully.",
"COPY_SUCCESS": "Copy token successfully of '{{param}}'",
"DELETION_TITLE": "confirmer l'enlèvement des comptes du robot ",
- "DELETION_SUMMARY": "Voulez-vous supprimer la règle {{param}}?"
+ "DELETION_SUMMARY": "Voulez-vous supprimer la règle {{param}}?",
+ "PULL_IS_MUST" : "Pull permission is checked by default and can not be modified.",
+ "EXPORT_TO_FILE" : "export to file"
+ },
+ "WEBHOOK": {
+ "EDIT_BUTTON": "EDIT",
+ "ENABLED_BUTTON": "ENABLE",
+ "DISABLED_BUTTON": "DISABLE",
+ "TYPE": "Webhook",
+ "STATUS": "Status",
+ "CREATED": "Created",
+ "ENABLED": "Enabled",
+ "DISABLED": "Disabled",
+ "OF": "of",
+ "ITEMS": "items",
+ "LAST_TRIGGERED": "Last Triggered",
+ "EDIT_WEBHOOK": "Webhook Endpoint",
+ "CREATE_WEBHOOK": "Getting started with webhooks",
+ "EDIT_WEBHOOK_DESC": "Specify the endpoint for receiving webhook notifications",
+ "CREATE_WEBHOOK_DESC": "To get started with webhooks, provide an endpoint and credentials to access the webhook server.",
+ "ENDPOINT_URL": "Endpoint URL",
+ "URL_IS_REQUIRED": "Endpoint URL is required.",
+ "AUTH_HEADER": "Auth Header",
+ "VERIFY_REMOTE_CERT": "Verify Remote Certificate",
+ "TEST_ENDPOINT_BUTTON": "TEST ENDPOINT",
+ "CANCEL_BUTTON": "CANCEL",
+ "SAVE_BUTTON": "SAVE",
+ "ENABLED_WEBHOOK_TITLE": "Enable Project Webhooks",
+ "ENABLED_WEBHOOK_SUMMARY": "Do you want to enable webhooks for project ",
+ "DISABLED_WEBHOOK_TITLE": "Disable Project Webhooks",
+ "DISABLED_WEBHOOK_SUMMARY": "Do you want to disable webhooks for project "
},
"GROUP": {
"Group": "Group",
"GROUPS": "Groups",
"IMPORT_LDAP_GROUP": "Import LDAP Group",
+ "IMPORT_HTTP_GROUP": "New HTTP Group",
"ADD": "Add",
"EDIT": "Edit",
"DELETE": "Delete",
@@ -331,8 +377,17 @@
"ADD_GROUP_SUCCESS": "Add group success",
"EDIT_GROUP_SUCCESS": "Edit group success",
"LDAP_TYPE": "LDAP",
+ "HTTP_TYPE": "HTTP",
"OF": "of",
- "ITEMS": "items"
+ "ITEMS": "items",
+ "NEW_MEMBER": "New Group Member",
+ "NEW_USER_INFO": "Add a group to be a member of this project with specified role",
+ "ROLE": "Role",
+ "SYS_ADMIN": "System Admin",
+ "PROJECT_ADMIN": "Project Admin",
+ "PROJECT_MASTER": "Master",
+ "DEVELOPER": "Developer",
+ "GUEST": "Guest"
},
"AUDIT_LOG": {
"USERNAME": "Nom d'utilisateur",
@@ -542,6 +597,8 @@
"TAGS_COUNT": "Tags",
"PULL_COUNT": "Pulls",
"PULL_COMMAND": "Commande de Pull",
+ "PULL_TIME": "Pull Time",
+ "PUSH_TIME": "Push Time",
"MY_REPOSITORY": "Mon Dépôt",
"PUBLIC_REPOSITORY": "Dépôt Public",
"DELETION_TITLE_REPO": "Confirmer la Suppresion du Dépôt",
@@ -644,6 +701,19 @@
"ADD_LABEL_TO_CHART_VERSION": "Add labels to this chart version",
"STATUS": "Status"
},
+ "SUMMARY": {
+ "QUOTAS": "quotas",
+ "PROJECT_REPOSITORY": "Project repositories",
+ "PROJECT_HELM_CHART": "Project Helm Chart",
+ "PROJECT_MEMBER": "Project members",
+ "PROJECT_QUOTAS": "Project quotas",
+ "ARTIFACT_COUNT": "Artifact count",
+ "STORAGE_CONSUMPTION": "Storage consumption",
+ "ADMIN": "Admin(s)",
+ "MASTER": "Master(s)",
+ "DEVELOPER": "Developer(s)",
+ "GUEST": "Guest(s)"
+ },
"ALERT": {
"FORM_CHANGE_CONFIRMATION": "Certaines modifications ne sont pas encore enregistrées. Voulez-vous annuler ?"
},
@@ -666,6 +736,7 @@
"EMAIL": "Email",
"LABEL": "Labels",
"SYSTEM": "Réglages Système",
+ "PROJECT_QUOTAS": "Project Quotas",
"CONFIRM_TITLE": "Confirmer pour annuler",
"CONFIRM_SUMMARY": "Certaines modifications n'ont pas été sauvegardées. Voulez-vous les défaire ?",
"SAVE_SUCCESS": "La configuration a été sauvegardée avec succès.",
@@ -696,6 +767,7 @@
"ROOT_CERT_LINK": "Télécharger",
"GC": "Garbage Collection",
"REGISTRY_CERTIFICATE": "certificat d'enregistrement",
+ "NO_CHANGE": "Save abort because nothing changed",
"TOOLTIP": {
"SELF_REGISTRATION_ENABLE": "Activer l'inscription.",
"SELF_REGISTRATION_DISABLE": "Désactiver l'inscription.",
@@ -712,6 +784,7 @@
"SCANNING_POLICY": "Définissez la politique d'analyse des images en fonction des différentes exigences. 'Aucune' : pas de politique active; 'Tousles jours à' : déclenchement du balayage à l'heure spécifiée tous les jours.",
"READONLY_TOOLTIP": "In read-only mode, you can not delete repositories or tags or push images. ",
"GC_POLICY": "",
+ "WEBHOOK_TOOLTIP": "Enable webhooks to receive callbacks at your designated endpoints when certain actions such as image or chart being pushed, pulled, deleted, scanned are performed",
"HOURLY_CRON":"Run once an hour, beginning of hour. Équivalent à 0 0 * * * *.",
"WEEKLY_CRON":"Run once a week, midnight between Sat/Sun. Équivalent à 0 0 0 * * 0.",
"DAILY_CRON":"Run once a day, midnight. Équivalent à 0 0 0 * * *."
@@ -741,7 +814,7 @@
"HTTP_AUTH": {
"ENDPOINT": "serveur paramètre",
"TOKEN_REVIEW": "examen symbolique paramètre",
- "ALWAYS_ONBOARD": "always onboard",
+ "SKIP_SEARCH": "Skip Search",
"VERIFY_CERT": "authentification vérifier cert"
},
"OIDC": {
@@ -774,7 +847,8 @@
"TEST_MAIL_FAILED": "Echec de la vérification du serveur de mail avec erreurs : {{param}}.",
"TEST_LDAP_FAILED": "Echec de la vérification du serveur LDAP avec erreurs : {{param}}.",
"LEAVING_CONFIRMATION_TITLE": "Confirmer pour quitter",
- "LEAVING_CONFIRMATION_SUMMARY": "Les modifications n'ont pas encore été enregistrées. Voulez-vous quitter la page actuelle ?"
+ "LEAVING_CONFIRMATION_SUMMARY": "Les modifications n'ont pas encore été enregistrées. Voulez-vous quitter la page actuelle ?",
+ "TEST_OIDC_SUCCESS": "Connection to OIDC server is verified."
},
"PAGE_NOT_FOUND": {
"MAIN_TITLE": "Page introuvable",
@@ -899,6 +973,28 @@
"PLACEHOLDER": "We couldn't find any labels!",
"NAME_ALREADY_EXISTS": "Label name already exists."
},
+ "QUOTA": {
+ "PROJECT": "Project",
+ "OWNER": "Owner",
+ "COUNT": "Count",
+ "STORAGE": "Storage",
+ "EDIT": "Edit",
+ "DELETE": "Delete",
+ "OF": "of",
+ "PROJECT_QUOTA_DEFAULT_ARTIFACT": "Default artifact count per project",
+ "PROJECT_QUOTA_DEFAULT_DISK": "Default disk space per project",
+ "EDIT_PROJECT_QUOTAS": "Edit Project Quotas",
+ "EDIT_DEFAULT_PROJECT_QUOTAS": "Edit Default Project Quotas",
+ "SET_QUOTAS": "Set the project quotas for project '{{params}}'",
+ "SET_DEFAULT_QUOTAS": "Set the default project quotas when creating new projects",
+ "COUNT_QUOTA": "Count quota",
+ "COUNT_DEFAULT_QUOTA": "Default count quota",
+ "STORAGE_QUOTA": "Storage quota",
+ "STORAGE_DEFAULT_QUOTA": "Default storage quota",
+ "SAVE_SUCCESS": "Quota edit success",
+ "UNLIMITED": "unlimited",
+ "INVALID_INPUT": "invalid input"
+ },
"WEEKLY": {
"MONDAY": "Monday",
"TUESDAY": "Tuesday",
@@ -982,6 +1078,106 @@
"MSG_SUCCESS": "Retag successfully",
"TIP_REPO": "A repository name is broken up into path components. A component of a repository name must be at least one lowercase, alpha-numeric characters, optionally separated by periods, dashes or underscores. More strictly, it must match the regular expression [a-z0-9]+(?:[._-][a-z0-9]+)*.If a repository name has two or more path components, they must be separated by a forward slash ('/').The total length of a repository name, including slashes, must be less the 256 characters.",
"TIP_TAG": "A tag is a label applied to a Docker image in a repository. Tags are how various images in a repository are distinguished from each other.It need to match Regex: (`[\\w][\\w.-]{0,127}`)"
+ },
+ "CVE_WHITELIST": {
+ "DEPLOYMENT_SECURITY": "Deployment security",
+ "CVE_WHITELIST": "CVE whitelist",
+ "SYS_WHITELIST_EXPLAIN": "System whitelist allows vulnerabilities in this list to be ignored when calculating the vulnerability of an image.",
+ "ADD_SYS": "Add CVE IDs to the system whitelist",
+ "WARNING_SYS": "The system CVE whitelist has expired. You can enable the whitelist by extending the expiration date.",
+ "WARNING_PRO": "The project CVE whitelist has expired. You can enable the whitelist by extending the expiration date.",
+ "ADD": "ADD",
+ "ENTER": "Enter CVE ID(s)",
+ "HELP": "Separator: commas or newline characters",
+ "NONE": "None",
+ "EXPIRES_AT": "Expires at",
+ "NEVER_EXPIRES": "Never expires",
+ "PRO_WHITELIST_EXPLAIN": "Project whitelist allows vulnerabilities in this list to be ignored in this project when pushing and pulling images.",
+ "PRO_OR_SYS": "You can either use the default whitelist configured at the system level or click on 'Project whitelist' to create a new whitelist",
+ "MERGE_INTO": "Add individual CVE IDs before clicking 'ADD SYSTEM' to add system whitelist as well.",
+ "SYS_WHITELIST": "System whitelist",
+ "PRO_WHITELIST": "Project whitelist",
+ "ADD_SYSTEM": "ADD SYSTEM"
+ },
+ "TAG_RETENTION": {
+ "TAG_RETENTION": "Tag Retention",
+ "RETENTION_RULES": "Retention rules",
+ "RULE_NAME_1": " the images from the last {{number}} days",
+ "RULE_NAME_2": " the most recent active {{number}} images",
+ "RULE_NAME_3": " the most recently pushed {{number}} images",
+ "RULE_NAME_4": " the most recently pulled {{number}} images",
+ "RULE_NAME_5": " always",
+ "ADD_RULE": "ADD RULE",
+ "ADD_RULE_HELP_1": "Click the ADD RULE button to add a rule.",
+ "ADD_RULE_HELP_2": "Tag retention polices run once a day.",
+ "RETENTION_RUNS": "Retention runs",
+ "RUN_NOW": "RUN NOW",
+ "WHAT_IF_RUN": "DRY RUN",
+ "ABORT": "ABORT",
+ "SERIAL": "ID",
+ "STATUS": "Status",
+ "DRY_RUN": "Dry Run",
+ "START_TIME": "Start Time",
+ "DURATION": "Duration",
+ "DETAILS": "Details",
+ "REPOSITORY": "Repository",
+ "EDIT": "Edit",
+ "DISABLE": "Disable",
+ "ENABLE": "Enable",
+ "DELETE": "Delete",
+ "ADD_TITLE": "Add Tag Retention Rule",
+ "ADD_SUBTITLE": "Specify a tag retention rule for this project. All tag retention rules are independently calculated and each rule can be applied to a selected list of repositories.",
+ "BY_WHAT": "By image count or number of days",
+ "RULE_TEMPLATE_1": "the images from the last # days",
+ "RULE_TEMPLATE_2": "the most recent active # images",
+ "RULE_TEMPLATE_3": "the most recently pushed # images",
+ "RULE_TEMPLATE_4": "the most recently pulled # images",
+ "RULE_TEMPLATE_5": "always",
+ "ACTION_RETAIN": " retain",
+ "UNIT_DAY": "DAYS",
+ "UNIT_COUNT": "COUNT",
+ "NUMBER": "NUMBER",
+ "IN_REPOSITORIES": "For the repositories",
+ "REP_SEPARATOR": "Enter multiple comma separated repos,repo*,or **",
+ "TAGS": "Tags",
+ "MATCHES_TAGS": "Matches tags",
+ "MATCHES_EXCEPT_TAGS": "Matches except tags",
+ "TAG_SEPARATOR": "Enter multiple comma separated tags,tag*,**,or regex",
+ "LABELS": "Labels",
+ "MATCHES_LABELS": "Matches Labels",
+ "MATCHES_EXCEPT_LABELS": "Matches except Labels",
+ "REP_LABELS": "Enter multiple comma separated labels",
+ "RETENTION_RUN": "Retention Run",
+ "RETENTION_RUN_EXPLAIN": "Executing the retention policy can have adverse effects to the images in this project and affected image tags will be deleted. Press CANCEL and use a DRY RUN to simulate the effect of this policy. Otherwise press RUN to proceed.",
+ "RETENTION_RUN_ABORTED": "Retention Run Aborted",
+ "RETENTION_RUN_ABORTED_EXPLAIN": "This retention run has been aborted. Images already deleted are irreversible. You can initiate another run to continue to delete images. In order to simulate a run, you can use the “DRY RUN”.",
+ "LOADING": "Loading...",
+ "NO_EXECUTION": "We couldn't find any executions!",
+ "NO_HISTORY": "We couldn't find any histories!",
+ "DELETION": "Deletions",
+ "EDIT_TITLE": "Edit Tag Retention Rule",
+ "LOG": "Log",
+ "EXCLUDES": "Excludes",
+ "MATCHES": "Matches",
+ "REPO": " repositories",
+ "EXC": " excluding ",
+ "MAT": " matching ",
+ "AND": " and",
+ "WITH": " with ",
+ "WITHOUT": " without ",
+ "LOWER_LABELS": " labels",
+ "WITH_CONDITION": " with",
+ "LOWER_TAGS": " tags",
+ "TRIGGER": "Schedule",
+ "RETAINED": "Retained",
+ "TOTAL": "Total",
+ "NONE": "none",
+ "RULE_NAME_6": " the images pulled within the last {{number}} days",
+ "RULE_NAME_7": " the images pushed within the last {{number}} days",
+ "RULE_TEMPLATE_6": " the images pulled within the last # days",
+ "RULE_TEMPLATE_7": " the images pushed within the last # days",
+ "SCHEDULE": "Schedule",
+ "SCHEDULE_WARNING": "Executing the retention policy can have adverse effects to the images in this project and affected image tags will be deleted."
}
}
diff --git a/src/portal/src/i18n/lang/pt-br-lang.json b/src/portal/src/i18n/lang/pt-br-lang.json
index a829f8f18..37d3db8db 100644
--- a/src/portal/src/i18n/lang/pt-br-lang.json
+++ b/src/portal/src/i18n/lang/pt-br-lang.json
@@ -31,6 +31,7 @@
"TEST_MAIL": "TESTAR SERVIDOR DE EMAIL",
"CLOSE": "FECHAR",
"TEST_LDAP": "TESTAR SERVIDOR DE LDAP",
+ "TEST_OIDC": "TEST OIDC SERVER",
"MORE_INFO": "Mais informações...",
"YES": "SIM",
"NO": "NÃO",
@@ -42,7 +43,12 @@
"ACTIONS": "Ações",
"BROWSE": "Navegar",
"UPLOAD": "Upload",
- "NO_FILE": "Nenhum arquivo selecionado"
+ "NO_FILE": "Nenhum arquivo selecionado",
+ "ADD": "ADD",
+ "RUN": "RUN",
+ "CONTINUE": "CONTINUE",
+ "ENABLE": "ENABLE",
+ "DISABLE": "DISABLE"
},
"BATCH": {
"DELETED_SUCCESS": "Removido com sucesso",
@@ -58,6 +64,7 @@
"TOOLTIP": {
"NAME_FILTER": "Filter the name of the resource. Leave empty or use '**' to match all. 'library/**' only matches resources under 'library'. For more patterns, please refer to the user guide.",
"TAG_FILTER": "Filter the tag/version part of the resources. Leave empty or use '**' to match all. '1.0*' only matches the tags that starts with '1.0'. For more patterns, please refer to the user guide.",
+ "LABEL_FILTER": "Filter the resources according to labels.",
"RESOURCE_FILTER": "Filter the type of resources.",
"PUSH_BASED": "Push the resources from the local Harbor to the remote registry.",
"PULL_BASED": "Pull the resources from the remote registry to the local Harbor.",
@@ -214,9 +221,15 @@
"TOGGLED_SUCCESS": "Projeto alterado com sucesso.",
"FAILED_TO_DELETE_PROJECT": "Project contains repositories or replication rules or helm-charts cannot be deleted.",
"INLINE_HELP_PUBLIC": "Quando um projeto é marcado como público, qualquer um tem permissões de leitura aos repositórios desse projeto, e o usuário não precisa executar \"docker login\" antes de baixar imagens desse projeto.",
- "OF": "de"
+ "OF": "de",
+ "COUNT_QUOTA": "Count quota",
+ "STORAGE_QUOTA": "Storage quota",
+ "COUNT_QUOTA_TIP": "Please enter an integer between '1' & '100,000,000', '-1' for unlimited",
+ "STORAGE_QUOTA_TIP": "The upper limit of Storage Quota only takes integer values, capped at '1024TB'. Enter '-1' for unlimited quota",
+ "QUOTA_UNLIMIT_TIP": "For unlimited quota, please enter '-1'."
},
"PROJECT_DETAIL": {
+ "SUMMARY": "Summary",
"REPOSITORIES": "Repositórios",
"REPLICATION": "Replicação",
"USERS": "Membros",
@@ -225,7 +238,8 @@
"PROJECTS": "Projetos",
"CONFIG": "Configuração",
"HELMCHART": "Helm Charts",
- "ROBOT_ACCOUNTS": "Robot Accounts"
+ "ROBOT_ACCOUNTS": "Robot Accounts",
+ "WEBHOOKS": "Webhooks"
},
"PROJECT_CONFIG": {
"REGISTRY": "Registro do Projeto",
@@ -308,10 +322,10 @@
"ENABLE_ACCOUNT": "Ativar conta",
"DELETE": "Remover",
"CREAT_ROBOT_ACCOUNT": "CRIA robô conta",
- "PULL_PERMISSION": "Image pull",
- "PULL_PUSH_PERMISSION": "Image pull / push",
- "PUSH_CHART_PERMISSION": "Helm chart push",
- "PULL_CHART_PERMISSION": "Helm chart pull",
+ "PERMISSIONS_IMAGE": "Image",
+ "PERMISSIONS_HELMCHART": "Helm Chart",
+ "PUSH": "Push",
+ "PULL": "Pull",
"FILTER_PLACEHOLDER": "Filtro robot accounts",
"ROBOT_NAME": "Não Pode conter caracteres especiais(~#$%) e comprimento máximo deveria ser 255 caracteres.",
"ACCOUNT_EXISTING": "Robô conta já existe.",
@@ -319,12 +333,15 @@
"CREATED_SUCCESS": "Created '{{param}}' successfully.",
"COPY_SUCCESS": "Copy token successfully of '{{param}}'",
"DELETION_TITLE": "Confirmar a remoção do robô Contas",
- "DELETION_SUMMARY": "Você quer remover a regra {{param}}?"
+ "DELETION_SUMMARY": "Você quer remover a regra {{param}}?",
+ "PULL_IS_MUST" : "Pull permission is checked by default and can not be modified.",
+ "EXPORT_TO_FILE" : "export to file"
},
"GROUP": {
"GROUP": "Grupo",
"GROUPS": "Grupos",
"IMPORT_LDAP_GROUP": "Importar grupo do LDAP",
+ "IMPORT_HTTP_GROUP": "New HTTP Group",
"ADD": "Novo Grupo",
"EDIT": "Editar",
"DELETE": "Remover",
@@ -337,8 +354,45 @@
"ADD_GROUP_SUCCESS": "Grupo adicionado com sucesso",
"EDIT_GROUP_SUCCESS": "Grupo editado com sucesso",
"LDAP_TYPE": "LDAP",
+ "HTTP_TYPE": "HTTP",
"OF": "de",
- "ITEMS": "itens"
+ "ITEMS": "itens",
+ "NEW_MEMBER": "New Group Member",
+ "NEW_USER_INFO": "Add a group to be a member of this project with specified role",
+ "ROLE": "Role",
+ "SYS_ADMIN": "System Admin",
+ "PROJECT_ADMIN": "Project Admin",
+ "PROJECT_MASTER": "Master",
+ "DEVELOPER": "Developer",
+ "GUEST": "Guest"
+ },
+ "WEBHOOK": {
+ "EDIT_BUTTON": "EDIT",
+ "ENABLED_BUTTON": "ENABLE",
+ "DISABLED_BUTTON": "DISABLE",
+ "TYPE": "Webhook",
+ "STATUS": "Status",
+ "CREATED": "Created",
+ "ENABLED": "Enabled",
+ "DISABLED": "Disabled",
+ "OF": "of",
+ "ITEMS": "items",
+ "LAST_TRIGGERED": "Last Triggered",
+ "EDIT_WEBHOOK": "Webhook Endpoint",
+ "CREATE_WEBHOOK": "Getting started with webhooks",
+ "EDIT_WEBHOOK_DESC": "Specify the endpoint for receiving webhook notifications",
+ "CREATE_WEBHOOK_DESC": "To get started with webhooks, provide an endpoint and credentials to access the webhook server.",
+ "ENDPOINT_URL": "Endpoint URL",
+ "URL_IS_REQUIRED": "Endpoint URL is required.",
+ "AUTH_HEADER": "Auth Header",
+ "VERIFY_REMOTE_CERT": "Verify Remote Certificate",
+ "TEST_ENDPOINT_BUTTON": "TEST ENDPOINT",
+ "CANCEL_BUTTON": "CANCEL",
+ "SAVE_BUTTON": "SAVE",
+ "ENABLED_WEBHOOK_TITLE": "Enable Project Webhooks",
+ "ENABLED_WEBHOOK_SUMMARY": "Do you want to enable webhooks for project ",
+ "DISABLED_WEBHOOK_TITLE": "Disable Project Webhooks",
+ "DISABLED_WEBHOOK_SUMMARY": "Do you want to disable webhooks for project "
},
"AUDIT_LOG": {
"USERNAME": "Nome do usuário",
@@ -553,6 +607,8 @@
"TAGS_COUNT": "Tags",
"PULL_COUNT": "Pulls",
"PULL_COMMAND": "Comando de Pull",
+ "PULL_TIME": "Pull Time",
+ "PUSH_TIME": "Push Time",
"MY_REPOSITORY": "Meu Repositório",
"PUBLIC_REPOSITORY": "Repositório Público",
"DELETION_TITLE_REPO": "Confirmar remoção de repositório",
@@ -654,6 +710,19 @@
"ADD_LABEL_TO_CHART_VERSION": "Add labels to this chart version",
"STATUS": "Status"
},
+ "SUMMARY": {
+ "QUOTAS": "quotas",
+ "PROJECT_REPOSITORY": "Project repositories",
+ "PROJECT_HELM_CHART": "Project Helm Chart",
+ "PROJECT_MEMBER": "Project members",
+ "PROJECT_QUOTAS": "Project quotas",
+ "ARTIFACT_COUNT": "Artifact count",
+ "STORAGE_CONSUMPTION": "Storage consumption",
+ "ADMIN": "Admin(s)",
+ "MASTER": "Master(s)",
+ "DEVELOPER": "Developer(s)",
+ "GUEST": "Guest(s)"
+ },
"ALERT": {
"FORM_CHANGE_CONFIRMATION": "Algumas alterações ainda não foram salvas. Você deseja cancelar?"
},
@@ -678,6 +747,7 @@
"REPOSITORY": "Repositório",
"REPO_READ_ONLY": "Repositório somente leitura",
"SYSTEM": "Configurações do Sistema",
+ "PROJECT_QUOTAS": "Project Quotas",
"VULNERABILITY": "Vulnerabilidade",
"GC": "Garbage Collection",
"CONFIRM_TITLE": "Confirme para cancelar",
@@ -709,6 +779,8 @@
"PRO_CREATION_ADMIN": "Apenas Administradores",
"ROOT_CERT": "Certificado Raiz do Registry",
"ROOT_CERT_LINK": "Download",
+ "REGISTRY_CERTIFICATE": "Registry certificate",
+ "NO_CHANGE": "Save abort because nothing changed",
"TOOLTIP": {
"SELF_REGISTRATION_ENABLE": "Habilitar registro.",
"SELF_REGISTRATION_DISABLE": "Desabilitar registro.",
@@ -726,6 +798,7 @@
"VERIFY_CERT": "Verificar o Certificado do Servidor LDAP",
"READONLY_TOOLTIP": "Em modo somente leitura, você não pode remover repositórios ou tags ou enviar imagens. ",
"REPO_TOOLTIP": "Usuários não podem efetuar qualquer operação nas imagens nesse modo.",
+ "WEBHOOK_TOOLTIP": "Enable webhooks to receive callbacks at your designated endpoints when certain actions such as image or chart being pushed, pulled, deleted, scanned are performed",
"HOURLY_CRON":"Run once an hour, beginning of hour. Equivalente a 0 0 * * * *.",
"WEEKLY_CRON":"Run once a week, midnight between Sat/Sun. Equivalente a 0 0 0 * * 0.",
"DAILY_CRON":"Run once a day, midnight. Equivalente a 0 0 0 * * *."
@@ -762,7 +835,7 @@
"HTTP_AUTH": {
"ENDPOINT": "Server endpoint",
"TOKEN_REVIEW": "Ponto final do Token Review",
- "ALWAYS_ONBOARD": "Sempre Onboard",
+ "SKIP_SEARCH": "Skip Search",
"VERIFY_CERT": "Verificar certificado de Authentication"
},
"OIDC": {
@@ -795,7 +868,8 @@
"TEST_MAIL_FAILED": "Falha ao verificar servidor de Email com erro: {{param}}.",
"TEST_LDAP_FAILED": "Falha ao verificar servidor de LDAP com erro: {{param}}.",
"LEAVING_CONFIRMATION_TITLE": "Confirme para sair",
- "LEAVING_CONFIRMATION_SUMMARY": "As alterações ainda não foram salvas. Você deseja sair da página atual?"
+ "LEAVING_CONFIRMATION_SUMMARY": "As alterações ainda não foram salvas. Você deseja sair da página atual?",
+ "TEST_OIDC_SUCCESS": "Connection to OIDC server is verified."
},
"PAGE_NOT_FOUND": {
"MAIN_TITLE": "Página não encontrada",
@@ -918,6 +992,28 @@
"PLACEHOLDER": "Não foi possível encontrar nenhuma Label!",
"NAME_ALREADY_EXISTS": "Nome da Label já existe."
},
+ "QUOTA": {
+ "PROJECT": "Project",
+ "OWNER": "Owner",
+ "COUNT": "Count",
+ "STORAGE": "Storage",
+ "EDIT": "Edit",
+ "DELETE": "Delete",
+ "OF": "of",
+ "PROJECT_QUOTA_DEFAULT_ARTIFACT": "Default artifact count per project",
+ "PROJECT_QUOTA_DEFAULT_DISK": "Default disk space per project",
+ "EDIT_PROJECT_QUOTAS": "Edit Project Quotas",
+ "EDIT_DEFAULT_PROJECT_QUOTAS": "Edit Default Project Quotas",
+ "SET_QUOTAS": "Set the project quotas for project '{{params}}'",
+ "SET_DEFAULT_QUOTAS": "Set the default project quotas when creating new projects",
+ "COUNT_QUOTA": "Count quota",
+ "COUNT_DEFAULT_QUOTA": "Default count quota",
+ "STORAGE_QUOTA": "Storage quota",
+ "STORAGE_DEFAULT_QUOTA": "Default storage quota",
+ "SAVE_SUCCESS": "Quota edit success",
+ "UNLIMITED": "unlimited",
+ "INVALID_INPUT": "invalid input"
+ },
"WEEKLY": {
"MONDAY": "Segunda Feira",
"TUESDAY": "Terça Feira",
@@ -1007,6 +1103,106 @@
"MSG_SUCCESS": "Retag successfully",
"TIP_REPO": "A repository name is broken up into path components. A component of a repository name must be at least one lowercase, alpha-numeric characters, optionally separated by periods, dashes or underscores. More strictly, it must match the regular expression [a-z0-9]+(?:[._-][a-z0-9]+)*.If a repository name has two or more path components, they must be separated by a forward slash ('/').The total length of a repository name, including slashes, must be less the 256 characters.",
"TIP_TAG": "A tag is a label applied to a Docker image in a repository. Tags are how various images in a repository are distinguished from each other.It need to match Regex: (`[\\w][\\w.-]{0,127}`)"
+ },
+ "CVE_WHITELIST": {
+ "DEPLOYMENT_SECURITY": "Deployment security",
+ "CVE_WHITELIST": "CVE whitelist",
+ "SYS_WHITELIST_EXPLAIN": "System whitelist allows vulnerabilities in this list to be ignored when calculating the vulnerability of an image.",
+ "ADD_SYS": "Add CVE IDs to the system whitelist",
+ "WARNING_SYS": "The system CVE whitelist has expired. You can enable the whitelist by extending the expiration date.",
+ "WARNING_PRO": "The project CVE whitelist has expired. You can enable the whitelist by extending the expiration date.",
+ "ADD": "ADD",
+ "ENTER": "Enter CVE ID(s)",
+ "HELP": "Separator: commas or newline characters",
+ "NONE": "None",
+ "EXPIRES_AT": "Expires at",
+ "NEVER_EXPIRES": "Never expires",
+ "PRO_WHITELIST_EXPLAIN": "Project whitelist allows vulnerabilities in this list to be ignored in this project when pushing and pulling images.",
+ "PRO_OR_SYS": "You can either use the default whitelist configured at the system level or click on 'Project whitelist' to create a new whitelist",
+ "MERGE_INTO": "Add individual CVE IDs before clicking 'ADD SYSTEM' to add system whitelist as well.",
+ "SYS_WHITELIST": "System whitelist",
+ "PRO_WHITELIST": "Project whitelist",
+ "ADD_SYSTEM": "ADD SYSTEM"
+ },
+ "TAG_RETENTION": {
+ "TAG_RETENTION": "Tag Retention",
+ "RETENTION_RULES": "Retention rules",
+ "RULE_NAME_1": " the images from the last {{number}} days",
+ "RULE_NAME_2": " the most recent active {{number}} images",
+ "RULE_NAME_3": " the most recently pushed {{number}} images",
+ "RULE_NAME_4": " the most recently pulled {{number}} images",
+ "RULE_NAME_5": " always",
+ "ADD_RULE": "ADD RULE",
+ "ADD_RULE_HELP_1": "Click the ADD RULE button to add a rule.",
+    "ADD_RULE_HELP_2": "Tag retention policies run once a day.",
+ "RETENTION_RUNS": "Retention runs",
+ "RUN_NOW": "RUN NOW",
+ "WHAT_IF_RUN": "DRY RUN",
+ "ABORT": "ABORT",
+ "SERIAL": "ID",
+ "STATUS": "Status",
+ "DRY_RUN": "Dry Run",
+ "START_TIME": "Start Time",
+ "DURATION": "Duration",
+ "DETAILS": "Details",
+ "REPOSITORY": "Repository",
+ "EDIT": "Edit",
+ "DISABLE": "Disable",
+ "ENABLE": "Enable",
+ "DELETE": "Delete",
+ "ADD_TITLE": "Add Tag Retention Rule",
+ "ADD_SUBTITLE": "Specify a tag retention rule for this project. All tag retention rules are independently calculated and each rule can be applied to a selected list of repositories.",
+ "BY_WHAT": "By image count or number of days",
+ "RULE_TEMPLATE_1": "the images from the last # days",
+ "RULE_TEMPLATE_2": "the most recent active # images",
+ "RULE_TEMPLATE_3": "the most recently pushed # images",
+ "RULE_TEMPLATE_4": "the most recently pulled # images",
+ "RULE_TEMPLATE_5": "always",
+ "ACTION_RETAIN": " retain",
+ "UNIT_DAY": "DAYS",
+ "UNIT_COUNT": "COUNT",
+ "NUMBER": "NUMBER",
+ "IN_REPOSITORIES": "For the repositories",
+    "REP_SEPARATOR": "Enter multiple comma-separated repos, repo*, or **",
+ "TAGS": "Tags",
+ "MATCHES_TAGS": "Matches tags",
+ "MATCHES_EXCEPT_TAGS": "Matches except tags",
+    "TAG_SEPARATOR": "Enter multiple comma-separated tags, tag*, **, or regex",
+ "LABELS": "Labels",
+ "MATCHES_LABELS": "Matches Labels",
+ "MATCHES_EXCEPT_LABELS": "Matches except Labels",
+    "REP_LABELS": "Enter multiple comma-separated labels",
+ "RETENTION_RUN": "Retention Run",
+    "RETENTION_RUN_EXPLAIN": "Executing the retention policy can have adverse effects on the images in this project and affected image tags will be deleted. Press CANCEL and use a DRY RUN to simulate the effect of this policy. Otherwise press RUN to proceed.",
+ "RETENTION_RUN_ABORTED": "Retention Run Aborted",
+    "RETENTION_RUN_ABORTED_EXPLAIN": "This retention run has been aborted. Images that have already been deleted cannot be restored. You can initiate another run to continue deleting images. To simulate a run, use the “DRY RUN” option.",
+ "LOADING": "Loading...",
+ "NO_EXECUTION": "We couldn't find any executions!",
+    "NO_HISTORY": "We couldn't find any history!",
+ "DELETION": "Deletions",
+ "EDIT_TITLE": "Edit Tag Retention Rule",
+ "LOG": "Log",
+ "EXCLUDES": "Excludes",
+ "MATCHES": "Matches",
+ "REPO": " repositories",
+ "EXC": " excluding ",
+ "MAT": " matching ",
+ "AND": " and",
+ "WITH": " with ",
+ "WITHOUT": " without ",
+ "LOWER_LABELS": " labels",
+ "WITH_CONDITION": " with",
+ "LOWER_TAGS": " tags",
+ "TRIGGER": "Schedule",
+ "RETAINED": "Retained",
+ "TOTAL": "Total",
+ "NONE": "none",
+ "RULE_NAME_6": " the images pulled within the last {{number}} days",
+ "RULE_NAME_7": " the images pushed within the last {{number}} days",
+ "RULE_TEMPLATE_6": " the images pulled within the last # days",
+ "RULE_TEMPLATE_7": " the images pushed within the last # days",
+ "SCHEDULE": "Schedule",
+    "SCHEDULE_WARNING": "Executing the retention policy can have adverse effects on the images in this project and affected image tags will be deleted."
}
diff --git a/src/portal/src/i18n/lang/zh-cn-lang.json b/src/portal/src/i18n/lang/zh-cn-lang.json
index 0c0f71dcf..313b23703 100644
--- a/src/portal/src/i18n/lang/zh-cn-lang.json
+++ b/src/portal/src/i18n/lang/zh-cn-lang.json
@@ -31,6 +31,7 @@
"TEST_MAIL": "测试邮件服务器",
"CLOSE": "关闭",
"TEST_LDAP": "测试LDAP服务器",
+ "TEST_OIDC": "测试OIDC服务器",
"MORE_INFO": "更多信息...",
"YES": "是",
"NO": "否",
@@ -42,7 +43,12 @@
"ACTIONS": "操作",
"BROWSE": "选择文件",
"UPLOAD": "上传",
- "NO_FILE": "未选择文件"
+ "NO_FILE": "未选择文件",
+ "ADD": "添加",
+ "RUN": "执行",
+ "CONTINUE": "继续",
+ "ENABLE": "启用",
+ "DISABLE": "关闭"
},
"BATCH": {
"DELETED_SUCCESS": "删除成功",
@@ -58,6 +64,7 @@
"TOOLTIP": {
"NAME_FILTER": "过滤资源的名字。不填或者“”匹配所有资源;“library/”只匹配“library”下的资源。更多的匹配模式请参考用户手册。",
"TAG_FILTER": "过滤资源的tag/version。不填或者“”匹配所有;“1.0*”只匹配以“1.0”开头的tag/version。",
+ "LABEL_FILTER": "根据标签筛选资源。",
"RESOURCE_FILTER": "过滤资源的类型。",
"PUSH_BASED": "把资源由本地Harbor推送到远端仓库。",
"PULL_BASED": "把资源由远端仓库拉取到本地Harbor。",
@@ -86,8 +93,8 @@
"NONEMPTY": "不能为空",
"ENDPOINT_FORMAT": "Endpoint必须以http://或https://开头。",
"OIDC_ENDPOIT_FORMAT": "Endpoint必须以https://开头。",
- "OIDC_NAME": "OIDC提供商的名称.",
- "OIDC_ENDPOINT": "OIDC服务器的地址.",
+ "OIDC_NAME": "OIDC提供商的名称。",
+ "OIDC_ENDPOINT": "OIDC服务器的地址。",
"OIDC_SCOPE": "在身份验证期间发送到OIDC服务器的scope。它必须包含“openid”和“offline_access”。如果您使用Google,请从此字段中删除“脱机访问”。",
"OIDC_VERIFYCERT": "如果您的OIDC服务器是通过自签名证书托管的,请取消选中此框。"
},
@@ -215,9 +222,15 @@
"DELETED_SUCCESS": "成功删除项目。",
"TOGGLED_SUCCESS": "切换状态成功。",
"FAILED_TO_DELETE_PROJECT": "项目包含镜像仓库或同步规则或Helm Charts,无法删除。",
- "INLINE_HELP_PUBLIC": "当项目设为公开后,任何人都有此项目下镜像的读权限。命令行用户不需要“docker login”就可以拉取此项目下的镜像。"
+ "INLINE_HELP_PUBLIC": "当项目设为公开后,任何人都有此项目下镜像的读权限。命令行用户不需要“docker login”就可以拉取此项目下的镜像。",
+ "COUNT_QUOTA": "存储数量",
+ "STORAGE_QUOTA": "存储容量",
+ "COUNT_QUOTA_TIP": "请输入一个'1' ~ '100000000'之间的整数, '-1'表示不设置上限。",
+ "STORAGE_QUOTA_TIP": "存储配额的上限仅采用整数值,上限为1024TB。输入“-1”作为无限制配额。",
+ "QUOTA_UNLIMIT_TIP": "如果你想要对存储不设置上限,请输入-1。"
},
"PROJECT_DETAIL": {
+ "SUMMARY": "概要",
"REPOSITORIES": "镜像仓库",
"REPLICATION": "同步",
"USERS": "成员",
@@ -226,7 +239,8 @@
"PROJECTS": "项目",
"CONFIG": "配置管理",
"HELMCHART": "Helm Charts",
- "ROBOT_ACCOUNTS": "机器人账户"
+ "ROBOT_ACCOUNTS": "机器人账户",
+ "WEBHOOKS": "Webhooks"
},
"PROJECT_CONFIG": {
"REGISTRY": "项目仓库",
@@ -298,7 +312,7 @@
"NEW_ROBOT_ACCOUNT": "添加机器人账户",
"ENABLED_STATE": "启用状态",
"EXPIRATION": "过期时间",
- "NUMBER_REQUIRED":"此项为必填项且为不为0的整数.",
+ "NUMBER_REQUIRED":"此项为必填项且为不为0的整数。",
"TOKEN_EXPIRATION":"机器人账户令牌过期时间(天)",
"DESCRIPTION": "描述",
"ACTION": "操作",
@@ -309,23 +323,54 @@
"ENABLE_ACCOUNT": "启用账户",
"DELETE": "删除",
"CREAT_ROBOT_ACCOUNT": "创建机器人账户",
- "PULL_PERMISSION": "Pull 镜像",
- "PULL_PUSH_PERMISSION": "Push和Pull 镜像",
- "PUSH_CHART_PERMISSION": "推送Chart",
- "PULL_CHART_PERMISSION": "拉取Chart",
+ "PERMISSIONS_IMAGE": "镜像",
+ "PERMISSIONS_HELMCHART": "Helm Chart",
+ "PUSH": "推送",
+ "PULL": "拉取",
"FILTER_PLACEHOLDER": "过滤机器人账户",
- "ROBOT_NAME": "不能包含特殊字符(~#$%)且长度不能超过255.",
- "ACCOUNT_EXISTING": "机器人账户已经存在.",
+ "ROBOT_NAME": "不能包含特殊字符(~#$%)且长度不能超过255。",
+ "ACCOUNT_EXISTING": "机器人账户已经存在。",
"ALERT_TEXT": "这是唯一一次复制您的个人访问令牌的机会",
- "CREATED_SUCCESS": "创建账户 '{{param}}' 成功.",
+ "CREATED_SUCCESS": "创建账户 '{{param}}' 成功。",
"COPY_SUCCESS": "成功复制 '{{param}}' 的令牌",
"DELETION_TITLE": "删除账户确认",
- "DELETION_SUMMARY": "你确认删除机器人账户 {{param}}?"
+ "DELETION_SUMMARY": "你确认删除机器人账户 {{param}}?",
+ "PULL_IS_MUST" : "拉取权限默认选中且不可修改。",
+ "EXPORT_TO_FILE" : "导出到文件中"
+ },
+ "WEBHOOK": {
+ "EDIT_BUTTON": "编辑",
+ "ENABLED_BUTTON": "启用",
+ "DISABLED_BUTTON": "停用",
+ "TYPE": "Webhook",
+ "STATUS": "状态",
+ "CREATED": "创建时间",
+ "ENABLED": "启用",
+ "DISABLED": "停用",
+ "OF": "共计",
+ "ITEMS": "条记录",
+    "LAST_TRIGGERED": "最近触发时间",
+ "EDIT_WEBHOOK": "Webhook 目标",
+ "CREATE_WEBHOOK": "创建 Webhooks",
+ "EDIT_WEBHOOK_DESC": "指定接收 Webhook 通知的目标",
+ "CREATE_WEBHOOK_DESC": "为了启用 webhook, 请提供 Endpoint 和凭据以访问 Webhook 服务器。",
+ "ENDPOINT_URL": "Endpoint 地址",
+ "URL_IS_REQUIRED": "Endpoint 地址必填",
+ "AUTH_HEADER": "Auth Header",
+ "VERIFY_REMOTE_CERT": "验证远程证书",
+ "TEST_ENDPOINT_BUTTON": "测试 ENDPOINT",
+ "CANCEL_BUTTON": "取消",
+ "SAVE_BUTTON": "保存",
+ "ENABLED_WEBHOOK_TITLE": "启用项目的 Webhooks",
+ "ENABLED_WEBHOOK_SUMMARY": "你希望开启项目的 Webhooks 吗?",
+ "DISABLED_WEBHOOK_TITLE": "停用项目的 Webhooks",
+ "DISABLED_WEBHOOK_SUMMARY": "你希望停用项目的 Webhooks 吗?"
},
"GROUP": {
"GROUP": "组",
"GROUPS": "组",
"IMPORT_LDAP_GROUP": "导入LDAP组",
+ "IMPORT_HTTP_GROUP": "新建HTTP组",
"ADD": "新增",
"EDIT": "编辑",
"DELETE": "删除",
@@ -338,8 +383,17 @@
"ADD_GROUP_SUCCESS": "添加组成功",
"EDIT_GROUP_SUCCESS": "修改组成功",
"LDAP_TYPE": "LDAP",
+ "HTTP_TYPE": "HTTP",
"OF": "共计",
- "ITEMS": "条记录"
+ "ITEMS": "条记录",
+ "NEW_MEMBER": "新建组成员",
+ "NEW_USER_INFO": "添加一个组作为具有指定角色的此项目的成员",
+ "ROLE": "权限",
+ "SYS_ADMIN": "系统管理员",
+ "PROJECT_ADMIN": "项目管理员",
+ "PROJECT_MASTER": "维护人员",
+ "DEVELOPER": "开发者",
+ "GUEST": "访客"
},
"AUDIT_LOG": {
"USERNAME": "用户名",
@@ -554,6 +608,8 @@
"TAGS_COUNT": "标签数",
"PULL_COUNT": "下载数",
"PULL_COMMAND": "Pull命令",
+ "PULL_TIME": "拉取时间",
+ "PUSH_TIME": "推送时间",
"MY_REPOSITORY": "我的仓库",
"PUBLIC_REPOSITORY": "公共仓库",
"DELETION_TITLE_REPO": "删除镜像仓库确认",
@@ -659,6 +715,19 @@
"ADD_LABEL_TO_CHART_VERSION": "添加标签到此 Chart Version",
"STATUS": "状态"
},
+ "SUMMARY": {
+ "QUOTAS": "容量",
+ "PROJECT_REPOSITORY": "项目镜像仓库",
+ "PROJECT_HELM_CHART": "项目 Helm Chart",
+ "PROJECT_MEMBER": "项目成员",
+ "PROJECT_QUOTAS": "项目容量",
+ "ARTIFACT_COUNT": "Artifact 数量",
+ "STORAGE_CONSUMPTION": "存储消耗",
+ "ADMIN": "管理员",
+ "MASTER": "维护人员",
+ "DEVELOPER": "开发者",
+ "GUEST": "访客"
+ },
"ALERT": {
"FORM_CHANGE_CONFIRMATION": "表单内容改变,确认是否取消?"
},
@@ -682,7 +751,9 @@
"LABEL": "标签",
"REPOSITORY": "仓库",
"REPO_READ_ONLY": "仓库只读",
+ "WEBHOOK_NOTIFICATION_ENABLED": "开启 WEBHOOK",
"SYSTEM": "系统设置",
+ "PROJECT_QUOTAS": "项目定额",
"VULNERABILITY": "漏洞",
"GC": "垃圾清理",
"CONFIRM_TITLE": "确认取消",
@@ -715,6 +786,7 @@
"ROOT_CERT": "镜像库根证书",
"ROOT_CERT_LINK": "下载",
"REGISTRY_CERTIFICATE": "注册证书",
+    "NO_CHANGE": "Save aborted because nothing has changed",
"TOOLTIP": {
"SELF_REGISTRATION_ENABLE": "激活注册功能。",
"SELF_REGISTRATION_DISABLE": "禁用注册功能。",
@@ -725,13 +797,14 @@
"LDAP_UID": "在搜索中用来匹配用户的属性,可以是uid,cn,email,sAMAccountName或者其它LDAP/AD服务器支持的属性。",
"LDAP_SCOPE": "搜索用户的范围。",
"TOKEN_EXPIRATION": "由令牌服务创建的令牌的过期时间(分钟),默认为30分钟。",
- "ROBOT_TOKEN_EXPIRATION": "机器人账户的令牌的过期时间(天),默认为30天,显示的结果为分钟转化的天数并向下取整。",
+ "ROBOT_TOKEN_EXPIRATION": "机器人账户的令牌的过期时间(天),默认为30天,显示的结果为分钟转化的天数并向下取整。",
"PRO_CREATION_RESTRICTION": "用来确定哪些用户有权限创建项目,默认为’所有人‘,设置为’仅管理员‘则只有管理员可以创建项目。",
- "ROOT_CERT_DOWNLOAD": "下载镜像库根证书.",
+ "ROOT_CERT_DOWNLOAD": "下载镜像库根证书。",
"SCANNING_POLICY": "基于不同需求设置镜像扫描策略。‘无’:不设置任何策略;‘每日定时’:每天在设置的时间定时执行扫描。",
"VERIFY_CERT": "检查来自LDAP服务端的证书",
"READONLY_TOOLTIP": "选中,表示正在维护状态,不可删除仓库及标签,也不可以推送镜像。",
"REPO_TOOLTIP": "用户在此模式下无法对图像执行任何操作。",
+      "WEBHOOK_TOOLTIP": "当执行推送、拉取、删除、扫描镜像或 Chart 等特定操作时,启用 webhooks 以在指定端点接收回调",
"HOURLY_CRON":"每小时运行一次。相当于 0 0 * * * *",
"WEEKLY_CRON":"每周一次,周六/周日午夜之间开始。相当于 0 0 * * * *",
"DAILY_CRON":"每天午夜运行一次。相当于 0 0 * * * *"
@@ -767,7 +840,7 @@
"HTTP_AUTH": {
"ENDPOINT": "Server Endpoint",
"TOKEN_REVIEW": "Token Review Endpoint",
- "ALWAYS_ONBOARD": "Always Onboard",
+ "SKIP_SEARCH": "Skip Search",
"VERIFY_CERT": "Authentication验证证书"
},
"OIDC": {
@@ -800,7 +873,8 @@
"TEST_MAIL_FAILED": "验证邮件服务器失败,错误: {{param}}。",
"TEST_LDAP_FAILED": "验证LDAP服务器失败,错误: {{param}}。",
"LEAVING_CONFIRMATION_TITLE": "确定离开",
- "LEAVING_CONFIRMATION_SUMMARY": "有未保存的配置更改, 确认离开当前页面?"
+ "LEAVING_CONFIRMATION_SUMMARY": "有未保存的配置更改, 确认离开当前页面?",
+ "TEST_OIDC_SUCCESS": "OIDC服务器的连通正常。"
},
"PAGE_NOT_FOUND": {
"MAIN_TITLE": "页面不存在",
@@ -859,8 +933,8 @@
},
"CHART": {
"SCANNING_TIME": "扫描完成时间:",
- "TOOLTIPS_TITLE": "{{totalPackages}}个{{package}}中的{{totalVulnerability}}个含有{{vulnerability}}.",
- "TOOLTIPS_TITLE_SINGULAR": "{{totalPackages}}个{{package}}中的{{totalVulnerability}}个含有{{vulnerability}}.",
+ "TOOLTIPS_TITLE": "{{totalPackages}}个{{package}}中的{{totalVulnerability}}个含有{{vulnerability}}。",
+ "TOOLTIPS_TITLE_SINGULAR": "{{totalPackages}}个{{package}}中的{{totalVulnerability}}个含有{{vulnerability}}。",
"TOOLTIPS_TITLE_ZERO": "没有发现可识别的漏洞包"
},
"SEVERITY": {
@@ -926,6 +1000,28 @@
"PLACEHOLDER": "未发现任何标签!",
"NAME_ALREADY_EXISTS": "标签名已存在。"
},
+ "QUOTA": {
+ "PROJECT": "项目",
+ "OWNER": "创建者",
+ "COUNT": "数量",
+ "STORAGE": "存储",
+ "EDIT": "修改",
+ "DELETE": "删除",
+ "OF": "of",
+ "PROJECT_QUOTA_DEFAULT_ARTIFACT": "每个项目的默认项目计数",
+ "PROJECT_QUOTA_DEFAULT_DISK": "每个项目的默认磁盘空间",
+ "EDIT_PROJECT_QUOTAS": "修改项目容量",
+ "EDIT_DEFAULT_PROJECT_QUOTAS": "修改项目默认配额",
+ "SET_QUOTAS": "设置项目“{{params}}”的项目配额",
+ "SET_DEFAULT_QUOTAS": "创建新项目时设置默认项目配额",
+ "COUNT_QUOTA": "配额数量",
+ "COUNT_DEFAULT_QUOTA": "默认配额数量",
+ "STORAGE_QUOTA": "配额存储",
+ "STORAGE_DEFAULT_QUOTA": "默认配额存储",
+ "SAVE_SUCCESS": "项目容量修改成功",
+ "UNLIMITED": "不设限",
+ "INVALID_INPUT": "输入错误"
+ },
"WEEKLY": {
"MONDAY": "周一",
"TUESDAY": "周二",
@@ -1009,6 +1105,106 @@
"MSG_SUCCESS": "同步成功",
"TIP_REPO": "镜像仓库名被分解为路径组件。仓库名必须至少有一个小写字母、字母数字字符,可选句点、破折号或下划线分隔。严格意义上说,它必须匹配正则表达式[a-z0-9]+(?[.-][a-z0-9]+)*.如果仓库名有两个或多个路径组件,则它们必须用正斜杠('/')分隔。包括斜杠在内的仓库名的总长度必须小于256个字符。",
"TIP_TAG": "标签是应用于存储库中的Docker映像的一种标签,它用于区分多种镜像。它需要匹配Regex:([\\w][\\w.-]{0,127})"
+ },
+ "CVE_WHITELIST": {
+ "DEPLOYMENT_SECURITY": "部署安全性",
+ "CVE_WHITELIST": "CVE白名单",
+    "SYS_WHITELIST_EXPLAIN": "在计算镜像的安全性漏洞时,在系统的CVE白名单中的漏洞将会被忽略。",
+ "ADD_SYS": "可添加一条或多条CVE ID至系统的CVE白名单中",
+    "WARNING_SYS": "系统的CVE白名单已过期。请延长有效期以使白名单生效。",
+    "WARNING_PRO": "该项目的CVE白名单已过期。请延长有效期以使白名单生效。",
+ "ADD": "添加",
+ "ENTER": "输入一条或多条CVE ID",
+ "HELP": "CVE ID之间请用英文逗号隔开或者换行",
+ "NONE": "无",
+ "EXPIRES_AT": "有效期至",
+ "NEVER_EXPIRES": "永不过期",
+ "PRO_WHITELIST_EXPLAIN": "在推送和拉取镜像时,在项目的CVE白名单中的漏洞将会被忽略",
+    "PRO_OR_SYS": "您可以选择使用系统的CVE白名单作为该项目的白名单,也可勾选“启用项目白名单”项来建立该项目自己的CVE白名单。",
+ "MERGE_INTO": "您可以点击“添加系统白名单”项将系统白名单合并至该项目白名单中,并可为该项目白名单添加特有的CVE IDs",
+ "SYS_WHITELIST": "启用系统白名单",
+ "PRO_WHITELIST": "启用项目白名单",
+ "ADD_SYSTEM": "添加系统白名单"
+ },
+ "TAG_RETENTION": {
+ "TAG_RETENTION": "Tag保留",
+ "RETENTION_RULES": "保留规则",
+ "RULE_NAME_1": "最近{{number}}天的镜像",
+ "RULE_NAME_2": "最近活跃的{{number}}个镜像",
+ "RULE_NAME_3": "最近推送的{{number}}个镜像",
+ "RULE_NAME_4": "最近拉取的{{number}}个镜像",
+ "RULE_NAME_5": "全部镜像",
+ "ADD_RULE": "添加规则",
+ "ADD_RULE_HELP_1": "点击添加按钮可添加规则",
+    "ADD_RULE_HELP_2": "Tag保留策略每天运行一次。",
+ "RETENTION_RUNS": "运行保留策略",
+ "RUN_NOW": "立即运行",
+ "WHAT_IF_RUN": "模拟运行",
+ "ABORT": "中止",
+ "SERIAL": "ID",
+ "STATUS": "状态",
+ "DRY_RUN": "模拟运行",
+ "START_TIME": "开始时间",
+ "DURATION": "持续时间",
+ "DETAILS": "详情",
+ "REPOSITORY": "仓库",
+ "EDIT": "编辑",
+ "DISABLE": "禁用",
+ "ENABLE": "启用",
+ "DELETE": "删除",
+ "ADD_TITLE": "添加Tag保留规则",
+ "ADD_SUBTITLE": "为当前项目指定tag保留规则。所有tag保留规则独立计算并且适用于所有符合条件的仓库。",
+ "BY_WHAT": "以镜像或天数为条件",
+ "RULE_TEMPLATE_1": "最近#天的镜像",
+ "RULE_TEMPLATE_2": "最近活跃的#个镜像",
+ "RULE_TEMPLATE_3": "最近推送的#个镜像",
+ "RULE_TEMPLATE_4": "最近拉取的#个镜像",
+ "RULE_TEMPLATE_5": "全部",
+ "ACTION_RETAIN": " 保留",
+ "UNIT_DAY": "天数",
+ "UNIT_COUNT": "个数",
+ "NUMBER": "数量",
+ "IN_REPOSITORIES": "应用到仓库",
+ "REP_SEPARATOR": "使用逗号分隔repos,repo*和**",
+ "TAGS": "Tags",
+ "MATCHES_TAGS": "匹配tags",
+ "MATCHES_EXCEPT_TAGS": "排除tags",
+    "TAG_SEPARATOR": "使用逗号分割tags,tag*,**或regex",
+ "LABELS": "标签",
+ "MATCHES_LABELS": "匹配标签",
+ "MATCHES_EXCEPT_LABELS": "排除标签",
+ "REP_LABELS": "使用逗号分割标签",
+ "RETENTION_RUN": "运行保留策略",
+    "RETENTION_RUN_EXPLAIN": "执行保留策略将对该项目中的镜像产生负面影响,受影响的镜像tags将会被删除。您可选择取消或者使用模拟运行,或者点击运行以继续。",
+ "RETENTION_RUN_ABORTED": "中止运行保留策略",
+ "RETENTION_RUN_ABORTED_EXPLAIN": "已中止运行保留策略,已删除的镜像不可恢复。您可执行另一个运行命令以便继续删除镜像。如需模拟运行,请点击模拟运行按钮。",
+ "LOADING": "载入中...",
+ "NO_EXECUTION": "暂无记录!",
+ "NO_HISTORY": "暂无记录!",
+ "DELETION": "删除记录",
+ "EDIT_TITLE": "编辑Tag保留规则",
+ "LOG": "日志",
+ "EXCLUDES": "排除",
+ "MATCHES": "匹配",
+ "REPO": "仓库",
+ "EXC": "排除",
+ "MAT": "匹配",
+ "AND": "且",
+ "WITH": "有",
+ "WITHOUT": "没有",
+ "LOWER_LABELS": "标签",
+ "WITH_CONDITION": "基于条件",
+ "LOWER_TAGS": "tags",
+ "TRIGGER": "定时执行",
+ "RETAINED": "保留数",
+ "TOTAL": "总数",
+ "NONE": "空",
+ "RULE_NAME_6": "最近{{number}}天被拉取过的镜像",
+ "RULE_NAME_7": "最近{{number}}天被推送过的镜像",
+ "RULE_TEMPLATE_6": "最近#天被拉取过的镜像",
+ "RULE_TEMPLATE_7": "最近#天被推送过的镜像",
+ "SCHEDULE": "定时任务",
+    "SCHEDULE_WARNING": "执行保留策略将对该项目中的镜像产生负面影响,受影响的镜像tags将会被删除。"
}
}
diff --git a/src/portal/src/styles.css b/src/portal/src/styles.css
index ec72024cf..4632d046b 100644
--- a/src/portal/src/styles.css
+++ b/src/portal/src/styles.css
@@ -77,7 +77,7 @@ body {
}
.datagrid-header{
- z-index: 1 !important;
+ z-index: 0 !important;
}
.color-green {
@@ -86,4 +86,8 @@ body {
.color-red {
color: red;
-}
\ No newline at end of file
+}
+
+.datagrid-table, .datagrid-header {
+ position: inherit !important;
+}
diff --git a/src/replication/adapter/adapter.go b/src/replication/adapter/adapter.go
index ed502c347..81c49f0a9 100644
--- a/src/replication/adapter/adapter.go
+++ b/src/replication/adapter/adapter.go
@@ -17,10 +17,19 @@ package adapter
import (
"errors"
"fmt"
+ "io"
+ "github.com/docker/distribution"
+ "github.com/goharbor/harbor/src/replication/filter"
"github.com/goharbor/harbor/src/replication/model"
)
+// const definition
+const (
+ UserAgentReplication = "harbor-replication-service"
+ MaxConcurrency = 100
+)
+
var registry = map[model.RegistryType]Factory{}
// Factory creates a specific Adapter according to the params
@@ -37,6 +46,81 @@ type Adapter interface {
HealthCheck() (model.HealthStatus, error)
}
+// ImageRegistry defines the capabilities that an image registry should have
+type ImageRegistry interface {
+ FetchImages(filters []*model.Filter) ([]*model.Resource, error)
+ ManifestExist(repository, reference string) (exist bool, digest string, err error)
+	PullManifest(repository, reference string, acceptedMediaTypes []string) (manifest distribution.Manifest, digest string, err error)
+ PushManifest(repository, reference, mediaType string, payload []byte) error
+ // the "reference" can be "tag" or "digest", the function needs to handle both
+ DeleteManifest(repository, reference string) error
+ BlobExist(repository, digest string) (exist bool, err error)
+ PullBlob(repository, digest string) (size int64, blob io.ReadCloser, err error)
+ PushBlob(repository, digest string, size int64, blob io.Reader) error
+}
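+
+// A transfer between two ImageRegistry implementations can be sketched as
+// follows (an illustration only; error handling elided):
+//
+//	size, blob, _ := src.PullBlob(repo, digest)
+//	defer blob.Close()
+//	_ = dst.PushBlob(repo, digest, size, blob)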
+
+// ChartRegistry defines the capabilities that a chart registry should have
+type ChartRegistry interface {
+ FetchCharts(filters []*model.Filter) ([]*model.Resource, error)
+ ChartExist(name, version string) (bool, error)
+ DownloadChart(name, version string) (io.ReadCloser, error)
+ UploadChart(name, version string, chart io.Reader) error
+ DeleteChart(name, version string) error
+}
+
+// Repository defines a repository object; it can be an image repository, a chart repository, etc.
+type Repository struct {
+ ResourceType string `json:"resource_type"`
+ Name string `json:"name"`
+}
+
+// GetName returns the name
+func (r *Repository) GetName() string {
+ return r.Name
+}
+
+// GetFilterableType returns the filterable type
+func (r *Repository) GetFilterableType() filter.FilterableType {
+ return filter.FilterableTypeRepository
+}
+
+// GetResourceType returns the resource type
+func (r *Repository) GetResourceType() string {
+ return r.ResourceType
+}
+
+// GetLabels returns the labels
+func (r *Repository) GetLabels() []string {
+ return nil
+}
+
+// VTag defines a vTag object; it can be an image tag, a chart version, etc.
+type VTag struct {
+ ResourceType string `json:"resource_type"`
+ Name string `json:"name"`
+ Labels []string `json:"labels"`
+}
+
+// GetFilterableType returns the filterable type
+func (v *VTag) GetFilterableType() filter.FilterableType {
+ return filter.FilterableTypeVTag
+}
+
+// GetResourceType returns the resource type
+func (v *VTag) GetResourceType() string {
+ return v.ResourceType
+}
+
+// GetName returns the name
+func (v *VTag) GetName() string {
+ return v.Name
+}
+
+// GetLabels returns the labels
+func (v *VTag) GetLabels() []string {
+ return v.Labels
+}
+
// RegisterFactory registers one adapter factory to the registry
func RegisterFactory(t model.RegistryType, factory Factory) error {
if len(t) == 0 {
diff --git a/src/replication/adapter/aliacr/adapter.go b/src/replication/adapter/aliacr/adapter.go
new file mode 100644
index 000000000..386f2b3c1
--- /dev/null
+++ b/src/replication/adapter/aliacr/adapter.go
@@ -0,0 +1,262 @@
+package aliacr
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "path/filepath"
+ "regexp"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
+ "github.com/goharbor/harbor/src/common/utils"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/common/utils/registry/auth"
+ adp "github.com/goharbor/harbor/src/replication/adapter"
+ "github.com/goharbor/harbor/src/replication/adapter/native"
+ "github.com/goharbor/harbor/src/replication/model"
+ "github.com/goharbor/harbor/src/replication/util"
+)
+
+func init() {
+ if err := adp.RegisterFactory(model.RegistryTypeAliAcr, func(registry *model.Registry) (adp.Adapter, error) {
+ return newAdapter(registry)
+ }); err != nil {
+ log.Errorf("failed to register factory for %s: %v", model.RegistryTypeAliAcr, err)
+ return
+ }
+ log.Infof("the factory for adapter %s registered", model.RegistryTypeAliAcr)
+}
+
+// example:
+// https://registry.%s.aliyuncs.com
+// https://cr.%s.aliyuncs.com
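+// Capture group 2 holds the region id, e.g. "cn-hangzhou".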
+var regRegion = regexp.MustCompile("https://(registry|cr)\\.([\\w\\-]+)\\.aliyuncs\\.com")
+
+func getRegion(url string) (region string, err error) {
+ if url == "" {
+ return "", errors.New("empty url")
+ }
+ rs := regRegion.FindStringSubmatch(url)
+ if rs == nil {
+		return "", errors.New("invalid registry|CR service url")
+	}
+	return rs[2], nil
+}
+
+func newAdapter(registry *model.Registry) (*adapter, error) {
+ region, err := getRegion(registry.URL)
+ if err != nil {
+ return nil, err
+ }
+	// normalize the URL (the user may enter the CR service URL instead of the registry URL)
+ registry.URL = fmt.Sprintf(registryEndpointTpl, region)
+
+ credential := NewAuth(region, registry.Credential.AccessKey, registry.Credential.AccessSecret)
+ authorizer := auth.NewStandardTokenAuthorizer(&http.Client{
+ Transport: util.GetHTTPTransport(registry.Insecure),
+ }, credential)
+ nativeRegistry, err := native.NewAdapterWithCustomizedAuthorizer(registry, authorizer)
+ if err != nil {
+ return nil, err
+ }
+
+ return &adapter{
+ region: region,
+ registry: registry,
+ domain: fmt.Sprintf(endpointTpl, region),
+ Adapter: nativeRegistry,
+ }, nil
+}
+
+// adapter is the adapter for the Aliyun docker registry (ACR)
+type adapter struct {
+ *native.Adapter
+ region string
+ domain string
+ registry *model.Registry
+}
+
+var _ adp.Adapter = &adapter{}
+
+// Info ...
+func (a *adapter) Info() (info *model.RegistryInfo, err error) {
+ info = &model.RegistryInfo{
+ Type: model.RegistryTypeAliAcr,
+ SupportedResourceTypes: []model.ResourceType{
+ model.ResourceTypeImage,
+ },
+ SupportedResourceFilters: []*model.FilterStyle{
+ {
+ Type: model.FilterTypeName,
+ Style: model.FilterStyleTypeText,
+ },
+ {
+ Type: model.FilterTypeTag,
+ Style: model.FilterStyleTypeText,
+ },
+ },
+ SupportedTriggers: []model.TriggerType{
+ model.TriggerTypeManual,
+ model.TriggerTypeScheduled,
+ },
+ }
+ return
+}
+
+// FetchImages lists all image resources via Aliyun's API, since ACR does not support the registry /v2/_catalog endpoint
+func (a *adapter) FetchImages(filters []*model.Filter) (resources []*model.Resource, err error) {
+ log.Debugf("FetchImages.filters: %#v\n", filters)
+
+ var client *cr.Client
+ client, err = cr.NewClientWithAccessKey(a.region, a.registry.Credential.AccessKey, a.registry.Credential.AccessSecret)
+ if err != nil {
+ return
+ }
+
+ // get filter pattern
+ var repoPattern string
+ var tagsPattern string
+ for _, filter := range filters {
+ if filter.Type == model.FilterTypeName {
+ repoPattern = filter.Value.(string)
+ }
+ if filter.Type == model.FilterTypeTag {
+ tagsPattern = filter.Value.(string)
+ }
+ }
+
+ // list repos
+ var repositories []aliRepo
+	for page := 1; ; page++ {
+		var repoListResp *aliRepoResp
+		repoListResp, err = a.listRepo(a.region, client, page)
+ if err != nil {
+ return
+ }
+ if repoPattern != "" {
+ for _, repo := range repoListResp.Data.Repos {
+
+ var ok bool
+ ok, err = util.Match(repoPattern, filepath.Join(repo.RepoNamespace, repo.RepoName))
+ if err != nil {
+ return
+ }
+ if ok {
+ repositories = append(repositories, repo)
+ }
+ }
+ } else {
+ repositories = append(repositories, repoListResp.Data.Repos...)
+ }
+
+ if repoListResp.Data.Total-(repoListResp.Data.Page*repoListResp.Data.PageSize) <= 0 {
+ break
+ }
+ }
+ log.Debugf("FetchImages.repositories: %#v\n", repositories)
+
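+	// collect tags concurrently, bounded by adp.MaxConcurrency; each task
+	// writes only to its own index in rawResources, so no locking is needed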
+ var rawResources = make([]*model.Resource, len(repositories))
+ runner := utils.NewLimitedConcurrentRunner(adp.MaxConcurrency)
+ defer runner.Cancel()
+
+ for i, r := range repositories {
+ index := i
+ repo := r
+ runner.AddTask(func() error {
+ var tags []string
+ tags, err = a.getTags(repo, client)
+ if err != nil {
+ return fmt.Errorf("List tags for repo '%s' error: %v", repo.RepoName, err)
+ }
+
+ var filterTags []string
+ if tagsPattern != "" {
+ for _, tag := range tags {
+ var ok bool
+ ok, err = util.Match(tagsPattern, tag)
+ if err != nil {
+ return fmt.Errorf("Match tag '%s' error: %v", tag, err)
+ }
+ if ok {
+ filterTags = append(filterTags, tag)
+ }
+ }
+ } else {
+ filterTags = tags
+ }
+
+ if len(filterTags) > 0 {
+ rawResources[index] = &model.Resource{
+ Type: model.ResourceTypeImage,
+ Registry: a.registry,
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{
+ Name: filepath.Join(repo.RepoNamespace, repo.RepoName),
+ },
+ Vtags: filterTags,
+ Labels: []string{},
+ },
+ }
+ }
+
+ return nil
+ })
+ }
+ runner.Wait()
+
+ if runner.IsCancelled() {
+		return nil, fmt.Errorf("FetchImages error when collecting tags for repos")
+ }
+
+ for _, r := range rawResources {
+ if r != nil {
+ resources = append(resources, r)
+ }
+ }
+
+ return
+}
+
+func (a *adapter) listRepo(region string, c *cr.Client, page int) (resp *aliRepoResp, err error) {
+	var reposReq = cr.CreateGetRepoListRequest()
+	var reposResp = cr.CreateGetRepoListResponse()
+	reposReq.SetDomain(a.domain)
+	reposReq.Page = requests.NewInteger(page)
+	reposResp, err = c.GetRepoList(reposReq)
+ if err != nil {
+ return
+ }
+	resp = &aliRepoResp{}
+	if err = json.Unmarshal(reposResp.GetHttpContentBytes(), resp); err != nil {
+		return nil, err
+	}
+
+ return
+}
+
+func (a *adapter) getTags(repo aliRepo, c *cr.Client) (tags []string, err error) {
+ log.Debugf("[ali-acr.getTags]%s: %#v\n", a.domain, repo)
+ var tagsReq = cr.CreateGetRepoTagsRequest()
+ var tagsResp = cr.CreateGetRepoTagsResponse()
+ tagsReq.SetDomain(a.domain)
+ tagsReq.RepoNamespace = repo.RepoNamespace
+ tagsReq.RepoName = repo.RepoName
+	for page := 1; ; page++ {
+		tagsReq.Page = requests.NewInteger(page)
+		log.Debugf("[GetRepoTags.req] %#v", tagsReq)
+ tagsResp, err = c.GetRepoTags(tagsReq)
+ if err != nil {
+ return
+ }
+
+		var resp = &aliTagResp{}
+		if err = json.Unmarshal(tagsResp.GetHttpContentBytes(), resp); err != nil {
+			return
+		}
+ for _, tag := range resp.Data.Tags {
+ tags = append(tags, tag.Tag)
+ }
+
+ if resp.Data.Total-(resp.Data.Page*resp.Data.PageSize) <= 0 {
+ break
+ }
+ }
+
+ return
+}
diff --git a/src/replication/adapter/aliacr/adapter_test.go b/src/replication/adapter/aliacr/adapter_test.go
new file mode 100644
index 000000000..94fd41eb3
--- /dev/null
+++ b/src/replication/adapter/aliacr/adapter_test.go
@@ -0,0 +1,203 @@
+package aliacr
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/utils/test"
+ adp "github.com/goharbor/harbor/src/replication/adapter"
+ "github.com/goharbor/harbor/src/replication/adapter/native"
+ "github.com/goharbor/harbor/src/replication/model"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAdapter_NewAdapter(t *testing.T) {
+ factory, err := adp.GetFactory("BadName")
+ assert.Nil(t, factory)
+ assert.NotNil(t, err)
+
+ factory, err = adp.GetFactory(model.RegistryTypeAliAcr)
+ assert.Nil(t, err)
+ assert.NotNil(t, factory)
+
+ // test case for URL is registry.
+ adapter, err := factory(&model.Registry{
+ Type: model.RegistryTypeAliAcr,
+ Credential: &model.Credential{
+ AccessKey: "MockAccessKey",
+ AccessSecret: "MockAccessSecret",
+ },
+ URL: "https://registry.test-region.aliyuncs.com",
+ })
+ assert.Nil(t, err)
+ assert.NotNil(t, adapter)
+
+ // test case for URL is cr service.
+ adapter, err = factory(&model.Registry{
+ Type: model.RegistryTypeAliAcr,
+ Credential: &model.Credential{
+ AccessKey: "MockAccessKey",
+ AccessSecret: "MockAccessSecret",
+ },
+ URL: "https://cr.test-region.aliyuncs.com",
+ })
+ assert.Nil(t, err)
+ assert.NotNil(t, adapter)
+
+}
+
+func getMockAdapter(t *testing.T, hasCred, health bool) (*adapter, *httptest.Server) {
+ server := test.NewServer(
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/v2/",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ fmt.Println(r.Method, r.URL)
+ if health {
+ w.WriteHeader(http.StatusOK)
+ } else {
+ w.WriteHeader(http.StatusBadRequest)
+ }
+ },
+ },
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ fmt.Println(r.Method, r.URL)
+ w.WriteHeader(http.StatusOK)
+ },
+ },
+ &test.RequestHandlerMapping{
+ Method: http.MethodPost,
+ Pattern: "/",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ fmt.Println(r.Method, r.URL)
+ if buf, e := ioutil.ReadAll(&io.LimitedReader{R: r.Body, N: 80}); e == nil {
+ fmt.Println("\t", string(buf))
+ }
+ w.WriteHeader(http.StatusOK)
+ },
+ },
+ )
+
+ registry := &model.Registry{
+ Type: model.RegistryTypeAliAcr,
+ URL: server.URL,
+ }
+ if hasCred {
+ registry.Credential = &model.Credential{
+ AccessKey: "MockAccessKey",
+ AccessSecret: "MockAccessSecret",
+ }
+ }
+ nativeRegistry, err := native.NewAdapter(registry)
+ if err != nil {
+ panic(err)
+ }
+ return &adapter{
+ Adapter: nativeRegistry,
+ region: "test-region",
+ domain: server.URL,
+ registry: registry,
+ }, server
+}
+
+func TestAdapter_Info(t *testing.T) {
+ a, s := getMockAdapter(t, true, true)
+ defer s.Close()
+ info, err := a.Info()
+ assert.Nil(t, err)
+ assert.NotNil(t, info)
+
+ assert.EqualValues(t, 1, len(info.SupportedResourceTypes))
+ assert.EqualValues(t, model.ResourceTypeImage, info.SupportedResourceTypes[0])
+}
+
+func Test_getRegion(t *testing.T) {
+ tests := []struct {
+ name string
+ url string
+ wantRegion string
+ wantErr bool
+ }{
+ {"registry shanghai", "https://registry.cn-shanghai.aliyuncs.com", "cn-shanghai", false},
+ {"invalid registry shanghai", "http://registry.cn-shanghai.aliyuncs.com", "", true},
+ {"registry hangzhou", "https://registry.cn-hangzhou.aliyuncs.com", "cn-hangzhou", false},
+ {"cr shanghai", "https://cr.cn-shanghai.aliyuncs.com", "cn-shanghai", false},
+ {"cr hangzhou", "https://cr.cn-hangzhou.aliyuncs.com", "cn-hangzhou", false},
+ {"invalid cr url", "https://acr.cn-hangzhou.aliyuncs.com", "", true},
+ {"invalid registry url", "https://registry.cn-hangzhou.ali.com", "", true},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotRegion, err := getRegion(tt.url)
+ if tt.wantErr {
+ assert.NotNil(t, err)
+ }
+ assert.Equal(t, tt.wantRegion, gotRegion)
+ })
+ }
+}
+
+var urlForBenchmark = []string{
+ "https://cr.cn-hangzhou.aliyuncs.com",
+ "https://registry.cn-shanghai.aliyuncs.com",
+}
+
+func BenchmarkGetRegion(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ for _, url := range urlForBenchmark {
+ getRegion(url)
+ }
+ }
+}
+
+func Test_adapter_FetchImages(t *testing.T) {
+ a, s := getMockAdapter(t, true, true)
+ defer s.Close()
+ var filters = []*model.Filter{}
+ var resources, err = a.FetchImages(filters)
+ assert.NotNil(t, err)
+ assert.Nil(t, resources)
+}
+func Test_aliyunAuthCredential_isCacheTokenValid(t *testing.T) {
+ type fields struct {
+ region string
+ accessKey string
+ secretKey string
+ cacheToken *registryTemporaryToken
+ cacheTokenExpiredAt time.Time
+ }
+
+ var nilTime time.Time
+ tests := []struct {
+ name string
+ fields fields
+ want bool
+ }{
+ {"nil cacheTokenExpiredAt", fields{"test-region", "MockAccessKey", "MockSecretKey", nil, nilTime}, false},
+ {"nil cacheToken", fields{"test-region", "MockAccessKey", "MockSecretKey", nil, time.Time{}}, false},
+ {"expired", fields{"test-region", "MockAccessKey", "MockSecretKey", ®istryTemporaryToken{}, time.Now().AddDate(0, 0, -1)}, false},
+ {"ok", fields{"test-region", "MockAccessKey", "MockSecretKey", ®istryTemporaryToken{}, time.Now().AddDate(0, 0, 1)}, true},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ a := &aliyunAuthCredential{
+ region: tt.fields.region,
+ accessKey: tt.fields.accessKey,
+ secretKey: tt.fields.secretKey,
+ cacheToken: tt.fields.cacheToken,
+ cacheTokenExpiredAt: tt.fields.cacheTokenExpiredAt,
+ }
+			assert.Equal(t, tt.want, a.isCacheTokenValid())
+ })
+ }
+}
diff --git a/src/replication/adapter/aliacr/auth.go b/src/replication/adapter/aliacr/auth.go
new file mode 100644
index 000000000..349d7d7ee
--- /dev/null
+++ b/src/replication/adapter/aliacr/auth.go
@@ -0,0 +1,84 @@
+package aliacr
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "time"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
+ "github.com/goharbor/harbor/src/common/http/modifier"
+ "github.com/goharbor/harbor/src/common/utils/log"
+)
+
+// Credential ...
+type Credential modifier.Modifier
+
+// Implements interface Credential
+type aliyunAuthCredential struct {
+ region string
+ accessKey string
+ secretKey string
+ cacheToken *registryTemporaryToken
+ cacheTokenExpiredAt time.Time
+}
+
+type registryTemporaryToken struct {
+ user string
+ password string
+}
+
+var _ Credential = &aliyunAuthCredential{}
+
+// NewAuth returns a Credential that fetches a temporary docker registry username and password via the Aliyun CR service API.
+func NewAuth(region, accessKey, secretKey string) Credential {
+ return &aliyunAuthCredential{
+ region: region,
+ accessKey: accessKey,
+ secretKey: secretKey,
+ cacheToken: ®istryTemporaryToken{},
+ }
+}
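+
+// A sketch of wiring the credential into a token authorizer (this mirrors
+// newAdapter in this package's adapter.go):
+//
+//	cred := NewAuth("cn-hangzhou", accessKey, secretKey)
+//	authorizer := auth.NewStandardTokenAuthorizer(httpClient, cred)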
+
+func (a *aliyunAuthCredential) Modify(r *http.Request) (err error) {
+ if !a.isCacheTokenValid() {
+ log.Debugf("[aliyunAuthCredential.Modify.updateToken]Host: %s\n", r.Host)
+ var client *cr.Client
+ client, err = cr.NewClientWithAccessKey(a.region, a.accessKey, a.secretKey)
+ if err != nil {
+ return
+ }
+
+ var tokenRequest = cr.CreateGetAuthorizationTokenRequest()
+ var tokenResponse = cr.CreateGetAuthorizationTokenResponse()
+ tokenRequest.SetDomain(fmt.Sprintf(endpointTpl, a.region))
+ tokenResponse, err = client.GetAuthorizationToken(tokenRequest)
+ if err != nil {
+ return
+ }
+		var v authorizationToken
+		if err = json.Unmarshal(tokenResponse.GetHttpContentBytes(), &v); err != nil {
+			return
+		}
+ a.cacheTokenExpiredAt = v.Data.ExpireDate.ToTime()
+ a.cacheToken.user = v.Data.TempUserName
+ a.cacheToken.password = v.Data.AuthorizationToken
+ } else {
+		log.Debug("[aliyunAuthCredential] using cached token")
+ }
+
+ r.SetBasicAuth(a.cacheToken.user, a.cacheToken.password)
+ return
+}
+
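+// isCacheTokenValid reports whether a cached temporary token exists and its
+// expiry time is set and still in the future.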
+func (a *aliyunAuthCredential) isCacheTokenValid() bool {
+	if a.cacheTokenExpiredAt.IsZero() {
+		return false
+	}
+ if a.cacheToken == nil {
+ return false
+ }
+ if time.Now().After(a.cacheTokenExpiredAt) {
+ return false
+ }
+
+ return true
+}
diff --git a/src/replication/adapter/aliacr/types.go b/src/replication/adapter/aliacr/types.go
new file mode 100644
index 000000000..ecf8779fb
--- /dev/null
+++ b/src/replication/adapter/aliacr/types.go
@@ -0,0 +1,79 @@
+package aliacr
+
+import "time"
+
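+// registryEndpointTpl is the docker registry endpoint that pulls and pushes
+// go through, while endpointTpl is the CR management API domain that the
+// Aliyun SDK requests (repo listing, tags, temporary tokens) are sent to.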
+const (
+ defaultTemporaryTokenExpiredTime = time.Hour * 1
+ registryEndpointTpl = "https://registry.%s.aliyuncs.com"
+ endpointTpl = "cr.%s.aliyuncs.com"
+)
+
+type authorizationToken struct {
+ Data struct {
+ ExpireDate timeUnix `json:"expireDate"`
+ AuthorizationToken string `json:"authorizationToken"`
+ TempUserName string `json:"tempUserName"`
+ } `json:"data"`
+ RequestID string `json:"requestId"`
+}
+
+type timeUnix int64
+
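+// ToTime converts the millisecond Unix timestamps returned by the Aliyun API
+// into a time.Time value; hence the division by 1000.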
+func (t timeUnix) ToTime() time.Time {
+ return time.Unix(int64(t)/1000, 0)
+}
+
+func (t timeUnix) String() string {
+ return t.ToTime().String()
+}
+
+type aliRepoResp struct {
+ Data struct {
+ Page int `json:"page"`
+ Total int `json:"total"`
+ PageSize int `json:"pageSize"`
+ Repos []aliRepo `json:"repos"`
+ } `json:"data"`
+ RequestID string `json:"requestId"`
+}
+
+type aliRepo struct {
+ Summary string `json:"summary"`
+ RegionID string `json:"regionId"`
+ RepoName string `json:"repoName"`
+ RepoNamespace string `json:"repoNamespace"`
+ RepoStatus string `json:"repoStatus"`
+ RepoID int `json:"repoId"`
+ RepoType string `json:"repoType"`
+ RepoBuildType string `json:"repoBuildType"`
+ GmtCreate int64 `json:"gmtCreate"`
+ RepoOriginType string `json:"repoOriginType"`
+ GmtModified int64 `json:"gmtModified"`
+ RepoDomainList struct {
+ Internal string `json:"internal"`
+ Public string `json:"public"`
+ Vpc string `json:"vpc"`
+ } `json:"repoDomainList"`
+ Downloads int `json:"downloads"`
+ RepoAuthorizeType string `json:"repoAuthorizeType"`
+ Logo string `json:"logo"`
+ Stars int `json:"stars"`
+}
+
+type aliTagResp struct {
+ Data struct {
+ Total int `json:"total"`
+ PageSize int `json:"pageSize"`
+ Page int `json:"page"`
+ Tags []struct {
+ ImageUpdate int64 `json:"imageUpdate"`
+ ImageID string `json:"imageId"`
+ Digest string `json:"digest"`
+ ImageSize int `json:"imageSize"`
+ Tag string `json:"tag"`
+ ImageCreate int64 `json:"imageCreate"`
+ Status string `json:"status"`
+ } `json:"tags"`
+ } `json:"data"`
+ RequestID string `json:"requestId"`
+}
diff --git a/src/replication/adapter/awsecr/adapter.go b/src/replication/adapter/awsecr/adapter.go
new file mode 100644
index 000000000..69ee60b49
--- /dev/null
+++ b/src/replication/adapter/awsecr/adapter.go
@@ -0,0 +1,183 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package awsecr
+
+import (
+ "errors"
+ "net/http"
+ "regexp"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ awsecrapi "github.com/aws/aws-sdk-go/service/ecr"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/common/utils/registry"
+ adp "github.com/goharbor/harbor/src/replication/adapter"
+ "github.com/goharbor/harbor/src/replication/adapter/native"
+ "github.com/goharbor/harbor/src/replication/model"
+)
+
+const (
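+	// regionPattern matches both the API endpoint and the registry endpoint
+	// forms of an ECR URL; capture group 1 is the region, e.g. "us-west-1"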
+ regionPattern = "https://(?:api|\\d+\\.dkr)\\.ecr\\.([\\w\\-]+)\\.amazonaws\\.com"
+)
+
+var (
+ regionRegexp = regexp.MustCompile(regionPattern)
+)
+
+func init() {
+ if err := adp.RegisterFactory(model.RegistryTypeAwsEcr, func(registry *model.Registry) (adp.Adapter, error) {
+ return newAdapter(registry)
+ }); err != nil {
+ log.Errorf("failed to register factory for %s: %v", model.RegistryTypeAwsEcr, err)
+ return
+ }
+ log.Infof("the factory for adapter %s registered", model.RegistryTypeAwsEcr)
+}
+
+func newAdapter(registry *model.Registry) (*adapter, error) {
+ region, err := parseRegion(registry.URL)
+ if err != nil {
+ return nil, err
+ }
+ authorizer := NewAuth(region, registry.Credential.AccessKey, registry.Credential.AccessSecret, registry.Insecure)
+ dockerRegistry, err := native.NewAdapterWithCustomizedAuthorizer(registry, authorizer)
+ if err != nil {
+ return nil, err
+ }
+ return &adapter{
+ registry: registry,
+ Adapter: dockerRegistry,
+ region: region,
+ }, nil
+}
+
+func parseRegion(url string) (string, error) {
+ rs := regionRegexp.FindStringSubmatch(url)
+ if rs == nil {
+		return "", errors.New("invalid AWS ECR service URL")
+ }
+ return rs[1], nil
+}
+
+type adapter struct {
+ *native.Adapter
+ registry *model.Registry
+ region string
+ forceEndpoint *string
+}
+
+func (*adapter) Info() (info *model.RegistryInfo, err error) {
+ return &model.RegistryInfo{
+ Type: model.RegistryTypeAwsEcr,
+ SupportedResourceTypes: []model.ResourceType{
+ model.ResourceTypeImage,
+ },
+ SupportedResourceFilters: []*model.FilterStyle{
+ {
+ Type: model.FilterTypeName,
+ Style: model.FilterStyleTypeText,
+ },
+ {
+ Type: model.FilterTypeTag,
+ Style: model.FilterStyleTypeText,
+ },
+ },
+ SupportedTriggers: []model.TriggerType{
+ model.TriggerTypeManual,
+ model.TriggerTypeScheduled,
+ },
+ }, nil
+}
+
+// HealthCheck checks health status of a registry
+func (a *adapter) HealthCheck() (model.HealthStatus, error) {
+ if a.registry.Credential == nil ||
+ len(a.registry.Credential.AccessKey) == 0 || len(a.registry.Credential.AccessSecret) == 0 {
+ log.Errorf("no credential to ping registry %s", a.registry.URL)
+ return model.Unhealthy, nil
+ }
+ if err := a.PingGet(); err != nil {
+ log.Errorf("failed to ping registry %s: %v", a.registry.URL, err)
+ return model.Unhealthy, nil
+ }
+ return model.Healthy, nil
+}
+
+// PrepareForPush creates the destination repositories on ECR, as images cannot be pushed to a repository that does not exist.
+func (a *adapter) PrepareForPush(resources []*model.Resource) error {
+ for _, resource := range resources {
+ if resource == nil {
+ return errors.New("the resource cannot be nil")
+ }
+ if resource.Metadata == nil {
+ return errors.New("the metadata of resource cannot be nil")
+ }
+ if resource.Metadata.Repository == nil {
+			return errors.New("the repository of resource cannot be nil")
+		}
+		if len(resource.Metadata.Repository.Name) == 0 {
+			return errors.New("the name of the repository cannot be empty")
+ }
+
+ err := a.createRepository(resource.Metadata.Repository.Name)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (a *adapter) createRepository(repository string) error {
+ if a.registry.Credential == nil ||
+ len(a.registry.Credential.AccessKey) == 0 || len(a.registry.Credential.AccessSecret) == 0 {
+		return errors.New("no credential")
+ }
+ cred := credentials.NewStaticCredentials(
+ a.registry.Credential.AccessKey,
+ a.registry.Credential.AccessSecret,
+ "")
+ if a.region == "" {
+ return errors.New("no region parsed")
+ }
+ config := &aws.Config{
+ Credentials: cred,
+ Region: &a.region,
+ HTTPClient: &http.Client{
+ Transport: registry.GetHTTPTransport(a.registry.Insecure),
+ },
+ }
+ if a.forceEndpoint != nil {
+ config.Endpoint = a.forceEndpoint
+ }
+ sess := session.Must(session.NewSession(config))
+
+ svc := awsecrapi.New(sess)
+
+ _, err := svc.CreateRepository(&awsecrapi.CreateRepositoryInput{
+ RepositoryName: &repository,
+ })
+ if err != nil {
+ if e, ok := err.(awserr.Error); ok {
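+			// an already-existing repository is not an error for push preparation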
+ if e.Code() == awsecrapi.ErrCodeRepositoryAlreadyExistsException {
+ return nil
+ }
+ }
+ return err
+ }
+ return nil
+}
diff --git a/src/replication/adapter/awsecr/adapter_test.go b/src/replication/adapter/awsecr/adapter_test.go
new file mode 100644
index 000000000..206e35673
--- /dev/null
+++ b/src/replication/adapter/awsecr/adapter_test.go
@@ -0,0 +1,285 @@
+package awsecr
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "regexp"
+ "testing"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/utils/test"
+ adp "github.com/goharbor/harbor/src/replication/adapter"
+ "github.com/goharbor/harbor/src/replication/adapter/native"
+ "github.com/goharbor/harbor/src/replication/model"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAdapter_NewAdapter(t *testing.T) {
+ factory, err := adp.GetFactory("BadName")
+ assert.Nil(t, factory)
+ assert.NotNil(t, err)
+
+ factory, err = adp.GetFactory(model.RegistryTypeAwsEcr)
+ assert.Nil(t, err)
+ assert.NotNil(t, factory)
+
+ adapter, err := factory(&model.Registry{
+ Type: model.RegistryTypeAwsEcr,
+ Credential: &model.Credential{
+ AccessKey: "xxx",
+ AccessSecret: "ppp",
+ },
+ URL: "https://api.ecr.test-region.amazonaws.com",
+ })
+ assert.Nil(t, err)
+ assert.NotNil(t, adapter)
+
+ adapter, err = factory(&model.Registry{
+ Type: model.RegistryTypeAwsEcr,
+ Credential: &model.Credential{
+ AccessKey: "xxx",
+ AccessSecret: "ppp",
+ },
+ URL: "https://123456.dkr.ecr.test-region.amazonaws.com",
+ })
+ assert.Nil(t, err)
+ assert.NotNil(t, adapter)
+
+ adapter, err = factory(&model.Registry{
+ Type: model.RegistryTypeAwsEcr,
+ Credential: &model.Credential{
+ AccessKey: "xxx",
+ AccessSecret: "ppp",
+ },
+ })
+ assert.Nil(t, adapter)
+ assert.NotNil(t, err)
+
+}
+
+func getMockAdapter(t *testing.T, hasCred, health bool) (*adapter, *httptest.Server) {
+ server := test.NewServer(
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/v2/_catalog",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(`
+ {
+ "repositories": [
+ "test1"
+ ]
+ }`))
+ },
+ },
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/v2/{repo}/tags/list",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(`
+ {
+ "name": "test1",
+ "tags": [
+ "latest"
+ ]
+ }`))
+ },
+ },
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/v2/",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ fmt.Println(r.Method, r.URL)
+ if health {
+ w.WriteHeader(http.StatusOK)
+ } else {
+ w.WriteHeader(http.StatusBadRequest)
+ }
+ },
+ },
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ fmt.Println(r.Method, r.URL)
+ w.WriteHeader(http.StatusOK)
+ },
+ },
+ &test.RequestHandlerMapping{
+ Method: http.MethodPost,
+ Pattern: "/",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ fmt.Println(r.Method, r.URL)
+ if buf, e := ioutil.ReadAll(&io.LimitedReader{R: r.Body, N: 80}); e == nil {
+ fmt.Println("\t", string(buf))
+ }
+ w.WriteHeader(http.StatusOK)
+ },
+ },
+ )
+
+ registry := &model.Registry{
+ Type: model.RegistryTypeAwsEcr,
+ URL: server.URL,
+ }
+ if hasCred {
+ registry.Credential = &model.Credential{
+ AccessKey: "xxx",
+ AccessSecret: "ppp",
+ }
+ }
+ dockerRegistryAdapter, err := native.NewAdapter(registry)
+ if err != nil {
+ panic(err)
+ }
+ return &adapter{
+ registry: registry,
+ Adapter: dockerRegistryAdapter,
+ region: "test-region",
+ forceEndpoint: &server.URL,
+ }, server
+}
+
+func TestAdapter_Info(t *testing.T) {
+ a, s := getMockAdapter(t, true, true)
+ defer s.Close()
+ info, err := a.Info()
+ assert.Nil(t, err)
+ assert.NotNil(t, info)
+
+ assert.EqualValues(t, 1, len(info.SupportedResourceTypes))
+ assert.EqualValues(t, model.ResourceTypeImage, info.SupportedResourceTypes[0])
+}
+
+func TestAdapter_HealthCheck(t *testing.T) {
+ a, s := getMockAdapter(t, false, true)
+ defer s.Close()
+ status, err := a.HealthCheck()
+ assert.Nil(t, err)
+ assert.NotNil(t, status)
+ assert.EqualValues(t, model.Unhealthy, status)
+
+ a, s = getMockAdapter(t, true, false)
+ defer s.Close()
+ status, err = a.HealthCheck()
+ assert.Nil(t, err)
+ assert.NotNil(t, status)
+ assert.EqualValues(t, model.Unhealthy, status)
+
+ a, s = getMockAdapter(t, true, true)
+ defer s.Close()
+ status, err = a.HealthCheck()
+ assert.Nil(t, err)
+ assert.NotNil(t, status)
+ assert.EqualValues(t, model.Healthy, status)
+}
+
+func TestAdapter_PrepareForPush(t *testing.T) {
+ a, s := getMockAdapter(t, true, true)
+ defer s.Close()
+ resources := []*model.Resource{
+ {
+ Type: model.ResourceTypeImage,
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{
+ Name: "busybox",
+ },
+ },
+ },
+ }
+
+ err := a.PrepareForPush(resources)
+ assert.Nil(t, err)
+}
+
+func TestAdapter_FetchImages(t *testing.T) {
+ a, s := getMockAdapter(t, true, true)
+ defer s.Close()
+ resources, err := a.FetchImages([]*model.Filter{
+ {
+ Type: model.FilterTypeName,
+ Value: "*",
+ },
+ {
+ Type: model.FilterTypeTag,
+ Value: "*",
+ },
+ })
+ assert.Nil(t, err)
+ assert.NotNil(t, resources)
+ assert.Equal(t, 1, len(resources))
+}
+
+func TestAwsAuthCredential_Modify(t *testing.T) {
+ et := time.Now().Add(time.Second).Unix()
+ server := test.NewServer(
+ &test.RequestHandlerMapping{
+ Method: http.MethodPost,
+ Pattern: "/",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ fmt.Println(r.Method, r.URL)
+ if buf, e := ioutil.ReadAll(&io.LimitedReader{R: r.Body, N: 80}); e == nil {
+ fmt.Println("\t", string(buf))
+ }
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(fmt.Sprintf(`
+{
+ "authorizationData" : [
+ {
+ "expiresAt" : %d,
+ "proxyEndpoint" : "https://12345.dkr.ecr.ap-northeast-1.amazonaws.com",
+ "authorizationToken" : "QVdTOmV5SndZWGxzYjJGa0lqb2llRlJNTkdSbWMyZE5RM0pXYWtoVWRrdzRNVVJIT1d0NFNXRlJjVEpET0c5cVZVUlFWRUkxUVVoS1ZDOVJjbXBIV0d4RlN6ZFZlR0UxTnk5TVdVSXdSU3RyVlRBNVRrSnVXbmhoUVdKaFlVZzFOV3d3YzJ4RVNIcHdZVWRZWTA4dmVGbHFjakphV1VKaE1YUlVkMU5JV2xWU1UxbzNSaTlKTHpaMFlXaFVPV1pXTldoelRXcFZiQ3R1SzBndlptdEtWMmMxYW5wclJrTXpkRXgzWkd4MFdWaE1PREZzV1dGWGQzVjJkbG94YkZKbFVrRnBhbVZYU1cxRksyRk9WM3AzVm1jM1N6aFNTMmhvYzBkRlFXNXRRbEJ4WkRGTVNuRkpjR2hUTldaNmJrazFNWEpPWmtwNU1WUnRMMVZNVFZZMVNYVkJjV1ZHYzA5MFUycEhkRTlFWVhsdGNrVlFXamhYUTBkR05YRklTWFE0UmtSTWNGQllPWFZoYW14NmNrbENkamROVkRsVk1UWlpkVlJpZDFaSWJYRjZPRGQ0VDNKdGVIaFRSR0Z2TDNCVU5qUXhja2w1YkhwUFJHUmpUMEpWWVdGUmRsWnpjak5TUzFaaFpEUmhaVkJ3ZUNzMGVYa3dhR2ROZW5sd1RVWkdRMkV3ZEVveU5HeEVNVVpUTWtkVWFXRlhMemMyVlRoTE9WWndNMEZ6WjFWaU4zbHZZbmhaY2tNNGFqVTRiMlJ4WlVWV01GbEtMekJTWWxSU1FYTjJiM1JEV1VzcllrSlJMM3BOUldjelN6UnpNa0lySzBGclIyUTJNM2MyZEd0VUx6VnBVWFZYY1UxdGJXcGxZMVF2TlRGc04wRm9UMFJzYlRKME5rbzRUamhvTXpkbWJVOWlMMkpXWTA1a09GWTVTM0ptWmtGQk1HTllSVTE1UWk5T1RIcHlaMEpwWWtOUmFFdHNiSEJTYlU5YVNHOHpWemxpV1haTmMzcHJVM0Z0YkU5clpUQmxjbU5GVTAxS1oycHRNV1Z5TDBKMlJHbGxVVEJDTWpSWVpHRjNLMDlGWkUxeVExTlRORll2ZDFFM1dWUXJRVFY0SzJScWNHWmhiREI1UkN0YVltOXdUelF3YlZBNFpXSlpLMUkxYld4VGRDOU9NblpxVDA1clMxbE9aemh3WVUxbVVFVjVja3BXT0ZSME4zUlVPR2MzV0ZaS1RVOVJOSEpqUlVaV09HbHBSVE5LVFRGc1RqRXljSGxOVFVGbk5sbGtNM3A0UW1OWFZrWkhRM1ZuVEhZNU1DOVhRVVJtTlV4TWFHbHFXRTh6YzNFd1dVaGtWRUkzUjBObVdtaGlla3huWjJsT2FVRXZXa2tyU1hSWlZUQnFUR2xxVGxoV1ZEWmFiRGcwZVdzM2IzWTVOVnBhZVUxUVFteHdjVkJLYkVsbU0yWkdWamc0UXpJdmVtTnNSeXQxYW1kb1VYVkxkM0U1ZEROdlVGZ3piaXREUm5oMVFqTnVTREZDVURGVlVVbEtVbGx5UlRaRUt6TkJPRWM1UVVabVVIRkNkMVZUWmpCSE5qbHlhM056YlhKdU1XMTVUa3RWZFZCemRETkplREpaY214ckswZFBOazB4ZG5GU2JsSXJUVTFUWmxSMmFtRlFOMXBEVW5CQmVFWTJZeXN5VlZKWVJrdElObEkyVDNCcGJFSktRV3N4UkhBMGRFNVBiVzAyYzJsalRFWjBjek0zTm1OUGNWWTRUMjkwVldZeGVrRjVZVGgxTlc5VWRGUkRUemcxZVVKVWFXNXNkMVZ1WmpZcmNtOHljVXRoVUZGWFdVZzNhamhWWkVaS05EUXdMMHRzVEdwNVlXSmlia1ZJTjNsRVpGRnhXRnBQTkVNeFptRlNZeTgyYUVwdEsxQXZXSEJETXpaSE0zTk9iWGgySzJKQlJHUXlUakZVYm1JMVJFZElZVVJTY2tsYU1uWkNiMHBRUW5GYVUwbGhRazV4YkZWUldWQjNUVEpEVjJzdlVVRTRlRVJDTjNsRlIwTnFSWFJuUFQwaUxDSmtZWFJoYTJWNUlqb2lRVkZGUWtGSVowRk5aa3RFYkVsdmNFTTJlbk13WWsxa1VuSlpVMGhoTDBNek9XdERjbU5RT0d0V2NISkZPV1lyYTFGQlFVRklOSGRtUVZsS1MyOWFTV2gyWTA1QlVXTkhiMGM0ZDJKUlNVSkJSRUp2UW1kcmNXaHJhVWM1ZHpCQ1FuZEZkMGhuV1VwWlNWcEpRVmRWUkVKQlJYVk5Ra1ZGUkVVNVpESnBPVVJNVUZrek5Ga3JTMmRCU1VKRlNVRTNNVnA2T1c1eVQzSjNVMnBhUW1Wc1YyOTBNRUpwY0VwbVoyTkhhbU5FU3k5WVEwY3JNSGxvTDFFNVpuZ3pVemc0WjFVMFQxQkVabVpVV1d4UFRUQTVPSGhvUjJWWmJscEZRV3hyZUN0ek1EMGlMQ0oyWlhKemFXOXVJam9pTWlJc0luUjVjR1VpT2lKRVFWUkJYMHRGV1NJc0ltVjRjR2x5WVhScGIyNGlPakUxTmpFME9EVXpNemg5"
+ }
+ ]
+}
+`, et)))
+ },
+ },
+ )
+ defer server.Close()
+ a, _ := NewAuth("test-region", "xxx", "ppp", true).(*awsAuthCredential)
+ a.forceEndpoint = &server.URL
+ req := httptest.NewRequest(http.MethodGet, "https://1234.dkr.ecr.test-region.amazonaws.com/v2/", nil)
+ err := a.Modify(req)
+ assert.Nil(t, err)
+ err = a.Modify(req)
+ assert.Nil(t, err)
+ time.Sleep(time.Second)
+ err = a.Modify(req)
+ assert.Nil(t, err)
+}
+
+var urlForBenchmark = []string{
+ "https://1234.dkr.ecr.test-region.amazonaws.com/v2/",
+ "https://api.ecr.test-region.amazonaws.com",
+ "https://test-region.amazonaws.com",
+}
+
+func compileRegexpEveryTime(url string) (string, error) {
+ rs := regexp.MustCompile(regionPattern).FindStringSubmatch(url)
+ if rs == nil {
+		return "", errors.New("invalid AWS ECR service URL")
+ }
+ return rs[1], nil
+}
+
+func BenchmarkGetRegion(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ for _, url := range urlForBenchmark {
+ parseRegion(url)
+ }
+ }
+}
+
+func BenchmarkCompileRegexpEveryTime(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ for _, url := range urlForBenchmark {
+ compileRegexpEveryTime(url)
+ }
+ }
+}
diff --git a/src/replication/adapter/awsecr/auth.go b/src/replication/adapter/awsecr/auth.go
new file mode 100644
index 000000000..998d14d04
--- /dev/null
+++ b/src/replication/adapter/awsecr/auth.go
@@ -0,0 +1,163 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package awsecr
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ awsecrapi "github.com/aws/aws-sdk-go/service/ecr"
+ "github.com/goharbor/harbor/src/common/http/modifier"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/common/utils/registry"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+)
+
+// Credential ...
+type Credential modifier.Modifier
+
+// Implements interface Credential
+type awsAuthCredential struct {
+ region string
+ accessKey string
+ accessSecret string
+ insecure bool
+ forceEndpoint *string
+
+ cacheToken *cacheToken
+ cacheExpired *time.Time
+}
+
+type cacheToken struct {
+ endpoint string
+ user string
+ password string
+ host string
+}
+
+// DefaultCacheExpiredTime is the expiration timeout for the cached aws auth token
+const DefaultCacheExpiredTime = time.Hour * 1
+
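+// Modify injects the cached ECR basic-auth credentials into the request,
+// refreshing them via the ECR GetAuthorizationToken API when the cache has
+// expired, and rewrites the request host to the proxy endpoint returned by
+// ECR.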
+func (a *awsAuthCredential) Modify(req *http.Request) error {
+	// the request may have been redirected to S3; only requests to ECR itself need the auth header
+ if !strings.Contains(req.URL.Host, ".ecr.") {
+ return nil
+ }
+ if !a.isTokenValid() {
+ endpoint, user, pass, expiresAt, err := a.getAuthorization()
+
+ if err != nil {
+ return err
+ }
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return err
+ }
+ a.cacheToken = &cacheToken{}
+ a.cacheToken.host = u.Host
+ a.cacheToken.user = user
+ a.cacheToken.password = pass
+ a.cacheToken.endpoint = endpoint
+ t := time.Now().Add(DefaultCacheExpiredTime)
+ if t.Before(*expiresAt) {
+ a.cacheExpired = &t
+ } else {
+ a.cacheExpired = expiresAt
+ }
+ }
+ req.Host = a.cacheToken.host
+ req.URL.Host = a.cacheToken.host
+ req.SetBasicAuth(a.cacheToken.user, a.cacheToken.password)
+ return nil
+}
+
+func (a *awsAuthCredential) getAuthorization() (string, string, string, *time.Time, error) {
+ log.Infof("Aws Ecr getAuthorization %s", a.accessKey)
+ cred := credentials.NewStaticCredentials(
+ a.accessKey,
+ a.accessSecret,
+ "")
+ config := &aws.Config{
+ Credentials: cred,
+ Region: &a.region,
+ HTTPClient: &http.Client{
+ Transport: registry.GetHTTPTransport(a.insecure),
+ },
+ }
+ if a.forceEndpoint != nil {
+ config.Endpoint = a.forceEndpoint
+ }
+ sess, err := session.NewSession(config)
+ if err != nil {
+ return "", "", "", nil, err
+ }
+
+ svc := awsecrapi.New(sess)
+
+ result, err := svc.GetAuthorizationToken(nil)
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return "", "", "", nil, fmt.Errorf("%s: %s", aerr.Code(), aerr.Error())
+ }
+
+ return "", "", "", nil, err
+ }
+
+ // Double check
+ if len(result.AuthorizationData) == 0 {
+ return "", "", "", nil, errors.New("no authorization token returned")
+ }
+
+ theOne := result.AuthorizationData[0]
+ expiresAt := theOne.ExpiresAt
+	payload, _ := base64.StdEncoding.DecodeString(*theOne.AuthorizationToken)
+	pair := strings.SplitN(string(payload), ":", 2)
+	if len(pair) != 2 {
+		return "", "", "", nil, errors.New("unexpected format of the authorization token")
+	}
+
+	// log the account only; never log the decoded password
+	log.Debugf("Aws Ecr getAuthorization %s", a.accessKey)
+
+ return *(theOne.ProxyEndpoint), pair[0], pair[1], expiresAt, nil
+}
+
+func (a *awsAuthCredential) isTokenValid() bool {
+ if a.cacheToken == nil {
+ return false
+ }
+ if a.cacheExpired == nil {
+ return false
+ }
+ if time.Now().After(*a.cacheExpired) {
+ a.cacheExpired = nil
+ a.cacheToken = nil
+ return false
+ }
+ return true
+}
+
+// NewAuth creates a new credential for AWS ECR
+func NewAuth(region, accessKey, accessSecret string, insecure bool) Credential {
+ return &awsAuthCredential{
+ region: region,
+ accessKey: accessKey,
+ accessSecret: accessSecret,
+ insecure: insecure,
+ }
+}
diff --git a/src/replication/adapter/azurecr/adapter.go b/src/replication/adapter/azurecr/adapter.go
new file mode 100644
index 000000000..5dc89d56b
--- /dev/null
+++ b/src/replication/adapter/azurecr/adapter.go
@@ -0,0 +1,57 @@
+package azurecr
+
+import (
+ "github.com/goharbor/harbor/src/common/utils/log"
+ adp "github.com/goharbor/harbor/src/replication/adapter"
+ "github.com/goharbor/harbor/src/replication/adapter/native"
+ "github.com/goharbor/harbor/src/replication/model"
+)
+
+func init() {
+ if err := adp.RegisterFactory(model.RegistryTypeAzureAcr, factory); err != nil {
+ log.Errorf("Register adapter factory for %s error: %v", model.RegistryTypeAzureAcr, err)
+ return
+ }
+ log.Infof("Factory for adapter %s registered", model.RegistryTypeAzureAcr)
+}
+
+func factory(registry *model.Registry) (adp.Adapter, error) {
+ dockerRegistryAdapter, err := native.NewAdapter(registry)
+ if err != nil {
+ return nil, err
+ }
+ return &adapter{
+ Adapter: dockerRegistryAdapter,
+ }, nil
+}
+
+type adapter struct {
+ *native.Adapter
+}
+
+// Ensure '*adapter' implements interface 'Adapter'.
+var _ adp.Adapter = (*adapter)(nil)
+
+// Info returns information of the registry
+func (a *adapter) Info() (*model.RegistryInfo, error) {
+ return &model.RegistryInfo{
+ Type: model.RegistryTypeAzureAcr,
+ SupportedResourceTypes: []model.ResourceType{
+ model.ResourceTypeImage,
+ },
+ SupportedResourceFilters: []*model.FilterStyle{
+ {
+ Type: model.FilterTypeName,
+ Style: model.FilterStyleTypeText,
+ },
+ {
+ Type: model.FilterTypeTag,
+ Style: model.FilterStyleTypeText,
+ },
+ },
+ SupportedTriggers: []model.TriggerType{
+ model.TriggerTypeManual,
+ model.TriggerTypeScheduled,
+ },
+ }, nil
+}
diff --git a/src/replication/adapter/azurecr/adapter_test.go b/src/replication/adapter/azurecr/adapter_test.go
new file mode 100644
index 000000000..6c1bfb365
--- /dev/null
+++ b/src/replication/adapter/azurecr/adapter_test.go
@@ -0,0 +1,17 @@
+package azurecr
+
+import (
+ "testing"
+
+ "github.com/goharbor/harbor/src/replication/model"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestInfo(t *testing.T) {
+ a := &adapter{}
+ info, err := a.Info()
+ assert.Nil(t, err)
+ assert.NotNil(t, info)
+ assert.EqualValues(t, 1, len(info.SupportedResourceTypes))
+ assert.EqualValues(t, model.ResourceTypeImage, info.SupportedResourceTypes[0])
+}
diff --git a/src/replication/adapter/dockerhub/adapter.go b/src/replication/adapter/dockerhub/adapter.go
index 374ebf8c4..3cfb692eb 100644
--- a/src/replication/adapter/dockerhub/adapter.go
+++ b/src/replication/adapter/dockerhub/adapter.go
@@ -9,9 +9,10 @@ import (
"net/http"
"strings"
+ "github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/common/utils/log"
- "github.com/goharbor/harbor/src/common/utils/registry/auth"
adp "github.com/goharbor/harbor/src/replication/adapter"
+ "github.com/goharbor/harbor/src/replication/adapter/native"
"github.com/goharbor/harbor/src/replication/model"
"github.com/goharbor/harbor/src/replication/util"
)
@@ -25,8 +26,13 @@ func init() {
}
func factory(registry *model.Registry) (adp.Adapter, error) {
- client, err := NewClient(&model.Registry{
- URL: baseURL, // specify the URL of Docker Hub
+ client, err := NewClient(registry)
+ if err != nil {
+ return nil, err
+ }
+
+ dockerRegistryAdapter, err := native.NewAdapter(&model.Registry{
+ URL: registryURL,
Credential: registry.Credential,
Insecure: registry.Insecure,
})
@@ -34,38 +40,15 @@ func factory(registry *model.Registry) (adp.Adapter, error) {
return nil, err
}
- // if the registry.Credentail isn't specified, the credential here is nil
- // the client will request the token with no authentication
- // this is needed for pulling images from public repositories
- var credential auth.Credential
- if registry.Credential != nil && len(registry.Credential.AccessSecret) != 0 {
- credential = auth.NewBasicAuthCredential(
- registry.Credential.AccessKey,
- registry.Credential.AccessSecret)
- }
- authorizer := auth.NewStandardTokenAuthorizer(&http.Client{
- Transport: util.GetHTTPTransport(registry.Insecure),
- }, credential)
-
- reg, err := adp.NewDefaultImageRegistryWithCustomizedAuthorizer(&model.Registry{
- Name: registry.Name,
- URL: registryURL, // specify the URL of Docker Hub registry service
- Credential: registry.Credential,
- Insecure: registry.Insecure,
- }, authorizer)
- if err != nil {
- return nil, err
- }
-
return &adapter{
- client: client,
- registry: registry,
- DefaultImageRegistry: reg,
+ client: client,
+ registry: registry,
+ Adapter: dockerRegistryAdapter,
}, nil
}
type adapter struct {
- *adp.DefaultImageRegistry
+ *native.Adapter
registry *model.Registry
client *Client
}
@@ -264,66 +247,84 @@ func (a *adapter) FetchImages(filters []*model.Filter) ([]*model.Resource, error
log.Debugf("got %d repositories for namespace %s", n, ns)
}
- var resources []*model.Resource
- // TODO(ChenDe): Get tags for repos in parallel
- for _, repo := range repos {
- name := fmt.Sprintf("%s/%s", repo.Namespace, repo.Name)
- // If name filter set, skip repos that don't match the filter pattern.
- if len(nameFilter) != 0 {
- m, err := util.Match(nameFilter, name)
- if err != nil {
- return nil, fmt.Errorf("match repo name '%s' against pattern '%s' error: %v", name, nameFilter, err)
- }
- if !m {
- continue
- }
- }
+ var rawResources = make([]*model.Resource, len(repos))
+ runner := utils.NewLimitedConcurrentRunner(adp.MaxConcurrency)
+ defer runner.Cancel()
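+ // collect tags for the repositories concurrently, bounded by adp.MaxConcurrency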
+ for i, r := range repos {
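+ // shadow the loop variables so each task captures its own copy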
+ index := i
+ repo := r
+ runner.AddTask(func() error {
+ name := fmt.Sprintf("%s/%s", repo.Namespace, repo.Name)
+ log.Infof("Routine started to collect tags for repo: %s", name)
- var tags []string
- page := 1
- pageSize := 100
- for {
- pageTags, err := a.getTags(repo.Namespace, repo.Name, page, pageSize)
- if err != nil {
- return nil, fmt.Errorf("get tags for repo '%s/%s' from DockerHub error: %v", repo.Namespace, repo.Name, err)
- }
- for _, t := range pageTags.Tags {
- // If tag filter set, skip tags that don't match the filter pattern.
- if len(tagFilter) != 0 {
- m, err := util.Match(tagFilter, t.Name)
- if err != nil {
- return nil, fmt.Errorf("match tag name '%s' against pattern '%s' error: %v", t.Name, tagFilter, err)
- }
-
- if !m {
- continue
- }
+ // If name filter set, skip repos that don't match the filter pattern.
+ if len(nameFilter) != 0 {
+ m, err := util.Match(nameFilter, name)
+ if err != nil {
+ return fmt.Errorf("match repo name '%s' against pattern '%s' error: %v", name, nameFilter, err)
+ }
+ if !m {
+ return nil
}
- tags = append(tags, t.Name)
}
- if len(pageTags.Next) == 0 {
- break
+ var tags []string
+ page := 1
+ pageSize := 100
+ for {
+ pageTags, err := a.getTags(repo.Namespace, repo.Name, page, pageSize)
+ if err != nil {
+ return fmt.Errorf("get tags for repo '%s/%s' from DockerHub error: %v", repo.Namespace, repo.Name, err)
+ }
+ for _, t := range pageTags.Tags {
+ // If tag filter set, skip tags that don't match the filter pattern.
+ if len(tagFilter) != 0 {
+ m, err := util.Match(tagFilter, t.Name)
+ if err != nil {
+ return fmt.Errorf("match tag name '%s' against pattern '%s' error: %v", t.Name, tagFilter, err)
+ }
+
+ if !m {
+ continue
+ }
+ }
+ tags = append(tags, t.Name)
+ }
+
+ if len(pageTags.Next) == 0 {
+ break
+ }
+ page++
}
- page++
- }
- // If the repo has no tags, skip it
- if len(tags) == 0 {
- continue
- }
+ if len(tags) > 0 {
+ rawResources[index] = &model.Resource{
+ Type: model.ResourceTypeImage,
+ Registry: a.registry,
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{
+ Name: name,
+ },
+ Vtags: tags,
+ },
+ }
+ }
- resources = append(resources, &model.Resource{
- Type: model.ResourceTypeImage,
- Registry: a.registry,
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{
- Name: name,
- },
- Vtags: tags,
- },
+ return nil
})
}
+ runner.Wait()
+
+ if runner.IsCancelled() {
+ return nil, fmt.Errorf("FetchImages error when collect tags for repos")
+ }
+
+ var resources []*model.Resource
+ for _, r := range rawResources {
+ if r != nil {
+ resources = append(resources, r)
+ }
+ }
return resources, nil
}
diff --git a/src/replication/adapter/dockerhub/adapter_test.go b/src/replication/adapter/dockerhub/adapter_test.go
index 7d73da49d..9cf9ed650 100644
--- a/src/replication/adapter/dockerhub/adapter_test.go
+++ b/src/replication/adapter/dockerhub/adapter_test.go
@@ -4,11 +4,10 @@ import (
"fmt"
"testing"
- "github.com/stretchr/testify/require"
-
adp "github.com/goharbor/harbor/src/replication/adapter"
"github.com/goharbor/harbor/src/replication/model"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
const (
@@ -24,6 +23,7 @@ func getAdapter(t *testing.T) adp.Adapter {
adapter, err := factory(&model.Registry{
Type: model.RegistryTypeDockerHub,
+ URL: baseURL,
Credential: &model.Credential{
AccessKey: testUser,
AccessSecret: testPassword,
diff --git a/src/replication/adapter/dockerhub/client.go b/src/replication/adapter/dockerhub/client.go
index 1db72eca3..7e95d174c 100644
--- a/src/replication/adapter/dockerhub/client.go
+++ b/src/replication/adapter/dockerhub/client.go
@@ -26,7 +26,7 @@ func NewClient(registry *model.Registry) (*Client, error) {
client := &Client{
host: registry.URL,
client: &http.Client{
- Transport: util.GetHTTPTransport(false),
+ Transport: util.GetHTTPTransport(registry.Insecure),
},
}
diff --git a/src/replication/adapter/googlegcr/adapter.go b/src/replication/adapter/googlegcr/adapter.go
new file mode 100644
index 000000000..4dd47d834
--- /dev/null
+++ b/src/replication/adapter/googlegcr/adapter.go
@@ -0,0 +1,89 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package googlegcr
+
+import (
+ "github.com/goharbor/harbor/src/common/utils/log"
+ adp "github.com/goharbor/harbor/src/replication/adapter"
+ "github.com/goharbor/harbor/src/replication/adapter/native"
+ "github.com/goharbor/harbor/src/replication/model"
+)
+
+func init() {
+ if err := adp.RegisterFactory(model.RegistryTypeGoogleGcr, func(registry *model.Registry) (adp.Adapter, error) {
+ return newAdapter(registry)
+ }); err != nil {
+ log.Errorf("failed to register factory for %s: %v", model.RegistryTypeGoogleGcr, err)
+ return
+ }
+ log.Infof("the factory for adapter %s registered", model.RegistryTypeGoogleGcr)
+}
+
+func newAdapter(registry *model.Registry) (*adapter, error) {
+ dockerRegistryAdapter, err := native.NewAdapter(registry)
+ if err != nil {
+ return nil, err
+ }
+
+ return &adapter{
+ registry: registry,
+ Adapter: dockerRegistryAdapter,
+ }, nil
+}
+
+type adapter struct {
+ *native.Adapter
+ registry *model.Registry
+}
+
+var _ adp.Adapter = adapter{}
+
+func (adapter) Info() (info *model.RegistryInfo, err error) {
+ return &model.RegistryInfo{
+ Type: model.RegistryTypeGoogleGcr,
+ SupportedResourceTypes: []model.ResourceType{
+ model.ResourceTypeImage,
+ },
+ SupportedResourceFilters: []*model.FilterStyle{
+ {
+ Type: model.FilterTypeName,
+ Style: model.FilterStyleTypeText,
+ },
+ {
+ Type: model.FilterTypeTag,
+ Style: model.FilterStyleTypeText,
+ },
+ },
+ SupportedTriggers: []model.TriggerType{
+ model.TriggerTypeManual,
+ model.TriggerTypeScheduled,
+ },
+ }, nil
+}
+
+// HealthCheck checks health status of a registry
+func (a adapter) HealthCheck() (model.HealthStatus, error) {
+ var err error
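+ // a credential is required to ping GCR; report unhealthy (not an error) when it is absent or the ping fails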
+ if a.registry.Credential == nil ||
+ len(a.registry.Credential.AccessKey) == 0 || len(a.registry.Credential.AccessSecret) == 0 {
+ log.Errorf("no credential to ping registry %s", a.registry.URL)
+ return model.Unhealthy, nil
+ }
+ if err = a.PingGet(); err != nil {
+ log.Errorf("failed to ping registry %s: %v", a.registry.URL, err)
+ return model.Unhealthy, nil
+ }
+ return model.Healthy, nil
+}
diff --git a/src/replication/adapter/googlegcr/adapter_test.go b/src/replication/adapter/googlegcr/adapter_test.go
new file mode 100644
index 000000000..a2acefabd
--- /dev/null
+++ b/src/replication/adapter/googlegcr/adapter_test.go
@@ -0,0 +1,161 @@
+package googlegcr
+
+import (
+ "fmt"
+ "github.com/goharbor/harbor/src/common/utils/test"
+ adp "github.com/goharbor/harbor/src/replication/adapter"
+ "github.com/goharbor/harbor/src/replication/model"
+ "github.com/stretchr/testify/assert"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func getMockAdapter(t *testing.T, hasCred, health bool) (*adapter, *httptest.Server) {
+ server := test.NewServer(
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/v2/_catalog",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(`
+ {
+ "repositories": [
+ "test1"
+ ]
+ }`))
+ },
+ },
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/v2/{repo}/tags/list",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ w.Write([]byte(`
+ {
+ "name": "test1",
+ "tags": [
+ "latest"
+ ]
+ }`))
+ },
+ },
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/v2/",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ fmt.Println(r.Method, r.URL)
+ if health {
+ w.WriteHeader(http.StatusOK)
+ } else {
+ w.WriteHeader(http.StatusBadRequest)
+ }
+ },
+ },
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ fmt.Println(r.Method, r.URL)
+ w.WriteHeader(http.StatusOK)
+ },
+ },
+ &test.RequestHandlerMapping{
+ Method: http.MethodPost,
+ Pattern: "/",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ fmt.Println(r.Method, r.URL)
+ if buf, e := ioutil.ReadAll(&io.LimitedReader{R: r.Body, N: 80}); e == nil {
+ fmt.Println("\t", string(buf))
+ }
+ w.WriteHeader(http.StatusOK)
+ },
+ },
+ )
+ registry := &model.Registry{
+ Type: model.RegistryTypeGoogleGcr,
+ URL: server.URL,
+ }
+ if hasCred {
+ registry.Credential = &model.Credential{
+ AccessKey: "_json_key",
+ AccessSecret: "ppp",
+ }
+ }
+
+ factory, err := adp.GetFactory(model.RegistryTypeGoogleGcr)
+ assert.Nil(t, err)
+ assert.NotNil(t, factory)
+ a, err := factory(registry)
+
+ assert.Nil(t, err)
+ return a.(*adapter), server
+}
+
+func TestAdapter_Info(t *testing.T) {
+ a, s := getMockAdapter(t, true, true)
+ defer s.Close()
+ info, err := a.Info()
+ assert.Nil(t, err)
+ assert.NotNil(t, info)
+ assert.EqualValues(t, 1, len(info.SupportedResourceTypes))
+ assert.EqualValues(t, model.ResourceTypeImage, info.SupportedResourceTypes[0])
+}
+
+func TestAdapter_HealthCheck(t *testing.T) {
+ a, s := getMockAdapter(t, false, true)
+ defer s.Close()
+ status, err := a.HealthCheck()
+ assert.Nil(t, err)
+ assert.NotNil(t, status)
+ assert.EqualValues(t, model.Unhealthy, status)
+ a, s = getMockAdapter(t, true, false)
+ defer s.Close()
+ status, err = a.HealthCheck()
+ assert.Nil(t, err)
+ assert.NotNil(t, status)
+ assert.EqualValues(t, model.Unhealthy, status)
+ a, s = getMockAdapter(t, true, true)
+ defer s.Close()
+ status, err = a.HealthCheck()
+ assert.Nil(t, err)
+ assert.NotNil(t, status)
+ assert.EqualValues(t, model.Healthy, status)
+}
+
+func TestAdapter_PrepareForPush(t *testing.T) {
+ a, s := getMockAdapter(t, true, true)
+ defer s.Close()
+ resources := []*model.Resource{
+ {
+ Type: model.ResourceTypeImage,
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{
+ Name: "busybox",
+ },
+ },
+ },
+ }
+ err := a.PrepareForPush(resources)
+ assert.Nil(t, err)
+}
+
+func TestAdapter_FetchImages(t *testing.T) {
+ a, s := getMockAdapter(t, true, true)
+ defer s.Close()
+ resources, err := a.FetchImages([]*model.Filter{
+ {
+ Type: model.FilterTypeName,
+ Value: "*",
+ },
+ {
+ Type: model.FilterTypeTag,
+ Value: "*",
+ },
+ })
+ assert.Nil(t, err)
+ assert.NotNil(t, resources)
+ assert.Equal(t, 1, len(resources))
+}
diff --git a/src/replication/adapter/harbor/adapter.go b/src/replication/adapter/harbor/adapter.go
index 25fc8e5b5..4c8a26597 100644
--- a/src/replication/adapter/harbor/adapter.go
+++ b/src/replication/adapter/harbor/adapter.go
@@ -27,6 +27,7 @@ import (
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/common/utils/registry/auth"
adp "github.com/goharbor/harbor/src/replication/adapter"
+ "github.com/goharbor/harbor/src/replication/adapter/native"
"github.com/goharbor/harbor/src/replication/model"
"github.com/goharbor/harbor/src/replication/util"
)
@@ -42,7 +43,7 @@ func init() {
}
type adapter struct {
- *adp.DefaultImageRegistry
+ *native.Adapter
registry *model.Registry
url string
client *common_http.Client
@@ -67,7 +68,7 @@ func newAdapter(registry *model.Registry) (*adapter, error) {
modifiers = append(modifiers, authorizer)
}
- reg, err := adp.NewDefaultImageRegistry(registry)
+ dockerRegistryAdapter, err := native.NewAdapter(registry)
if err != nil {
return nil, err
}
@@ -78,7 +79,7 @@ func newAdapter(registry *model.Registry) (*adapter, error) {
&http.Client{
Transport: transport,
}, modifiers...),
- DefaultImageRegistry: reg,
+ Adapter: dockerRegistryAdapter,
}, nil
}
@@ -97,11 +98,6 @@ func (a *adapter) Info() (*model.RegistryInfo, error) {
Type: model.FilterTypeTag,
Style: model.FilterStyleTypeText,
},
- // TODO add support for label filter
- // {
- // Type: model.FilterTypeLabel,
- // Style: model.FilterStyleTypeText,
- // },
},
SupportedTriggers: []model.TriggerType{
model.TriggerTypeManual,
@@ -118,6 +114,26 @@ func (a *adapter) Info() (*model.RegistryInfo, error) {
if sys.ChartRegistryEnabled {
info.SupportedResourceTypes = append(info.SupportedResourceTypes, model.ResourceTypeChart)
}
+ labels := []*struct {
+ Name string `json:"name"`
+ }{}
+ // labels aren't supported in some previous versions of Harbor
+ if err := a.client.Get(a.getURL()+"/api/labels?scope=g", &labels); err != nil {
+ if e, ok := err.(*common_http.Error); !ok || e.Code != http.StatusNotFound {
+ return nil, err
+ }
+ } else {
+ ls := []string{}
+ for _, label := range labels {
+ ls = append(ls, label.Name)
+ }
+ labelFilter := &model.FilterStyle{
+ Type: model.FilterTypeLabel,
+ Style: model.FilterStyleTypeList,
+ Values: ls,
+ }
+ info.SupportedResourceFilters = append(info.SupportedResourceFilters, labelFilter)
+ }
return info, nil
}
@@ -140,7 +156,7 @@ func (a *adapter) PrepareForPush(resources []*model.Resource) error {
paths := strings.Split(resource.Metadata.Repository.Name, "/")
projectName := paths[0]
// handle the public properties
- metadata := resource.Metadata.Repository.Metadata
+ metadata := abstractPublicMetadata(resource.Metadata.Repository.Metadata)
pro, exist := projects[projectName]
if exist {
metadata = mergeMetadata(pro.Metadata, metadata)
@@ -171,6 +187,19 @@ func (a *adapter) PrepareForPush(resources []*model.Resource) error {
return nil
}
+func abstractPublicMetadata(metadata map[string]interface{}) map[string]interface{} {
+ if metadata == nil {
+ return nil
+ }
+ public, exist := metadata["public"]
+ if !exist {
+ return nil
+ }
+ return map[string]interface{}{
+ "public": public,
+ }
+}
+
// currently, mergeMetadata only handles the public metadata
func mergeMetadata(metadata1, metadata2 map[string]interface{}) map[string]interface{} {
public := parsePublic(metadata1) && parsePublic(metadata2)
@@ -244,12 +273,15 @@ func (a *adapter) getProject(name string) (*project, error) {
return nil, nil
}
-func (a *adapter) getRepositories(projectID int64) ([]*repository, error) {
- repositories := []*repository{}
+func (a *adapter) getRepositories(projectID int64) ([]*adp.Repository, error) {
+ repositories := []*adp.Repository{}
url := fmt.Sprintf("%s/api/repositories?project_id=%d&page=1&page_size=500", a.getURL(), projectID)
if err := a.client.GetAndIteratePagination(url, &repositories); err != nil {
return nil, err
}
+ for _, repository := range repositories {
+ repository.ResourceType = string(model.ResourceTypeImage)
+ }
return repositories, nil
}
diff --git a/src/replication/adapter/harbor/adapter_test.go b/src/replication/adapter/harbor/adapter_test.go
index 085a62533..844e536cf 100644
--- a/src/replication/adapter/harbor/adapter_test.go
+++ b/src/replication/adapter/harbor/adapter_test.go
@@ -210,3 +210,26 @@ func TestMergeMetadata(t *testing.T) {
assert.Equal(t, strconv.FormatBool(c.public), m["public"].(string))
}
}
+
+func TestAbstractPublicMetadata(t *testing.T) {
+ // nil input metadata
+ meta := abstractPublicMetadata(nil)
+ assert.Nil(t, meta)
+
+ // contains no public metadata
+ metadata := map[string]interface{}{
+ "other": "test",
+ }
+ meta = abstractPublicMetadata(metadata)
+ assert.Nil(t, meta)
+
+ // contains public metadata
+ metadata = map[string]interface{}{
+ "other": "test",
+ "public": "true",
+ }
+ meta = abstractPublicMetadata(metadata)
+ require.NotNil(t, meta)
+ require.Equal(t, 1, len(meta))
+ require.Equal(t, "true", meta["public"].(string))
+}
diff --git a/src/replication/adapter/harbor/chart_registry.go b/src/replication/adapter/harbor/chart_registry.go
index faa9749c5..a93cc3feb 100644
--- a/src/replication/adapter/harbor/chart_registry.go
+++ b/src/replication/adapter/harbor/chart_registry.go
@@ -24,45 +24,17 @@ import (
"strings"
common_http "github.com/goharbor/harbor/src/common/http"
+ adp "github.com/goharbor/harbor/src/replication/adapter"
"github.com/goharbor/harbor/src/replication/model"
)
-type chart struct {
- Name string `json:"name"`
- Project string
-}
-
-func (c *chart) Match(filters []*model.Filter) (bool, error) {
- supportedFilters := []*model.Filter{}
- for _, filter := range filters {
- if filter.Type == model.FilterTypeName {
- supportedFilters = append(supportedFilters, filter)
- }
- }
- item := &FilterItem{
- Value: fmt.Sprintf("%s/%s", c.Project, c.Name),
- }
- return item.Match(supportedFilters)
+type label struct {
+ Name string `json:"name"`
}
type chartVersion struct {
- Name string `json:"name"`
- Version string `json:"version"`
- // TODO handle system/project level labels
- // Labels string `json:"labels"`
-}
-
-func (c *chartVersion) Match(filters []*model.Filter) (bool, error) {
- supportedFilters := []*model.Filter{}
- for _, filter := range filters {
- if filter.Type == model.FilterTypeTag {
- supportedFilters = append(supportedFilters, filter)
- }
- }
- item := &FilterItem{
- Value: c.Version,
- }
- return item.Match(supportedFilters)
+ Version string `json:"version"`
+ Labels []*label `json:"labels"`
}
type chartVersionDetail struct {
@@ -81,37 +53,60 @@ func (a *adapter) FetchCharts(filters []*model.Filter) ([]*model.Resource, error
resources := []*model.Resource{}
for _, project := range projects {
url := fmt.Sprintf("%s/api/chartrepo/%s/charts", a.getURL(), project.Name)
- charts := []*chart{}
- if err := a.client.Get(url, &charts); err != nil {
+ repositories := []*adp.Repository{}
+ if err := a.client.Get(url, &repositories); err != nil {
return nil, err
}
- for _, chart := range charts {
- chart.Project = project.Name
+ if len(repositories) == 0 {
+ continue
}
- charts, err := filterCharts(charts, filters)
- if err != nil {
- return nil, err
+ for _, repository := range repositories {
+ repository.Name = fmt.Sprintf("%s/%s", project.Name, repository.Name)
+ repository.ResourceType = string(model.ResourceTypeChart)
}
- for _, chart := range charts {
- url := fmt.Sprintf("%s/api/chartrepo/%s/charts/%s", a.getURL(), project.Name, chart.Name)
- chartVersions := []*chartVersion{}
- if err := a.client.Get(url, &chartVersions); err != nil {
+ for _, filter := range filters {
+ if err = filter.DoFilter(&repositories); err != nil {
return nil, err
}
- chartVersions, err = filterChartVersions(chartVersions, filters)
- if err != nil {
+ }
+ for _, repository := range repositories {
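+ // repository.Name was prefixed with "<project>/" above, so SplitN always yields two parts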
+ name := strings.SplitN(repository.Name, "/", 2)[1]
+ url := fmt.Sprintf("%s/api/chartrepo/%s/charts/%s", a.getURL(), project.Name, name)
+ versions := []*chartVersion{}
+ if err := a.client.Get(url, &versions); err != nil {
return nil, err
}
- for _, version := range chartVersions {
+ if len(versions) == 0 {
+ continue
+ }
+ vTags := []*adp.VTag{}
+ for _, version := range versions {
+ var labels []string
+ for _, label := range version.Labels {
+ labels = append(labels, label.Name)
+ }
+ vTags = append(vTags, &adp.VTag{
+ Name: version.Version,
+ Labels: labels,
+ ResourceType: string(model.ResourceTypeChart),
+ })
+ }
+ for _, filter := range filters {
+ if err = filter.DoFilter(&vTags); err != nil {
+ return nil, err
+ }
+ }
+
+ for _, vTag := range vTags {
resources = append(resources, &model.Resource{
Type: model.ResourceTypeChart,
Registry: a.registry,
Metadata: &model.ResourceMetadata{
Repository: &model.Repository{
- Name: fmt.Sprintf("%s/%s", project.Name, chart.Name),
+ Name: repository.Name,
Metadata: project.Metadata,
},
- Vtags: []string{version.Version},
+ Vtags: []string{vTag.Name},
},
})
}
@@ -169,6 +164,13 @@ func (a *adapter) DownloadChart(name, version string) (io.ReadCloser, error) {
if err != nil {
return nil, err
}
+ if resp.StatusCode != http.StatusOK {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return nil, fmt.Errorf("failed to download the chart %s: %d %s", req.URL.String(), resp.StatusCode, string(body))
+ }
return resp.Body, nil
}
@@ -232,31 +234,3 @@ func parseChartName(name string) (string, string, error) {
}
return "", "", fmt.Errorf("invalid chart name format: %s", name)
}
-
-func filterCharts(charts []*chart, filters []*model.Filter) ([]*chart, error) {
- result := []*chart{}
- for _, chart := range charts {
- match, err := chart.Match(filters)
- if err != nil {
- return nil, err
- }
- if match {
- result = append(result, chart)
- }
- }
- return result, nil
-}
-
-func filterChartVersions(chartVersions []*chartVersion, filters []*model.Filter) ([]*chartVersion, error) {
- result := []*chartVersion{}
- for _, chartVersion := range chartVersions {
- match, err := chartVersion.Match(filters)
- if err != nil {
- return nil, err
- }
- if match {
- result = append(result, chartVersion)
- }
- }
- return result, nil
-}
diff --git a/src/replication/adapter/harbor/chart_registry_test.go b/src/replication/adapter/harbor/chart_registry_test.go
index a2830666d..5231c0940 100644
--- a/src/replication/adapter/harbor/chart_registry_test.go
+++ b/src/replication/adapter/harbor/chart_registry_test.go
@@ -137,7 +137,7 @@ func TestDownloadChart(t *testing.T) {
},
{
Method: http.MethodGet,
- Pattern: "/api/chartrepo/library/charts/harbor-1.0.tgz",
+ Pattern: "/chartrepo/library/charts/harbor-1.0.tgz",
Handler: func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
},
diff --git a/src/replication/adapter/harbor/filter.go b/src/replication/adapter/harbor/filter.go
deleted file mode 100644
index c7f7b910c..000000000
--- a/src/replication/adapter/harbor/filter.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright Project Harbor Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package harbor
-
-import (
- "fmt"
-
- "github.com/goharbor/harbor/src/replication/model"
- "github.com/goharbor/harbor/src/replication/util"
-)
-
-// TODO unify the filter logic from different adapters into one?
-// and move the code into a separated common package
-
-// Filterable defines the interface that an object should implement
-// if the object can be filtered
-type Filterable interface {
- Match([]*model.Filter) (bool, error)
-}
-
-// FilterItem is a filterable object that can be used to match string pattern
-type FilterItem struct {
- Value string
-}
-
-// Match ...
-func (f *FilterItem) Match(filters []*model.Filter) (bool, error) {
- if len(filters) == 0 {
- return true, nil
- }
- matched := true
- for _, filter := range filters {
- pattern, ok := filter.Value.(string)
- if !ok {
- return false, fmt.Errorf("the type of filter value isn't string: %v", filter)
- }
- m, err := util.Match(pattern, f.Value)
- if err != nil {
- return false, err
- }
- if !m {
- matched = false
- break
- }
- }
- return matched, nil
-}
diff --git a/src/replication/adapter/harbor/filter_test.go b/src/replication/adapter/harbor/filter_test.go
deleted file mode 100644
index 8e873c17b..000000000
--- a/src/replication/adapter/harbor/filter_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright Project Harbor Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package harbor
-
-import (
- "testing"
-
- "github.com/goharbor/harbor/src/replication/model"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func TestMatch(t *testing.T) {
- // nil filters
- item := &FilterItem{}
- match, err := item.Match(nil)
- require.Nil(t, err)
- assert.True(t, match)
- // contains filter whose value isn't string
- item = &FilterItem{}
- filters := []*model.Filter{
- {
- Type: "test",
- Value: 1,
- },
- }
- match, err = item.Match(filters)
- require.NotNil(t, err)
- // both filters match
- item = &FilterItem{
- Value: "b/c",
- }
- filters = []*model.Filter{
- {
- Value: "b/*",
- },
- {
- Value: "*/c",
- },
- }
- match, err = item.Match(filters)
- require.Nil(t, err)
- assert.True(t, match)
- // one filter matches and the other one doesn't
- item = &FilterItem{
- Value: "b/c",
- }
- filters = []*model.Filter{
- {
- Value: "b/*",
- },
- {
- Value: "d",
- },
- }
- match, err = item.Match(filters)
- require.Nil(t, err)
- assert.False(t, match)
- // both filters don't match
- item = &FilterItem{
- Value: "b/c",
- }
- filters = []*model.Filter{
- {
- Value: "f",
- },
- {
- Value: "d",
- },
- }
- match, err = item.Match(filters)
- require.Nil(t, err)
- assert.False(t, match)
-}
diff --git a/src/replication/adapter/harbor/image_registry.go b/src/replication/adapter/harbor/image_registry.go
index c64c0c882..269627090 100644
--- a/src/replication/adapter/harbor/image_registry.go
+++ b/src/replication/adapter/harbor/image_registry.go
@@ -18,89 +18,89 @@ import (
"fmt"
"strings"
+ "github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/common/utils/log"
+ adp "github.com/goharbor/harbor/src/replication/adapter"
"github.com/goharbor/harbor/src/replication/model"
"github.com/goharbor/harbor/src/replication/util"
)
-type repository struct {
- Name string `json:"name"`
-}
-
-func (r *repository) Match(filters []*model.Filter) (bool, error) {
- supportedFilters := []*model.Filter{}
- for _, filter := range filters {
- if filter.Type == model.FilterTypeName {
- supportedFilters = append(supportedFilters, filter)
- }
- }
- item := &FilterItem{
- Value: r.Name,
- }
- return item.Match(supportedFilters)
-}
-
-type tag struct {
- Name string `json:"name"`
-}
-
-func (t *tag) Match(filters []*model.Filter) (bool, error) {
- supportedFilters := []*model.Filter{}
- for _, filter := range filters {
- if filter.Type == model.FilterTypeTag {
- supportedFilters = append(supportedFilters, filter)
- }
- }
- item := &FilterItem{
- Value: t.Name,
- }
- return item.Match(supportedFilters)
-}
-
func (a *adapter) FetchImages(filters []*model.Filter) ([]*model.Resource, error) {
projects, err := a.listCandidateProjects(filters)
if err != nil {
return nil, err
}
+
resources := []*model.Resource{}
for _, project := range projects {
repositories, err := a.getRepositories(project.ID)
if err != nil {
return nil, err
}
- repositories, err = filterRepositories(repositories, filters)
- if err != nil {
- return nil, err
+ if len(repositories) == 0 {
+ continue
}
- for _, repository := range repositories {
- url := fmt.Sprintf("%s/api/repositories/%s/tags", a.getURL(), repository.Name)
- tags := []*tag{}
- if err = a.client.Get(url, &tags); err != nil {
+ for _, filter := range filters {
+ if err = filter.DoFilter(&repositories); err != nil {
return nil, err
}
- tags, err = filterTags(tags, filters)
- if err != nil {
- return nil, err
- }
- if len(tags) == 0 {
- continue
- }
- vtags := []string{}
- for _, tag := range tags {
- vtags = append(vtags, tag.Name)
- }
- resources = append(resources, &model.Resource{
- Type: model.ResourceTypeImage,
- Registry: a.registry,
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{
- Name: repository.Name,
- Metadata: project.Metadata,
+ }
+
+ var rawResources = make([]*model.Resource, len(repositories))
+ runner := utils.NewLimitedConcurrentRunner(adp.MaxConcurrency)
+ defer runner.Cancel()
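+ // list and filter tags per repository concurrently; indexed writes into rawResources keep the results race-free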
+
+ for i, r := range repositories {
+ index := i
+ repo := r
+ runner.AddTask(func() error {
+ vTags, err := a.getTags(repo.Name)
+ if err != nil {
+ return fmt.Errorf("List tags for repo '%s' error: %v", repo.Name, err)
+ }
+ if len(vTags) == 0 {
+ rawResources[index] = nil
+ return nil
+ }
+ for _, filter := range filters {
+ if err = filter.DoFilter(&vTags); err != nil {
+ return fmt.Errorf("Filter tags %v error: %v", vTags, err)
+ }
+ }
+ if len(vTags) == 0 {
+ rawResources[index] = nil
+ return nil
+ }
+ tags := []string{}
+ for _, vTag := range vTags {
+ tags = append(tags, vTag.Name)
+ }
+ rawResources[index] = &model.Resource{
+ Type: model.ResourceTypeImage,
+ Registry: a.registry,
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{
+ Name: repo.Name,
+ Metadata: project.Metadata,
+ },
+ Vtags: tags,
},
- Vtags: vtags,
- },
+ }
+
+ return nil
})
}
+ runner.Wait()
+
+ if runner.IsCancelled() {
+ return nil, fmt.Errorf("FetchImages error when collect tags for repos")
+ }
+
+ for _, r := range rawResources {
+ if r != nil {
+ resources = append(resources, r)
+ }
+ }
}
return resources, nil
@@ -150,30 +150,28 @@ func (a *adapter) DeleteManifest(repository, reference string) error {
return a.client.Delete(url)
}
-func filterRepositories(repositories []*repository, filters []*model.Filter) ([]*repository, error) {
- result := []*repository{}
- for _, repository := range repositories {
- match, err := repository.Match(filters)
- if err != nil {
- return nil, err
- }
- if match {
- result = append(result, repository)
+func (a *adapter) getTags(repository string) ([]*adp.VTag, error) {
+ url := fmt.Sprintf("%s/api/repositories/%s/tags", a.getURL(), repository)
+ tags := []*struct {
+ Name string `json:"name"`
+ Labels []*struct {
+ Name string `json:"name"`
}
+ }{}
+ if err := a.client.Get(url, &tags); err != nil {
+ return nil, err
}
- return result, nil
-}
-
-func filterTags(tags []*tag, filters []*model.Filter) ([]*tag, error) {
- result := []*tag{}
+ vTags := []*adp.VTag{}
for _, tag := range tags {
- match, err := tag.Match(filters)
- if err != nil {
- return nil, err
- }
- if match {
- result = append(result, tag)
+ var labels []string
+ for _, label := range tag.Labels {
+ labels = append(labels, label.Name)
}
+ vTags = append(vTags, &adp.VTag{
+ Name: tag.Name,
+ Labels: labels,
+ ResourceType: string(model.ResourceTypeImage),
+ })
}
- return result, nil
+ return vTags, nil
}
diff --git a/src/replication/adapter/helmhub/adapter.go b/src/replication/adapter/helmhub/adapter.go
new file mode 100644
index 000000000..45fb7a0a3
--- /dev/null
+++ b/src/replication/adapter/helmhub/adapter.go
@@ -0,0 +1,80 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helmhub
+
+import (
+ "errors"
+ "github.com/goharbor/harbor/src/common/utils/log"
+ adp "github.com/goharbor/harbor/src/replication/adapter"
+ "github.com/goharbor/harbor/src/replication/model"
+)
+
+func init() {
+ if err := adp.RegisterFactory(model.RegistryTypeHelmHub, func(registry *model.Registry) (adp.Adapter, error) {
+ return newAdapter(registry)
+ }); err != nil {
+ log.Errorf("failed to register factory for %s: %v", model.RegistryTypeHelmHub, err)
+ return
+ }
+ log.Infof("the factory for adapter %s registered", model.RegistryTypeHelmHub)
+}
+
+type adapter struct {
+ registry *model.Registry
+ client *Client
+}
+
+func newAdapter(registry *model.Registry) (*adapter, error) {
+ return &adapter{
+ registry: registry,
+ client: NewClient(registry),
+ }, nil
+}
+
+func (a *adapter) Info() (*model.RegistryInfo, error) {
+ return &model.RegistryInfo{
+ Type: model.RegistryTypeHelmHub,
+ SupportedResourceTypes: []model.ResourceType{
+ model.ResourceTypeChart,
+ },
+ SupportedResourceFilters: []*model.FilterStyle{
+ {
+ Type: model.FilterTypeName,
+ Style: model.FilterStyleTypeText,
+ },
+ {
+ Type: model.FilterTypeTag,
+ Style: model.FilterStyleTypeText,
+ },
+ },
+ SupportedTriggers: []model.TriggerType{
+ model.TriggerTypeManual,
+ model.TriggerTypeScheduled,
+ },
+ }, nil
+}
+
+func (a *adapter) PrepareForPush(resources []*model.Resource) error {
+ return errors.New("not supported")
+}
+
+// HealthCheck checks health status of a registry
+func (a *adapter) HealthCheck() (model.HealthStatus, error) {
+ err := a.client.checkHealthy()
+ if err == nil {
+ return model.Healthy, nil
+ }
+ return model.Unhealthy, err
+}
diff --git a/src/replication/adapter/helmhub/adapter_test.go b/src/replication/adapter/helmhub/adapter_test.go
new file mode 100644
index 000000000..ee22fc6dd
--- /dev/null
+++ b/src/replication/adapter/helmhub/adapter_test.go
@@ -0,0 +1,44 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helmhub
+
+import (
+ "testing"
+
+ "github.com/goharbor/harbor/src/replication/model"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestInfo(t *testing.T) {
+ adapter := &adapter{}
+ info, err := adapter.Info()
+ require.Nil(t, err)
+ require.Equal(t, 1, len(info.SupportedResourceTypes))
+ assert.Equal(t, model.ResourceTypeChart, info.SupportedResourceTypes[0])
+}
+
+func TestPrepareForPush(t *testing.T) {
+ adapter := &adapter{}
+ err := adapter.PrepareForPush(nil)
+ require.NotNil(t, err)
+}
+
+func TestHealthCheck(t *testing.T) {
+ adapter, _ := newAdapter(nil)
+ status, err := adapter.HealthCheck()
+ require.Equal(t, model.Healthy, string(status))
+ require.Nil(t, err)
+}
diff --git a/src/replication/adapter/helmhub/chart.go b/src/replication/adapter/helmhub/chart.go
new file mode 100644
index 000000000..5c46b9e64
--- /dev/null
+++ b/src/replication/adapter/helmhub/chart.go
@@ -0,0 +1,44 @@
+package helmhub
+
+type chart struct {
+ ID string `json:"id"`
+ Type string `json:"type"`
+}
+
+type chartList struct {
+ Data []*chart `json:"data"`
+}
+
+type chartAttributes struct {
+ Version string `json:"version"`
+ URLs []string `json:"urls"`
+}
+
+type chartRepo struct {
+ Name string `json:"name"`
+ URL string `json:"url"`
+}
+
+type chartData struct {
+ Name string `json:"name"`
+ Repo *chartRepo `json:"repo"`
+}
+
+type chartInfo struct {
+ Data *chartData `json:"data"`
+}
+
+type chartRelationships struct {
+ Chart *chartInfo `json:"chart"`
+}
+
+type chartVersion struct {
+ ID string `json:"id"`
+ Type string `json:"type"`
+ Attributes *chartAttributes `json:"attributes"`
+ Relationships *chartRelationships `json:"relationships"`
+}
+
+type chartVersionList struct {
+ Data []*chartVersion `json:"data"`
+}
diff --git a/src/replication/adapter/helmhub/chart_registry.go b/src/replication/adapter/helmhub/chart_registry.go
new file mode 100644
index 000000000..d59cf73ad
--- /dev/null
+++ b/src/replication/adapter/helmhub/chart_registry.go
@@ -0,0 +1,154 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helmhub
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ adp "github.com/goharbor/harbor/src/replication/adapter"
+ "github.com/goharbor/harbor/src/replication/model"
+ "github.com/pkg/errors"
+)
+
+func (a *adapter) FetchCharts(filters []*model.Filter) ([]*model.Resource, error) {
+ charts, err := a.client.fetchCharts()
+ if err != nil {
+ return nil, err
+ }
+
+ resources := []*model.Resource{}
+ repositories := []*adp.Repository{}
+ for _, chart := range charts.Data {
+ repository := &adp.Repository{
+ ResourceType: string(model.ResourceTypeChart),
+ Name: chart.ID,
+ }
+ repositories = append(repositories, repository)
+ }
+
+ for _, filter := range filters {
+ if err = filter.DoFilter(&repositories); err != nil {
+ return nil, err
+ }
+ }
+
+ for _, repository := range repositories {
+ versionList, err := a.client.fetchChartDetail(repository.Name)
+ if err != nil {
+ log.Errorf("fetch chart detail: %v", err)
+ return nil, err
+ }
+
+ vTags := []*adp.VTag{}
+ for _, version := range versionList.Data {
+ vTags = append(vTags, &adp.VTag{
+ Name: version.Attributes.Version,
+ ResourceType: string(model.ResourceTypeChart),
+ })
+ }
+
+ for _, filter := range filters {
+ if err = filter.DoFilter(&vTags); err != nil {
+ return nil, err
+ }
+ }
+
+ for _, vTag := range vTags {
+ resources = append(resources, &model.Resource{
+ Type: model.ResourceTypeChart,
+ Registry: a.registry,
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{
+ Name: repository.Name,
+ },
+ Vtags: []string{vTag.Name},
+ },
+ })
+ }
+ }
+ return resources, nil
+}
+
+func (a *adapter) ChartExist(name, version string) (bool, error) {
+ versionList, err := a.client.fetchChartDetail(name)
+ if err != nil {
+ if err == ErrHTTPNotFound {
+ return false, nil
+ }
+ return false, err
+ }
+
+ for _, v := range versionList.Data {
+ if v.Attributes.Version == version {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+
+func (a *adapter) DownloadChart(name, version string) (io.ReadCloser, error) {
+ versionList, err := a.client.fetchChartDetail(name)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, v := range versionList.Data {
+ if v.Attributes.Version == version {
+ return a.download(v)
+ }
+ }
+ return nil, errors.New("chart not found")
+}
+
+func (a *adapter) download(version *chartVersion) (io.ReadCloser, error) {
+ if len(version.Attributes.URLs) == 0 || len(version.Attributes.URLs[0]) == 0 {
+ return nil, fmt.Errorf("cannot got the download url for chart %s", version.ID)
+ }
+
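+ // chart URLs may be relative to the chart repo URL; make them absolute before downloading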
+ url := strings.ToLower(version.Attributes.URLs[0])
+ if !(strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://")) {
+ url = fmt.Sprintf("%s/%s", version.Relationships.Chart.Data.Repo.URL, url)
+ }
+
+ req, err := http.NewRequest(http.MethodGet, url, nil)
+ if err != nil {
+ return nil, err
+ }
+ resp, err := a.client.do(req)
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode != http.StatusOK {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+ return nil, fmt.Errorf("failed to download the chart %s: %d %s", req.URL.String(), resp.StatusCode, string(body))
+ }
+ return resp.Body, nil
+}
+
+func (a *adapter) UploadChart(name, version string, chart io.Reader) error {
+ return errors.New("not supported")
+}
+
+func (a *adapter) DeleteChart(name, version string) error {
+ return errors.New("not supported")
+}
diff --git a/src/replication/adapter/helmhub/chart_registry_test.go b/src/replication/adapter/helmhub/chart_registry_test.go
new file mode 100644
index 000000000..504d14f20
--- /dev/null
+++ b/src/replication/adapter/helmhub/chart_registry_test.go
@@ -0,0 +1,94 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package helmhub
+
+import (
+ "testing"
+
+ "github.com/goharbor/harbor/src/replication/model"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFetchCharts(t *testing.T) {
+ adapter, err := newAdapter(nil)
+ require.Nil(t, err)
+ // filter 1
+ filters := []*model.Filter{
+ {
+ Type: model.FilterTypeName,
+ Value: "k*/*",
+ },
+ }
+ resources, err := adapter.FetchCharts(filters)
+ require.Nil(t, err)
+ assert.NotZero(t, len(resources))
+ assert.Equal(t, model.ResourceTypeChart, resources[0].Type)
+ assert.Equal(t, 1, len(resources[0].Metadata.Vtags))
+ assert.NotNil(t, resources[0].Metadata.Vtags[0])
+ // filter 2
+ filters = []*model.Filter{
+ {
+ Type: model.FilterTypeName,
+ Value: "harbor/*",
+ },
+ }
+ resources, err = adapter.FetchCharts(filters)
+ require.Nil(t, err)
+ assert.NotZero(t, len(resources))
+ assert.Equal(t, model.ResourceTypeChart, resources[0].Type)
+ assert.Equal(t, "harbor/harbor", resources[0].Metadata.Repository.Name)
+ assert.Equal(t, 1, len(resources[0].Metadata.Vtags))
+ assert.NotNil(t, resources[0].Metadata.Vtags[0])
+}
+
+func TestChartExist(t *testing.T) {
+ adapter, err := newAdapter(nil)
+ require.Nil(t, err)
+ exist, err := adapter.ChartExist("harbor/harbor", "1.0.0")
+ require.Nil(t, err)
+ require.True(t, exist)
+}
+
+func TestChartExist2(t *testing.T) {
+ adapter, err := newAdapter(nil)
+ require.Nil(t, err)
+ exist, err := adapter.ChartExist("goharbor/harbor", "1.0.0")
+ require.Nil(t, err)
+ require.False(t, exist)
+
+ exist, err = adapter.ChartExist("harbor/harbor", "1.0.100")
+ require.Nil(t, err)
+ require.False(t, exist)
+}
+
+func TestDownloadChart(t *testing.T) {
+ adapter, err := newAdapter(nil)
+ require.Nil(t, err)
+ _, err = adapter.DownloadChart("harbor/harbor", "1.0.0")
+ require.Nil(t, err)
+}
+
+func TestUploadChart(t *testing.T) {
+ adapter := &adapter{}
+ err := adapter.UploadChart("library/harbor", "1.0", nil)
+ require.NotNil(t, err)
+}
+
+func TestDeleteChart(t *testing.T) {
+ adapter := &adapter{}
+ err := adapter.DeleteChart("library/harbor", "1.0")
+ require.NotNil(t, err)
+}
diff --git a/src/replication/adapter/helmhub/client.go b/src/replication/adapter/helmhub/client.go
new file mode 100644
index 000000000..c69b0d7a7
--- /dev/null
+++ b/src/replication/adapter/helmhub/client.go
@@ -0,0 +1,116 @@
+package helmhub
+
+import (
+ "encoding/json"
+ "fmt"
+ "github.com/goharbor/harbor/src/replication/model"
+ "github.com/goharbor/harbor/src/replication/util"
+ "github.com/pkg/errors"
+ "io/ioutil"
+ "net/http"
+)
+
+// ErrHTTPNotFound is the error returned when a 404 response code is received
+var ErrHTTPNotFound = errors.New("Not Found")
+
+// Client is a client to interact with HelmHub
+type Client struct {
+ client *http.Client
+}
+
+// NewClient creates a new HelmHub client.
+func NewClient(registry *model.Registry) *Client {
+ return &Client{
+ client: &http.Client{
+ Transport: util.GetHTTPTransport(false),
+ },
+ }
+}
+
+// fetchCharts fetches the chart list from helm hub.
+func (c *Client) fetchCharts() (*chartList, error) {
+ request, err := http.NewRequest(http.MethodGet, baseURL+listCharts, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := c.client.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("fetch chart list error %d: %s", resp.StatusCode, string(body))
+ }
+
+ list := &chartList{}
+ err = json.Unmarshal(body, list)
+ if err != nil {
+ return nil, fmt.Errorf("unmarshal chart list response error: %v", err)
+ }
+
+ return list, nil
+}
+
+// fetchChartDetail fetches the chart detail of a chart from helm hub.
+func (c *Client) fetchChartDetail(chartName string) (*chartVersionList, error) {
+ request, err := http.NewRequest(http.MethodGet, baseURL+listVersions(chartName), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := c.client.Do(request)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode == http.StatusNotFound {
+ return nil, ErrHTTPNotFound
+ } else if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("fetch chart detail error %d: %s", resp.StatusCode, string(body))
+ }
+
+ list := &chartVersionList{}
+ err = json.Unmarshal(body, list)
+ if err != nil {
+ return nil, fmt.Errorf("unmarshal chart detail response error: %v", err)
+ }
+
+ return list, nil
+}
+
+func (c *Client) checkHealthy() error {
+ request, err := http.NewRequest(http.MethodGet, baseURL, nil)
+ if err != nil {
+ return err
+ }
+
+ resp, err := c.client.Do(request)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
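+ // drain the body so the underlying connection can be reused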
+ ioutil.ReadAll(resp.Body)
+ if resp.StatusCode >= 200 && resp.StatusCode < 300 {
+ return nil
+ }
+ return errors.New("helm hub is unhealthy")
+}
+
+// do works as a proxy for the Do function of net/http
+func (c *Client) do(req *http.Request) (*http.Response, error) {
+ return c.client.Do(req)
+}
diff --git a/src/replication/adapter/helmhub/consts.go b/src/replication/adapter/helmhub/consts.go
new file mode 100644
index 000000000..dab17bfcf
--- /dev/null
+++ b/src/replication/adapter/helmhub/consts.go
@@ -0,0 +1,12 @@
+package helmhub
+
+import "fmt"
+
+const (
+ baseURL = "https://hub.helm.sh"
+ listCharts = "/api/chartsvc/v1/charts"
+)
+
+func listVersions(chartName string) string {
+ return fmt.Sprintf("/api/chartsvc/v1/charts/%s/versions", chartName)
+}
diff --git a/src/replication/adapter/huawei/huawei_adapter.go b/src/replication/adapter/huawei/huawei_adapter.go
index 11dcfed7d..fc655e6ea 100644
--- a/src/replication/adapter/huawei/huawei_adapter.go
+++ b/src/replication/adapter/huawei/huawei_adapter.go
@@ -1,8 +1,6 @@
package huawei
import (
- "crypto/tls"
- "encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
@@ -10,8 +8,12 @@ import (
"regexp"
"strings"
+ common_http "github.com/goharbor/harbor/src/common/http"
+ "github.com/goharbor/harbor/src/common/http/modifier"
"github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/common/utils/registry/auth"
adp "github.com/goharbor/harbor/src/replication/adapter"
+ "github.com/goharbor/harbor/src/replication/adapter/native"
"github.com/goharbor/harbor/src/replication/model"
"github.com/goharbor/harbor/src/replication/util"
)
@@ -27,8 +29,9 @@ func init() {
// Adapter is for images replications between harbor and Huawei image repository(SWR)
type adapter struct {
- *adp.DefaultImageRegistry
+ *native.Adapter
registry *model.Registry
+ client *common_http.Client
}
// Info gets info about Huawei SWR
@@ -55,18 +58,8 @@ func (a *adapter) ListNamespaces(query *model.NamespaceQuery) ([]*model.Namespac
}
r.Header.Add("content-type", "application/json; charset=utf-8")
- encodeAuth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", a.registry.Credential.AccessKey, a.registry.Credential.AccessSecret)))
- r.Header.Add("Authorization", "Basic "+encodeAuth)
- client := &http.Client{}
- if a.registry.Insecure == true {
- client = &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
- },
- }
- }
- resp, err := client.Do(r)
+ resp, err := a.client.Do(r)
if err != nil {
return namespaces, err
}
@@ -119,8 +112,11 @@ func (a *adapter) ConvertResourceMetadata(resourceMetadata *model.ResourceMetada
func (a *adapter) PrepareForPush(resources []*model.Resource) error {
namespaces := map[string]struct{}{}
for _, resource := range resources {
+ var namespace string
paths := strings.Split(resource.Metadata.Repository.Name, "/")
- namespace := paths[0]
+ if len(paths) > 0 {
+ namespace = paths[0]
+ }
ns, err := a.GetNamespace(namespace)
if err != nil {
return err
@@ -132,9 +128,7 @@ func (a *adapter) PrepareForPush(resources []*model.Resource) error {
}
url := fmt.Sprintf("%s/dockyard/v2/namespaces", a.registry.URL)
- client := &http.Client{
- Transport: util.GetHTTPTransport(a.registry.Insecure),
- }
+
for namespace := range namespaces {
namespacebyte, err := json.Marshal(struct {
Namespace string `json:"namespace"`
@@ -151,10 +145,8 @@ func (a *adapter) PrepareForPush(resources []*model.Resource) error {
}
r.Header.Add("content-type", "application/json; charset=utf-8")
- encodeAuth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", a.registry.Credential.AccessKey, a.registry.Credential.AccessSecret)))
- r.Header.Add("Authorization", "Basic "+encodeAuth)
- resp, err := client.Do(r)
+ resp, err := a.client.Do(r)
if err != nil {
return err
}
@@ -184,20 +176,8 @@ func (a *adapter) GetNamespace(namespaceStr string) (*model.Namespace, error) {
}
r.Header.Add("content-type", "application/json; charset=utf-8")
- encodeAuth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", a.registry.Credential.AccessKey, a.registry.Credential.AccessSecret)))
- r.Header.Add("Authorization", "Basic "+encodeAuth)
- var client *http.Client
- if a.registry.Insecure == true {
- client = &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
- },
- }
- } else {
- client = &http.Client{}
- }
- resp, err := client.Do(r)
+ resp, err := a.client.Do(r)
if err != nil {
return namespace, err
}
@@ -232,13 +212,34 @@ func (a *adapter) HealthCheck() (model.HealthStatus, error) {
// AdapterFactory is the factory for huawei adapter
func AdapterFactory(registry *model.Registry) (adp.Adapter, error) {
- reg, err := adp.NewDefaultImageRegistry(registry)
+ dockerRegistryAdapter, err := native.NewAdapter(registry)
if err != nil {
return nil, err
}
+
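+ // build one shared HTTP client with user-agent and (optional) basic-auth modifiers, replacing the ad-hoc clients previously created per request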
+ var (
+ modifiers = []modifier.Modifier{
+ &auth.UserAgentModifier{
+ UserAgent: adp.UserAgentReplication,
+ }}
+ authorizer modifier.Modifier
+ )
+ if registry.Credential != nil {
+ authorizer = auth.NewBasicAuthCredential(
+ registry.Credential.AccessKey,
+ registry.Credential.AccessSecret)
+ modifiers = append(modifiers, authorizer)
+ }
+
return &adapter{
- registry: registry,
- DefaultImageRegistry: reg,
+ Adapter: dockerRegistryAdapter,
+ registry: registry,
+ client: common_http.NewClient(
+ &http.Client{
+ Transport: util.GetHTTPTransport(registry.Insecure),
+ },
+ modifiers...,
+ ),
}, nil
}
diff --git a/src/replication/adapter/huawei/image_registry.go b/src/replication/adapter/huawei/image_registry.go
index b3636163e..3738ea2e0 100644
--- a/src/replication/adapter/huawei/image_registry.go
+++ b/src/replication/adapter/huawei/image_registry.go
@@ -1,8 +1,6 @@
package huawei
import (
- "crypto/tls"
- "encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
@@ -25,18 +23,8 @@ func (a *adapter) FetchImages(filters []*model.Filter) ([]*model.Resource, error
}
r.Header.Add("content-type", "application/json; charset=utf-8")
- encodeAuth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", a.registry.Credential.AccessKey, a.registry.Credential.AccessSecret)))
- r.Header.Add("Authorization", "Basic "+encodeAuth)
- client := &http.Client{}
- if a.registry.Insecure == true {
- client = &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
- },
- }
- }
- resp, err := client.Do(r)
+ resp, err := a.client.Do(r)
if err != nil {
return resources, err
}
@@ -82,15 +70,7 @@ func (a *adapter) ManifestExist(repository, reference string) (exist bool, diges
r.Header.Add("content-type", "application/json; charset=utf-8")
r.Header.Add("Authorization", "Bearer "+token.Token)
- client := &http.Client{}
- if a.registry.Insecure == true {
- client = &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
- },
- }
- }
- resp, err := client.Do(r)
+ resp, err := a.client.Do(r)
if err != nil {
return exist, digest, err
}
@@ -133,15 +113,7 @@ func (a *adapter) DeleteManifest(repository, reference string) error {
r.Header.Add("content-type", "application/json; charset=utf-8")
r.Header.Add("Authorization", "Bearer "+token.Token)
- client := &http.Client{}
- if a.registry.Insecure == true {
- client = &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
- },
- }
- }
- resp, err := client.Do(r)
+ resp, err := a.client.Do(r)
if err != nil {
return err
}
@@ -220,18 +192,8 @@ func getJwtToken(a *adapter, repository string) (token jwtToken, err error) {
}
r.Header.Add("content-type", "application/json; charset=utf-8")
- encodeAuth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", a.registry.Credential.AccessKey, a.registry.Credential.AccessSecret)))
- r.Header.Add("Authorization", "Basic "+encodeAuth)
- client := &http.Client{}
- if a.registry.Insecure == true {
- client = &http.Client{
- Transport: &http.Transport{
- TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
- },
- }
- }
- resp, err := client.Do(r)
+ resp, err := a.client.Do(r)
if err != nil {
return token, err
}
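
All four per-call-site http.Client constructions in this file, each re-deciding whether to skip TLS verification, collapse into the shared a.client. The helper the new code relies on is not shown in this patch; a plausible sketch of what util.GetHTTPTransport does, based on the code it replaces:

package main

import (
	"crypto/tls"
	"net/http"
)

// getHTTPTransport is an assumed stand-in for util.GetHTTPTransport: it
// returns a transport that optionally skips TLS certificate verification.
func getHTTPTransport(insecure bool) *http.Transport {
	if insecure {
		return &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}
	return &http.Transport{}
}

func main() {
	// one client per adapter instead of one per request site
	client := &http.Client{Transport: getHTTPTransport(true)}
	_ = client
}
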
diff --git a/src/replication/adapter/huawei/image_registry_test.go b/src/replication/adapter/huawei/image_registry_test.go
index 8eff4b420..94e38908a 100644
--- a/src/replication/adapter/huawei/image_registry_test.go
+++ b/src/replication/adapter/huawei/image_registry_test.go
@@ -1,6 +1,7 @@
package huawei
import (
+ "os"
"strings"
"testing"
@@ -20,7 +21,11 @@ func init() {
Insecure: false,
Status: "",
}
- HWAdapter.registry = hwRegistry
+ adp, err := AdapterFactory(hwRegistry)
+ if err != nil {
+ os.Exit(1)
+ }
+ HWAdapter = *adp.(*adapter)
}
func TestAdapter_FetchImages(t *testing.T) {
diff --git a/src/replication/adapter/image_registry.go b/src/replication/adapter/image_registry.go
deleted file mode 100644
index fa4122fdc..000000000
--- a/src/replication/adapter/image_registry.go
+++ /dev/null
@@ -1,255 +0,0 @@
-// Copyright Project Harbor Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package adapter
-
-import (
- "errors"
- "io"
- "net/http"
- "strings"
- "sync"
-
- "github.com/docker/distribution"
- "github.com/docker/distribution/manifest/schema1"
- "github.com/goharbor/harbor/src/common/http/modifier"
- common_http_auth "github.com/goharbor/harbor/src/common/http/modifier/auth"
- "github.com/goharbor/harbor/src/common/utils/log"
- registry_pkg "github.com/goharbor/harbor/src/common/utils/registry"
- "github.com/goharbor/harbor/src/common/utils/registry/auth"
- "github.com/goharbor/harbor/src/replication/model"
- "github.com/goharbor/harbor/src/replication/util"
-)
-
-// const definition
-const (
- UserAgentReplication = "harbor-replication-service"
-)
-
-// ImageRegistry defines the capabilities that an image registry should have
-type ImageRegistry interface {
- FetchImages(filters []*model.Filter) ([]*model.Resource, error)
- ManifestExist(repository, reference string) (exist bool, digest string, err error)
- PullManifest(repository, reference string, accepttedMediaTypes []string) (manifest distribution.Manifest, digest string, err error)
- PushManifest(repository, reference, mediaType string, payload []byte) error
- // the "reference" can be "tag" or "digest", the function needs to handle both
- DeleteManifest(repository, reference string) error
- BlobExist(repository, digest string) (exist bool, err error)
- PullBlob(repository, digest string) (size int64, blob io.ReadCloser, err error)
- PushBlob(repository, digest string, size int64, blob io.Reader) error
-}
-
-// DefaultImageRegistry provides a default implementation for interface ImageRegistry
-type DefaultImageRegistry struct {
- sync.RWMutex
- *registry_pkg.Registry
- registry *model.Registry
- client *http.Client
- clients map[string]*registry_pkg.Repository
-}
-
-// NewDefaultImageRegistry returns an instance of DefaultImageRegistry
-func NewDefaultImageRegistry(registry *model.Registry) (*DefaultImageRegistry, error) {
- var authorizer modifier.Modifier
- if registry.Credential != nil && len(registry.Credential.AccessSecret) != 0 {
- var cred modifier.Modifier
- if registry.Credential.Type == model.CredentialTypeSecret {
- cred = common_http_auth.NewSecretAuthorizer(registry.Credential.AccessSecret)
- } else {
- cred = auth.NewBasicAuthCredential(
- registry.Credential.AccessKey,
- registry.Credential.AccessSecret)
- }
- authorizer = auth.NewStandardTokenAuthorizer(&http.Client{
- Transport: util.GetHTTPTransport(registry.Insecure),
- }, cred, registry.TokenServiceURL)
- }
- return NewDefaultImageRegistryWithCustomizedAuthorizer(registry, authorizer)
-}
-
-// NewDefaultImageRegistryWithCustomizedAuthorizer returns an instance of DefaultImageRegistry with the customized authorizer
-func NewDefaultImageRegistryWithCustomizedAuthorizer(registry *model.Registry, authorizer modifier.Modifier) (*DefaultImageRegistry, error) {
- transport := util.GetHTTPTransport(registry.Insecure)
- modifiers := []modifier.Modifier{
- &auth.UserAgentModifier{
- UserAgent: UserAgentReplication,
- },
- }
- if authorizer != nil {
- modifiers = append(modifiers, authorizer)
- }
- client := &http.Client{
- Transport: registry_pkg.NewTransport(transport, modifiers...),
- }
- reg, err := registry_pkg.NewRegistry(registry.URL, client)
- if err != nil {
- return nil, err
- }
- return &DefaultImageRegistry{
- Registry: reg,
- client: client,
- registry: registry,
- clients: map[string]*registry_pkg.Repository{},
- }, nil
-}
-
-func (d *DefaultImageRegistry) getClient(repository string) (*registry_pkg.Repository, error) {
- d.RLock()
- client, exist := d.clients[repository]
- d.RUnlock()
- if exist {
- return client, nil
- }
-
- return d.create(repository)
-}
-
-func (d *DefaultImageRegistry) create(repository string) (*registry_pkg.Repository, error) {
- d.Lock()
- defer d.Unlock()
- // double check
- client, exist := d.clients[repository]
- if exist {
- return client, nil
- }
-
- client, err := registry_pkg.NewRepository(repository, d.registry.URL, d.client)
- if err != nil {
- return nil, err
- }
- d.clients[repository] = client
- return client, nil
-}
-
-// HealthCheck checks health status of a registry
-func (d *DefaultImageRegistry) HealthCheck() (model.HealthStatus, error) {
- var err error
- if d.registry.Credential == nil ||
- (len(d.registry.Credential.AccessKey) == 0 && len(d.registry.Credential.AccessSecret) == 0) {
- err = d.PingSimple()
- } else {
- err = d.Ping()
- }
- if err != nil {
- log.Errorf("failed to ping registry %s: %v", d.registry.URL, err)
- return model.Unhealthy, nil
- }
- return model.Healthy, nil
-}
-
-// FetchImages ...
-func (d *DefaultImageRegistry) FetchImages(namespaces []string, filters []*model.Filter) ([]*model.Resource, error) {
- return nil, errors.New("not implemented")
-}
-
-// ManifestExist ...
-func (d *DefaultImageRegistry) ManifestExist(repository, reference string) (bool, string, error) {
- client, err := d.getClient(repository)
- if err != nil {
- return false, "", err
- }
- digest, exist, err := client.ManifestExist(reference)
- return exist, digest, err
-}
-
-// PullManifest ...
-func (d *DefaultImageRegistry) PullManifest(repository, reference string, accepttedMediaTypes []string) (distribution.Manifest, string, error) {
- client, err := d.getClient(repository)
- if err != nil {
- return nil, "", err
- }
- digest, mediaType, payload, err := client.PullManifest(reference, accepttedMediaTypes)
- if err != nil {
- return nil, "", err
- }
- if strings.Contains(mediaType, "application/json") {
- mediaType = schema1.MediaTypeManifest
- }
- manifest, _, err := registry_pkg.UnMarshal(mediaType, payload)
- if err != nil {
- return nil, "", err
- }
- return manifest, digest, nil
-}
-
-// PushManifest ...
-func (d *DefaultImageRegistry) PushManifest(repository, reference, mediaType string, payload []byte) error {
- client, err := d.getClient(repository)
- if err != nil {
- return err
- }
- _, err = client.PushManifest(reference, mediaType, payload)
- return err
-}
-
-// DeleteManifest ...
-func (d *DefaultImageRegistry) DeleteManifest(repository, reference string) error {
- client, err := d.getClient(repository)
- if err != nil {
- return err
- }
- digest := reference
- if !isDigest(digest) {
- dgt, exist, err := client.ManifestExist(reference)
- if err != nil {
- return err
- }
- if !exist {
- log.Debugf("the manifest of %s:%s doesn't exist", repository, reference)
- return nil
- }
- digest = dgt
- }
- return client.DeleteManifest(digest)
-}
-
-// BlobExist ...
-func (d *DefaultImageRegistry) BlobExist(repository, digest string) (bool, error) {
- client, err := d.getClient(repository)
- if err != nil {
- return false, err
- }
- return client.BlobExist(digest)
-}
-
-// PullBlob ...
-func (d *DefaultImageRegistry) PullBlob(repository, digest string) (int64, io.ReadCloser, error) {
- client, err := d.getClient(repository)
- if err != nil {
- return 0, nil, err
- }
- return client.PullBlob(digest)
-}
-
-// PushBlob ...
-func (d *DefaultImageRegistry) PushBlob(repository, digest string, size int64, blob io.Reader) error {
- client, err := d.getClient(repository)
- if err != nil {
- return err
- }
- return client.PushBlob(digest, size, blob)
-}
-
-func isDigest(str string) bool {
- return strings.Contains(str, ":")
-}
-
-// ListTag ...
-func (d *DefaultImageRegistry) ListTag(repository string) ([]string, error) {
- client, err := d.getClient(repository)
- if err != nil {
- return []string{}, err
- }
- return client.ListTag()
-}
diff --git a/src/replication/adapter/native/adapter.go b/src/replication/adapter/native/adapter.go
index 79a3212bd..887448ed7 100644
--- a/src/replication/adapter/native/adapter.go
+++ b/src/replication/adapter/native/adapter.go
@@ -15,14 +15,28 @@
package native
import (
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+
+ "github.com/docker/distribution"
+ "github.com/docker/distribution/manifest/schema1"
+ "github.com/goharbor/harbor/src/common/http/modifier"
+ common_http_auth "github.com/goharbor/harbor/src/common/http/modifier/auth"
+ "github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/common/utils/log"
+ registry_pkg "github.com/goharbor/harbor/src/common/utils/registry"
+ "github.com/goharbor/harbor/src/common/utils/registry/auth"
adp "github.com/goharbor/harbor/src/replication/adapter"
"github.com/goharbor/harbor/src/replication/model"
+ "github.com/goharbor/harbor/src/replication/util"
)
func init() {
if err := adp.RegisterFactory(model.RegistryTypeDockerRegistry, func(registry *model.Registry) (adp.Adapter, error) {
- return newAdapter(registry)
+ return NewAdapter(registry)
}); err != nil {
log.Errorf("failed to register factory for %s: %v", model.RegistryTypeDockerRegistry, err)
return
@@ -30,25 +44,65 @@ func init() {
log.Infof("the factory for adapter %s registered", model.RegistryTypeDockerRegistry)
}
-func newAdapter(registry *model.Registry) (*native, error) {
- reg, err := adp.NewDefaultImageRegistry(registry)
+var _ adp.Adapter = &Adapter{}
+
+// Adapter implements an adapter for Docker registry. It can be used for all registries
+// that implement the registry V2 API.
+type Adapter struct {
+ sync.RWMutex
+ *registry_pkg.Registry
+ registry *model.Registry
+ client *http.Client
+ clients map[string]*registry_pkg.Repository // client for repositories
+}
+
+// NewAdapter returns an instance of the Adapter
+func NewAdapter(registry *model.Registry) (*Adapter, error) {
+ var cred modifier.Modifier
+ if registry.Credential != nil && len(registry.Credential.AccessSecret) != 0 {
+ if registry.Credential.Type == model.CredentialTypeSecret {
+ cred = common_http_auth.NewSecretAuthorizer(registry.Credential.AccessSecret)
+ } else {
+ cred = auth.NewBasicAuthCredential(
+ registry.Credential.AccessKey,
+ registry.Credential.AccessSecret)
+ }
+ }
+ authorizer := auth.NewStandardTokenAuthorizer(&http.Client{
+ Transport: util.GetHTTPTransport(registry.Insecure),
+ }, cred, registry.TokenServiceURL)
+
+ return NewAdapterWithCustomizedAuthorizer(registry, authorizer)
+}
+
+// NewAdapterWithCustomizedAuthorizer returns an instance of the Adapter with the customized authorizer
+func NewAdapterWithCustomizedAuthorizer(registry *model.Registry, authorizer modifier.Modifier) (*Adapter, error) {
+ transport := util.GetHTTPTransport(registry.Insecure)
+ modifiers := []modifier.Modifier{
+ &auth.UserAgentModifier{
+ UserAgent: adp.UserAgentReplication,
+ },
+ }
+ if authorizer != nil {
+ modifiers = append(modifiers, authorizer)
+ }
+ client := &http.Client{
+ Transport: registry_pkg.NewTransport(transport, modifiers...),
+ }
+ reg, err := registry_pkg.NewRegistry(registry.URL, client)
if err != nil {
return nil, err
}
- return &native{
- registry: registry,
- DefaultImageRegistry: reg,
+ return &Adapter{
+ Registry: reg,
+ registry: registry,
+ client: client,
+ clients: map[string]*registry_pkg.Repository{},
}, nil
}
-type native struct {
- *adp.DefaultImageRegistry
- registry *model.Registry
-}
-
-var _ adp.Adapter = native{}
-
-func (native) Info() (info *model.RegistryInfo, err error) {
+// Info returns the basic information about the adapter
+func (a *Adapter) Info() (info *model.RegistryInfo, err error) {
return &model.RegistryInfo{
Type: model.RegistryTypeDockerRegistry,
SupportedResourceTypes: []model.ResourceType{
@@ -71,5 +125,271 @@ func (native) Info() (info *model.RegistryInfo, err error) {
}, nil
}
-// PrepareForPush nothing need to do.
-func (native) PrepareForPush([]*model.Resource) error { return nil }
+// PrepareForPush does nothing
+func (a *Adapter) PrepareForPush([]*model.Resource) error {
+ return nil
+}
+
+// HealthCheck checks health status of a registry
+func (a *Adapter) HealthCheck() (model.HealthStatus, error) {
+ var err error
+ if a.registry.Credential == nil ||
+ (len(a.registry.Credential.AccessKey) == 0 && len(a.registry.Credential.AccessSecret) == 0) {
+ err = a.PingSimple()
+ } else {
+ err = a.Ping()
+ }
+ if err != nil {
+ log.Errorf("failed to ping registry %s: %v", a.registry.URL, err)
+ return model.Unhealthy, nil
+ }
+ return model.Healthy, nil
+}
+
+// FetchImages ...
+func (a *Adapter) FetchImages(filters []*model.Filter) ([]*model.Resource, error) {
+ repositories, err := a.getRepositories(filters)
+ if err != nil {
+ return nil, err
+ }
+ if len(repositories) == 0 {
+ return nil, nil
+ }
+ for _, filter := range filters {
+ if err = filter.DoFilter(&repositories); err != nil {
+ return nil, err
+ }
+ }
+
+ var rawResources = make([]*model.Resource, len(repositories))
+ runner := utils.NewLimitedConcurrentRunner(adp.MaxConcurrency)
+ defer runner.Cancel()
+
+ for i, r := range repositories {
+ index := i
+ repo := r
+ runner.AddTask(func() error {
+ vTags, err := a.getVTags(repo.Name)
+ if err != nil {
+ return fmt.Errorf("List tags for repo '%s' error: %v", repo.Name, err)
+ }
+ if len(vTags) == 0 {
+ return nil
+ }
+ for _, filter := range filters {
+ if err = filter.DoFilter(&vTags); err != nil {
+ return fmt.Errorf("Filter tags %v error: %v", vTags, err)
+ }
+ }
+ if len(vTags) == 0 {
+ return nil
+ }
+ tags := []string{}
+ for _, vTag := range vTags {
+ tags = append(tags, vTag.Name)
+ }
+ rawResources[index] = &model.Resource{
+ Type: model.ResourceTypeImage,
+ Registry: a.registry,
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{
+ Name: repo.Name,
+ },
+ Vtags: tags,
+ },
+ }
+
+ return nil
+ })
+ }
+ runner.Wait()
+
+ if runner.IsCancelled() {
+ return nil, fmt.Errorf("FetchImages error when collecting tags for repos")
+ }
+
+ var resources []*model.Resource
+ for _, r := range rawResources {
+ if r != nil {
+ resources = append(resources, r)
+ }
+ }
+
+ return resources, nil
+}
+
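
FetchImages above fans the per-repository tag listing out through utils.NewLimitedConcurrentRunner and writes each result into a fixed slot of a pre-sized slice, so no lock is needed around the shared results. A stripped-down sketch of the same pattern using only the standard library (Harbor's runner helper is assumed, not shown here):

package main

import (
	"fmt"
	"sync"
)

func main() {
	repos := []string{"test/a1", "test/b2", "test/c3/3level"}
	results := make([]string, len(repos)) // index-addressed: no mutex needed

	sem := make(chan struct{}, 2) // bounds concurrency, like adp.MaxConcurrency
	var wg sync.WaitGroup
	for i, r := range repos {
		i, r := i, r // capture loop variables, as the hunk does with index/repo
		wg.Add(1)
		sem <- struct{}{}
		go func() {
			defer wg.Done()
			defer func() { <-sem }()
			results[i] = "tags-of-" + r // stands in for a.getVTags(repo.Name)
		}()
	}
	wg.Wait()
	fmt.Println(results)
}
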
+func (a *Adapter) getRepositories(filters []*model.Filter) ([]*adp.Repository, error) {
+ pattern := ""
+ for _, filter := range filters {
+ if filter.Type == model.FilterTypeName {
+ pattern = filter.Value.(string)
+ break
+ }
+ }
+ var repositories []string
+ var err error
+ // if the pattern of the repository name filter is a specific repository name, just return
+ // the parsed repositories; their existence is checked later when filtering the tags
+ if paths, ok := util.IsSpecificPath(pattern); ok {
+ repositories = paths
+ } else {
+ // search repositories from catalog API
+ repositories, err = a.Catalog()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ result := []*adp.Repository{}
+ for _, repository := range repositories {
+ result = append(result, &adp.Repository{
+ ResourceType: string(model.ResourceTypeImage),
+ Name: repository,
+ })
+ }
+ return result, nil
+}
+
+func (a *Adapter) getVTags(repository string) ([]*adp.VTag, error) {
+ tags, err := a.ListTag(repository)
+ if err != nil {
+ return nil, err
+ }
+ var result []*adp.VTag
+ for _, tag := range tags {
+ result = append(result, &adp.VTag{
+ ResourceType: string(model.ResourceTypeImage),
+ Name: tag,
+ })
+ }
+ return result, nil
+}
+
+// ManifestExist ...
+func (a *Adapter) ManifestExist(repository, reference string) (bool, string, error) {
+ client, err := a.getClient(repository)
+ if err != nil {
+ return false, "", err
+ }
+ digest, exist, err := client.ManifestExist(reference)
+ return exist, digest, err
+}
+
+// PullManifest ...
+func (a *Adapter) PullManifest(repository, reference string, accepttedMediaTypes []string) (distribution.Manifest, string, error) {
+ client, err := a.getClient(repository)
+ if err != nil {
+ return nil, "", err
+ }
+ digest, mediaType, payload, err := client.PullManifest(reference, accepttedMediaTypes)
+ if err != nil {
+ return nil, "", err
+ }
+ if strings.Contains(mediaType, "application/json") {
+ mediaType = schema1.MediaTypeManifest
+ }
+ manifest, _, err := registry_pkg.UnMarshal(mediaType, payload)
+ if err != nil {
+ return nil, "", err
+ }
+ return manifest, digest, nil
+}
+
+// PushManifest ...
+func (a *Adapter) PushManifest(repository, reference, mediaType string, payload []byte) error {
+ client, err := a.getClient(repository)
+ if err != nil {
+ return err
+ }
+ _, err = client.PushManifest(reference, mediaType, payload)
+ return err
+}
+
+// DeleteManifest ...
+func (a *Adapter) DeleteManifest(repository, reference string) error {
+ client, err := a.getClient(repository)
+ if err != nil {
+ return err
+ }
+ digest := reference
+ if !isDigest(digest) {
+ dgt, exist, err := client.ManifestExist(reference)
+ if err != nil {
+ return err
+ }
+ if !exist {
+ log.Debugf("the manifest of %s:%s doesn't exist", repository, reference)
+ return nil
+ }
+ digest = dgt
+ }
+ return client.DeleteManifest(digest)
+}
+
+// BlobExist ...
+func (a *Adapter) BlobExist(repository, digest string) (bool, error) {
+ client, err := a.getClient(repository)
+ if err != nil {
+ return false, err
+ }
+ return client.BlobExist(digest)
+}
+
+// PullBlob ...
+func (a *Adapter) PullBlob(repository, digest string) (int64, io.ReadCloser, error) {
+ client, err := a.getClient(repository)
+ if err != nil {
+ return 0, nil, err
+ }
+ return client.PullBlob(digest)
+}
+
+// PushBlob ...
+func (a *Adapter) PushBlob(repository, digest string, size int64, blob io.Reader) error {
+ client, err := a.getClient(repository)
+ if err != nil {
+ return err
+ }
+ return client.PushBlob(digest, size, blob)
+}
+
+func isDigest(str string) bool {
+ return strings.Contains(str, ":")
+}
+
+// ListTag ...
+func (a *Adapter) ListTag(repository string) ([]string, error) {
+ client, err := a.getClient(repository)
+ if err != nil {
+ return []string{}, err
+ }
+ return client.ListTag()
+}
+
+func (a *Adapter) getClient(repository string) (*registry_pkg.Repository, error) {
+ a.RLock()
+ client, exist := a.clients[repository]
+ a.RUnlock()
+ if exist {
+ return client, nil
+ }
+
+ return a.create(repository)
+}
+
+func (a *Adapter) create(repository string) (*registry_pkg.Repository, error) {
+ a.Lock()
+ defer a.Unlock()
+ // double check
+ client, exist := a.clients[repository]
+ if exist {
+ return client, nil
+ }
+
+ client, err := registry_pkg.NewRepository(repository, a.registry.URL, a.client)
+ if err != nil {
+ return nil, err
+ }
+ a.clients[repository] = client
+ return client, nil
+}
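
getClient and create above form a classic double-checked locking pair over the embedded RWMutex: a cheap read-locked lookup first, then a write-locked re-check before inserting, so two goroutines racing on the same repository cannot both allocate a client. The idiom in isolation:

package main

import (
	"fmt"
	"sync"
)

type cache struct {
	sync.RWMutex
	m map[string]int
}

func (c *cache) get(key string) int {
	c.RLock()
	v, ok := c.m[key]
	c.RUnlock()
	if ok {
		return v
	}
	c.Lock()
	defer c.Unlock()
	// double check: another goroutine may have created the entry meanwhile
	if v, ok := c.m[key]; ok {
		return v
	}
	v = len(c.m) + 1 // stands in for registry_pkg.NewRepository(...)
	c.m[key] = v
	return v
}

func main() {
	c := &cache{m: map[string]int{}}
	fmt.Println(c.get("test/a1"), c.get("test/a1")) // same value twice
}
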
diff --git a/src/replication/adapter/native/adapter_test.go b/src/replication/adapter/native/adapter_test.go
index 0c6ff74ff..27b0fe4f0 100644
--- a/src/replication/adapter/native/adapter_test.go
+++ b/src/replication/adapter/native/adapter_test.go
@@ -15,11 +15,15 @@
package native
import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
"testing"
- adp "github.com/goharbor/harbor/src/replication/adapter"
+ "github.com/goharbor/harbor/src/common/utils/test"
"github.com/goharbor/harbor/src/replication/model"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func Test_newAdapter(t *testing.T) {
@@ -33,7 +37,7 @@ func Test_newAdapter(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, err := newAdapter(tt.registry)
+ got, err := NewAdapter(tt.registry)
if tt.wantErr {
assert.NotNil(t, err)
assert.Nil(t, got)
@@ -47,14 +51,11 @@ func Test_newAdapter(t *testing.T) {
func Test_native_Info(t *testing.T) {
var registry = &model.Registry{URL: "abc"}
- var reg, _ = adp.NewDefaultImageRegistry(registry)
- var adapter = native{
- DefaultImageRegistry: reg,
- registry: registry,
- }
+ adapter, err := NewAdapter(registry)
+ require.Nil(t, err)
assert.NotNil(t, adapter)
- var info, err = adapter.Info()
+ info, err := adapter.Info()
assert.Nil(t, err)
assert.NotNil(t, info)
assert.Equal(t, model.RegistryTypeDockerRegistry, info.Type)
@@ -66,13 +67,279 @@ func Test_native_Info(t *testing.T) {
func Test_native_PrepareForPush(t *testing.T) {
var registry = &model.Registry{URL: "abc"}
- var reg, _ = adp.NewDefaultImageRegistry(registry)
- var adapter = native{
- DefaultImageRegistry: reg,
- registry: registry,
- }
+ adapter, err := NewAdapter(registry)
+ require.Nil(t, err)
assert.NotNil(t, adapter)
- var err = adapter.PrepareForPush(nil)
+ err = adapter.PrepareForPush(nil)
assert.Nil(t, err)
}
+
+func mockNativeRegistry() (mock *httptest.Server) {
+ return test.NewServer(
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/v2/_catalog",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte(`{"repositories":["test/a1","test/b2","test/c3/3level"]}`))
+ },
+ },
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/v2/test/a1/tags/list",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte(`{"name":"test/a1","tags":["tag11"]}`))
+ },
+ },
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/v2/test/b2/tags/list",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte(`{"name":"test/b2","tags":["tag11","tag2","tag13"]}`))
+ },
+ },
+ &test.RequestHandlerMapping{
+ Method: http.MethodGet,
+ Pattern: "/v2/test/c3/3level/tags/list",
+ Handler: func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte(`{"name":"test/c3/3level","tags":["tag4"]}`))
+ },
+ },
+ )
+}
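
The mock above stands up a fake registry that answers just the catalog and tag-list endpoints the adapter touches. For readers unfamiliar with Harbor's test helper, the plain net/http/httptest equivalent looks like this (test.NewServer is assumed to wrap something similar):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/v2/_catalog", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`{"repositories":["test/a1"]}`))
	})
	srv := httptest.NewServer(mux)
	defer srv.Close()

	resp, _ := http.Get(srv.URL + "/v2/_catalog")
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // {"repositories":["test/a1"]}
}
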
+func Test_native_FetchImages(t *testing.T) {
+ var mock = mockNativeRegistry()
+ defer mock.Close()
+ fmt.Println("mockNativeRegistry URL: ", mock.URL)
+
+ var registry = &model.Registry{
+ Type: model.RegistryTypeDockerRegistry,
+ URL: mock.URL,
+ Insecure: true,
+ }
+ adapter, err := NewAdapter(registry)
+ assert.Nil(t, err)
+ assert.NotNil(t, adapter)
+
+ tests := []struct {
+ name string
+ filters []*model.Filter
+ want []*model.Resource
+ wantErr bool
+ }{
+ {
+ name: "repository not exist",
+ filters: []*model.Filter{
+ {
+ Type: model.FilterTypeName,
+ Value: "b1",
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "tag not exist",
+ filters: []*model.Filter{
+ {
+ Type: model.FilterTypeTag,
+ Value: "this_tag_not_exist_in_the_mock_server",
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "no filters",
+ filters: []*model.Filter{},
+ want: []*model.Resource{
+ {
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{Name: "test/a1"},
+ Vtags: []string{"tag11"},
+ },
+ },
+ {
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{Name: "test/b2"},
+ Vtags: []string{"tag11", "tag2", "tag13"},
+ },
+ },
+ {
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{Name: "test/c3/3level"},
+ Vtags: []string{"tag4"},
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "only special repository",
+ filters: []*model.Filter{
+ {
+ Type: model.FilterTypeName,
+ Value: "test/a1",
+ },
+ },
+ want: []*model.Resource{
+ {
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{Name: "test/a1"},
+ Vtags: []string{"tag11"},
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "only special tag",
+ filters: []*model.Filter{
+ {
+ Type: model.FilterTypeTag,
+ Value: "tag11",
+ },
+ },
+ want: []*model.Resource{
+ {
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{Name: "test/a1"},
+ Vtags: []string{"tag11"},
+ },
+ },
+ {
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{Name: "test/b2"},
+ Vtags: []string{"tag11"},
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "special repository and special tag",
+ filters: []*model.Filter{
+ {
+ Type: model.FilterTypeName,
+ Value: "test/b2",
+ },
+ {
+ Type: model.FilterTypeTag,
+ Value: "tag2",
+ },
+ },
+ want: []*model.Resource{
+ {
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{Name: "test/b2"},
+ Vtags: []string{"tag2"},
+ },
+ },
+ },
+
+ wantErr: false,
+ },
+ {
+ name: "only wildcard repository",
+ filters: []*model.Filter{
+ {
+ Type: model.FilterTypeName,
+ Value: "test/b*",
+ },
+ },
+ want: []*model.Resource{
+ {
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{Name: "test/b2"},
+ Vtags: []string{"tag11", "tag2", "tag13"},
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "only wildcard tag",
+ filters: []*model.Filter{
+ {
+ Type: model.FilterTypeTag,
+ Value: "tag1*",
+ },
+ },
+ want: []*model.Resource{
+ {
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{Name: "test/a1"},
+ Vtags: []string{"tag11"},
+ },
+ },
+ {
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{Name: "test/b2"},
+ Vtags: []string{"tag11", "tag13"},
+ },
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "wildcard repository and wildcard tag",
+ filters: []*model.Filter{
+ {
+ Type: model.FilterTypeName,
+ Value: "test/b*",
+ },
+ {
+ Type: model.FilterTypeTag,
+ Value: "tag1*",
+ },
+ },
+ want: []*model.Resource{
+ {
+ Metadata: &model.ResourceMetadata{
+ Repository: &model.Repository{Name: "test/b2"},
+ Vtags: []string{"tag11", "tag13"},
+ },
+ },
+ },
+ wantErr: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var resources, err = adapter.FetchImages(tt.filters)
+ if tt.wantErr {
+ require.Len(t, resources, 0)
+ require.NotNil(t, err)
+ } else {
+ require.Equal(t, len(tt.want), len(resources))
+ for i, resource := range resources {
+ require.NotNil(t, resource.Metadata)
+ assert.Equal(t, tt.want[i].Metadata.Repository, resource.Metadata.Repository)
+ assert.Equal(t, tt.want[i].Metadata.Vtags, resource.Metadata.Vtags)
+ }
+ }
+ })
+ }
+}
+
+func TestIsDigest(t *testing.T) {
+ cases := []struct {
+ str string
+ isDigest bool
+ }{
+ {
+ str: "",
+ isDigest: false,
+ },
+ {
+ str: "latest",
+ isDigest: false,
+ },
+ {
+ str: "sha256:fea8895f450959fa676bcc1df0611ea93823a735a01205fd8622846041d0c7cf",
+ isDigest: true,
+ },
+ }
+ for _, c := range cases {
+ assert.Equal(t, c.isDigest, isDigest(c.str))
+ }
+}
diff --git a/src/replication/adapter/native/image_registry.go b/src/replication/adapter/native/image_registry.go
deleted file mode 100644
index d279b6ede..000000000
--- a/src/replication/adapter/native/image_registry.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright Project Harbor Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package native
-
-import (
- adp "github.com/goharbor/harbor/src/replication/adapter"
- "github.com/goharbor/harbor/src/replication/model"
- "github.com/goharbor/harbor/src/replication/util"
-)
-
-var _ adp.ImageRegistry = native{}
-
-func (n native) FetchImages(filters []*model.Filter) ([]*model.Resource, error) {
- nameFilterPattern := ""
- tagFilterPattern := ""
- for _, filter := range filters {
- switch filter.Type {
- case model.FilterTypeName:
- nameFilterPattern = filter.Value.(string)
- case model.FilterTypeTag:
- tagFilterPattern = filter.Value.(string)
- }
- }
- repositories, err := n.filterRepositories(nameFilterPattern)
- if err != nil {
- return nil, err
- }
-
- var resources []*model.Resource
- for _, repository := range repositories {
- tags, err := n.filterTags(repository, tagFilterPattern)
- if err != nil {
- return nil, err
- }
- if len(tags) == 0 {
- continue
- }
- resources = append(resources, &model.Resource{
- Type: model.ResourceTypeImage,
- Registry: n.registry,
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{
- Name: repository,
- },
- Vtags: tags,
- },
- })
- }
-
- return resources, nil
-}
-
-func (n native) filterRepositories(pattern string) ([]string, error) {
- // if the pattern is a specific repository name, just returns the parsed repositories
- // and will check the existence later when filtering the tags
- if repositories, ok := util.IsSpecificPath(pattern); ok {
- return repositories, nil
- }
- // search repositories from catalog api
- repositories, err := n.Catalog()
- if err != nil {
- return nil, err
- }
- // if the pattern is null, just return the result of catalog API
- if len(pattern) == 0 {
- return repositories, nil
- }
- result := []string{}
- for _, repository := range repositories {
- match, err := util.Match(pattern, repository)
- if err != nil {
- return nil, err
- }
- if match {
- result = append(result, repository)
- }
- }
- return result, nil
-}
-
-func (n native) filterTags(repository, pattern string) ([]string, error) {
- tags, err := n.ListTag(repository)
- if err != nil {
- return nil, err
- }
- if len(pattern) == 0 {
- return tags, nil
- }
-
- var result []string
- for _, tag := range tags {
- match, err := util.Match(pattern, tag)
- if err != nil {
- return nil, err
- }
- if match {
- result = append(result, tag)
- }
- }
- return result, nil
-}
diff --git a/src/replication/adapter/native/image_registry_test.go b/src/replication/adapter/native/image_registry_test.go
deleted file mode 100644
index 841004a20..000000000
--- a/src/replication/adapter/native/image_registry_test.go
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright Project Harbor Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package native
-
-import (
- "fmt"
- "net/http"
- "net/http/httptest"
- "testing"
-
- "github.com/goharbor/harbor/src/common/utils/test"
- adp "github.com/goharbor/harbor/src/replication/adapter"
- "github.com/goharbor/harbor/src/replication/model"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func mockNativeRegistry() (mock *httptest.Server) {
- return test.NewServer(
- &test.RequestHandlerMapping{
- Method: http.MethodGet,
- Pattern: "/v2/_catalog",
- Handler: func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte(`{"repositories":["test/a1","test/b2","test/c3/3level"]}`))
- },
- },
- &test.RequestHandlerMapping{
- Method: http.MethodGet,
- Pattern: "/v2/test/a1/tags/list",
- Handler: func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte(`{"name":"test/a1","tags":["tag11"]}`))
- },
- },
- &test.RequestHandlerMapping{
- Method: http.MethodGet,
- Pattern: "/v2/test/b2/tags/list",
- Handler: func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte(`{"name":"test/b2","tags":["tag11","tag2","tag13"]}`))
- },
- },
- &test.RequestHandlerMapping{
- Method: http.MethodGet,
- Pattern: "/v2/test/c3/3level/tags/list",
- Handler: func(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte(`{"name":"test/c3/3level","tags":["tag4"]}`))
- },
- },
- )
-}
-func Test_native_FetchImages(t *testing.T) {
- var mock = mockNativeRegistry()
- defer mock.Close()
- fmt.Println("mockNativeRegistry URL: ", mock.URL)
-
- var registry = &model.Registry{
- Type: model.RegistryTypeDockerRegistry,
- URL: mock.URL,
- Insecure: true,
- }
- var reg, err = adp.NewDefaultImageRegistry(registry)
- assert.NotNil(t, reg)
- assert.Nil(t, err)
- var adapter = native{
- DefaultImageRegistry: reg,
- registry: registry,
- }
- assert.NotNil(t, adapter)
-
- tests := []struct {
- name string
- filters []*model.Filter
- want []*model.Resource
- wantErr bool
- }{
- {
- name: "repository not exist",
- filters: []*model.Filter{
- {
- Type: model.FilterTypeName,
- Value: "b1",
- },
- },
- wantErr: false,
- },
- {
- name: "tag not exist",
- filters: []*model.Filter{
- {
- Type: model.FilterTypeTag,
- Value: "this_tag_not_exist_in_the_mock_server",
- },
- },
- wantErr: false,
- },
- {
- name: "no filters",
- filters: []*model.Filter{},
- want: []*model.Resource{
- {
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{Name: "test/a1"},
- Vtags: []string{"tag11"},
- },
- },
- {
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{Name: "test/b2"},
- Vtags: []string{"tag11", "tag2", "tag13"},
- },
- },
- {
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{Name: "test/c3/3level"},
- Vtags: []string{"tag4"},
- },
- },
- },
- wantErr: false,
- },
- {
- name: "only special repository",
- filters: []*model.Filter{
- {
- Type: model.FilterTypeName,
- Value: "test/a1",
- },
- },
- want: []*model.Resource{
- {
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{Name: "test/a1"},
- Vtags: []string{"tag11"},
- },
- },
- },
- wantErr: false,
- },
- {
- name: "only special tag",
- filters: []*model.Filter{
- {
- Type: model.FilterTypeTag,
- Value: "tag11",
- },
- },
- want: []*model.Resource{
- {
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{Name: "test/a1"},
- Vtags: []string{"tag11"},
- },
- },
- {
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{Name: "test/b2"},
- Vtags: []string{"tag11"},
- },
- },
- },
- wantErr: false,
- },
- {
- name: "special repository and special tag",
- filters: []*model.Filter{
- {
- Type: model.FilterTypeName,
- Value: "test/b2",
- },
- {
- Type: model.FilterTypeTag,
- Value: "tag2",
- },
- },
- want: []*model.Resource{
- {
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{Name: "test/b2"},
- Vtags: []string{"tag2"},
- },
- },
- },
-
- wantErr: false,
- },
- {
- name: "only wildcard repository",
- filters: []*model.Filter{
- {
- Type: model.FilterTypeName,
- Value: "test/b*",
- },
- },
- want: []*model.Resource{
- {
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{Name: "test/b2"},
- Vtags: []string{"tag11", "tag2", "tag13"},
- },
- },
- },
- wantErr: false,
- },
- {
- name: "only wildcard tag",
- filters: []*model.Filter{
- {
- Type: model.FilterTypeTag,
- Value: "tag1*",
- },
- },
- want: []*model.Resource{
- {
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{Name: "test/a1"},
- Vtags: []string{"tag11"},
- },
- },
- {
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{Name: "test/b2"},
- Vtags: []string{"tag11", "tag13"},
- },
- },
- },
- wantErr: false,
- },
- {
- name: "wildcard repository and wildcard tag",
- filters: []*model.Filter{
- {
- Type: model.FilterTypeName,
- Value: "test/b*",
- },
- {
- Type: model.FilterTypeTag,
- Value: "tag1*",
- },
- },
- want: []*model.Resource{
- {
- Metadata: &model.ResourceMetadata{
- Repository: &model.Repository{Name: "test/b2"},
- Vtags: []string{"tag11", "tag13"},
- },
- },
- },
- wantErr: false,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- var resources, err = adapter.FetchImages(tt.filters)
- if tt.wantErr {
- require.Len(t, resources, 0)
- require.NotNil(t, err)
- } else {
- require.Equal(t, len(tt.want), len(resources))
- for i, resource := range resources {
- require.NotNil(t, resource.Metadata)
- assert.Equal(t, tt.want[i].Metadata.Repository, resource.Metadata.Repository)
- assert.Equal(t, tt.want[i].Metadata.Vtags, resource.Metadata.Vtags)
- }
- }
- })
- }
-}
diff --git a/src/replication/dao/dao_test.go b/src/replication/dao/dao_test.go
index b3cf65e73..eb5e9bbb9 100644
--- a/src/replication/dao/dao_test.go
+++ b/src/replication/dao/dao_test.go
@@ -21,22 +21,7 @@ import (
"github.com/goharbor/harbor/src/common/dao"
)
-// TODO clean up the file
func TestMain(m *testing.M) {
dao.PrepareTestForPostgresSQL()
-
- var code = m.Run()
-
- // clear test database
- var clearSqls = []string{
- `DROP TABLE "access", "access_log", "admin_job", "alembic_version", "clair_vuln_timestamp",
- "harbor_label", "harbor_resource_label", "harbor_user", "img_scan_job", "img_scan_overview",
- "job_log", "project", "project_member", "project_metadata", "properties", "registry",
- "replication_policy", "repository", "robot", "role", "schema_migrations", "user_group",
- "replication_execution", "replication_task", "replication_schedule_job", "oidc_user";`,
- `DROP FUNCTION "update_update_time_at_column"();`,
- }
- dao.PrepareTestData(clearSqls, nil)
-
- os.Exit(code)
+ os.Exit(m.Run())
}
diff --git a/src/replication/dao/execution.go b/src/replication/dao/execution.go
index 030b57ef0..1fbd54ba2 100644
--- a/src/replication/dao/execution.go
+++ b/src/replication/dao/execution.go
@@ -322,25 +322,39 @@ func UpdateTask(task *models.Task, props ...string) (int64, error) {
return o.Update(task, props...)
}
-// UpdateTaskStatus ...
+// UpdateTaskStatus updates the status of a task.
+// The implementation uses raw SQL rather than QuerySetter.Filter... as QuerySetter
+// will generate SQL like:
+// `UPDATE "replication_task" SET "end_time" = $1, "status" = $2
+// WHERE "id" IN ( SELECT T0."id" FROM "replication_task" T0 WHERE T0."id" = $3
+// AND T0."status" IN ($4, $5, $6))]`
+// which is not a single SQL statement and can cause issues when running concurrently
func UpdateTaskStatus(id int64, status string, statusCondition ...string) (int64, error) {
- qs := dao.GetOrmer().QueryTable(&models.Task{}).
- Filter("id", id)
- if len(statusCondition) > 0 {
- qs = qs.Filter("status", statusCondition[0])
- }
- params := orm.Params{
- "status": status,
- }
+ params := []interface{}{}
+ sql := `update replication_task set status = ? `
+ params = append(params, status)
+
if taskFinished(status) {
// should update endTime
- params["end_time"] = time.Now()
+ sql += `, end_time = ? `
+ params = append(params, time.Now())
}
- n, err := qs.Update(params)
+
+ sql += `where id = ? `
+ params = append(params, id)
+ if len(statusCondition) > 0 {
+ sql += fmt.Sprintf(`and status in (%s) `, dao.ParamPlaceholderForIn(len(statusCondition)))
+ params = append(params, statusCondition)
+ }
+
+ result, err := dao.GetOrmer().Raw(sql, params...).Exec()
if err != nil {
return 0, err
}
- log.Debugf("update task status %d: -> %s", id, status)
+ n, _ := result.RowsAffected()
+ if n > 0 {
+ log.Debugf("update task status %d: -> %s", id, status)
+ }
return n, err
}
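
The rewrite keeps the status condition and the update in one flat statement, so the check is evaluated atomically by the database rather than via a sub-select. Assuming dao.ParamPlaceholderForIn expands to n comma-separated placeholders, the assembled statement looks like the sketch below:

package main

import (
	"fmt"
	"strings"
	"time"
)

// paramPlaceholderForIn mimics the assumed behavior of dao.ParamPlaceholderForIn.
func paramPlaceholderForIn(n int) string {
	return strings.TrimSuffix(strings.Repeat("?,", n), ",")
}

func main() {
	status, id := "Stopped", int64(7)
	conditions := []string{"InProgress", "Pending"}

	params := []interface{}{status}
	sql := `update replication_task set status = ? `
	sql += `, end_time = ? ` // appended only when the new status is terminal
	params = append(params, time.Now())
	sql += `where id = ? `
	params = append(params, id)
	sql += fmt.Sprintf(`and status in (%s) `, paramPlaceholderForIn(len(conditions)))

	fmt.Println(sql)
	// update replication_task set status = ? , end_time = ? where id = ? and status in (?,?)
}
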
diff --git a/src/replication/filter/filter.go b/src/replication/filter/filter.go
new file mode 100644
index 000000000..e28ffe61b
--- /dev/null
+++ b/src/replication/filter/filter.go
@@ -0,0 +1,253 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filter
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/goharbor/harbor/src/common/utils/log"
+ "github.com/goharbor/harbor/src/replication/util"
+)
+
+// const definitions
+const (
+ FilterableTypeRepository = "repository"
+ FilterableTypeVTag = "vtag"
+)
+
+// FilterableType specifies the type of the filterable
+type FilterableType string
+
+// Filterable defines the methods that a filterable object must implement
+type Filterable interface {
+ // return the type of the filterable object (repository or vtag)
+ GetFilterableType() FilterableType
+ // return the resource type of the filterable object (image, chart, ...)
+ GetResourceType() string
+ GetName() string
+ GetLabels() []string
+}
+
+// Filter defines the methods that a filter must implement
+type Filter interface {
+ // return whether the filter is applied to the specified Filterable
+ ApplyTo(Filterable) bool
+ Filter(...Filterable) ([]Filterable, error)
+}
+
+// NewResourceTypeFilter returns a Filter to filter candidates according to the resource type
+func NewResourceTypeFilter(resourceType string) Filter {
+ return &resourceTypeFilter{
+ resourceType: resourceType,
+ }
+}
+
+// NewRepositoryNameFilter returns a Filter to filter the repositories according to the name
+func NewRepositoryNameFilter(pattern string) Filter {
+ return &nameFilter{
+ filterableType: FilterableTypeRepository,
+ pattern: pattern,
+ }
+}
+
+// NewVTagNameFilter returns a Filter to filter the vtags according to the name
+func NewVTagNameFilter(pattern string) Filter {
+ return &nameFilter{
+ filterableType: FilterableTypeVTag,
+ pattern: pattern,
+ }
+}
+
+// NewVTagLabelFilter returns a Filter to filter vtags according to the label
+func NewVTagLabelFilter(labels []string) Filter {
+ return &labelFilter{
+ labels: labels,
+ }
+}
+
+type resourceTypeFilter struct {
+ resourceType string
+}
+
+func (r *resourceTypeFilter) ApplyTo(filterable Filterable) bool {
+ if filterable == nil {
+ return false
+ }
+ switch filterable.GetFilterableType() {
+ case FilterableTypeRepository, FilterableTypeVTag:
+ return true
+ default:
+ return false
+ }
+}
+
+func (r *resourceTypeFilter) Filter(filterables ...Filterable) ([]Filterable, error) {
+ result := []Filterable{}
+ for _, filterable := range filterables {
+ if filterable.GetResourceType() == r.resourceType {
+ result = append(result, filterable)
+ }
+ }
+ return result, nil
+}
+
+type nameFilter struct {
+ filterableType FilterableType
+ pattern string
+}
+
+func (n *nameFilter) ApplyTo(filterable Filterable) bool {
+ if filterable == nil {
+ return false
+ }
+ if filterable.GetFilterableType() == n.filterableType {
+ return true
+ }
+ return false
+}
+
+func (n *nameFilter) Filter(filterables ...Filterable) ([]Filterable, error) {
+ result := []Filterable{}
+ for _, filterable := range filterables {
+ name := filterable.GetName()
+ match, err := util.Match(n.pattern, name)
+ if err != nil {
+ return nil, err
+ }
+ if match {
+ log.Debugf("%q matches the pattern %q of name filter", name, n.pattern)
+ result = append(result, filterable)
+ continue
+ }
+ log.Debugf("%q doesn't match the pattern %q of name filter, skip", name, n.pattern)
+ }
+ return result, nil
+}
+
+type labelFilter struct {
+ labels []string
+}
+
+func (l *labelFilter) ApplyTo(filterable Filterable) bool {
+ if filterable == nil {
+ return false
+ }
+ if filterable.GetFilterableType() == FilterableTypeVTag {
+ return true
+ }
+ return false
+}
+
+func (l *labelFilter) Filter(filterables ...Filterable) ([]Filterable, error) {
+ // if no label is specified in the filter, just return the input filterable
+ // candidates as the result
+ if len(l.labels) == 0 {
+ return filterables, nil
+ }
+ result := []Filterable{}
+ for _, filterable := range filterables {
+ labels := map[string]struct{}{}
+ for _, label := range filterable.GetLabels() {
+ labels[label] = struct{}{}
+ }
+ match := true
+ for _, label := range l.labels {
+ if _, exist := labels[label]; !exist {
+ match = false
+ break
+ }
+ }
+ // add the filterable to the result list if it contains
+ // all labels defined for the filter
+ if match {
+ result = append(result, filterable)
+ }
+ }
+ return result, nil
+}
+
+// DoFilter is a utility function that helps filter filterables easily.
+// The parameter "filterables" must be a pointer to a slice
+// whose elements implement Filterable. After applying all the "filters"
+// to the "filterables", the result is put back into the variable
+// "filterables"
+func DoFilter(filterables interface{}, filters ...Filter) error {
+ if filterables == nil || len(filters) == 0 {
+ return nil
+ }
+
+ value := reflect.ValueOf(filterables)
+ // make sure the input is a pointer
+ if value.Kind() != reflect.Ptr {
+ return errors.New("the type of input should be pointer to a Filterable slice")
+ }
+
+ sliceValue := value.Elem()
+ // make sure the input is a pointer pointing to a slice
+ if sliceValue.Type().Kind() != reflect.Slice {
+ return errors.New("the type of input should be pointer to a Filterable slice")
+ }
+
+ filterableType := reflect.TypeOf((*Filterable)(nil)).Elem()
+ elemType := sliceValue.Type().Elem()
+ // make sure the input is a pointer pointing to a Filterable slice
+ if !elemType.Implements(filterableType) {
+ return errors.New("the type of input should be pointer to a Filterable slice")
+ }
+
+ // convert the input to Filterable slice
+ items := []Filterable{}
+ for i := 0; i < sliceValue.Len(); i++ {
+ items = append(items, sliceValue.Index(i).Interface().(Filterable))
+ }
+
+ // do filter
+ var err error
+ items, err = doFilter(items, filters...)
+ if err != nil {
+ return err
+ }
+
+ // convert back to the origin type
+ result := reflect.MakeSlice(reflect.SliceOf(elemType), 0, len(items))
+ for _, item := range items {
+ result = reflect.Append(result, reflect.ValueOf(item))
+ }
+ value.Elem().Set(result)
+
+ return nil
+}
+
+func doFilter(filterables []Filterable, filters ...Filter) ([]Filterable, error) {
+ var appliedTo, notAppliedTo []Filterable
+ var err error
+ for _, filter := range filters {
+ appliedTo, notAppliedTo = nil, nil
+ for _, filterable := range filterables {
+ if filter.ApplyTo(filterable) {
+ appliedTo = append(appliedTo, filterable)
+ } else {
+ notAppliedTo = append(notAppliedTo, filterable)
+ }
+ }
+ filterables, err = filter.Filter(appliedTo...)
+ if err != nil {
+ return nil, err
+ }
+ filterables = append(filterables, notAppliedTo...)
+ }
+ return filterables, nil
+}
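
DoFilter's reflection dance exists so callers can pass a pointer to their own typed slice (for example *[]*adp.VTag) and get it filtered in place without casting. A condensed illustration of the same round-trip, with generic stand-in types:

package main

import (
	"fmt"
	"reflect"
)

type named interface{ Name() string }

type item struct{ n string }

func (i *item) Name() string { return i.n }

// keepWithPrefix filters *[]T in place, where T implements named.
func keepWithPrefix(slicePtr interface{}, prefix byte) error {
	v := reflect.ValueOf(slicePtr)
	if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Slice {
		return fmt.Errorf("the input should be a pointer to a slice")
	}
	src := v.Elem()
	// filter into a new slice of the same concrete type, then write it back
	dst := reflect.MakeSlice(src.Type(), 0, src.Len())
	for i := 0; i < src.Len(); i++ {
		if src.Index(i).Interface().(named).Name()[0] == prefix {
			dst = reflect.Append(dst, src.Index(i))
		}
	}
	v.Elem().Set(dst)
	return nil
}

func main() {
	items := []*item{{"alpha"}, {"beta"}}
	if err := keepWithPrefix(&items, 'a'); err != nil {
		panic(err)
	}
	fmt.Println(len(items), items[0].Name()) // 1 alpha
}
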
diff --git a/src/replication/filter/filter_test.go b/src/replication/filter/filter_test.go
new file mode 100644
index 000000000..a066b6a7d
--- /dev/null
+++ b/src/replication/filter/filter_test.go
@@ -0,0 +1,170 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package filter
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type fakeFilterable struct {
+ filterableType FilterableType
+ resourceType string
+ name string
+ labels []string
+}
+
+func (f *fakeFilterable) GetFilterableType() FilterableType {
+ return f.filterableType
+}
+
+func (f *fakeFilterable) GetResourceType() string {
+ return f.resourceType
+}
+
+func (f *fakeFilterable) GetName() string {
+ return f.name
+}
+func (f *fakeFilterable) GetLabels() []string {
+ return f.labels
+}
+
+func TestFilterOfResourceTypeFilter(t *testing.T) {
+ filterable := &fakeFilterable{
+ filterableType: FilterableTypeRepository,
+ resourceType: "image",
+ name: "library/hello-world",
+ }
+
+ filter := NewResourceTypeFilter("image")
+ result, err := filter.Filter(filterable)
+ require.Nil(t, err)
+ if assert.Equal(t, 1, len(result)) {
+ assert.True(t, reflect.DeepEqual(filterable, result[0]))
+ }
+
+ filter = NewResourceTypeFilter("chart")
+ result, err = filter.Filter(filterable)
+ require.Nil(t, err)
+ assert.Equal(t, 0, len(result))
+}
+
+func TestApplyToOfResourceTypeFilter(t *testing.T) {
+ filterable := &fakeFilterable{
+ filterableType: FilterableTypeRepository,
+ }
+
+ filter := NewResourceTypeFilter("image")
+ assert.True(t, filter.ApplyTo(filterable))
+
+ filterable.filterableType = FilterableTypeVTag
+ assert.True(t, filter.ApplyTo(filterable))
+
+ filterable.filterableType = FilterableType("unknown")
+ assert.False(t, filter.ApplyTo(filterable))
+}
+
+func TestFilterOfNameFilter(t *testing.T) {
+ filterable := &fakeFilterable{
+ name: "foo",
+ }
+ // pass the filter
+ filter := &nameFilter{
+ pattern: "*",
+ }
+ result, err := filter.Filter(filterable)
+ require.Nil(t, err)
+ if assert.Equal(t, 1, len(result)) {
+ assert.True(t, reflect.DeepEqual(filterable, result[0].(*fakeFilterable)))
+ }
+
+ // cannot pass the filter
+ filter.pattern = "cannotpass"
+ result, err = filter.Filter(filterable)
+ require.Nil(t, err)
+ assert.Equal(t, 0, len(result))
+}
+
+func TestApplyToOfNameFilter(t *testing.T) {
+ filterable := &fakeFilterable{
+ filterableType: FilterableTypeRepository,
+ }
+
+ filter := &nameFilter{
+ filterableType: FilterableTypeRepository,
+ }
+ assert.True(t, filter.ApplyTo(filterable))
+
+ filterable.filterableType = FilterableTypeVTag
+ assert.False(t, filter.ApplyTo(filterable))
+}
+
+func TestFilterOfLabelFilter(t *testing.T) {
+ filterable := &fakeFilterable{
+ labels: []string{"production"},
+ }
+ // pass the filter
+ filter := &labelFilter{
+ labels: []string{"production"},
+ }
+ result, err := filter.Filter(filterable)
+ require.Nil(t, err)
+ if assert.Equal(t, 1, len(result)) {
+ assert.True(t, reflect.DeepEqual(filterable, result[0].(*fakeFilterable)))
+ }
+ // cannot pass the filter
+ filter.labels = []string{"production", "ci-pass"}
+ result, err = filter.Filter(filterable)
+ require.Nil(t, err)
+ assert.Equal(t, 0, len(result))
+}
+
+func TestApplyToOfLabelFilter(t *testing.T) {
+ filterable := &fakeFilterable{
+ filterableType: FilterableTypeRepository,
+ }
+
+ filter := labelFilter{}
+ assert.False(t, filter.ApplyTo(filterable))
+
+ filterable.filterableType = FilterableTypeVTag
+ assert.True(t, filter.ApplyTo(filterable))
+}
+
+func TestDoFilter(t *testing.T) {
+ tag1 := &fakeFilterable{
+ filterableType: FilterableTypeVTag,
+ name: "1.0",
+ labels: []string{"production"},
+ }
+ tag2 := &fakeFilterable{
+ filterableType: FilterableTypeVTag,
+ name: "latest",
+ labels: []string{"dev"},
+ }
+ filterables := []Filterable{tag1, tag2}
+ filters := []Filter{
+ NewVTagNameFilter("*"),
+ NewVTagLabelFilter([]string{"production"}),
+ }
+ err := DoFilter(&filterables, filters...)
+ require.Nil(t, err)
+ if assert.Equal(t, 1, len(filterables)) {
+ assert.True(t, reflect.DeepEqual(tag1, filterables[0]))
+ }
+}
diff --git a/src/replication/model/policy.go b/src/replication/model/policy.go
index 16b8715ed..21c89565f 100644
--- a/src/replication/model/policy.go
+++ b/src/replication/model/policy.go
@@ -18,6 +18,8 @@ import (
"fmt"
"time"
+ "github.com/goharbor/harbor/src/replication/filter"
+
"github.com/astaxie/beego/validation"
"github.com/goharbor/harbor/src/common/models"
"github.com/robfig/cron"
@@ -86,19 +88,33 @@ func (p *Policy) Valid(v *validation.Validation) {
// valid the filters
for _, filter := range p.Filters {
- value, ok := filter.Value.(string)
- if !ok {
- v.SetError("filters", "the type of filter value isn't string")
- break
- }
switch filter.Type {
- case FilterTypeResource:
- rt := ResourceType(value)
- if !(rt == ResourceTypeImage || rt == ResourceTypeChart) {
- v.SetError("filters", fmt.Sprintf("invalid resource filter: %s", value))
+ case FilterTypeResource, FilterTypeName, FilterTypeTag:
+ value, ok := filter.Value.(string)
+ if !ok {
+ v.SetError("filters", "the type of filter value isn't string")
break
}
- case FilterTypeName, FilterTypeTag, FilterTypeLabel:
+ if filter.Type == FilterTypeResource {
+ rt := ResourceType(value)
+ if !(rt == ResourceTypeImage || rt == ResourceTypeChart) {
+ v.SetError("filters", fmt.Sprintf("invalid resource filter: %s", value))
+ break
+ }
+ }
+ case FilterTypeLabel:
+ labels, ok := filter.Value.([]interface{})
+ if !ok {
+ v.SetError("filters", "the type of label filter value isn't string slice")
+ break
+ }
+ for _, label := range labels {
+ _, ok := label.(string)
+ if !ok {
+ v.SetError("filters", "the type of label filter value isn't string slice")
+ break
+ }
+ }
default:
v.SetError("filters", "invalid filter type")
break
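
The label branch checks for []interface{} rather than []string because a JSON array decoded into an interface{} field always arrives as []interface{}; a type assertion to []string would reject every payload. A quick demonstration:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var f struct {
		Type  string      `json:"type"`
		Value interface{} `json:"value"`
	}
	_ = json.Unmarshal([]byte(`{"type":"label","value":["prod","ci-pass"]}`), &f)
	_, isStrings := f.Value.([]string)
	_, isIfaces := f.Value.([]interface{})
	fmt.Println(isStrings, isIfaces) // false true
}
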
@@ -133,6 +149,32 @@ type Filter struct {
Value interface{} `json:"value"`
}
+// DoFilter filters the filterables.
+// The parameter "filterables" must be a pointer to a slice
+// whose elements implement Filterable. After applying the filter
+// to the "filterables", the result is put back into the variable
+// "filterables"
+func (f *Filter) DoFilter(filterables interface{}) error {
+ var ft filter.Filter
+ switch f.Type {
+ case FilterTypeName:
+ ft = filter.NewRepositoryNameFilter(f.Value.(string))
+ case FilterTypeTag:
+ ft = filter.NewVTagNameFilter(f.Value.(string))
+ case FilterTypeLabel:
+ labels, ok := f.Value.([]string)
+ if ok {
+ ft = filter.NewVTagLabelFilter(labels)
+ }
+ case FilterTypeResource:
+ ft = filter.NewResourceTypeFilter(f.Value.(string))
+ default:
+ return fmt.Errorf("unsupported filter type: %s", f.Type)
+ }
+
+ return filter.DoFilter(filterables, ft)
+}
+
// TriggerType represents the type of trigger.
type TriggerType string
diff --git a/src/replication/model/policy_test.go b/src/replication/model/policy_test.go
index 190e1b6e8..262c0b23f 100644
--- a/src/replication/model/policy_test.go
+++ b/src/replication/model/policy_test.go
@@ -105,8 +105,8 @@ func TestValidOfPolicy(t *testing.T) {
Value: ResourceTypeImage,
},
{
- Type: FilterTypeName,
- Value: "a[",
+ Type: FilterTypeTag,
+ Value: "",
},
},
},
diff --git a/src/replication/model/registry.go b/src/replication/model/registry.go
index b8b074aed..dfb743cce 100644
--- a/src/replication/model/registry.go
+++ b/src/replication/model/registry.go
@@ -26,9 +26,16 @@ const (
RegistryTypeDockerHub RegistryType = "docker-hub"
RegistryTypeDockerRegistry RegistryType = "docker-registry"
RegistryTypeHuawei RegistryType = "huawei-SWR"
+ RegistryTypeGoogleGcr RegistryType = "google-gcr"
+ RegistryTypeAwsEcr RegistryType = "aws-ecr"
+ RegistryTypeAzureAcr RegistryType = "azure-acr"
+ RegistryTypeAliAcr RegistryType = "ali-acr"
+
+ RegistryTypeHelmHub RegistryType = "helm-hub"
FilterStyleTypeText = "input"
FilterStyleTypeRadio = "radio"
+ FilterStyleTypeList = "list"
)
// RegistryType indicates the type of registry
diff --git a/src/replication/operation/controller.go b/src/replication/operation/controller.go
index 878325e7e..35d6e84fe 100644
--- a/src/replication/operation/controller.go
+++ b/src/replication/operation/controller.go
@@ -16,10 +16,12 @@ package operation
import (
"fmt"
+ "regexp"
"time"
"github.com/goharbor/harbor/src/common/job"
"github.com/goharbor/harbor/src/common/utils/log"
+ hjob "github.com/goharbor/harbor/src/jobservice/job"
"github.com/goharbor/harbor/src/replication/dao/models"
"github.com/goharbor/harbor/src/replication/model"
"github.com/goharbor/harbor/src/replication/operation/execution"
@@ -45,6 +47,11 @@ const (
maxReplicators = 1024
)
+var (
+ statusBehindErrorPattern = "mismatch job status for stopping job: .*, job status (.*) is behind Running"
+ statusBehindErrorReg = regexp.MustCompile(statusBehindErrorPattern)
+)
+
// NewController returns a controller implementation
func NewController(js job.Client) Controller {
ctl := &controller{
@@ -149,19 +156,36 @@ func (c *controller) StopReplication(executionID int64) error {
}
// got tasks, stopping the tasks one by one
for _, task := range tasks {
- if !isTaskRunning(task) {
- log.Debugf("the task %d(job ID: %s) isn't running, its status is %s, skip", task.ID, task.JobID, task.Status)
+ if isTaskInFinalStatus(task) {
+ log.Debugf("the task %d(job ID: %s) is in final status, its status is %s, skip", task.ID, task.JobID, task.Status)
continue
}
if err = c.scheduler.Stop(task.JobID); err != nil {
- return err
+ status, flag := isStatusBehindError(err)
+ if flag {
+ switch hjob.Status(status) {
+ case hjob.ErrorStatus:
+ status = models.TaskStatusFailed
+ case hjob.SuccessStatus:
+ status = models.TaskStatusSucceed
+ }
+ e := c.executionMgr.UpdateTaskStatus(task.ID, status)
+ if e != nil {
+ log.Errorf("failed to update the status the task %d(job ID: %s): %v", task.ID, task.JobID, e)
+ } else {
+ log.Debugf("got status behind error for task %d, update it's status to %s directly", task.ID, status)
+ }
+ continue
+ }
+ log.Errorf("failed to stop the task %d(job ID: %s): %v", task.ID, task.JobID, err)
+ continue
}
log.Debugf("the stop request for task %d(job ID: %s) sent", task.ID, task.JobID)
}
return nil
}
-func isTaskRunning(task *models.Task) bool {
+func isTaskInFinalStatus(task *models.Task) bool {
if task == nil {
return false
}
@@ -169,9 +193,20 @@ func isTaskRunning(task *models.Task) bool {
case models.TaskStatusSucceed,
models.TaskStatusStopped,
models.TaskStatusFailed:
- return false
+ return true
}
- return true
+ return false
+}
+
+func isStatusBehindError(err error) (string, bool) {
+ if err == nil {
+ return "", false
+ }
+ strs := statusBehindErrorReg.FindStringSubmatch(err.Error())
+ if len(strs) != 2 {
+ return "", false
+ }
+ return strs[1], true
}
func (c *controller) ListExecutions(query ...*models.ExecutionQuery) (int64, []*models.Execution, error) {
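The status-behind handling works because the jobservice encodes the job's actual terminal status in the error text; the controller extracts it with the regexp above and records it on the task instead of failing the stop request. A self-contained sketch of the extraction:

```go
package main

import (
	"errors"
	"fmt"
	"regexp"
)

// The same pattern as in the patch: the submatch captures the job status.
var statusBehindErrorReg = regexp.MustCompile(
	"mismatch job status for stopping job: .*, job status (.*) is behind Running")

func main() {
	err := errors.New("mismatch job status for stopping job: 9feedf9933jffs," +
		" job status Error is behind Running")
	if m := statusBehindErrorReg.FindStringSubmatch(err.Error()); len(m) == 2 {
		fmt.Println("job already finished with status:", m[1]) // prints "Error"
	}
}
```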
diff --git a/src/replication/operation/controller_test.go b/src/replication/operation/controller_test.go
index c19daa8c9..b6f4f5d77 100644
--- a/src/replication/operation/controller_test.go
+++ b/src/replication/operation/controller_test.go
@@ -15,6 +15,7 @@
package operation
import (
+ "errors"
"io"
"os"
"testing"
@@ -344,40 +345,57 @@ func TestGetTaskLog(t *testing.T) {
func TestIsTaskRunning(t *testing.T) {
cases := []struct {
- task *models.Task
- isRunning bool
+ task *models.Task
+ isFinalStatus bool
}{
{
- task: nil,
- isRunning: false,
+ task: nil,
+ isFinalStatus: false,
},
{
task: &models.Task{
Status: models.TaskStatusSucceed,
},
- isRunning: false,
+ isFinalStatus: true,
},
{
task: &models.Task{
Status: models.TaskStatusFailed,
},
- isRunning: false,
+ isFinalStatus: true,
},
{
task: &models.Task{
Status: models.TaskStatusStopped,
},
- isRunning: false,
+ isFinalStatus: true,
},
{
task: &models.Task{
Status: models.TaskStatusInProgress,
},
- isRunning: true,
+ isFinalStatus: false,
},
}
for _, c := range cases {
- assert.Equal(t, c.isRunning, isTaskRunning(c.task))
+ assert.Equal(t, c.isFinalStatus, isTaskInFinalStatus(c.task))
}
}
+
+func TestIsStatusBehindError(t *testing.T) {
+ // nil error
+ status, flag := isStatusBehindError(nil)
+ assert.False(t, flag)
+
+ // not status behind error
+ err := errors.New("not status behind error")
+ status, flag = isStatusBehindError(err)
+ assert.False(t, flag)
+
+ // status behind error
+ err = errors.New("mismatch job status for stopping job: 9feedf9933jffs, job status Error is behind Running")
+ status, flag = isStatusBehindError(err)
+ assert.True(t, flag)
+ assert.Equal(t, "Error", status)
+}
diff --git a/src/replication/operation/execution/execution.go b/src/replication/operation/execution/execution.go
index ba9189826..0d4db946c 100644
--- a/src/replication/operation/execution/execution.go
+++ b/src/replication/operation/execution/execution.go
@@ -152,13 +152,9 @@ func (dm *DefaultManager) UpdateTask(task *models.Task, props ...string) error {
// UpdateTaskStatus ...
func (dm *DefaultManager) UpdateTaskStatus(taskID int64, status string, statusCondition ...string) error {
- n, err := dao.UpdateTaskStatus(taskID, status, statusCondition...)
- if err != nil {
+ if _, err := dao.UpdateTaskStatus(taskID, status, statusCondition...); err != nil {
return err
}
- if n == 0 {
- return fmt.Errorf("Update task status failed %d: -> %s ", taskID, status)
- }
return nil
}
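UpdateTaskStatus no longer treats a zero-row update as a failure: once the status preconditions in hook/task.go below are in place, an update that matches no rows is just an out-of-order or duplicate hook, and silently skipping it is the intended behavior. A compare-and-set sketch of those semantics (illustrative only; the real check happens in the DAO's SQL):

```go
package main

import "fmt"

// casStatus applies "next" only when the current status is one of the
// allowed pre-statuses; a false return is an expected no-op, not an error.
func casStatus(current *string, next string, allowedPre ...string) bool {
	for _, s := range allowedPre {
		if *current == s {
			*current = next
			return true
		}
	}
	return false
}

func main() {
	status := "InProgress"
	fmt.Println(casStatus(&status, "Succeed", "InProgress"), status)  // true Succeed
	fmt.Println(casStatus(&status, "Pending", "Initialized"), status) // false Succeed
}
```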
diff --git a/src/replication/operation/execution/execution_test.go b/src/replication/operation/execution/execution_test.go
index 9805754e6..65e35fd07 100644
--- a/src/replication/operation/execution/execution_test.go
+++ b/src/replication/operation/execution/execution_test.go
@@ -6,7 +6,6 @@ import (
"time"
"github.com/goharbor/harbor/src/common/dao"
- "github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/replication/dao/models"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -15,24 +14,8 @@ import (
var executionManager = NewDefaultManager()
func TestMain(m *testing.M) {
- databases := []string{"postgresql"}
- for _, database := range databases {
- log.Infof("run test cases for database: %s", database)
- result := 1
- switch database {
- case "postgresql":
- dao.PrepareTestForPostgresSQL()
- default:
- log.Fatalf("invalid database: %s", database)
- }
-
- result = m.Run()
-
- if result != 0 {
- os.Exit(result)
- }
- }
-
+ dao.PrepareTestForPostgresSQL()
+ os.Exit(m.Run())
}
func TestMethodOfExecutionManager(t *testing.T) {
diff --git a/src/replication/operation/flow/stage.go b/src/replication/operation/flow/stage.go
index 5476f3c13..27110f001 100644
--- a/src/replication/operation/flow/stage.go
+++ b/src/replication/operation/flow/stage.go
@@ -17,7 +17,6 @@ package flow
import (
"errors"
"fmt"
- "strings"
"time"
"github.com/goharbor/harbor/src/common/utils/log"
@@ -331,15 +330,16 @@ func getResourceName(res *model.Resource) string {
if meta == nil {
return ""
}
+ repositoryName := meta.Repository.Name
if len(meta.Vtags) == 0 {
- return meta.Repository.Name
+ return repositoryName
}
- if len(meta.Vtags) <= 5 {
- return meta.Repository.Name + ":[" + strings.Join(meta.Vtags, ",") + "]"
+ if len(meta.Vtags) == 1 {
+ return repositoryName + ":[" + meta.Vtags[0] + "]"
}
- return fmt.Sprintf("%s:[%s ... %d in total]", meta.GetResourceName(), strings.Join(meta.Vtags[:5], ","), len(meta.Vtags))
+ return fmt.Sprintf("%s:[%s ... %d in total]", repositoryName, meta.Vtags[0], len(meta.Vtags))
}
// repository:c namespace:n -> n/c
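After the change, getResourceName prints at most the first tag plus a total count instead of joining up to five tags. A sketch of the resulting formats ("library/hello-world" is a placeholder repository name):

```go
package main

import "fmt"

// name mirrors the patched getResourceName logic for illustration.
func name(repo string, vtags []string) string {
	switch {
	case len(vtags) == 0:
		return repo
	case len(vtags) == 1:
		return repo + ":[" + vtags[0] + "]"
	default:
		return fmt.Sprintf("%s:[%s ... %d in total]", repo, vtags[0], len(vtags))
	}
}

func main() {
	fmt.Println(name("library/hello-world", nil))                        // library/hello-world
	fmt.Println(name("library/hello-world", []string{"latest"}))         // library/hello-world:[latest]
	fmt.Println(name("library/hello-world", []string{"v1", "v2", "v3"})) // library/hello-world:[v1 ... 3 in total]
}
```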
diff --git a/src/replication/operation/hook/task.go b/src/replication/operation/hook/task.go
index 576d0deab..220763dbd 100644
--- a/src/replication/operation/hook/task.go
+++ b/src/replication/operation/hook/task.go
@@ -25,17 +25,23 @@ func UpdateTask(ctl operation.Controller, id int64, status string) error {
jobStatus := job.Status(status)
// convert the job status to task status
s := ""
+ preStatus := []string{}
switch jobStatus {
case job.PendingStatus:
s = models.TaskStatusPending
+ preStatus = append(preStatus, models.TaskStatusInitialized)
case job.ScheduledStatus, job.RunningStatus:
s = models.TaskStatusInProgress
+ preStatus = append(preStatus, models.TaskStatusInitialized, models.TaskStatusPending)
case job.StoppedStatus:
s = models.TaskStatusStopped
+ preStatus = append(preStatus, models.TaskStatusInitialized, models.TaskStatusPending, models.TaskStatusInProgress)
case job.ErrorStatus:
s = models.TaskStatusFailed
+ preStatus = append(preStatus, models.TaskStatusInitialized, models.TaskStatusPending, models.TaskStatusInProgress)
case job.SuccessStatus:
s = models.TaskStatusSucceed
+ preStatus = append(preStatus, models.TaskStatusInitialized, models.TaskStatusPending, models.TaskStatusInProgress)
}
- return ctl.UpdateTaskStatus(id, s)
+ return ctl.UpdateTaskStatus(id, s, preStatus...)
}
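The preStatus lists encode the task state machine: a hook may only move a task forward, never backward. Restated as a transition table for clarity (a sketch; the constants are from replication/dao/models):

```go
package sketch

import "github.com/goharbor/harbor/src/replication/dao/models"

// validPreStatus maps a target status to the statuses a task may currently
// be in for the update to apply; anything else is dropped as out of order.
var validPreStatus = map[string][]string{
	models.TaskStatusPending:    {models.TaskStatusInitialized},
	models.TaskStatusInProgress: {models.TaskStatusInitialized, models.TaskStatusPending},
	models.TaskStatusStopped:    {models.TaskStatusInitialized, models.TaskStatusPending, models.TaskStatusInProgress},
	models.TaskStatusFailed:     {models.TaskStatusInitialized, models.TaskStatusPending, models.TaskStatusInProgress},
	models.TaskStatusSucceed:    {models.TaskStatusInitialized, models.TaskStatusPending, models.TaskStatusInProgress},
}
```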
diff --git a/src/replication/policy/manager/manager.go b/src/replication/policy/manager/manager.go
index 4cbf1f404..cdd67a20c 100644
--- a/src/replication/policy/manager/manager.go
+++ b/src/replication/policy/manager/manager.go
@@ -258,7 +258,7 @@ func parseFilters(str string) ([]*model.Filter, error) {
case "tag":
filter.Type = model.FilterTypeTag
case "label":
- // TODO if we support the label filter, remove the checking logic here
+ // drop all legacy label filters
continue
default:
log.Warningf("unknown filter type: %s", filter.Type)
@@ -271,6 +271,13 @@ func parseFilters(str string) ([]*model.Filter, error) {
if filter.Type == model.FilterTypeResource {
filter.Value = (model.ResourceType)(filter.Value.(string))
}
+ if filter.Type == model.FilterTypeLabel {
+ labels := []string{}
+ for _, label := range filter.Value.([]interface{}) {
+ labels = append(labels, label.(string))
+ }
+ filter.Value = labels
+ }
filters = append(filters, filter)
}
return filters, nil
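The label branch exists because encoding/json always decodes a JSON array into []interface{}; the manager normalizes it to []string so the stored filter value satisfies the []string assertion in Filter.DoFilter. A standalone sketch of the conversion:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// JSON arrays decode into []interface{}, never directly into []string.
	var raw interface{}
	if err := json.Unmarshal([]byte(`["prod","release"]`), &raw); err != nil {
		panic(err)
	}
	labels := []string{}
	for _, label := range raw.([]interface{}) {
		labels = append(labels, label.(string))
	}
	fmt.Println(labels) // [prod release]
}
```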
diff --git a/src/replication/replication.go b/src/replication/replication.go
index e15d2aeee..8b234df89 100644
--- a/src/replication/replication.go
+++ b/src/replication/replication.go
@@ -35,6 +35,16 @@ import (
_ "github.com/goharbor/harbor/src/replication/adapter/native"
// register the huawei adapter
_ "github.com/goharbor/harbor/src/replication/adapter/huawei"
+ // register the Google Gcr adapter
+ _ "github.com/goharbor/harbor/src/replication/adapter/googlegcr"
+ // register the AwsEcr adapter
+ _ "github.com/goharbor/harbor/src/replication/adapter/awsecr"
+ // register the AzureAcr adapter
+ _ "github.com/goharbor/harbor/src/replication/adapter/azurecr"
+ // register the AliACR adapter
+ _ "github.com/goharbor/harbor/src/replication/adapter/aliacr"
+ // register the Helm Hub adapter
+ _ "github.com/goharbor/harbor/src/replication/adapter/helmhub"
)
var (
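Adapter wiring relies on blank imports: importing each adapter package for its side effects runs the package's init function, which registers a factory for the corresponding registry type. A generic sketch of the idiom the blank imports depend on (this is not Harbor's actual adapter API):

```go
package registry // sketch of the idiom, not Harbor's actual adapter API

// factories collects one constructor per registry type.
var factories = map[string]func() interface{}{}

// Register is called from each adapter package's init(); a blank import
// ("_") of the adapter package is enough to trigger the registration.
func Register(registryType string, factory func() interface{}) {
	factories[registryType] = factory
}
```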
diff --git a/src/replication/transfer/image/transfer.go b/src/replication/transfer/image/transfer.go
index e47526b1b..898eebb8f 100644
--- a/src/replication/transfer/image/transfer.go
+++ b/src/replication/transfer/image/transfer.go
@@ -211,18 +211,15 @@ func (t *transfer) copyContent(content distribution.Descriptor, srcRepo, dstRepo
case schema2.MediaTypeManifest:
// as using digest as the reference, so set the override to true directly
return t.copyImage(srcRepo, digest, dstRepo, digest, true)
- // copy layer or image config
- case schema2.MediaTypeLayer, schema2.MediaTypeImageConfig:
- return t.copyBlob(srcRepo, dstRepo, digest)
// handle foreign layer
case schema2.MediaTypeForeignLayer:
t.logger.Infof("the layer %s is a foreign layer, skip", digest)
return nil
- // others
+ // copy layer or image config
+ // the media type of the layer or config can be "application/octet-stream",
+ // schema1.MediaTypeManifestLayer, schema2.MediaTypeLayer, schema2.MediaTypeImageConfig
default:
- err := fmt.Errorf("unsupported media type: %s", content.MediaType)
- t.logger.Error(err.Error())
- return err
+ return t.copyBlob(srcRepo, dstRepo, digest)
}
}
@@ -264,6 +261,7 @@ func (t *transfer) pullManifest(repository, reference string) (
t.logger.Infof("pulling the manifest of image %s:%s ...", repository, reference)
manifest, digest, err := t.src.PullManifest(repository, reference, []string{
schema1.MediaTypeManifest,
+ schema1.MediaTypeSignedManifest,
schema2.MediaTypeManifest,
manifestlist.MediaTypeManifestList,
})
@@ -288,6 +286,7 @@ func (t *transfer) handleManifest(manifest distribution.Manifest, repository, di
}
// manifest
if mediaType == schema1.MediaTypeManifest ||
+ mediaType == schema1.MediaTypeSignedManifest ||
mediaType == schema2.MediaTypeManifest {
return manifest, digest, nil
}
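Two loosenings above work together: the copy path now treats any unrecognized descriptor media type as a plain blob, since configs and layers pushed by older clients often carry "application/octet-stream", and the puller additionally accepts schema1 signed manifests, which is the form the registry returns for signed schema1 content. A sketch of the resulting dispatch (constants from github.com/docker/distribution):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/manifest/schema2"
)

// classify mirrors the copyContent dispatch after the change: only nested
// manifests and foreign layers are special; everything else is a blob.
func classify(mediaType string) string {
	switch mediaType {
	case schema2.MediaTypeManifest:
		return "copy nested manifest by digest"
	case schema2.MediaTypeForeignLayer:
		return "skip: foreign layers live outside the registry"
	default:
		return "copy blob by digest"
	}
}

func main() {
	fmt.Println(classify("application/octet-stream")) // copy blob by digest
}
```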
diff --git a/tests/apitests/api-testing/client/docker_client.go b/src/testing/apitests/api-testing/client/docker_client.go
similarity index 92%
rename from tests/apitests/api-testing/client/docker_client.go
rename to src/testing/apitests/api-testing/client/docker_client.go
index 0ee6fee6f..a48fa49e7 100644
--- a/tests/apitests/api-testing/client/docker_client.go
+++ b/src/testing/apitests/api-testing/client/docker_client.go
@@ -6,10 +6,10 @@ import "errors"
import "bufio"
import "fmt"
-//DockerClient : Run docker commands
+// DockerClient : Run docker commands
type DockerClient struct{}
-//Status : Check if docker daemon is there
+// Status : Check if docker daemon is there
func (dc *DockerClient) Status() error {
cmdName := "docker"
args := []string{"info"}
@@ -17,7 +17,7 @@ func (dc *DockerClient) Status() error {
return dc.runCommand(cmdName, args)
}
-//Pull : Pull image
+// Pull : Pull image
func (dc *DockerClient) Pull(image string) error {
if len(strings.TrimSpace(image)) == 0 {
return errors.New("Empty image")
@@ -29,7 +29,7 @@ func (dc *DockerClient) Pull(image string) error {
return dc.runCommandWithOutput(cmdName, args)
}
-//Tag :Tag image
+// Tag :Tag image
func (dc *DockerClient) Tag(source, target string) error {
if len(strings.TrimSpace(source)) == 0 ||
len(strings.TrimSpace(target)) == 0 {
@@ -42,7 +42,7 @@ func (dc *DockerClient) Tag(source, target string) error {
return dc.runCommandWithOutput(cmdName, args)
}
-//Push : push image
+// Push : push image
func (dc *DockerClient) Push(image string) error {
if len(strings.TrimSpace(image)) == 0 {
return errors.New("Empty image")
@@ -54,7 +54,7 @@ func (dc *DockerClient) Push(image string) error {
return dc.runCommandWithOutput(cmdName, args)
}
-//Login : Login docker
+// Login : Login docker
func (dc *DockerClient) Login(userName, password string, uri string) error {
if len(strings.TrimSpace(userName)) == 0 ||
len(strings.TrimSpace(password)) == 0 {
diff --git a/tests/apitests/api-testing/client/harbor_api_client.go b/src/testing/apitests/api-testing/client/harbor_api_client.go
similarity index 89%
rename from tests/apitests/api-testing/client/harbor_api_client.go
rename to src/testing/apitests/api-testing/client/harbor_api_client.go
index 7f392be24..8d48b77ae 100644
--- a/tests/apitests/api-testing/client/harbor_api_client.go
+++ b/src/testing/apitests/api-testing/client/harbor_api_client.go
@@ -17,7 +17,7 @@ const (
httpHeaderAccept = "Accept"
)
-//APIClientConfig : Keep config options for APIClient
+// APIClientConfig : Keep config options for APIClient
type APIClientConfig struct {
Username string
Password string
@@ -27,24 +27,24 @@ type APIClientConfig struct {
Proxy string
}
-//APIClient provided the http client for trigger http requests
+// APIClient provided the http client for trigger http requests
type APIClient struct {
- //http client
+ // http client
client *http.Client
- //Configuration
+ // Configuration
config APIClientConfig
}
-//NewAPIClient is constructor of APIClient
+// NewAPIClient is constructor of APIClient
func NewAPIClient(config APIClientConfig) (*APIClient, error) {
- //Load client cert
+ // Load client cert
cert, err := tls.LoadX509KeyPair(config.CertFile, config.KeyFile)
if err != nil {
return nil, err
}
- //Add ca
+ // Add ca
caCert, err := ioutil.ReadFile(config.CaFile)
if err != nil {
return nil, err
@@ -63,7 +63,7 @@ func NewAPIClient(config APIClientConfig) (*APIClient, error) {
TLSClientConfig: tlsConfig,
}
- //If proxy should be set
+ // If proxy should be set
if len(strings.TrimSpace(config.Proxy)) > 0 {
if proxyURL, err := url.Parse(config.Proxy); err == nil {
transport.Proxy = http.ProxyURL(proxyURL)
@@ -81,7 +81,7 @@ func NewAPIClient(config APIClientConfig) (*APIClient, error) {
}
-//Get data
+// Get data
func (ac *APIClient) Get(url string) ([]byte, error) {
if strings.TrimSpace(url) == "" {
return nil, errors.New("empty url")
@@ -116,7 +116,7 @@ func (ac *APIClient) Get(url string) ([]byte, error) {
return data, nil
}
-//Post data
+// Post data
func (ac *APIClient) Post(url string, data []byte) error {
if strings.TrimSpace(url) == "" {
return errors.New("Empty url")
@@ -146,7 +146,7 @@ func (ac *APIClient) Post(url string, data []byte) error {
return nil
}
-//Delete data
+// Delete data
func (ac *APIClient) Delete(url string) error {
if strings.TrimSpace(url) == "" {
return errors.New("Empty url")
@@ -176,7 +176,7 @@ func (ac *APIClient) Delete(url string) error {
return nil
}
-//SwitchAccount : Switch account
+// SwitchAccount : Switch account
func (ac *APIClient) SwitchAccount(username, password string) {
if len(strings.TrimSpace(username)) == 0 ||
len(strings.TrimSpace(password)) == 0 {
@@ -187,14 +187,14 @@ func (ac *APIClient) SwitchAccount(username, password string) {
ac.config.Password = password
}
-//Read error message from response body
+// Read error message from response body
func getErrorMessage(resp *http.Response) error {
if resp == nil {
return errors.New("nil response")
}
if resp.Body == nil || resp.ContentLength == 0 {
- //nothing to read
+ // nothing to read
return nil
}
@@ -202,7 +202,7 @@ func getErrorMessage(resp *http.Response) error {
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
- //abandon to read deatiled error message
+ // abandon reading the detailed error message
return nil
}
diff --git a/tests/apitests/api-testing/envs/concourse_ci.go b/src/testing/apitests/api-testing/envs/concourse_ci.go
similarity index 90%
rename from tests/apitests/api-testing/envs/concourse_ci.go
rename to src/testing/apitests/api-testing/envs/concourse_ci.go
index 89c1c59d3..bb319b754 100644
--- a/tests/apitests/api-testing/envs/concourse_ci.go
+++ b/src/testing/apitests/api-testing/envs/concourse_ci.go
@@ -1,6 +1,6 @@
package envs
-//ConcourseCIEnv : Env for concourse pipeline
+// ConcourseCIEnv : Env for concourse pipeline
var ConcourseCIEnv = Environment{
Protocol: "https",
TestingProject: "concoursecitesting01",
diff --git a/tests/apitests/api-testing/envs/concourse_ci_ldap.go b/src/testing/apitests/api-testing/envs/concourse_ci_ldap.go
similarity index 88%
rename from tests/apitests/api-testing/envs/concourse_ci_ldap.go
rename to src/testing/apitests/api-testing/envs/concourse_ci_ldap.go
index f709e2dd2..5ccff3783 100644
--- a/tests/apitests/api-testing/envs/concourse_ci_ldap.go
+++ b/src/testing/apitests/api-testing/envs/concourse_ci_ldap.go
@@ -1,6 +1,6 @@
package envs
-//ConcourseCILdapEnv : Ldap env for concourse pipeline
+// ConcourseCILdapEnv : Ldap env for concourse pipeline
var ConcourseCILdapEnv = Environment{
Protocol: "https",
TestingProject: "concoursecitesting01",
diff --git a/tests/apitests/api-testing/envs/environment.go b/src/testing/apitests/api-testing/envs/environment.go
similarity index 69%
rename from tests/apitests/api-testing/envs/environment.go
rename to src/testing/apitests/api-testing/envs/environment.go
index 8e86f5807..7609f0fcb 100644
--- a/tests/apitests/api-testing/envs/environment.go
+++ b/src/testing/apitests/api-testing/envs/environment.go
@@ -5,36 +5,36 @@ import (
"os"
"strings"
- "github.com/goharbor/harbor/tests/apitests/api-testing/client"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/client"
)
-//Environment keeps the testing env info
+// Environment keeps the testing env info
type Environment struct {
- Protocol string //env var: HTTP_PROTOCOL
- Hostname string //env var: TESTING_ENV_HOSTNAME
- Account string //env var: TESTING_ENV_ACCOUNT
- Password string //env var: TESTING_ENV_PASSWORD
- Admin string //env var: TESTING_ENV_ADMIN
- AdminPass string //env var: TESTING_ENV_ADMIN_PASS
- TestingProject string //env var: TESTING_PROJECT_NAME
- ImageName string //env var: TESTING_IMAGE_NAME
- ImageTag string //env var: TESTING_IMAGE_TAG
- CAFile string //env var: CA_FILE_PATH
- CertFile string //env var: CERT_FILE_PATH
- KeyFile string //env var: KEY_FILE_PATH
- ProxyURL string //env var: http_proxy, https_proxy, HTTP_PROXY, HTTPS_PROXY
+ Protocol string // env var: HTTP_PROTOCOL
+ Hostname string // env var: TESTING_ENV_HOSTNAME
+ Account string // env var: TESTING_ENV_ACCOUNT
+ Password string // env var: TESTING_ENV_PASSWORD
+ Admin string // env var: TESTING_ENV_ADMIN
+ AdminPass string // env var: TESTING_ENV_ADMIN_PASS
+ TestingProject string // env var: TESTING_PROJECT_NAME
+ ImageName string // env var: TESTING_IMAGE_NAME
+ ImageTag string // env var: TESTING_IMAGE_TAG
+ CAFile string // env var: CA_FILE_PATH
+ CertFile string // env var: CERT_FILE_PATH
+ KeyFile string // env var: KEY_FILE_PATH
+ ProxyURL string // env var: http_proxy, https_proxy, HTTP_PROXY, HTTPS_PROXY
- //API client
+ // API client
HTTPClient *client.APIClient
- //Docker client
+ // Docker client
DockerClient *client.DockerClient
- //Initialize status
+ // Initialize status
loaded bool
}
-//Load test env info
+// Load test env info
func (env *Environment) Load() error {
host := os.Getenv("TESTING_ENV_HOSTNAME")
if isNotEmpty(host) {
@@ -131,7 +131,7 @@ func (env *Environment) Load() error {
return nil
}
-//RootURI : The root URI like https://
+// RootURI : The root URI like https://
func (env *Environment) RootURI() string {
return fmt.Sprintf("%s://%s", env.Protocol, env.Hostname)
}
diff --git a/tests/apitests/api-testing/lib/image.go b/src/testing/apitests/api-testing/lib/image.go
similarity index 89%
rename from tests/apitests/api-testing/lib/image.go
rename to src/testing/apitests/api-testing/lib/image.go
index 231d64ff3..24b9ac393 100644
--- a/tests/apitests/api-testing/lib/image.go
+++ b/src/testing/apitests/api-testing/lib/image.go
@@ -7,17 +7,17 @@ import (
"strings"
"time"
- "github.com/goharbor/harbor/tests/apitests/api-testing/client"
- "github.com/goharbor/harbor/tests/apitests/api-testing/models"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/client"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/models"
)
-//ImageUtil : For repository and tag functions
+// ImageUtil : For repository and tag functions
type ImageUtil struct {
rootURI string
testingClient *client.APIClient
}
-//NewImageUtil : Constructor
+// NewImageUtil : Constructor
func NewImageUtil(rootURI string, httpClient *client.APIClient) *ImageUtil {
if len(strings.TrimSpace(rootURI)) == 0 || httpClient == nil {
return nil
@@ -29,7 +29,7 @@ func NewImageUtil(rootURI string, httpClient *client.APIClient) *ImageUtil {
}
}
-//DeleteRepo : Delete repo
+// DeleteRepo : Delete repo
func (iu *ImageUtil) DeleteRepo(repoName string) error {
if len(strings.TrimSpace(repoName)) == 0 {
return errors.New("Empty repo name for deleting")
@@ -43,7 +43,7 @@ func (iu *ImageUtil) DeleteRepo(repoName string) error {
return nil
}
-//ScanTag :Scan a tag
+// ScanTag :Scan a tag
func (iu *ImageUtil) ScanTag(repoName string, tagName string) error {
if len(strings.TrimSpace(repoName)) == 0 {
return errors.New("Empty repo name for scanning")
@@ -64,7 +64,7 @@ func (iu *ImageUtil) ScanTag(repoName string, tagName string) error {
errchan := make(chan error)
url = fmt.Sprintf("%s%s%s%s%s", iu.rootURI, "/api/repositories/", repoName, "/tags/", tagName)
go func() {
- for _ = range tk.C {
+ for range tk.C {
data, err := iu.testingClient.Get(url)
if err != nil {
errchan <- err
@@ -90,7 +90,7 @@ func (iu *ImageUtil) ScanTag(repoName string, tagName string) error {
}
}
-//GetRepos : Get repos in the project
+// GetRepos : Get repos in the project
func (iu *ImageUtil) GetRepos(projectName string) ([]models.Repository, error) {
if len(strings.TrimSpace(projectName)) == 0 {
return nil, errors.New("Empty project name for getting repos")
@@ -116,7 +116,7 @@ func (iu *ImageUtil) GetRepos(projectName string) ([]models.Repository, error) {
return repos, nil
}
-//GetTags : Get tags
+// GetTags : Get tags
func (iu *ImageUtil) GetTags(repoName string) ([]models.Tag, error) {
if len(strings.TrimSpace(repoName)) == 0 {
return nil, errors.New("Empty repository name for getting tags")
diff --git a/tests/apitests/api-testing/lib/project.go b/src/testing/apitests/api-testing/lib/project.go
similarity index 86%
rename from tests/apitests/api-testing/lib/project.go
rename to src/testing/apitests/api-testing/lib/project.go
index 2d0264338..34a8a3c19 100644
--- a/tests/apitests/api-testing/lib/project.go
+++ b/src/testing/apitests/api-testing/lib/project.go
@@ -6,17 +6,17 @@ import (
"fmt"
"strings"
- "github.com/goharbor/harbor/tests/apitests/api-testing/client"
- "github.com/goharbor/harbor/tests/apitests/api-testing/models"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/client"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/models"
)
-//ProjectUtil : Util methods for project related
+// ProjectUtil : Util methods for project related
type ProjectUtil struct {
rootURI string
testingClient *client.APIClient
}
-//NewProjectUtil : Constructor
+// NewProjectUtil : Constructor
func NewProjectUtil(rootURI string, httpClient *client.APIClient) *ProjectUtil {
if len(strings.TrimSpace(rootURI)) == 0 || httpClient == nil {
return nil
@@ -28,8 +28,8 @@ func NewProjectUtil(rootURI string, httpClient *client.APIClient) *ProjectUtil {
}
}
-//GetProjects : Get projects
-//If name specified, then only get the specified project
+// GetProjects : Get projects
+// If name specified, then only get the specified project
func (pu *ProjectUtil) GetProjects(name string) ([]models.ExistingProject, error) {
url := pu.rootURI + "/api/projects"
if len(strings.TrimSpace(name)) > 0 {
@@ -48,8 +48,8 @@ func (pu *ProjectUtil) GetProjects(name string) ([]models.ExistingProject, error
return pros, nil
}
-//GetProjectID : Get the project ID
-//If no project existing with the name, then return -1
+// GetProjectID : Get the project ID
+// If no project existing with the name, then return -1
func (pu *ProjectUtil) GetProjectID(projectName string) int {
pros, err := pu.GetProjects(projectName)
if err != nil {
@@ -69,7 +69,7 @@ func (pu *ProjectUtil) GetProjectID(projectName string) int {
return -1
}
-//CreateProject :Create project
+// CreateProject :Create project
func (pu *ProjectUtil) CreateProject(projectName string, accessLevel bool) error {
if len(strings.TrimSpace(projectName)) == 0 {
return errors.New("Empty project name for creating")
@@ -92,7 +92,7 @@ func (pu *ProjectUtil) CreateProject(projectName string, accessLevel bool) error
return pu.testingClient.Post(url, body)
}
-//DeleteProject : Delete project
+// DeleteProject : Delete project
func (pu *ProjectUtil) DeleteProject(projectName string) error {
if len(strings.TrimSpace(projectName)) == 0 {
return errors.New("Empty project name for deleting")
@@ -108,7 +108,7 @@ func (pu *ProjectUtil) DeleteProject(projectName string) error {
return pu.testingClient.Delete(url)
}
-//AssignRole : Assign role to user
+// AssignRole : Assign role to user
func (pu *ProjectUtil) AssignRole(projectName, username string) error {
if len(strings.TrimSpace(projectName)) == 0 ||
len(strings.TrimSpace(username)) == 0 {
@@ -137,7 +137,7 @@ func (pu *ProjectUtil) AssignRole(projectName, username string) error {
return pu.testingClient.Post(url, body)
}
-//RevokeRole : RevokeRole role from user
+// RevokeRole : RevokeRole role from user
func (pu *ProjectUtil) RevokeRole(projectName string, username string) error {
if len(strings.TrimSpace(projectName)) == 0 {
return errors.New("Project name is required for revoking role")
@@ -162,7 +162,7 @@ func (pu *ProjectUtil) RevokeRole(projectName string, username string) error {
return pu.testingClient.Delete(url)
}
-//GetProjectMember : Get the project member by name
+// GetProjectMember : Get the project member by name
func (pu *ProjectUtil) GetProjectMember(pid int, member string) (*models.ExistingMember, error) {
if pid == 0 {
return nil, errors.New("invalid project ID")
diff --git a/tests/apitests/api-testing/lib/report.go b/src/testing/apitests/api-testing/lib/report.go
similarity index 92%
rename from tests/apitests/api-testing/lib/report.go
rename to src/testing/apitests/api-testing/lib/report.go
index 6b39a552c..0bcceb742 100644
--- a/tests/apitests/api-testing/lib/report.go
+++ b/src/testing/apitests/api-testing/lib/report.go
@@ -9,12 +9,12 @@ type Report struct {
failed []string
}
-//Passed case
+// Passed case
func (r *Report) Passed(caseName string) {
r.passed = append(r.passed, fmt.Sprintf("%s: [%s]", caseName, "PASSED"))
}
-//Failed case
+// Failed case
func (r *Report) Failed(caseName string, err error) {
errMsg := ""
if err != nil {
@@ -23,7 +23,7 @@ func (r *Report) Failed(caseName string, err error) {
r.failed = append(r.failed, fmt.Sprintf("%s: [%s] %s", caseName, "FAILED", errMsg))
}
-//Print report
+// Print report
func (r *Report) Print() {
passed := len(r.passed)
failed := len(r.failed)
@@ -41,7 +41,7 @@ func (r *Report) Print() {
}
}
-//IsFail : Overall result
+// IsFail : Overall result
func (r *Report) IsFail() bool {
return len(r.failed) > 0
}
diff --git a/tests/apitests/api-testing/lib/system.go b/src/testing/apitests/api-testing/lib/system.go
similarity index 78%
rename from tests/apitests/api-testing/lib/system.go
rename to src/testing/apitests/api-testing/lib/system.go
index 456de073e..ff7923241 100644
--- a/tests/apitests/api-testing/lib/system.go
+++ b/src/testing/apitests/api-testing/lib/system.go
@@ -5,18 +5,18 @@ import (
"fmt"
"strings"
- "github.com/goharbor/harbor/tests/apitests/api-testing/client"
- "github.com/goharbor/harbor/tests/apitests/api-testing/models"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/client"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/models"
)
-//SystemUtil : For getting system info
+// SystemUtil : For getting system info
type SystemUtil struct {
rootURI string
hostname string
testingClient *client.APIClient
}
-//NewSystemUtil : Constructor
+// NewSystemUtil : Constructor
func NewSystemUtil(rootURI, hostname string, httpClient *client.APIClient) *SystemUtil {
if len(strings.TrimSpace(rootURI)) == 0 || httpClient == nil {
return nil
@@ -29,7 +29,7 @@ func NewSystemUtil(rootURI, hostname string, httpClient *client.APIClient) *Syst
}
}
-//GetSystemInfo : Get systeminfo
+// GetSystemInfo : Get systeminfo
func (nsu *SystemUtil) GetSystemInfo() error {
url := nsu.rootURI + "/api/systeminfo"
data, err := nsu.testingClient.Get(url)
diff --git a/tests/apitests/api-testing/lib/user.go b/src/testing/apitests/api-testing/lib/user.go
similarity index 82%
rename from tests/apitests/api-testing/lib/user.go
rename to src/testing/apitests/api-testing/lib/user.go
index ab820dbd8..caf039ca4 100644
--- a/tests/apitests/api-testing/lib/user.go
+++ b/src/testing/apitests/api-testing/lib/user.go
@@ -6,17 +6,17 @@ import (
"fmt"
"strings"
- "github.com/goharbor/harbor/tests/apitests/api-testing/client"
- "github.com/goharbor/harbor/tests/apitests/api-testing/models"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/client"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/models"
)
-//UserUtil : For user related
+// UserUtil : For user related
type UserUtil struct {
rootURI string
testingClient *client.APIClient
}
-//NewUserUtil : Constructor
+// NewUserUtil : Constructor
func NewUserUtil(rootURI string, httpClient *client.APIClient) *UserUtil {
if len(strings.TrimSpace(rootURI)) == 0 || httpClient == nil {
return nil
@@ -28,7 +28,7 @@ func NewUserUtil(rootURI string, httpClient *client.APIClient) *UserUtil {
}
}
-//CreateUser : Create user
+// CreateUser : Create user
func (uu *UserUtil) CreateUser(username, password string) error {
if len(strings.TrimSpace(username)) == 0 ||
len(strings.TrimSpace(password)) == 0 {
@@ -56,7 +56,7 @@ func (uu *UserUtil) CreateUser(username, password string) error {
return nil
}
-//DeleteUser : Delete testing account
+// DeleteUser : Delete testing account
func (uu *UserUtil) DeleteUser(username string) error {
uid := uu.GetUserID(username)
if uid == -1 {
@@ -71,8 +71,8 @@ func (uu *UserUtil) DeleteUser(username string) error {
return nil
}
-//GetUsers : Get users
-//If name specified, then return that one
+// GetUsers : Get users
+// If name specified, then return that one
func (uu *UserUtil) GetUsers(name string) ([]models.ExistingUser, error) {
url := fmt.Sprintf("%s%s", uu.rootURI, "/api/users")
if len(strings.TrimSpace(name)) > 0 {
@@ -92,8 +92,8 @@ func (uu *UserUtil) GetUsers(name string) ([]models.ExistingUser, error) {
return users, nil
}
-//GetUserID : Get user ID
-//If user with the username is not existing, then return -1
+// GetUserID : Get user ID
+// If user with the username is not existing, then return -1
func (uu *UserUtil) GetUserID(username string) int {
if len(strings.TrimSpace(username)) == 0 {
return -1
diff --git a/tests/apitests/api-testing/models/endpoint.go b/src/testing/apitests/api-testing/models/endpoint.go
similarity index 87%
rename from tests/apitests/api-testing/models/endpoint.go
rename to src/testing/apitests/api-testing/models/endpoint.go
index 56b4a9c23..1a20bb98f 100644
--- a/tests/apitests/api-testing/models/endpoint.go
+++ b/src/testing/apitests/api-testing/models/endpoint.go
@@ -1,6 +1,6 @@
package models
-//Endpoint : For /api/targets
+// Endpoint : For /api/targets
type Endpoint struct {
Endpoint string `json:"endpoint"`
Name string `json:"name"`
diff --git a/tests/apitests/api-testing/models/image.go b/src/testing/apitests/api-testing/models/image.go
similarity index 79%
rename from tests/apitests/api-testing/models/image.go
rename to src/testing/apitests/api-testing/models/image.go
index 61e62f00f..76e386ce4 100644
--- a/tests/apitests/api-testing/models/image.go
+++ b/src/testing/apitests/api-testing/models/image.go
@@ -1,12 +1,12 @@
package models
-//Repository : For /api/repositories
+// Repository : For /api/repositories
type Repository struct {
ID int `json:"id"`
Name string `json:"name"`
}
-//Tag : For /api/repositories/:repo/tags
+// Tag : For /api/repositories/:repo/tags
type Tag struct {
Digest string `json:"digest"`
Name string `json:"name"`
@@ -14,7 +14,7 @@ type Tag struct {
ScanOverview *ScanOverview `json:"scan_overview, omitempty"`
}
-//ScanOverview : For scanning
+// ScanOverview : For scanning
type ScanOverview struct {
Status string `json:"scan_status"`
}
diff --git a/tests/apitests/api-testing/models/member.go b/src/testing/apitests/api-testing/models/member.go
similarity index 72%
rename from tests/apitests/api-testing/models/member.go
rename to src/testing/apitests/api-testing/models/member.go
index e39d3731a..05a8d276a 100644
--- a/tests/apitests/api-testing/models/member.go
+++ b/src/testing/apitests/api-testing/models/member.go
@@ -1,17 +1,17 @@
package models
-//Member : For /api/projects/:pid/members
+// Member : For /api/projects/:pid/members
type Member struct {
RoleID int `json:"role_id"`
Member *MemberUser `json:"member_user"`
}
-//MemberUser ...
+// MemberUser ...
type MemberUser struct {
Username string `json:"username"`
}
-//ExistingMember : For GET /api/projects/20/members
+// ExistingMember : For GET /api/projects/20/members
type ExistingMember struct {
MID int `json:"id"`
Name string `json:"entity_name"`
diff --git a/tests/apitests/api-testing/models/project.go b/src/testing/apitests/api-testing/models/project.go
similarity index 71%
rename from tests/apitests/api-testing/models/project.go
rename to src/testing/apitests/api-testing/models/project.go
index 05a9ab9fe..400117631 100644
--- a/tests/apitests/api-testing/models/project.go
+++ b/src/testing/apitests/api-testing/models/project.go
@@ -1,17 +1,17 @@
package models
-//Project : For /api/projects
+// Project : For /api/projects
type Project struct {
Name string `json:"project_name"`
Metadata *Metadata `json:"metadata,omitempty"`
}
-//Metadata : Metadata for project
+// Metadata : Metadata for project
type Metadata struct {
AccessLevel string `json:"public"`
}
-//ExistingProject : For /api/projects?name=***
+// ExistingProject : For /api/projects?name=***
type ExistingProject struct {
Name string `json:"name"`
ID int `json:"project_id"`
diff --git a/tests/apitests/api-testing/models/replication.go b/src/testing/apitests/api-testing/models/replication.go
similarity index 73%
rename from tests/apitests/api-testing/models/replication.go
rename to src/testing/apitests/api-testing/models/replication.go
index 0bac421af..582b5670f 100644
--- a/tests/apitests/api-testing/models/replication.go
+++ b/src/testing/apitests/api-testing/models/replication.go
@@ -1,6 +1,6 @@
package models
-//ReplicationPolicy : For /api/replications
+// ReplicationPolicy : For /api/replications
type ReplicationPolicy struct {
ProjectID int `json:"project_id"`
}
diff --git a/tests/apitests/api-testing/models/system_info.go b/src/testing/apitests/api-testing/models/system_info.go
similarity index 75%
rename from tests/apitests/api-testing/models/system_info.go
rename to src/testing/apitests/api-testing/models/system_info.go
index 78da9aa4d..a6aee4a1e 100644
--- a/tests/apitests/api-testing/models/system_info.go
+++ b/src/testing/apitests/api-testing/models/system_info.go
@@ -1,6 +1,6 @@
package models
-//SystemInfo : For GET /api/systeminfo
+// SystemInfo : For GET /api/systeminfo
type SystemInfo struct {
AuthMode string `json:"auth_mode"`
RegistryURL string `json:"registry_url"`
diff --git a/tests/apitests/api-testing/models/user.go b/src/testing/apitests/api-testing/models/user.go
similarity index 81%
rename from tests/apitests/api-testing/models/user.go
rename to src/testing/apitests/api-testing/models/user.go
index b6b2d6055..6d28be397 100644
--- a/tests/apitests/api-testing/models/user.go
+++ b/src/testing/apitests/api-testing/models/user.go
@@ -1,6 +1,6 @@
package models
-//User : For /api/users
+// User : For /api/users
type User struct {
Username string `json:"username"`
RealName string `json:"realname"`
@@ -9,7 +9,7 @@ type User struct {
Comment string `json:"comment"`
}
-//ExistingUser : For GET /api/users
+// ExistingUser : For GET /api/users
type ExistingUser struct {
User
ID int `json:"user_id"`
diff --git a/tests/apitests/api-testing/tests/suites/base/suite.go b/src/testing/apitests/api-testing/tests/suites/base/suite.go
similarity index 83%
rename from tests/apitests/api-testing/tests/suites/base/suite.go
rename to src/testing/apitests/api-testing/tests/suites/base/suite.go
index 31a11cfd2..8fa8b5af6 100644
--- a/tests/apitests/api-testing/tests/suites/base/suite.go
+++ b/src/testing/apitests/api-testing/tests/suites/base/suite.go
@@ -3,20 +3,20 @@ package base
import (
"fmt"
- "github.com/goharbor/harbor/tests/apitests/api-testing/envs"
- "github.com/goharbor/harbor/tests/apitests/api-testing/lib"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/envs"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/lib"
)
-//ConcourseCiSuite : Provides some base cases
+// ConcourseCiSuite : Provides some base cases
type ConcourseCiSuite struct{}
-//Run cases
-//Not implemented
+// Run cases
+// Not implemented
func (ccs *ConcourseCiSuite) Run(onEnvironment *envs.Environment) *lib.Report {
return &lib.Report{}
}
-//PushImage : Push image to the registry
+// PushImage : Push image to the registry
func (ccs *ConcourseCiSuite) PushImage(onEnvironment *envs.Environment) error {
docker := onEnvironment.DockerClient
if err := docker.Status(); err != nil {
@@ -49,7 +49,7 @@ func (ccs *ConcourseCiSuite) PushImage(onEnvironment *envs.Environment) error {
return nil
}
-//PullImage : Pull image from registry
+// PullImage : Pull image from registry
func (ccs *ConcourseCiSuite) PullImage(onEnvironment *envs.Environment) error {
docker := onEnvironment.DockerClient
if err := docker.Status(); err != nil {
diff --git a/src/testing/apitests/api-testing/tests/suites/suite.go b/src/testing/apitests/api-testing/tests/suites/suite.go
new file mode 100644
index 000000000..613e80ed2
--- /dev/null
+++ b/src/testing/apitests/api-testing/tests/suites/suite.go
@@ -0,0 +1,11 @@
+package suites
+
+import (
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/envs"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/lib"
+)
+
+// Suite : Run a group of test cases
+type Suite interface {
+ Run(onEnvironment *envs.Environment) *lib.Report
+}
diff --git a/tests/apitests/api-testing/tests/suites/suite01/run_test.go b/src/testing/apitests/api-testing/tests/suites/suite01/run_test.go
similarity index 69%
rename from tests/apitests/api-testing/tests/suites/suite01/run_test.go
rename to src/testing/apitests/api-testing/tests/suites/suite01/run_test.go
index c8b3f746a..767b72489 100644
--- a/tests/apitests/api-testing/tests/suites/suite01/run_test.go
+++ b/src/testing/apitests/api-testing/tests/suites/suite01/run_test.go
@@ -3,12 +3,12 @@ package suite01
import (
"testing"
- "github.com/goharbor/harbor/tests/apitests/api-testing/envs"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/envs"
)
-//TestRun : Start to run the case
+// TestRun : Start to run the case
func TestRun(t *testing.T) {
- //Initialize env
+ // Initialize env
if err := envs.ConcourseCIEnv.Load(); err != nil {
t.Fatal(err.Error())
}
diff --git a/tests/apitests/api-testing/tests/suites/suite01/suite.go b/src/testing/apitests/api-testing/tests/suites/suite01/suite.go
similarity index 86%
rename from tests/apitests/api-testing/tests/suites/suite01/suite.go
rename to src/testing/apitests/api-testing/tests/suites/suite01/suite.go
index b23221fb6..8d6a9091a 100644
--- a/tests/apitests/api-testing/tests/suites/suite01/suite.go
+++ b/src/testing/apitests/api-testing/tests/suites/suite01/suite.go
@@ -3,12 +3,12 @@ package suite01
import (
"fmt"
- "github.com/goharbor/harbor/tests/apitests/api-testing/envs"
- "github.com/goharbor/harbor/tests/apitests/api-testing/lib"
- "github.com/goharbor/harbor/tests/apitests/api-testing/tests/suites/base"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/envs"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/lib"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/tests/suites/base"
)
-//Steps of suite01:
+// Steps of suite01:
// s0: Get systeminfo
// s1: create project
// s2: create user "cody"
@@ -22,16 +22,16 @@ import (
// s10: delete project
// s11: delete user
-//ConcourseCiSuite01 : For harbor journey in concourse pipeline
+// ConcourseCiSuite01 : For harbor journey in concourse pipeline
type ConcourseCiSuite01 struct {
base.ConcourseCiSuite
}
-//Run : Run a group of cases
+// Run : Run a group of cases
func (ccs *ConcourseCiSuite01) Run(onEnvironment *envs.Environment) *lib.Report {
report := &lib.Report{}
- //s0
+ // s0
sys := lib.NewSystemUtil(onEnvironment.RootURI(), onEnvironment.Hostname, onEnvironment.HTTPClient)
if err := sys.GetSystemInfo(); err != nil {
report.Failed("GetSystemInfo", err)
@@ -39,7 +39,7 @@ func (ccs *ConcourseCiSuite01) Run(onEnvironment *envs.Environment) *lib.Report
report.Passed("GetSystemInfo")
}
- //s1
+ // s1
pro := lib.NewProjectUtil(onEnvironment.RootURI(), onEnvironment.HTTPClient)
if err := pro.CreateProject(onEnvironment.TestingProject, false); err != nil {
report.Failed("CreateProject", err)
@@ -47,7 +47,7 @@ func (ccs *ConcourseCiSuite01) Run(onEnvironment *envs.Environment) *lib.Report
report.Passed("CreateProject")
}
- //s2
+ // s2
usr := lib.NewUserUtil(onEnvironment.RootURI(), onEnvironment.HTTPClient)
if err := usr.CreateUser(onEnvironment.Account, onEnvironment.Password); err != nil {
report.Failed("CreateUser", err)
@@ -55,21 +55,21 @@ func (ccs *ConcourseCiSuite01) Run(onEnvironment *envs.Environment) *lib.Report
report.Passed("CreateUser")
}
- //s3
+ // s3
if err := pro.AssignRole(onEnvironment.TestingProject, onEnvironment.Account); err != nil {
report.Failed("AssignRole", err)
} else {
report.Passed("AssignRole")
}
- //s4
+ // s4
if err := ccs.PushImage(onEnvironment); err != nil {
report.Failed("pushImage", err)
} else {
report.Passed("pushImage")
}
- //s5
+ // s5
img := lib.NewImageUtil(onEnvironment.RootURI(), onEnvironment.HTTPClient)
repoName := fmt.Sprintf("%s/%s", onEnvironment.TestingProject, onEnvironment.ImageName)
if err := img.ScanTag(repoName, onEnvironment.ImageTag); err != nil {
@@ -78,42 +78,42 @@ func (ccs *ConcourseCiSuite01) Run(onEnvironment *envs.Environment) *lib.Report
report.Passed("ScanTag")
}
- //s6
+ // s6
if err := ccs.PullImage(onEnvironment); err != nil {
report.Failed("pullImage[1]", err)
} else {
report.Passed("pullImage[1]")
}
- //s7
+ // s7
if err := pro.RevokeRole(onEnvironment.TestingProject, onEnvironment.Account); err != nil {
report.Failed("RevokeRole", err)
} else {
report.Passed("RevokeRole")
}
- //s8
+ // s8
if err := ccs.PullImage(onEnvironment); err == nil {
report.Failed("pullImage[2]", err)
} else {
report.Passed("pullImage[2]")
}
- //s9
+ // s9
if err := img.DeleteRepo(repoName); err != nil {
report.Failed("DeleteRepo", err)
} else {
report.Passed("DeleteRepo")
}
- //s10
+ // s10
if err := pro.DeleteProject(onEnvironment.TestingProject); err != nil {
report.Failed("DeleteProject", err)
} else {
report.Passed("DeleteProject")
}
- //s11
+ // s11
if err := usr.DeleteUser(onEnvironment.Account); err != nil {
report.Failed("DeleteUser", err)
} else {
diff --git a/tests/apitests/api-testing/tests/suites/suite02/run_test.go b/src/testing/apitests/api-testing/tests/suites/suite02/run_test.go
similarity index 70%
rename from tests/apitests/api-testing/tests/suites/suite02/run_test.go
rename to src/testing/apitests/api-testing/tests/suites/suite02/run_test.go
index 352e3cb72..e69778d15 100644
--- a/tests/apitests/api-testing/tests/suites/suite02/run_test.go
+++ b/src/testing/apitests/api-testing/tests/suites/suite02/run_test.go
@@ -3,12 +3,12 @@ package suite02
import (
"testing"
- "github.com/goharbor/harbor/tests/apitests/api-testing/envs"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/envs"
)
-//TestRun : Start to run the case
+// TestRun : Start to run the case
func TestRun(t *testing.T) {
- //Initialize env
+ // Initialize env
if err := envs.ConcourseCILdapEnv.Load(); err != nil {
t.Fatal(err.Error())
}
diff --git a/tests/apitests/api-testing/tests/suites/suite02/suite.go b/src/testing/apitests/api-testing/tests/suites/suite02/suite.go
similarity index 85%
rename from tests/apitests/api-testing/tests/suites/suite02/suite.go
rename to src/testing/apitests/api-testing/tests/suites/suite02/suite.go
index 46270aeee..48159fd34 100644
--- a/tests/apitests/api-testing/tests/suites/suite02/suite.go
+++ b/src/testing/apitests/api-testing/tests/suites/suite02/suite.go
@@ -3,12 +3,12 @@ package suite02
import (
"fmt"
- "github.com/goharbor/harbor/tests/apitests/api-testing/envs"
- "github.com/goharbor/harbor/tests/apitests/api-testing/lib"
- "github.com/goharbor/harbor/tests/apitests/api-testing/tests/suites/base"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/envs"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/lib"
+ "github.com/goharbor/harbor/src/testing/apitests/api-testing/tests/suites/base"
)
-//Steps of suite01:
+// Steps of suite02:
// s0: Get systeminfo
// s1: create project
// s2: assign ldap user "mike" as developer
@@ -20,16 +20,16 @@ import (
// s8: remove repository busybox
// s9: delete project
-//ConcourseCiSuite02 : For harbor ldap journey in concourse pipeline
+// ConcourseCiSuite02 : For harbor ldap journey in concourse pipeline
type ConcourseCiSuite02 struct {
base.ConcourseCiSuite
}
-//Run : Run a group of cases
+// Run : Run a group of cases
func (ccs *ConcourseCiSuite02) Run(onEnvironment *envs.Environment) *lib.Report {
report := &lib.Report{}
- //s0
+ // s0
sys := lib.NewSystemUtil(onEnvironment.RootURI(), onEnvironment.Hostname, onEnvironment.HTTPClient)
if err := sys.GetSystemInfo(); err != nil {
report.Failed("GetSystemInfo", err)
@@ -37,7 +37,7 @@ func (ccs *ConcourseCiSuite02) Run(onEnvironment *envs.Environment) *lib.Report
report.Passed("GetSystemInfo")
}
- //s1
+ // s1
pro := lib.NewProjectUtil(onEnvironment.RootURI(), onEnvironment.HTTPClient)
if err := pro.CreateProject(onEnvironment.TestingProject, false); err != nil {
report.Failed("CreateProject", err)
@@ -45,21 +45,21 @@ func (ccs *ConcourseCiSuite02) Run(onEnvironment *envs.Environment) *lib.Report
report.Passed("CreateProject")
}
- //s2
+ // s2
if err := pro.AssignRole(onEnvironment.TestingProject, onEnvironment.Account); err != nil {
report.Failed("AssignRole", err)
} else {
report.Passed("AssignRole")
}
- //s3
+ // s3
if err := ccs.PushImage(onEnvironment); err != nil {
report.Failed("pushImage", err)
} else {
report.Passed("pushImage")
}
- //s4
+ // s4
img := lib.NewImageUtil(onEnvironment.RootURI(), onEnvironment.HTTPClient)
repoName := fmt.Sprintf("%s/%s", onEnvironment.TestingProject, onEnvironment.ImageName)
if err := img.ScanTag(repoName, onEnvironment.ImageTag); err != nil {
@@ -68,35 +68,35 @@ func (ccs *ConcourseCiSuite02) Run(onEnvironment *envs.Environment) *lib.Report
report.Passed("ScanTag")
}
- //s5
+ // s5
if err := ccs.PullImage(onEnvironment); err != nil {
report.Failed("pullImage[1]", err)
} else {
report.Passed("pullImage[1]")
}
- //s6
+ // s6
if err := pro.RevokeRole(onEnvironment.TestingProject, onEnvironment.Account); err != nil {
report.Failed("RevokeRole", err)
} else {
report.Passed("RevokeRole")
}
- //s7
+ // s7
if err := ccs.PullImage(onEnvironment); err == nil {
report.Failed("pullImage[2]", err)
} else {
report.Passed("pullImage[2]")
}
- //s8
+ // s8
if err := img.DeleteRepo(repoName); err != nil {
report.Failed("DeleteRepo", err)
} else {
report.Passed("DeleteRepo")
}
- //s9
+ // s9
if err := pro.DeleteProject(onEnvironment.TestingProject); err != nil {
report.Failed("DeleteProject", err)
} else {
diff --git a/tests/apitests/apilib/access_log.go b/src/testing/apitests/apilib/access_log.go
similarity index 100%
rename from tests/apitests/apilib/access_log.go
rename to src/testing/apitests/apilib/access_log.go
diff --git a/tests/apitests/apilib/access_log_filter.go b/src/testing/apitests/apilib/access_log_filter.go
similarity index 100%
rename from tests/apitests/apilib/access_log_filter.go
rename to src/testing/apitests/apilib/access_log_filter.go
diff --git a/tests/apitests/apilib/admin_job.go b/src/testing/apitests/apilib/admin_job.go
similarity index 100%
rename from tests/apitests/apilib/admin_job.go
rename to src/testing/apitests/apilib/admin_job.go
diff --git a/tests/apitests/apilib/admin_job_req.go b/src/testing/apitests/apilib/admin_job_req.go
similarity index 100%
rename from tests/apitests/apilib/admin_job_req.go
rename to src/testing/apitests/apilib/admin_job_req.go
diff --git a/src/testing/apitests/apilib/harborapi.go b/src/testing/apitests/apilib/harborapi.go
new file mode 100644
index 000000000..93337e954
--- /dev/null
+++ b/src/testing/apitests/apilib/harborapi.go
@@ -0,0 +1,224 @@
+// Package apilib provides services for manipulating Harbor projects.
+package apilib
+
+import (
+ "encoding/json"
+ // "fmt"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/dghubble/sling"
+)
+
+// HarborAPI is a minimal client for the Harbor REST API used by the tests.
+type HarborAPI struct {
+ basePath string
+}
+
+// NewHarborAPI returns a client that talks to http://localhost.
+func NewHarborAPI() *HarborAPI {
+ return &HarborAPI{
+ basePath: "http://localhost",
+ }
+}
+
+// NewHarborAPIWithBasePath returns a client that talks to the given base path.
+func NewHarborAPIWithBasePath(basePath string) *HarborAPI {
+ return &HarborAPI{
+ basePath: basePath,
+ }
+}
+
+// UsrInfo holds the credentials used for basic auth on API requests.
+type UsrInfo struct {
+ Name string
+ Passwd string
+}
+
+// Search for projects and repositories
+// Implementation Notes
+// The Search endpoint returns information about the projects and repositories
+// that are public or related to the currently logged-in user.
+// The response includes the project and repository list in a proper display order.
+// @param q Search parameter for project and repository name.
+// @return []Search
+// func (a HarborAPI) SearchGet (q string) (Search, error) {
+func (a HarborAPI) SearchGet(q string) (Search, error) {
+
+ _sling := sling.New().Get(a.basePath)
+
+ // create path and map variables
+ path := "/api/search"
+
+ _sling = _sling.Path(path)
+
+ type QueryParams struct {
+ Query string `url:"q,omitempty"`
+ }
+
+ _sling = _sling.QueryStruct(&QueryParams{Query: q})
+
+ // accept header
+ accepts := []string{"application/json", "text/plain"}
+ for key := range accepts {
+ _sling = _sling.Set("Accept", accepts[key])
+ break // only use the first Accept
+ }
+
+ req, err := _sling.Request()
+ if err != nil {
+ return Search{}, err
+ }
+
+ client := &http.Client{}
+ httpResponse, err := client.Do(req)
+ if err != nil {
+ return Search{}, err
+ }
+ // close the body only after checking err; httpResponse is nil on failure
+ defer httpResponse.Body.Close()
+
+ body, err := ioutil.ReadAll(httpResponse.Body)
+ if err != nil {
+ return Search{}, err
+ }
+
+ var successPayload = new(Search)
+ err = json.Unmarshal(body, successPayload)
+ return *successPayload, err
+}
+
+// Create a new project.
+// Implementation Notes
+// This endpoint is for user to create a new project.
+// @param project New created project.
+// @return void
+// func (a HarborAPI) ProjectsPost (prjUsr UsrInfo, project Project) (int, error) {
+func (a HarborAPI) ProjectsPost(prjUsr UsrInfo, project Project) (int, error) {
+
+ _sling := sling.New().Post(a.basePath)
+
+ // create path and map variables
+ path := "/api/projects"
+
+ _sling = _sling.Path(path)
+
+ // accept header
+ accepts := []string{"application/json", "text/plain"}
+ for key := range accepts {
+ _sling = _sling.Set("Accept", accepts[key])
+ break // only use the first Accept
+ }
+
+ // body params
+ _sling = _sling.BodyJSON(project)
+
+ req, err := _sling.Request()
+ if err != nil {
+ return 0, err
+ }
+ req.SetBasicAuth(prjUsr.Name, prjUsr.Passwd)
+
+ client := &http.Client{}
+ httpResponse, err := client.Do(req)
+ if err != nil {
+ return 0, err
+ }
+ defer httpResponse.Body.Close()
+
+ return httpResponse.StatusCode, nil
+}
+
+// RepositoriesDelete deletes a repository or a tag in a repository.
+// This endpoint lets the user delete repositories and tags by repo name and tag.
+// @param repoName The name of repository which will be deleted.
+// @param tag Tag of a repository.
+// @return void
+// func (a HarborAPI) RepositoriesDelete(prjUsr UsrInfo, repoName string, tag string) (int, error) {
+func (a HarborAPI) RepositoriesDelete(prjUsr UsrInfo, repoName string, tag string) (int, error) {
+ _sling := sling.New().Delete(a.basePath)
+
+ // create path and map variables
+ path := "/api/repositories"
+
+ _sling = _sling.Path(path)
+
+ type QueryParams struct {
+ RepoName string `url:"repo_name,omitempty"`
+ Tag string `url:"tag,omitempty"`
+ }
+
+ _sling = _sling.QueryStruct(&QueryParams{RepoName: repoName, Tag: tag})
+ // accept header
+ accepts := []string{"application/json", "text/plain"}
+ for key := range accepts {
+ _sling = _sling.Set("Accept", accepts[key])
+ break // only use the first Accept
+ }
+
+ req, err := _sling.Request()
+ if err != nil {
+ return 0, err
+ }
+ req.SetBasicAuth(prjUsr.Name, prjUsr.Passwd)
+ // fmt.Printf("request %+v", req)
+
+ client := &http.Client{}
+ httpResponse, err := client.Do(req)
+ if err != nil {
+ return 0, err
+ }
+ defer httpResponse.Body.Close()
+
+ return httpResponse.StatusCode, nil
+}
+
+// Return projects created by Harbor
+// func (a HarborApi) ProjectsGet (projectName string, isPublic int32) ([]Project, error) {
+// }
+
+// Check if the project name user provided already exists.
+// func (a HarborApi) ProjectsHead (projectName string) (error) {
+// }
+
+// Get access logs associated with a relevant project.
+// func (a HarborApi) ProjectsProjectIdLogsFilterPost (projectId int32, accessLog AccessLog) ([]AccessLog, error) {
+// }
+
+// Return a project's relevant role members.
+// func (a HarborApi) ProjectsProjectIdMembersGet (projectId int32) ([]Role, error) {
+// }
+
+// Add a project role member for the relevant project and user.
+// func (a HarborApi) ProjectsProjectIdMembersPost (projectId int32, roles RoleParam) (error) {
+// }
+
+// Delete project role members for the relevant project and user.
+// func (a HarborApi) ProjectsProjectIdMembersUserIdDelete (projectId int32, userId int32) (error) {
+// }
+
+// Return role members for the relevant project and user.
+// func (a HarborApi) ProjectsProjectIdMembersUserIdGet (projectId int32, userId int32) ([]Role, error) {
+// }
+
+// Update project role members accompany with relevant project and user.
+// func (a HarborApi) ProjectsProjectIdMembersUserIdPut (projectId int32, userId int32, roles RoleParam) (error) {
+// }
+
+// Update properties for a selected project.
+// func (a HarborApi) ProjectsProjectIdPut (projectId int32, project Project) (error) {
+// }
+
+// Get repositories for the relevant project and repo name.
+// func (a HarborApi) RepositoriesGet (projectId int32, q string) ([]Repository, error) {
+// }
+
+// Get manifests of a relevant repository.
+// func (a HarborApi) RepositoriesManifestGet (repoName string, tag string) (error) {
+// }
+
+// Get tags of a relevant repository.
+// func (a HarborApi) RepositoriesTagsGet (repoName string) (error) {
+// }
+
+// Get registered users of Harbor.
+// func (a HarborApi) UsersGet (userName string) ([]User, error) {
+// }
+
+// Creates a new user account.
+// func (a HarborApi) UsersPost (user User) (error) {
+// }
+
+// Mark a registered user as removed.
+// func (a HarborApi) UsersUserIdDelete (userId int32) (error) {
+// }
+
+// Change the password on a user that already exists.
+// func (a HarborApi) UsersUserIdPasswordPut (userId int32, password Password) (error) {
+// }
+
+// Update a registered user to become an administrator of Harbor.
+// func (a HarborApi) UsersUserIdPut (userId int32) (error) {
+// }
diff --git a/tests/apitests/apilib/harborlogout.bak b/src/testing/apitests/apilib/harborlogout.bak
similarity index 100%
rename from tests/apitests/apilib/harborlogout.bak
rename to src/testing/apitests/apilib/harborlogout.bak
diff --git a/tests/apitests/apilib/harborlogout.go b/src/testing/apitests/apilib/harborlogout.go
similarity index 100%
rename from tests/apitests/apilib/harborlogout.go
rename to src/testing/apitests/apilib/harborlogout.go
diff --git a/tests/apitests/apilib/harlogin.bak b/src/testing/apitests/apilib/harlogin.bak
similarity index 100%
rename from tests/apitests/apilib/harlogin.bak
rename to src/testing/apitests/apilib/harlogin.bak
diff --git a/tests/apitests/apilib/harlogin.go b/src/testing/apitests/apilib/harlogin.go
similarity index 73%
rename from tests/apitests/apilib/harlogin.go
rename to src/testing/apitests/apilib/harlogin.go
index 1b9117ecd..0870825b7 100644
--- a/tests/apitests/apilib/harlogin.go
+++ b/src/testing/apitests/apilib/harlogin.go
@@ -14,15 +14,15 @@ func (a HarborAPI) HarborLogin(user UsrInfo) (int, error) {
v.Set("principal", user.Name)
v.Set("password", user.Passwd)
- body := ioutil.NopCloser(strings.NewReader(v.Encode())) //endode v:[body struce]
+	body := ioutil.NopCloser(strings.NewReader(v.Encode())) // encode v as the request body
client := &http.Client{}
reqest, err := http.NewRequest("POST", a.basePath+"/login", body)
- reqest.Header.Set("Content-Type", "application/x-www-form-urlencoded;param=value") //setting post head
+	reqest.Header.Set("Content-Type", "application/x-www-form-urlencoded;param=value") // set the POST header
resp, err := client.Do(reqest)
- defer resp.Body.Close() //close resp.Body
+ defer resp.Body.Close() // close resp.Body
return resp.StatusCode, err
}
diff --git a/tests/apitests/apilib/job_status.go b/src/testing/apitests/apilib/job_status.go
similarity index 100%
rename from tests/apitests/apilib/job_status.go
rename to src/testing/apitests/apilib/job_status.go
diff --git a/tests/apitests/apilib/ldap.go b/src/testing/apitests/apilib/ldap.go
similarity index 100%
rename from tests/apitests/apilib/ldap.go
rename to src/testing/apitests/apilib/ldap.go
diff --git a/tests/apitests/apilib/password.go b/src/testing/apitests/apilib/password.go
similarity index 100%
rename from tests/apitests/apilib/password.go
rename to src/testing/apitests/apilib/password.go
diff --git a/tests/apitests/apilib/project.go b/src/testing/apitests/apilib/project.go
similarity index 83%
rename from tests/apitests/apilib/project.go
rename to src/testing/apitests/apilib/project.go
index d4d7cd398..13dcdfaf7 100644
--- a/tests/apitests/apilib/project.go
+++ b/src/testing/apitests/apilib/project.go
@@ -67,3 +67,19 @@ type ProjectQuery struct {
Page int64 `url:"page,omitempty"`
PageSize int64 `url:"page_size,omitempty"`
}
+
+// ProjectSummary ...
+type ProjectSummary struct {
+ RepoCount int64 `json:"repo_count"`
+ ChartCount uint64 `json:"chart_count"`
+
+ ProjectAdminCount int64 `json:"project_admin_count"`
+ MasterCount int64 `json:"master_count"`
+ DeveloperCount int64 `json:"developer_count"`
+ GuestCount int64 `json:"guest_count"`
+
+ Quota struct {
+ Hard map[string]int64 `json:"hard"`
+ Used map[string]int64 `json:"used"`
+ } `json:"quota"`
+}
diff --git a/tests/apitests/apilib/project_req.go b/src/testing/apitests/apilib/project_req.go
similarity index 84%
rename from tests/apitests/apilib/project_req.go
rename to src/testing/apitests/apilib/project_req.go
index 91ce76e4e..774fd36c7 100644
--- a/tests/apitests/apilib/project_req.go
+++ b/src/testing/apitests/apilib/project_req.go
@@ -27,4 +27,8 @@ type ProjectReq struct {
ProjectName string `json:"project_name,omitempty"`
// The metadata of the project.
Metadata map[string]string `json:"metadata,omitempty"`
+ // The count quota of the project.
+ CountLimit *int64 `json:"count_limit,omitempty"`
+	// The storage quota of the project.
+ StorageLimit *int64 `json:"storage_limit,omitempty"`
}
diff --git a/src/testing/apitests/apilib/quota.go b/src/testing/apitests/apilib/quota.go
new file mode 100644
index 000000000..288fb7918
--- /dev/null
+++ b/src/testing/apitests/apilib/quota.go
@@ -0,0 +1,39 @@
+/*
+ * Harbor API
+ *
+ * These APIs provide services for manipulating Harbor projects.
+ *
+ * OpenAPI spec version: 0.3.0
+ *
+ * Generated by: https://github.com/swagger-api/swagger-codegen.git
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package apilib
+
+// QuotaQuery query for quota
+type QuotaQuery struct {
+ Reference string `url:"reference,omitempty"`
+ ReferenceID string `url:"reference_id,omitempty"`
+ Page int64 `url:"page,omitempty"`
+ PageSize int64 `url:"page_size,omitempty"`
+}
+
+// Quota ...
+type Quota struct {
+ ID int `json:"id"`
+ Ref map[string]interface{} `json:"ref"`
+ Hard map[string]int64 `json:"hard"`
+ Used map[string]int64 `json:"used"`
+}
diff --git a/tests/apitests/apilib/rep_policy.go b/src/testing/apitests/apilib/rep_policy.go
similarity index 100%
rename from tests/apitests/apilib/rep_policy.go
rename to src/testing/apitests/apilib/rep_policy.go
diff --git a/tests/apitests/apilib/rep_policy_enablement_req.go b/src/testing/apitests/apilib/rep_policy_enablement_req.go
similarity index 100%
rename from tests/apitests/apilib/rep_policy_enablement_req.go
rename to src/testing/apitests/apilib/rep_policy_enablement_req.go
diff --git a/tests/apitests/apilib/rep_policy_post.go b/src/testing/apitests/apilib/rep_policy_post.go
similarity index 100%
rename from tests/apitests/apilib/rep_policy_post.go
rename to src/testing/apitests/apilib/rep_policy_post.go
diff --git a/tests/apitests/apilib/rep_policy_update.go b/src/testing/apitests/apilib/rep_policy_update.go
similarity index 100%
rename from tests/apitests/apilib/rep_policy_update.go
rename to src/testing/apitests/apilib/rep_policy_update.go
diff --git a/tests/apitests/apilib/rep_target.go b/src/testing/apitests/apilib/rep_target.go
similarity index 100%
rename from tests/apitests/apilib/rep_target.go
rename to src/testing/apitests/apilib/rep_target.go
diff --git a/tests/apitests/apilib/rep_target_post.go b/src/testing/apitests/apilib/rep_target_post.go
similarity index 100%
rename from tests/apitests/apilib/rep_target_post.go
rename to src/testing/apitests/apilib/rep_target_post.go
diff --git a/tests/apitests/apilib/repository.go b/src/testing/apitests/apilib/repository.go
similarity index 100%
rename from tests/apitests/apilib/repository.go
rename to src/testing/apitests/apilib/repository.go
diff --git a/tests/apitests/apilib/role.go b/src/testing/apitests/apilib/role.go
similarity index 100%
rename from tests/apitests/apilib/role.go
rename to src/testing/apitests/apilib/role.go
diff --git a/tests/apitests/apilib/role_param.go b/src/testing/apitests/apilib/role_param.go
similarity index 100%
rename from tests/apitests/apilib/role_param.go
rename to src/testing/apitests/apilib/role_param.go
diff --git a/tests/apitests/apilib/search.go b/src/testing/apitests/apilib/search.go
similarity index 100%
rename from tests/apitests/apilib/search.go
rename to src/testing/apitests/apilib/search.go
diff --git a/tests/apitests/apilib/search_project.go b/src/testing/apitests/apilib/search_project.go
similarity index 100%
rename from tests/apitests/apilib/search_project.go
rename to src/testing/apitests/apilib/search_project.go
diff --git a/tests/apitests/apilib/search_repository.go b/src/testing/apitests/apilib/search_repository.go
similarity index 100%
rename from tests/apitests/apilib/search_repository.go
rename to src/testing/apitests/apilib/search_repository.go
diff --git a/tests/apitests/apilib/statistic_map.go b/src/testing/apitests/apilib/statistic_map.go
similarity index 100%
rename from tests/apitests/apilib/statistic_map.go
rename to src/testing/apitests/apilib/statistic_map.go
diff --git a/tests/apitests/apilib/system_info.go b/src/testing/apitests/apilib/system_info.go
similarity index 100%
rename from tests/apitests/apilib/system_info.go
rename to src/testing/apitests/apilib/system_info.go
diff --git a/tests/apitests/apilib/tags.go b/src/testing/apitests/apilib/tags.go
similarity index 100%
rename from tests/apitests/apilib/tags.go
rename to src/testing/apitests/apilib/tags.go
diff --git a/tests/apitests/apilib/top_repo.go b/src/testing/apitests/apilib/top_repo.go
similarity index 100%
rename from tests/apitests/apilib/top_repo.go
rename to src/testing/apitests/apilib/top_repo.go
diff --git a/tests/apitests/apilib/user.go b/src/testing/apitests/apilib/user.go
similarity index 100%
rename from tests/apitests/apilib/user.go
rename to src/testing/apitests/apilib/user.go
diff --git a/tests/apitests/apilib/user_profile.go b/src/testing/apitests/apilib/user_profile.go
similarity index 100%
rename from tests/apitests/apilib/user_profile.go
rename to src/testing/apitests/apilib/user_profile.go
diff --git a/src/testing/clients/dumb_core_client.go b/src/testing/clients/dumb_core_client.go
new file mode 100644
index 000000000..a0a27129b
--- /dev/null
+++ b/src/testing/clients/dumb_core_client.go
@@ -0,0 +1,54 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clients
+
+import (
+ "github.com/goharbor/harbor/src/chartserver"
+ "github.com/goharbor/harbor/src/common/models"
+)
+
+// DumbCoreClient provides an empty implementation of pkg/clients/core.Client.
+// It is only used for testing.
+type DumbCoreClient struct{}
+
+// ListAllImages ...
+func (d *DumbCoreClient) ListAllImages(project, repository string) ([]*models.TagResp, error) {
+ return nil, nil
+}
+
+// DeleteImage ...
+func (d *DumbCoreClient) DeleteImage(project, repository, tag string) error {
+ return nil
+}
+
+// DeleteImageRepository ...
+func (d *DumbCoreClient) DeleteImageRepository(project, repository string) error {
+ return nil
+}
+
+// ListAllCharts ...
+func (d *DumbCoreClient) ListAllCharts(project, repository string) ([]*chartserver.ChartVersion, error) {
+ return nil, nil
+}
+
+// DeleteChart ...
+func (d *DumbCoreClient) DeleteChart(project, repository, version string) error {
+ return nil
+}
+
+// DeleteChartRepository ...
+func (d *DumbCoreClient) DeleteChartRepository(project, repository string) error {
+ return nil
+}
diff --git a/src/testing/job/mock_client.go b/src/testing/job/mock_client.go
index ef43f301c..efda03fc7 100644
--- a/src/testing/job/mock_client.go
+++ b/src/testing/job/mock_client.go
@@ -5,8 +5,8 @@ import (
"math/rand"
"github.com/goharbor/harbor/src/common/http"
- "github.com/goharbor/harbor/src/common/job"
"github.com/goharbor/harbor/src/common/job/models"
+ "github.com/goharbor/harbor/src/jobservice/job"
)
// MockJobClient ...
@@ -27,12 +27,9 @@ func (mjc *MockJobClient) GetJobLog(uuid string) ([]byte, error) {
// SubmitJob ...
func (mjc *MockJobClient) SubmitJob(data *models.JobData) (string, error) {
- if data.Name == job.ImageScanAllJob || data.Name == job.Replication || data.Name == job.ImageGC || data.Name == job.ImageScanJob {
- uuid := fmt.Sprintf("u-%d", rand.Int())
- mjc.JobUUID = append(mjc.JobUUID, uuid)
- return uuid, nil
- }
- return "", fmt.Errorf("unsupported job %s", data.Name)
+ uuid := fmt.Sprintf("u-%d", rand.Int())
+ mjc.JobUUID = append(mjc.JobUUID, uuid)
+ return uuid, nil
}
// PostAction ...
@@ -46,6 +43,11 @@ func (mjc *MockJobClient) PostAction(uuid, action string) error {
return nil
}
+// GetExecutions ...
+func (mjc *MockJobClient) GetExecutions(uuid string) ([]job.Stats, error) {
+ return nil, nil
+}
+
func (mjc *MockJobClient) validUUID(uuid string) bool {
for _, u := range mjc.JobUUID {
if uuid == u {
diff --git a/src/testing/scheduler.go b/src/testing/scheduler.go
new file mode 100644
index 000000000..c7e502e8e
--- /dev/null
+++ b/src/testing/scheduler.go
@@ -0,0 +1,77 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testing
+
+import (
+ "fmt"
+
+ "github.com/goharbor/harbor/src/pkg/scheduler/model"
+)
+
+// FakeSchedulerManager ...
+type FakeSchedulerManager struct {
+ idCounter int64
+ Schedules []*model.Schedule
+}
+
+// Create ...
+func (f *FakeSchedulerManager) Create(schedule *model.Schedule) (int64, error) {
+ f.idCounter++
+ id := f.idCounter
+ schedule.ID = id
+ f.Schedules = append(f.Schedules, schedule)
+ return id, nil
+}
+
+// Update ...
+func (f *FakeSchedulerManager) Update(schedule *model.Schedule, props ...string) error {
+ for i, sch := range f.Schedules {
+ if sch.ID == schedule.ID {
+ f.Schedules[i] = schedule
+ return nil
+ }
+ }
+	return fmt.Errorf("the schedule %d not found", schedule.ID)
+}
+
+// Delete removes the schedule with the given ID.
+func (f *FakeSchedulerManager) Delete(id int64) error {
+	for i, sch := range f.Schedules {
+		if sch.ID == id {
+			// both slice expressions are evaluated against the original
+			// slice, so this in-place removal is safe for any index
+			f.Schedules = append(f.Schedules[:i], f.Schedules[i+1:]...)
+			return nil
+		}
+	}
+	return fmt.Errorf("the schedule %d not found", id)
+}
+
+// Get ...
+func (f *FakeSchedulerManager) Get(id int64) (*model.Schedule, error) {
+ for _, sch := range f.Schedules {
+ if sch.ID == id {
+ return sch, nil
+ }
+ }
+	return nil, fmt.Errorf("the schedule %d not found", id)
+}
+
+// List ...
+func (f *FakeSchedulerManager) List(...*model.ScheduleQuery) ([]*model.Schedule, error) {
+ return f.Schedules, nil
+}
diff --git a/src/testing/suite.go b/src/testing/suite.go
new file mode 100644
index 000000000..679de182a
--- /dev/null
+++ b/src/testing/suite.go
@@ -0,0 +1,93 @@
+// Copyright Project Harbor Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package testing
+
+import (
+ "fmt"
+ "math/rand"
+ "strconv"
+ "time"
+
+ "github.com/goharbor/harbor/src/common/dao"
+ "github.com/goharbor/harbor/src/common/models"
+ "github.com/goharbor/harbor/src/core/config"
+ "github.com/goharbor/harbor/src/pkg/types"
+ "github.com/stretchr/testify/suite"
+)
+
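+// seed math/rand once so RandString produces different names on each run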
+func init() {
+ rand.Seed(time.Now().UnixNano())
+}
+
+// Suite ...
+type Suite struct {
+ suite.Suite
+}
+
+// SetupSuite ...
+func (suite *Suite) SetupSuite() {
+ config.Init()
+ dao.PrepareTestForPostgresSQL()
+}
+
+// RandString returns a random string of length n, drawn from the given letters or from lowercase a-z by default.
+func (suite *Suite) RandString(n int, letters ...string) string {
+ if len(letters) == 0 || len(letters[0]) == 0 {
+ letters = []string{"abcdefghijklmnopqrstuvwxyz"}
+ }
+
+ letterBytes := []byte(letters[0])
+
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = letterBytes[rand.Intn(len(letterBytes))]
+ }
+ return string(b)
+}
+
+// WithProject creates a project (with the given or a random name), calls f with its ID and name, and deletes the project when f returns.
+func (suite *Suite) WithProject(f func(int64, string), projectNames ...string) {
+ var projectName string
+ if len(projectNames) > 0 {
+ projectName = projectNames[0]
+ } else {
+ projectName = suite.RandString(5)
+ }
+
+ projectID, err := dao.AddProject(models.Project{
+ Name: projectName,
+ OwnerID: 1,
+ })
+ if err != nil {
+ panic(err)
+ }
+
+ defer func() {
+ dao.DeleteProject(projectID)
+ }()
+
+ f(projectID, projectName)
+}
+
+// AssertResourceUsage checks that the project's recorded usage of the resource equals the expected value.
+func (suite *Suite) AssertResourceUsage(expected int64, resource types.ResourceName, projectID int64) {
+ usage := models.QuotaUsage{Reference: "project", ReferenceID: strconv.FormatInt(projectID, 10)}
+ err := dao.GetOrmer().Read(&usage, "reference", "reference_id")
+ suite.Nil(err, fmt.Sprintf("Failed to get resource %s usage of project %d, error: %v", resource, projectID, err))
+
+ used, err := types.NewResourceList(usage.Used)
+ suite.Nil(err, "Bad resource usage of project %d", projectID)
+ suite.Equal(expected, used[resource])
+}
diff --git a/src/vendor/github.com/BurntSushi/toml/COPYING b/src/vendor/github.com/BurntSushi/toml/COPYING
index 5a8e33254..01b574320 100644
--- a/src/vendor/github.com/BurntSushi/toml/COPYING
+++ b/src/vendor/github.com/BurntSushi/toml/COPYING
@@ -1,14 +1,21 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
+The MIT License (MIT)
- Copyright (C) 2004 Sam Hocevar
+Copyright (c) 2013 TOML authors
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/src/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/src/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
deleted file mode 100644
index 5a8e33254..000000000
--- a/src/vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/src/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/src/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
deleted file mode 100644
index 5a8e33254..000000000
--- a/src/vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/src/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/src/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING
deleted file mode 100644
index 5a8e33254..000000000
--- a/src/vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING
+++ /dev/null
@@ -1,14 +0,0 @@
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- Version 2, December 2004
-
- Copyright (C) 2004 Sam Hocevar
-
- Everyone is permitted to copy and distribute verbatim or modified
- copies of this license document, and changing it is allowed as long
- as the name is changed.
-
- DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. You just DO WHAT THE FUCK YOU WANT TO.
-
diff --git a/src/vendor/github.com/BurntSushi/toml/lex.go b/src/vendor/github.com/BurntSushi/toml/lex.go
index 6dee7fc79..e0a742a88 100644
--- a/src/vendor/github.com/BurntSushi/toml/lex.go
+++ b/src/vendor/github.com/BurntSushi/toml/lex.go
@@ -775,7 +775,7 @@ func lexDatetime(lx *lexer) stateFn {
return lexDatetime
}
switch r {
- case '-', 'T', ':', '.', 'Z':
+ case '-', 'T', ':', '.', 'Z', '+':
return lexDatetime
}
diff --git a/src/vendor/github.com/Knetic/govaluate/test.sh b/src/vendor/github.com/Knetic/govaluate/test.sh
old mode 100755
new mode 100644
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/LICENSE b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/api_timeout.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/api_timeout.go
new file mode 100644
index 000000000..d7968dab7
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/api_timeout.go
@@ -0,0 +1,249 @@
+package sdk
+
+import (
+ "encoding/json"
+ "strings"
+ "time"
+)
+
+var apiTimeouts = `{
+ "ecs": {
+ "ActivateRouterInterface": 10,
+ "AddTags": 61,
+ "AllocateDedicatedHosts": 10,
+ "AllocateEipAddress": 17,
+ "AllocatePublicIpAddress": 36,
+ "ApplyAutoSnapshotPolicy": 10,
+ "AssignIpv6Addresses": 10,
+ "AssignPrivateIpAddresses": 10,
+ "AssociateEipAddress": 17,
+ "AttachClassicLinkVpc": 14,
+ "AttachDisk": 36,
+ "AttachInstanceRamRole": 11,
+ "AttachKeyPair": 16,
+ "AttachNetworkInterface": 16,
+ "AuthorizeSecurityGroupEgress": 16,
+ "AuthorizeSecurityGroup": 16,
+ "CancelAutoSnapshotPolicy": 10,
+ "CancelCopyImage": 10,
+ "CancelPhysicalConnection": 10,
+ "CancelSimulatedSystemEvents": 10,
+ "CancelTask": 10,
+ "ConnectRouterInterface": 10,
+ "ConvertNatPublicIpToEip": 12,
+ "CopyImage": 10,
+ "CreateAutoSnapshotPolicy": 10,
+ "CreateCommand": 16,
+ "CreateDeploymentSet": 16,
+ "CreateDisk": 36,
+ "CreateHpcCluster": 10,
+ "CreateImage": 36,
+ "CreateInstance": 86,
+ "CreateKeyPair": 10,
+ "CreateLaunchTemplate": 10,
+ "CreateLaunchTemplateVersion": 10,
+ "CreateNatGateway": 36,
+ "CreateNetworkInterfacePermission": 13,
+ "CreateNetworkInterface": 16,
+ "CreatePhysicalConnection": 10,
+ "CreateRouteEntry": 17,
+ "CreateRouterInterface": 10,
+ "CreateSecurityGroup": 86,
+ "CreateSimulatedSystemEvents": 10,
+ "CreateSnapshot": 86,
+ "CreateVirtualBorderRouter": 10,
+ "CreateVpc": 16,
+ "CreateVSwitch": 17,
+ "DeactivateRouterInterface": 10,
+ "DeleteAutoSnapshotPolicy": 10,
+ "DeleteBandwidthPackage": 10,
+ "DeleteCommand": 16,
+ "DeleteDeploymentSet": 12,
+ "DeleteDisk": 16,
+ "DeleteHpcCluster": 10,
+ "DeleteImage": 36,
+ "DeleteInstance": 66,
+ "DeleteKeyPairs": 10,
+ "DeleteLaunchTemplate": 10,
+ "DeleteLaunchTemplateVersion": 10,
+ "DeleteNatGateway": 10,
+ "DeleteNetworkInterfacePermission": 10,
+ "DeleteNetworkInterface": 16,
+ "DeletePhysicalConnection": 10,
+ "DeleteRouteEntry": 16,
+ "DeleteRouterInterface": 10,
+ "DeleteSecurityGroup": 87,
+ "DeleteSnapshot": 17,
+ "DeleteVirtualBorderRouter": 10,
+ "DeleteVpc": 17,
+ "DeleteVSwitch": 17,
+ "DescribeAccessPoints": 10,
+ "DescribeAccountAttributes": 10,
+ "DescribeAutoSnapshotPolicyEx": 16,
+ "DescribeAvailableResource": 10,
+ "DescribeBandwidthLimitation": 16,
+ "DescribeBandwidthPackages": 10,
+ "DescribeClassicLinkInstances": 15,
+ "DescribeCloudAssistantStatus": 16,
+ "DescribeClusters": 10,
+ "DescribeCommands": 16,
+ "DescribeDedicatedHosts": 10,
+ "DescribeDedicatedHostTypes": 10,
+ "DescribeDeploymentSets": 26,
+ "DescribeDiskMonitorData": 16,
+ "DescribeDisksFullStatus": 14,
+ "DescribeDisks": 19,
+ "DescribeEipAddresses": 16,
+ "DescribeEipMonitorData": 16,
+ "DescribeEniMonitorData": 10,
+ "DescribeHaVips": 10,
+ "DescribeHpcClusters": 16,
+ "DescribeImageSharePermission": 10,
+ "DescribeImages": 38,
+ "DescribeImageSupportInstanceTypes": 16,
+ "DescribeInstanceAttribute": 36,
+ "DescribeInstanceAutoRenewAttribute": 17,
+ "DescribeInstanceHistoryEvents": 19,
+ "DescribeInstanceMonitorData": 19,
+ "DescribeInstancePhysicalAttribute": 10,
+ "DescribeInstanceRamRole": 11,
+ "DescribeInstancesFullStatus": 14,
+ "DescribeInstances": 10,
+ "DescribeInstanceStatus": 26,
+ "DescribeInstanceTopology": 12,
+ "DescribeInstanceTypeFamilies": 17,
+ "DescribeInstanceTypes": 17,
+ "DescribeInstanceVncPasswd": 10,
+ "DescribeInstanceVncUrl": 36,
+ "DescribeInvocationResults": 16,
+ "DescribeInvocations": 16,
+ "DescribeKeyPairs": 12,
+ "DescribeLaunchTemplates": 16,
+ "DescribeLaunchTemplateVersions": 16,
+ "DescribeLimitation": 36,
+ "DescribeNatGateways": 10,
+ "DescribeNetworkInterfacePermissions": 13,
+ "DescribeNetworkInterfaces": 16,
+ "DescribeNewProjectEipMonitorData": 16,
+ "DescribePhysicalConnections": 10,
+ "DescribePrice": 16,
+ "DescribeRecommendInstanceType": 10,
+ "DescribeRegions": 19,
+ "DescribeRenewalPrice": 16,
+ "DescribeResourceByTags": 10,
+ "DescribeResourcesModification": 17,
+ "DescribeRouterInterfaces": 10,
+ "DescribeRouteTables": 17,
+ "DescribeSecurityGroupAttribute": 133,
+ "DescribeSecurityGroupReferences": 16,
+ "DescribeSecurityGroups": 25,
+ "DescribeSnapshotLinks": 17,
+ "DescribeSnapshotMonitorData": 12,
+ "DescribeSnapshotPackage": 10,
+ "DescribeSnapshots": 26,
+ "DescribeSnapshotsUsage": 26,
+ "DescribeSpotPriceHistory": 22,
+ "DescribeTags": 17,
+ "DescribeTaskAttribute": 10,
+ "DescribeTasks": 11,
+ "DescribeUserBusinessBehavior": 13,
+ "DescribeUserData": 10,
+ "DescribeVirtualBorderRoutersForPhysicalConnection": 10,
+ "DescribeVirtualBorderRouters": 10,
+ "DescribeVpcs": 41,
+ "DescribeVRouters": 17,
+ "DescribeVSwitches": 17,
+ "DescribeZones": 103,
+ "DetachClassicLinkVpc": 14,
+ "DetachDisk": 17,
+ "DetachInstanceRamRole": 10,
+ "DetachKeyPair": 10,
+ "DetachNetworkInterface": 16,
+ "EipFillParams": 19,
+ "EipFillProduct": 13,
+ "EipNotifyPaid": 10,
+ "EnablePhysicalConnection": 10,
+ "ExportImage": 10,
+ "GetInstanceConsoleOutput": 14,
+ "GetInstanceScreenshot": 14,
+ "ImportImage": 29,
+ "ImportKeyPair": 10,
+ "InstallCloudAssistant": 10,
+ "InvokeCommand": 16,
+ "JoinResourceGroup": 10,
+ "JoinSecurityGroup": 66,
+ "LeaveSecurityGroup": 66,
+ "ModifyAutoSnapshotPolicyEx": 10,
+ "ModifyBandwidthPackageSpec": 11,
+ "ModifyCommand": 10,
+ "ModifyDeploymentSetAttribute": 10,
+ "ModifyDiskAttribute": 16,
+ "ModifyDiskChargeType": 13,
+ "ModifyEipAddressAttribute": 14,
+ "ModifyImageAttribute": 10,
+ "ModifyImageSharePermission": 16,
+ "ModifyInstanceAttribute": 22,
+ "ModifyInstanceAutoReleaseTime": 15,
+ "ModifyInstanceAutoRenewAttribute": 16,
+ "ModifyInstanceChargeType": 22,
+ "ModifyInstanceDeployment": 10,
+ "ModifyInstanceNetworkSpec": 36,
+ "ModifyInstanceSpec": 62,
+ "ModifyInstanceVncPasswd": 35,
+ "ModifyInstanceVpcAttribute": 15,
+ "ModifyLaunchTemplateDefaultVersion": 10,
+ "ModifyNetworkInterfaceAttribute": 10,
+ "ModifyPhysicalConnectionAttribute": 10,
+ "ModifyPrepayInstanceSpec": 13,
+ "ModifyRouterInterfaceAttribute": 10,
+ "ModifySecurityGroupAttribute": 10,
+ "ModifySecurityGroupEgressRule": 10,
+ "ModifySecurityGroupPolicy": 10,
+ "ModifySecurityGroupRule": 16,
+ "ModifySnapshotAttribute": 10,
+ "ModifyUserBusinessBehavior": 10,
+ "ModifyVirtualBorderRouterAttribute": 10,
+ "ModifyVpcAttribute": 10,
+ "ModifyVRouterAttribute": 10,
+ "ModifyVSwitchAttribute": 10,
+ "ReActivateInstances": 10,
+ "RebootInstance": 27,
+ "RedeployInstance": 14,
+ "ReInitDisk": 16,
+ "ReleaseDedicatedHost": 10,
+ "ReleaseEipAddress": 16,
+ "ReleasePublicIpAddress": 10,
+ "RemoveTags": 10,
+ "RenewInstance": 19,
+ "ReplaceSystemDisk": 36,
+ "ResetDisk": 36,
+ "ResizeDisk": 11,
+ "RevokeSecurityGroupEgress": 13,
+ "RevokeSecurityGroup": 16,
+ "RunInstances": 86,
+ "StartInstance": 46,
+ "StopInstance": 27,
+ "StopInvocation": 10,
+ "TerminatePhysicalConnection": 10,
+ "TerminateVirtualBorderRouter": 10,
+ "UnassignIpv6Addresses": 10,
+ "UnassignPrivateIpAddresses": 10,
+ "UnassociateEipAddress": 16
+ }
+}
+`
+
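+// getAPIMaxTimeout returns the timeout, in seconds, configured for the given
+// product/action pair in the apiTimeouts table above; the boolean is false
+// when no entry exists.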
+func getAPIMaxTimeout(product, actionName string) (time.Duration, bool) {
+ timeout := make(map[string]map[string]int)
+ err := json.Unmarshal([]byte(apiTimeouts), &timeout)
+ if err != nil {
+ return 0 * time.Millisecond, false
+ }
+
+ obj := timeout[strings.ToLower(product)]
+ if obj != nil && obj[actionName] != 0 {
+ return time.Duration(obj[actionName]) * time.Second, true
+ }
+
+ return 0 * time.Millisecond, false
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credential.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credential.go
new file mode 100644
index 000000000..7f20b7a40
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credential.go
@@ -0,0 +1,18 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package auth
+
+type Credential interface {
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/access_key_credential.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/access_key_credential.go
new file mode 100644
index 000000000..68f822633
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/access_key_credential.go
@@ -0,0 +1,34 @@
+package credentials
+
+// Deprecated: Use AccessKeyCredential in this package instead.
+type BaseCredential struct {
+ AccessKeyId string
+ AccessKeySecret string
+}
+
+type AccessKeyCredential struct {
+ AccessKeyId string
+ AccessKeySecret string
+}
+
+// Deprecated: Use NewAccessKeyCredential in this package instead.
+func NewBaseCredential(accessKeyId, accessKeySecret string) *BaseCredential {
+ return &BaseCredential{
+ AccessKeyId: accessKeyId,
+ AccessKeySecret: accessKeySecret,
+ }
+}
+
+func (baseCred *BaseCredential) ToAccessKeyCredential() *AccessKeyCredential {
+ return &AccessKeyCredential{
+ AccessKeyId: baseCred.AccessKeyId,
+ AccessKeySecret: baseCred.AccessKeySecret,
+ }
+}
+
+func NewAccessKeyCredential(accessKeyId, accessKeySecret string) *AccessKeyCredential {
+ return &AccessKeyCredential{
+ AccessKeyId: accessKeyId,
+ AccessKeySecret: accessKeySecret,
+ }
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/bearer_token_credential.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/bearer_token_credential.go
new file mode 100644
index 000000000..6d4763e66
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/bearer_token_credential.go
@@ -0,0 +1,12 @@
+package credentials
+
+type BearerTokenCredential struct {
+ BearerToken string
+}
+
+// NewBearerTokenCredential returns a BearerTokenCredential object
+func NewBearerTokenCredential(token string) *BearerTokenCredential {
+ return &BearerTokenCredential{
+ BearerToken: token,
+ }
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/ecs_ram_role.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/ecs_ram_role.go
new file mode 100644
index 000000000..55a5c2da0
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/ecs_ram_role.go
@@ -0,0 +1,29 @@
+package credentials
+
+func (oldCred *StsRoleNameOnEcsCredential) ToEcsRamRoleCredential() *EcsRamRoleCredential {
+ return &EcsRamRoleCredential{
+ RoleName: oldCred.RoleName,
+ }
+}
+
+type EcsRamRoleCredential struct {
+ RoleName string
+}
+
+func NewEcsRamRoleCredential(roleName string) *EcsRamRoleCredential {
+ return &EcsRamRoleCredential{
+ RoleName: roleName,
+ }
+}
+
+// Deprecated: Use EcsRamRoleCredential in this package instead.
+type StsRoleNameOnEcsCredential struct {
+ RoleName string
+}
+
+// Deprecated: Use NewEcsRamRoleCredential in this package instead.
+func NewStsRoleNameOnEcsCredential(roleName string) *StsRoleNameOnEcsCredential {
+ return &StsRoleNameOnEcsCredential{
+ RoleName: roleName,
+ }
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/env.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/env.go
new file mode 100644
index 000000000..3cd0d020a
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/env.go
@@ -0,0 +1,30 @@
+package provider
+
+import (
+ "errors"
+ "os"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+)
+
+type EnvProvider struct{}
+
+var ProviderEnv = new(EnvProvider)
+
+func NewEnvProvider() Provider {
+ return &EnvProvider{}
+}
+
+func (p *EnvProvider) Resolve() (auth.Credential, error) {
+ accessKeyID, ok1 := os.LookupEnv(ENVAccessKeyID)
+ accessKeySecret, ok2 := os.LookupEnv(ENVAccessKeySecret)
+ if !ok1 || !ok2 {
+ return nil, nil
+ }
+ if accessKeyID == "" || accessKeySecret == "" {
+		return nil, errors.New("Environment variable (ALIBABA_CLOUD_ACCESS_KEY_ID or ALIBABA_CLOUD_ACCESS_KEY_SECRET) is empty")
+ }
+ return credentials.NewAccessKeyCredential(accessKeyID, accessKeySecret), nil
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/instance_credentials.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/instance_credentials.go
new file mode 100644
index 000000000..1906d21f6
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/instance_credentials.go
@@ -0,0 +1,92 @@
+package provider
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+)
+
+var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/"
+
+type InstanceCredentialsProvider struct{}
+
+var ProviderInstance = new(InstanceCredentialsProvider)
+
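+// HookGet wraps the HTTP getter so tests can substitute a fake; the default hook returns fn unchanged.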
+var HookGet = func(fn func(string) (int, []byte, error)) func(string) (int, []byte, error) {
+ return fn
+}
+
+func NewInstanceCredentialsProvider() Provider {
+ return &InstanceCredentialsProvider{}
+}
+
+func (p *InstanceCredentialsProvider) Resolve() (auth.Credential, error) {
+ roleName, ok := os.LookupEnv(ENVEcsMetadata)
+ if !ok {
+ return nil, nil
+ }
+ if roleName == "" {
+		return nil, errors.New("Environment variable 'ALIBABA_CLOUD_ECS_METADATA' is empty")
+ }
+ status, content, err := HookGet(get)(securityCredURL + roleName)
+ if err != nil {
+ return nil, err
+ }
+ if status != 200 {
+ if status == 404 {
+ return nil, fmt.Errorf("The role was not found in the instance")
+ }
+ return nil, fmt.Errorf("Received %d when getting security credentials for %s", status, roleName)
+ }
+ body := make(map[string]interface{})
+
+ if err := json.Unmarshal(content, &body); err != nil {
+ return nil, err
+ }
+
+ accessKeyID, err := extractString(body, "AccessKeyId")
+ if err != nil {
+ return nil, err
+ }
+ accessKeySecret, err := extractString(body, "AccessKeySecret")
+ if err != nil {
+ return nil, err
+ }
+ securityToken, err := extractString(body, "SecurityToken")
+ if err != nil {
+ return nil, err
+ }
+
+ return credentials.NewStsTokenCredential(accessKeyID, accessKeySecret, securityToken), nil
+}
+
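+// get fetches url and returns the status code and raw body; note that it sets
+// a one-second timeout on the shared http.DefaultClient.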
+func get(url string) (status int, content []byte, err error) {
+ httpClient := http.DefaultClient
+ httpClient.Timeout = time.Second * 1
+ resp, err := httpClient.Get(url)
+ if err != nil {
+ return
+ }
+ defer resp.Body.Close()
+ content, err = ioutil.ReadAll(resp.Body)
+ return resp.StatusCode, content, err
+}
+
+func extractString(m map[string]interface{}, key string) (string, error) {
+ raw, ok := m[key]
+ if !ok {
+ return "", fmt.Errorf("%s not in map", key)
+ }
+ str, ok := raw.(string)
+ if !ok {
+ return "", fmt.Errorf("%s is not a string in map", key)
+ }
+ return str, nil
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/profile_credentials.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/profile_credentials.go
new file mode 100644
index 000000000..8d525c37a
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/profile_credentials.go
@@ -0,0 +1,158 @@
+package provider
+
+import (
+ "bufio"
+ "errors"
+ "os"
+ "runtime"
+ "strings"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+
+ ini "gopkg.in/ini.v1"
+)
+
+type ProfileProvider struct {
+ Profile string
+}
+
+var ProviderProfile = NewProfileProvider()
+
+// NewProfileProvider receives zero or more profile names.
+// When no name is given, the Profile field defaults to "default";
+// when several names are given, only the first one is used and the
+// rest are discarded.
+func NewProfileProvider(name ...string) Provider {
+ p := new(ProfileProvider)
+ if len(name) == 0 {
+ p.Profile = "default"
+ } else {
+ p.Profile = name[0]
+ }
+ return p
+}
+
+// Resolve implements the Provider interface.
+// When the credential type is rsa_key_pair, the content of the private_key
+// file must parse directly into the string that the NewRsaKeyPairCredential
+// function expects.
+func (p *ProfileProvider) Resolve() (auth.Credential, error) {
+	path, ok := os.LookupEnv(ENVCredentialFile)
+	if !ok {
+		// fall back to the default credential file path; assign to the
+		// outer path so the resolved value is not lost to shadowing
+		var err error
+		path, err = checkDefaultPath()
+		if err != nil {
+			return nil, err
+		}
+		if path == "" {
+			return nil, nil
+		}
+	} else if path == "" {
+		return nil, errors.New("Environment variable '" + ENVCredentialFile + "' cannot be empty")
+	}
+
+ ini, err := ini.Load(path)
+ if err != nil {
+		return nil, errors.New("ERROR: Can not open file: " + err.Error())
+ }
+
+ section, err := ini.GetSection(p.Profile)
+ if err != nil {
+		return nil, errors.New("ERROR: Can not load section: " + err.Error())
+ }
+
+ value, err := section.GetKey("type")
+ if err != nil {
+		return nil, errors.New("ERROR: Can not find credential type: " + err.Error())
+ }
+
+ switch value.String() {
+ case "access_key":
+ value1, err1 := section.GetKey("access_key_id")
+ value2, err2 := section.GetKey("access_key_secret")
+ if err1 != nil || err2 != nil {
+ return nil, errors.New("ERROR: Failed to get value")
+ }
+ if value1.String() == "" || value2.String() == "" {
+ return nil, errors.New("ERROR: Value can't be empty")
+ }
+ return credentials.NewAccessKeyCredential(value1.String(), value2.String()), nil
+ case "ecs_ram_role":
+ value1, err1 := section.GetKey("role_name")
+ if err1 != nil {
+ return nil, errors.New("ERROR: Failed to get value")
+ }
+ if value1.String() == "" {
+ return nil, errors.New("ERROR: Value can't be empty")
+ }
+ return credentials.NewEcsRamRoleCredential(value1.String()), nil
+ case "ram_role_arn":
+ value1, err1 := section.GetKey("access_key_id")
+ value2, err2 := section.GetKey("access_key_secret")
+ value3, err3 := section.GetKey("role_arn")
+ value4, err4 := section.GetKey("role_session_name")
+ if err1 != nil || err2 != nil || err3 != nil || err4 != nil {
+ return nil, errors.New("ERROR: Failed to get value")
+ }
+ if value1.String() == "" || value2.String() == "" || value3.String() == "" || value4.String() == "" {
+ return nil, errors.New("ERROR: Value can't be empty")
+ }
+ return credentials.NewRamRoleArnCredential(value1.String(), value2.String(), value3.String(), value4.String(), 3600), nil
+ case "rsa_key_pair":
+ value1, err1 := section.GetKey("public_key_id")
+ value2, err2 := section.GetKey("private_key_file")
+ if err1 != nil || err2 != nil {
+ return nil, errors.New("ERROR: Failed to get value")
+ }
+ if value1.String() == "" || value2.String() == "" {
+ return nil, errors.New("ERROR: Value can't be empty")
+ }
+ file, err := os.Open(value2.String())
+ if err != nil {
+ return nil, errors.New("ERROR: Can not get private_key")
+ }
+ defer file.Close()
+		scan := bufio.NewScanner(file)
+		var privateKey string
+		for scan.Scan() {
+			// skip the PEM header and footer lines
+			if strings.HasPrefix(scan.Text(), "----") {
+				continue
+			}
+			privateKey += scan.Text() + "\n"
+		}
+		return credentials.NewRsaKeyPairCredential(privateKey, value1.String(), 3600), nil
+ default:
+ return nil, errors.New("ERROR: Failed to get credential")
+ }
+}
+
+// GetHomePath returns the home directory according to the operating system.
+// If the relevant environment variable does not exist, it returns an empty string.
+func GetHomePath() string {
+ if runtime.GOOS == "windows" {
+ path, ok := os.LookupEnv("USERPROFILE")
+ if !ok {
+ return ""
+ }
+ return path
+ }
+ path, ok := os.LookupEnv("HOME")
+ if !ok {
+ return ""
+ }
+ return path
+}
+
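+// checkDefaultPath returns the path of ~/.alibabacloud/credentials when the
+// file exists; a missing file yields ("", nil) so the caller can skip this
+// provider silently.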
+func checkDefaultPath() (path string, err error) {
+ path = GetHomePath()
+ if path == "" {
+ return "", errors.New("The default credential file path is invalid")
+ }
+ path = strings.Replace("~/.alibabacloud/credentials", "~", path, 1)
+ _, err = os.Stat(path)
+ if err != nil {
+ return "", nil
+ }
+ return path, nil
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider.go
new file mode 100644
index 000000000..ae4e168eb
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider.go
@@ -0,0 +1,19 @@
+package provider
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+)
+
+// Environment variables that may be used by the provider
+const (
+ ENVAccessKeyID = "ALIBABA_CLOUD_ACCESS_KEY_ID"
+ ENVAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET"
+ ENVCredentialFile = "ALIBABA_CLOUD_CREDENTIALS_FILE"
+ ENVEcsMetadata = "ALIBABA_CLOUD_ECS_METADATA"
+ PATHCredentialFile = "~/.alibabacloud/credentials"
+)
+
+// To customize the provider, implement the Resolve method of this interface.
+type Provider interface {
+ Resolve() (auth.Credential, error)
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider_chain.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider_chain.go
new file mode 100644
index 000000000..3f9315d13
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider_chain.go
@@ -0,0 +1,34 @@
+package provider
+
+import (
+ "errors"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+)
+
+type ProviderChain struct {
+ Providers []Provider
+}
+
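+// DefaultChain resolves credentials from the environment, the shared credentials file, and ECS instance metadata, in that order.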
+var defaultproviders = []Provider{ProviderEnv, ProviderProfile, ProviderInstance}
+var DefaultChain = NewProviderChain(defaultproviders)
+
+func NewProviderChain(providers []Provider) Provider {
+ return &ProviderChain{
+ Providers: providers,
+ }
+}
+
+func (p *ProviderChain) Resolve() (auth.Credential, error) {
+	for _, provider := range p.Providers {
+		creds, err := provider.Resolve()
+		if err != nil {
+			return nil, err
+		}
+		if creds == nil {
+			// this provider has nothing to offer; try the next one
+			continue
+		}
+		return creds, nil
+	}
+	return nil, errors.New("No credential found")
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/rsa_key_pair_credential.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/rsa_key_pair_credential.go
new file mode 100644
index 000000000..00d688eb8
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/rsa_key_pair_credential.go
@@ -0,0 +1,15 @@
+package credentials
+
+type RsaKeyPairCredential struct {
+ PrivateKey string
+ PublicKeyId string
+ SessionExpiration int
+}
+
+func NewRsaKeyPairCredential(privateKey, publicKeyId string, sessionExpiration int) *RsaKeyPairCredential {
+ return &RsaKeyPairCredential{
+ PrivateKey: privateKey,
+ PublicKeyId: publicKeyId,
+ SessionExpiration: sessionExpiration,
+ }
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_credential.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_credential.go
new file mode 100644
index 000000000..554431ff0
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_credential.go
@@ -0,0 +1,15 @@
+package credentials
+
+type StsTokenCredential struct {
+ AccessKeyId string
+ AccessKeySecret string
+ AccessKeyStsToken string
+}
+
+func NewStsTokenCredential(accessKeyId, accessKeySecret, accessKeyStsToken string) *StsTokenCredential {
+ return &StsTokenCredential{
+ AccessKeyId: accessKeyId,
+ AccessKeySecret: accessKeySecret,
+ AccessKeyStsToken: accessKeyStsToken,
+ }
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_role_arn_credential.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_role_arn_credential.go
new file mode 100644
index 000000000..27602fd74
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_role_arn_credential.go
@@ -0,0 +1,61 @@
+package credentials
+
+// Deprecated: Use RamRoleArnCredential in this package instead.
+type StsRoleArnCredential struct {
+ AccessKeyId string
+ AccessKeySecret string
+ RoleArn string
+ RoleSessionName string
+ RoleSessionExpiration int
+}
+
+type RamRoleArnCredential struct {
+ AccessKeyId string
+ AccessKeySecret string
+ RoleArn string
+ RoleSessionName string
+ RoleSessionExpiration int
+ Policy string
+}
+
+// Deprecated: Use RamRoleArnCredential in this package instead.
+func NewStsRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName string, roleSessionExpiration int) *StsRoleArnCredential {
+ return &StsRoleArnCredential{
+ AccessKeyId: accessKeyId,
+ AccessKeySecret: accessKeySecret,
+ RoleArn: roleArn,
+ RoleSessionName: roleSessionName,
+ RoleSessionExpiration: roleSessionExpiration,
+ }
+}
+
+func (oldCred *StsRoleArnCredential) ToRamRoleArnCredential() *RamRoleArnCredential {
+ return &RamRoleArnCredential{
+ AccessKeyId: oldCred.AccessKeyId,
+ AccessKeySecret: oldCred.AccessKeySecret,
+ RoleArn: oldCred.RoleArn,
+ RoleSessionName: oldCred.RoleSessionName,
+ RoleSessionExpiration: oldCred.RoleSessionExpiration,
+ }
+}
+
+func NewRamRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName string, roleSessionExpiration int) *RamRoleArnCredential {
+ return &RamRoleArnCredential{
+ AccessKeyId: accessKeyId,
+ AccessKeySecret: accessKeySecret,
+ RoleArn: roleArn,
+ RoleSessionName: roleSessionName,
+ RoleSessionExpiration: roleSessionExpiration,
+ }
+}
+
+func NewRamRoleArnWithPolicyCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string, roleSessionExpiration int) *RamRoleArnCredential {
+ return &RamRoleArnCredential{
+ AccessKeyId: accessKeyId,
+ AccessKeySecret: accessKeySecret,
+ RoleArn: roleArn,
+ RoleSessionName: roleSessionName,
+ RoleSessionExpiration: roleSessionExpiration,
+ Policy: policy,
+ }
+}
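
A short sketch of migrating off the deprecated type using only the constructors above (key values are placeholders):

package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
)

func main() {
	// Deprecated constructor, kept for backward compatibility.
	old := credentials.NewStsRoleArnCredential("akId", "akSecret", "acs:ram::1:role/demo", "session", 3600)

	// Preferred: convert once and use RamRoleArnCredential from here on.
	cred := old.ToRamRoleArnCredential()
	fmt.Println(cred.RoleArn, cred.RoleSessionExpiration)
}
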
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/roa_signature_composer.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/roa_signature_composer.go
new file mode 100644
index 000000000..8b4037a00
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/roa_signature_composer.go
@@ -0,0 +1,136 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package auth
+
+import (
+ "bytes"
+ "sort"
+ "strings"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+)
+
+var debug utils.Debug
+
+var hookGetDate = func(fn func() string) string {
+ return fn()
+}
+
+func init() {
+ debug = utils.Init("sdk")
+}
+
+func signRoaRequest(request requests.AcsRequest, signer Signer, regionId string) (err error) {
+ completeROASignParams(request, signer, regionId)
+ stringToSign := buildRoaStringToSign(request)
+ request.SetStringToSign(stringToSign)
+ accessKeyId, err := signer.GetAccessKeyId()
+ if err != nil {
+ return err
+ }
+
+ signature := signer.Sign(stringToSign, "")
+ request.GetHeaders()["Authorization"] = "acs " + accessKeyId + ":" + signature
+
+ return
+}
+
+func completeROASignParams(request requests.AcsRequest, signer Signer, regionId string) {
+ headerParams := request.GetHeaders()
+
+ // complete query params
+ queryParams := request.GetQueryParams()
+ //if _, ok := queryParams["RegionId"]; !ok {
+ // queryParams["RegionId"] = regionId
+ //}
+ if extraParam := signer.GetExtraParam(); extraParam != nil {
+ for key, value := range extraParam {
+ if key == "SecurityToken" {
+ headerParams["x-acs-security-token"] = value
+ continue
+ }
+ if key == "BearerToken" {
+ headerParams["x-acs-bearer-token"] = value
+ continue
+ }
+ queryParams[key] = value
+ }
+ }
+
+ // complete header params
+ headerParams["Date"] = hookGetDate(utils.GetTimeInFormatRFC2616)
+ headerParams["x-acs-signature-method"] = signer.GetName()
+ headerParams["x-acs-signature-version"] = signer.GetVersion()
+	if len(request.GetFormParams()) > 0 {
+ formString := utils.GetUrlFormedMap(request.GetFormParams())
+ request.SetContent([]byte(formString))
+ headerParams["Content-Type"] = requests.Form
+ }
+ contentMD5 := utils.GetMD5Base64(request.GetContent())
+ headerParams["Content-MD5"] = contentMD5
+ if _, contains := headerParams["Content-Type"]; !contains {
+ headerParams["Content-Type"] = requests.Raw
+ }
+ switch format := request.GetAcceptFormat(); format {
+ case "JSON":
+ headerParams["Accept"] = requests.Json
+ case "XML":
+ headerParams["Accept"] = requests.Xml
+ default:
+ headerParams["Accept"] = requests.Raw
+ }
+}
+
+func buildRoaStringToSign(request requests.AcsRequest) (stringToSign string) {
+
+ headers := request.GetHeaders()
+
+ stringToSignBuilder := bytes.Buffer{}
+ stringToSignBuilder.WriteString(request.GetMethod())
+ stringToSignBuilder.WriteString(requests.HeaderSeparator)
+
+ // append header keys for sign
+ appendIfContain(headers, &stringToSignBuilder, "Accept", requests.HeaderSeparator)
+ appendIfContain(headers, &stringToSignBuilder, "Content-MD5", requests.HeaderSeparator)
+ appendIfContain(headers, &stringToSignBuilder, "Content-Type", requests.HeaderSeparator)
+ appendIfContain(headers, &stringToSignBuilder, "Date", requests.HeaderSeparator)
+
+	// sort and append headers which start with 'x-acs-'
+ var acsHeaders []string
+ for key := range headers {
+ if strings.HasPrefix(key, "x-acs-") {
+ acsHeaders = append(acsHeaders, key)
+ }
+ }
+ sort.Strings(acsHeaders)
+ for _, key := range acsHeaders {
+ stringToSignBuilder.WriteString(key + ":" + headers[key])
+ stringToSignBuilder.WriteString(requests.HeaderSeparator)
+ }
+
+ // append query params
+ stringToSignBuilder.WriteString(request.BuildQueries())
+ stringToSign = stringToSignBuilder.String()
+ debug("stringToSign: %s", stringToSign)
+ return
+}
+
+func appendIfContain(sourceMap map[string]string, target *bytes.Buffer, key, separator string) {
+ if value, contain := sourceMap[key]; contain && len(value) > 0 {
+		target.WriteString(value)
+ target.WriteString(separator)
+ }
+}
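
The string-to-sign sorts the `x-acs-*` headers lexicographically before appending them. A standalone sketch of just that canonicalization step, with a plain map standing in for the request headers and "\n" for requests.HeaderSeparator:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	headers := map[string]string{
		"x-acs-signature-method": "HMAC-SHA1",
		"x-acs-security-token":   "token",
		"Accept":                 "application/json", // not an x-acs- header; excluded below
	}
	var acsHeaders []string
	for key := range headers {
		if strings.HasPrefix(key, "x-acs-") {
			acsHeaders = append(acsHeaders, key)
		}
	}
	sort.Strings(acsHeaders)
	var b strings.Builder
	for _, key := range acsHeaders {
		b.WriteString(key + ":" + headers[key] + "\n")
	}
	fmt.Print(b.String()) // x-acs-security-token first, then x-acs-signature-method
}
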
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/rpc_signature_composer.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/rpc_signature_composer.go
new file mode 100644
index 000000000..33967b9ef
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/rpc_signature_composer.go
@@ -0,0 +1,94 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package auth
+
+import (
+ "net/url"
+ "strings"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+)
+
+var hookGetNonce = func(fn func() string) string {
+ return fn()
+}
+
+func signRpcRequest(request requests.AcsRequest, signer Signer, regionId string) (err error) {
+ err = completeRpcSignParams(request, signer, regionId)
+ if err != nil {
+ return
+ }
+	// remove the stale Signature param before retrying
+ if _, containsSign := request.GetQueryParams()["Signature"]; containsSign {
+ delete(request.GetQueryParams(), "Signature")
+ }
+ stringToSign := buildRpcStringToSign(request)
+ request.SetStringToSign(stringToSign)
+ signature := signer.Sign(stringToSign, "&")
+ request.GetQueryParams()["Signature"] = signature
+
+ return
+}
+
+func completeRpcSignParams(request requests.AcsRequest, signer Signer, regionId string) (err error) {
+ queryParams := request.GetQueryParams()
+ queryParams["Version"] = request.GetVersion()
+ queryParams["Action"] = request.GetActionName()
+ queryParams["Format"] = request.GetAcceptFormat()
+ queryParams["Timestamp"] = hookGetDate(utils.GetTimeInFormatISO8601)
+ queryParams["SignatureMethod"] = signer.GetName()
+ queryParams["SignatureType"] = signer.GetType()
+ queryParams["SignatureVersion"] = signer.GetVersion()
+ queryParams["SignatureNonce"] = hookGetNonce(utils.GetUUID)
+ queryParams["AccessKeyId"], err = signer.GetAccessKeyId()
+
+ if err != nil {
+ return
+ }
+
+ if _, contains := queryParams["RegionId"]; !contains {
+ queryParams["RegionId"] = regionId
+ }
+ if extraParam := signer.GetExtraParam(); extraParam != nil {
+ for key, value := range extraParam {
+ queryParams[key] = value
+ }
+ }
+
+ request.GetHeaders()["Content-Type"] = requests.Form
+ formString := utils.GetUrlFormedMap(request.GetFormParams())
+ request.SetContent([]byte(formString))
+
+ return
+}
+
+func buildRpcStringToSign(request requests.AcsRequest) (stringToSign string) {
+ signParams := make(map[string]string)
+ for key, value := range request.GetQueryParams() {
+ signParams[key] = value
+ }
+ for key, value := range request.GetFormParams() {
+ signParams[key] = value
+ }
+
+ stringToSign = utils.GetUrlFormedMap(signParams)
+ stringToSign = strings.Replace(stringToSign, "+", "%20", -1)
+ stringToSign = strings.Replace(stringToSign, "*", "%2A", -1)
+ stringToSign = strings.Replace(stringToSign, "%7E", "~", -1)
+ stringToSign = url.QueryEscape(stringToSign)
+ stringToSign = request.GetMethod() + "&%2F&" + stringToSign
+ return
+}
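
buildRpcStringToSign first moves the URL-encoded form toward RFC 3986 (`+` becomes `%20`, `*` becomes `%2A`, `%7E` reverts to `~`) and then escapes the whole string again. A minimal sketch of that step on a hand-built query string (standing in for utils.GetUrlFormedMap output):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	formed := "AccessKeyId=test&SignatureNonce=a+b*c%7E"
	s := strings.Replace(formed, "+", "%20", -1)
	s = strings.Replace(s, "*", "%2A", -1)
	s = strings.Replace(s, "%7E", "~", -1)
	stringToSign := "GET" + "&%2F&" + url.QueryEscape(s)
	fmt.Println(stringToSign)
}
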
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signer.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signer.go
new file mode 100644
index 000000000..cbbc3cef7
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signer.go
@@ -0,0 +1,98 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package auth
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+type Signer interface {
+ GetName() string
+ GetType() string
+ GetVersion() string
+ GetAccessKeyId() (string, error)
+ GetExtraParam() map[string]string
+ Sign(stringToSign, secretSuffix string) string
+}
+
+func NewSignerWithCredential(credential Credential, commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error)) (signer Signer, err error) {
+ switch instance := credential.(type) {
+ case *credentials.AccessKeyCredential:
+ {
+ signer = signers.NewAccessKeySigner(instance)
+ }
+ case *credentials.StsTokenCredential:
+ {
+ signer = signers.NewStsTokenSigner(instance)
+ }
+ case *credentials.BearerTokenCredential:
+ {
+ signer = signers.NewBearerTokenSigner(instance)
+ }
+ case *credentials.RamRoleArnCredential:
+ {
+ signer, err = signers.NewRamRoleArnSigner(instance, commonApi)
+ }
+ case *credentials.RsaKeyPairCredential:
+ {
+ signer, err = signers.NewSignerKeyPair(instance, commonApi)
+ }
+ case *credentials.EcsRamRoleCredential:
+ {
+ signer = signers.NewEcsRamRoleSigner(instance, commonApi)
+ }
+ case *credentials.BaseCredential: // deprecated user interface
+ {
+ signer = signers.NewAccessKeySigner(instance.ToAccessKeyCredential())
+ }
+ case *credentials.StsRoleArnCredential: // deprecated user interface
+ {
+ signer, err = signers.NewRamRoleArnSigner(instance.ToRamRoleArnCredential(), commonApi)
+ }
+ case *credentials.StsRoleNameOnEcsCredential: // deprecated user interface
+ {
+ signer = signers.NewEcsRamRoleSigner(instance.ToEcsRamRoleCredential(), commonApi)
+ }
+ default:
+ message := fmt.Sprintf(errors.UnsupportedCredentialErrorMessage, reflect.TypeOf(credential))
+ err = errors.NewClientError(errors.UnsupportedCredentialErrorCode, message, nil)
+ }
+ return
+}
+
+func Sign(request requests.AcsRequest, signer Signer, regionId string) (err error) {
+ switch request.GetStyle() {
+ case requests.ROA:
+ {
+ err = signRoaRequest(request, signer, regionId)
+ }
+ case requests.RPC:
+ {
+ err = signRpcRequest(request, signer, regionId)
+ }
+ default:
+ message := fmt.Sprintf(errors.UnknownRequestTypeErrorMessage, reflect.TypeOf(request))
+ err = errors.NewClientError(errors.UnknownRequestTypeErrorCode, message, nil)
+ }
+
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/algorithms.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/algorithms.go
new file mode 100644
index 000000000..887f50209
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/algorithms.go
@@ -0,0 +1,57 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signers
+
+import (
+ "crypto"
+ "crypto/hmac"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "encoding/base64"
+)
+
+func ShaHmac1(source, secret string) string {
+ key := []byte(secret)
+	h := hmac.New(sha1.New, key)
+	h.Write([]byte(source))
+	signedBytes := h.Sum(nil)
+ signedString := base64.StdEncoding.EncodeToString(signedBytes)
+ return signedString
+}
+
+func Sha256WithRsa(source, secret string) string {
+ // block, _ := pem.Decode([]byte(secret))
+ decodeString, err := base64.StdEncoding.DecodeString(secret)
+ if err != nil {
+ panic(err)
+ }
+ private, err := x509.ParsePKCS8PrivateKey(decodeString)
+ if err != nil {
+ panic(err)
+ }
+
+ h := crypto.Hash.New(crypto.SHA256)
+ h.Write([]byte(source))
+ hashed := h.Sum(nil)
+ signature, err := rsa.SignPKCS1v15(rand.Reader, private.(*rsa.PrivateKey),
+ crypto.SHA256, hashed)
+ if err != nil {
+ panic(err)
+ }
+
+ return base64.StdEncoding.EncodeToString(signature)
+}
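
Both helpers are exported, so they can be exercised directly. A sketch of the HMAC-SHA1 path with a throwaway secret (the RPC signer appends "&" to the secret before hashing):

package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers"
)

func main() {
	stringToSign := "GET&%2F&AccessKeyId%3Dtest"
	fmt.Println(signers.ShaHmac1(stringToSign, "testSecret"+"&"))
}
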
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/credential_updater.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/credential_updater.go
new file mode 100644
index 000000000..ba291a41e
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/credential_updater.go
@@ -0,0 +1,54 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signers
+
+import (
+ "time"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+const defaultInAdvanceScale = 0.95
+
+type credentialUpdater struct {
+ credentialExpiration int
+ lastUpdateTimestamp int64
+ inAdvanceScale float64
+ buildRequestMethod func() (*requests.CommonRequest, error)
+ responseCallBack func(response *responses.CommonResponse) error
+ refreshApi func(request *requests.CommonRequest) (response *responses.CommonResponse, err error)
+}
+
+func (updater *credentialUpdater) needUpdateCredential() (result bool) {
+ if updater.inAdvanceScale == 0 {
+ updater.inAdvanceScale = defaultInAdvanceScale
+ }
+ return time.Now().Unix()-updater.lastUpdateTimestamp >= int64(float64(updater.credentialExpiration)*updater.inAdvanceScale)
+}
+
+func (updater *credentialUpdater) updateCredential() (err error) {
+ request, err := updater.buildRequestMethod()
+ if err != nil {
+ return
+ }
+ response, err := updater.refreshApi(request)
+ if err != nil {
+ return
+ }
+ updater.lastUpdateTimestamp = time.Now().Unix()
+ err = updater.responseCallBack(response)
+ return
+}
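
In effect, needUpdateCredential triggers a refresh once 95% of the credential lifetime has elapsed. A small sketch of that arithmetic for a 3600-second session:

package main

import (
	"fmt"
	"time"
)

func main() {
	const credentialExpiration = 3600 // seconds, e.g. an STS session
	const inAdvanceScale = 0.95       // defaultInAdvanceScale above
	lastUpdate := time.Now().Unix() - 3500

	elapsed := time.Now().Unix() - lastUpdate
	needUpdate := elapsed >= int64(float64(credentialExpiration)*inAdvanceScale)
	fmt.Println(needUpdate) // true: 3500 >= 3420, i.e. refresh 180s before expiry
}
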
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/session_credential.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/session_credential.go
new file mode 100644
index 000000000..99c624c88
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/session_credential.go
@@ -0,0 +1,7 @@
+package signers
+
+type SessionCredential struct {
+ AccessKeyId string
+ AccessKeySecret string
+ StsToken string
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_access_key.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_access_key.go
new file mode 100644
index 000000000..bc4f35b85
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_access_key.go
@@ -0,0 +1,54 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signers
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+)
+
+type AccessKeySigner struct {
+ credential *credentials.AccessKeyCredential
+}
+
+func (signer *AccessKeySigner) GetExtraParam() map[string]string {
+ return nil
+}
+
+func NewAccessKeySigner(credential *credentials.AccessKeyCredential) *AccessKeySigner {
+ return &AccessKeySigner{
+ credential: credential,
+ }
+}
+
+func (*AccessKeySigner) GetName() string {
+ return "HMAC-SHA1"
+}
+
+func (*AccessKeySigner) GetType() string {
+ return ""
+}
+
+func (*AccessKeySigner) GetVersion() string {
+ return "1.0"
+}
+
+func (signer *AccessKeySigner) GetAccessKeyId() (accessKeyId string, err error) {
+ return signer.credential.AccessKeyId, nil
+}
+
+func (signer *AccessKeySigner) Sign(stringToSign, secretSuffix string) string {
+ secret := signer.credential.AccessKeySecret + secretSuffix
+ return ShaHmac1(stringToSign, secret)
+}
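
A usage sketch of the signer above (assuming the AccessKeyCredential constructor vendored elsewhere in this SDK; key values are fake):

package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers"
)

func main() {
	cred := credentials.NewAccessKeyCredential("testKeyId", "testSecret")
	signer := signers.NewAccessKeySigner(cred)
	// RPC requests pass "&" as the secret suffix.
	fmt.Println(signer.GetName(), signer.Sign("GET&%2F&AccessKeyId%3Dtest", "&"))
}
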
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_bearer_token.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_bearer_token.go
new file mode 100644
index 000000000..75b78433a
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_bearer_token.go
@@ -0,0 +1,35 @@
+package signers
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+)
+
+type BearerTokenSigner struct {
+ credential *credentials.BearerTokenCredential
+}
+
+func NewBearerTokenSigner(credential *credentials.BearerTokenCredential) *BearerTokenSigner {
+ return &BearerTokenSigner{
+ credential: credential,
+ }
+}
+
+func (signer *BearerTokenSigner) GetExtraParam() map[string]string {
+ return map[string]string{"BearerToken": signer.credential.BearerToken}
+}
+
+func (*BearerTokenSigner) GetName() string {
+ return ""
+}
+func (*BearerTokenSigner) GetType() string {
+ return "BEARERTOKEN"
+}
+func (*BearerTokenSigner) GetVersion() string {
+ return "1.0"
+}
+func (signer *BearerTokenSigner) GetAccessKeyId() (accessKeyId string, err error) {
+ return "", nil
+}
+func (signer *BearerTokenSigner) Sign(stringToSign, secretSuffix string) string {
+ return ""
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ecs_ram_role.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ecs_ram_role.go
new file mode 100644
index 000000000..73788429e
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ecs_ram_role.go
@@ -0,0 +1,170 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signers
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+ jmespath "github.com/jmespath/go-jmespath"
+)
+
+var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/"
+
+type EcsRamRoleSigner struct {
+ *credentialUpdater
+ sessionCredential *SessionCredential
+ credential *credentials.EcsRamRoleCredential
+ commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error)
+}
+
+func NewEcsRamRoleSigner(credential *credentials.EcsRamRoleCredential, commonApi func(*requests.CommonRequest, interface{}) (response *responses.CommonResponse, err error)) (signer *EcsRamRoleSigner) {
+ signer = &EcsRamRoleSigner{
+ credential: credential,
+ commonApi: commonApi,
+ }
+
+ signer.credentialUpdater = &credentialUpdater{
+ credentialExpiration: defaultDurationSeconds / 60,
+ buildRequestMethod: signer.buildCommonRequest,
+ responseCallBack: signer.refreshCredential,
+ refreshApi: signer.refreshApi,
+ }
+
+ return signer
+}
+
+func (*EcsRamRoleSigner) GetName() string {
+ return "HMAC-SHA1"
+}
+
+func (*EcsRamRoleSigner) GetType() string {
+ return ""
+}
+
+func (*EcsRamRoleSigner) GetVersion() string {
+ return "1.0"
+}
+
+func (signer *EcsRamRoleSigner) GetAccessKeyId() (accessKeyId string, err error) {
+ if signer.sessionCredential == nil || signer.needUpdateCredential() {
+ err = signer.updateCredential()
+ if err != nil {
+ return
+ }
+ }
+ if signer.sessionCredential == nil || len(signer.sessionCredential.AccessKeyId) <= 0 {
+ return "", nil
+ }
+ return signer.sessionCredential.AccessKeyId, nil
+}
+
+func (signer *EcsRamRoleSigner) GetExtraParam() map[string]string {
+ if signer.sessionCredential == nil {
+ return make(map[string]string)
+ }
+ if len(signer.sessionCredential.StsToken) <= 0 {
+ return make(map[string]string)
+ }
+ return map[string]string{"SecurityToken": signer.sessionCredential.StsToken}
+}
+
+func (signer *EcsRamRoleSigner) Sign(stringToSign, secretSuffix string) string {
+ secret := signer.sessionCredential.AccessKeySecret + secretSuffix
+ return ShaHmac1(stringToSign, secret)
+}
+
+func (signer *EcsRamRoleSigner) buildCommonRequest() (request *requests.CommonRequest, err error) {
+	return // the ECS metadata endpoint needs no CommonRequest; refreshApi ignores its argument
+}
+
+func (signer *EcsRamRoleSigner) refreshApi(request *requests.CommonRequest) (response *responses.CommonResponse, err error) {
+ requestUrl := securityCredURL + signer.credential.RoleName
+ httpRequest, err := http.NewRequest(requests.GET, requestUrl, strings.NewReader(""))
+ if err != nil {
+ err = fmt.Errorf("refresh Ecs sts token err: %s", err.Error())
+ return
+ }
+ httpClient := &http.Client{}
+ httpResponse, err := httpClient.Do(httpRequest)
+ if err != nil {
+ err = fmt.Errorf("refresh Ecs sts token err: %s", err.Error())
+ return
+ }
+
+ response = responses.NewCommonResponse()
+ err = responses.Unmarshal(response, httpResponse, "")
+ return
+}
+
+func (signer *EcsRamRoleSigner) refreshCredential(response *responses.CommonResponse) (err error) {
+ if response.GetHttpStatus() != http.StatusOK {
+ return fmt.Errorf("refresh Ecs sts token err, httpStatus: %d, message = %s", response.GetHttpStatus(), response.GetHttpContentString())
+ }
+ var data interface{}
+ err = json.Unmarshal(response.GetHttpContentBytes(), &data)
+ if err != nil {
+ return fmt.Errorf("refresh Ecs sts token err, json.Unmarshal fail: %s", err.Error())
+ }
+ code, err := jmespath.Search("Code", data)
+ if err != nil {
+ return fmt.Errorf("refresh Ecs sts token err, fail to get Code: %s", err.Error())
+ }
+ if code.(string) != "Success" {
+ return fmt.Errorf("refresh Ecs sts token err, Code is not Success")
+ }
+ accessKeyId, err := jmespath.Search("AccessKeyId", data)
+ if err != nil {
+ return fmt.Errorf("refresh Ecs sts token err, fail to get AccessKeyId: %s", err.Error())
+ }
+ accessKeySecret, err := jmespath.Search("AccessKeySecret", data)
+ if err != nil {
+ return fmt.Errorf("refresh Ecs sts token err, fail to get AccessKeySecret: %s", err.Error())
+ }
+ securityToken, err := jmespath.Search("SecurityToken", data)
+ if err != nil {
+ return fmt.Errorf("refresh Ecs sts token err, fail to get SecurityToken: %s", err.Error())
+ }
+ expiration, err := jmespath.Search("Expiration", data)
+ if err != nil {
+ return fmt.Errorf("refresh Ecs sts token err, fail to get Expiration: %s", err.Error())
+ }
+ if accessKeyId == nil || accessKeySecret == nil || securityToken == nil || expiration == nil {
+ return
+ }
+
+	expirationTime, err := time.Parse("2006-01-02T15:04:05Z", expiration.(string))
+	if err != nil {
+		return fmt.Errorf("refresh Ecs sts token err, fail to parse Expiration: %s", err.Error())
+	}
+	signer.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix())
+ signer.sessionCredential = &SessionCredential{
+ AccessKeyId: accessKeyId.(string),
+ AccessKeySecret: accessKeySecret.(string),
+ StsToken: securityToken.(string),
+ }
+
+ return
+}
+
+func (signer *EcsRamRoleSigner) GetSessionCredential() *SessionCredential {
+ return signer.sessionCredential
+}
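
refreshCredential extracts fields from the metadata JSON with jmespath. A self-contained sketch of that parsing against a canned payload:

package main

import (
	"encoding/json"
	"fmt"

	jmespath "github.com/jmespath/go-jmespath"
)

func main() {
	payload := []byte(`{"Code":"Success","AccessKeyId":"id","AccessKeySecret":"secret","SecurityToken":"token","Expiration":"2030-01-01T00:00:00Z"}`)
	var data interface{}
	if err := json.Unmarshal(payload, &data); err != nil {
		panic(err)
	}
	code, _ := jmespath.Search("Code", data)
	token, _ := jmespath.Search("SecurityToken", data)
	fmt.Println(code, token) // Success token
}
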
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_key_pair.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_key_pair.go
new file mode 100644
index 000000000..19273d5a6
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_key_pair.go
@@ -0,0 +1,148 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signers
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strconv"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+ jmespath "github.com/jmespath/go-jmespath"
+)
+
+type SignerKeyPair struct {
+ *credentialUpdater
+ sessionCredential *SessionCredential
+ credential *credentials.RsaKeyPairCredential
+ commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error)
+}
+
+func NewSignerKeyPair(credential *credentials.RsaKeyPairCredential, commonApi func(*requests.CommonRequest, interface{}) (response *responses.CommonResponse, err error)) (signer *SignerKeyPair, err error) {
+ signer = &SignerKeyPair{
+ credential: credential,
+ commonApi: commonApi,
+ }
+
+ signer.credentialUpdater = &credentialUpdater{
+ credentialExpiration: credential.SessionExpiration,
+ buildRequestMethod: signer.buildCommonRequest,
+ responseCallBack: signer.refreshCredential,
+ refreshApi: signer.refreshApi,
+ }
+
+ if credential.SessionExpiration > 0 {
+ if credential.SessionExpiration >= 900 && credential.SessionExpiration <= 3600 {
+ signer.credentialExpiration = credential.SessionExpiration
+ } else {
+ err = errors.NewClientError(errors.InvalidParamErrorCode, "Key Pair session duration should be in the range of 15min - 1Hr", nil)
+ }
+ } else {
+ signer.credentialExpiration = defaultDurationSeconds
+ }
+ return
+}
+
+func (*SignerKeyPair) GetName() string {
+ return "HMAC-SHA1"
+}
+
+func (*SignerKeyPair) GetType() string {
+ return ""
+}
+
+func (*SignerKeyPair) GetVersion() string {
+ return "1.0"
+}
+
+func (signer *SignerKeyPair) ensureCredential() error {
+ if signer.sessionCredential == nil || signer.needUpdateCredential() {
+ return signer.updateCredential()
+ }
+ return nil
+}
+
+func (signer *SignerKeyPair) GetAccessKeyId() (accessKeyId string, err error) {
+ err = signer.ensureCredential()
+ if err != nil {
+ return
+ }
+ if signer.sessionCredential == nil || len(signer.sessionCredential.AccessKeyId) <= 0 {
+ accessKeyId = ""
+ return
+ }
+
+ accessKeyId = signer.sessionCredential.AccessKeyId
+ return
+}
+
+func (signer *SignerKeyPair) GetExtraParam() map[string]string {
+ return make(map[string]string)
+}
+
+func (signer *SignerKeyPair) Sign(stringToSign, secretSuffix string) string {
+ secret := signer.sessionCredential.AccessKeySecret + secretSuffix
+ return ShaHmac1(stringToSign, secret)
+}
+
+func (signer *SignerKeyPair) buildCommonRequest() (request *requests.CommonRequest, err error) {
+ request = requests.NewCommonRequest()
+ request.Product = "Sts"
+ request.Version = "2015-04-01"
+ request.ApiName = "GenerateSessionAccessKey"
+ request.Scheme = requests.HTTPS
+ request.SetDomain("sts.ap-northeast-1.aliyuncs.com")
+ request.QueryParams["PublicKeyId"] = signer.credential.PublicKeyId
+ request.QueryParams["DurationSeconds"] = strconv.Itoa(signer.credentialExpiration)
+ return
+}
+
+func (signer *SignerKeyPair) refreshApi(request *requests.CommonRequest) (response *responses.CommonResponse, err error) {
+ signerV2 := NewSignerV2(signer.credential)
+ return signer.commonApi(request, signerV2)
+}
+
+func (signer *SignerKeyPair) refreshCredential(response *responses.CommonResponse) (err error) {
+ if response.GetHttpStatus() != http.StatusOK {
+ message := "refresh session AccessKey failed"
+ err = errors.NewServerError(response.GetHttpStatus(), response.GetHttpContentString(), message)
+ return
+ }
+ var data interface{}
+ err = json.Unmarshal(response.GetHttpContentBytes(), &data)
+ if err != nil {
+ return fmt.Errorf("refresh KeyPair err, json.Unmarshal fail: %s", err.Error())
+ }
+ accessKeyId, err := jmespath.Search("SessionAccessKey.SessionAccessKeyId", data)
+ if err != nil {
+ return fmt.Errorf("refresh KeyPair err, fail to get SessionAccessKeyId: %s", err.Error())
+ }
+ accessKeySecret, err := jmespath.Search("SessionAccessKey.SessionAccessKeySecret", data)
+ if err != nil {
+ return fmt.Errorf("refresh KeyPair err, fail to get SessionAccessKeySecret: %s", err.Error())
+ }
+ if accessKeyId == nil || accessKeySecret == nil {
+ return
+ }
+ signer.sessionCredential = &SessionCredential{
+ AccessKeyId: accessKeyId.(string),
+ AccessKeySecret: accessKeySecret.(string),
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ram_role_arn.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ram_role_arn.go
new file mode 100644
index 000000000..c945c8aeb
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ram_role_arn.go
@@ -0,0 +1,175 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signers
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strconv"
+ "time"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+ jmespath "github.com/jmespath/go-jmespath"
+)
+
+const (
+ defaultDurationSeconds = 3600
+)
+
+type RamRoleArnSigner struct {
+ *credentialUpdater
+ roleSessionName string
+ sessionCredential *SessionCredential
+ credential *credentials.RamRoleArnCredential
+ commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error)
+}
+
+func NewRamRoleArnSigner(credential *credentials.RamRoleArnCredential, commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error)) (signer *RamRoleArnSigner, err error) {
+ signer = &RamRoleArnSigner{
+ credential: credential,
+ commonApi: commonApi,
+ }
+
+ signer.credentialUpdater = &credentialUpdater{
+ credentialExpiration: credential.RoleSessionExpiration,
+ buildRequestMethod: signer.buildCommonRequest,
+ responseCallBack: signer.refreshCredential,
+ refreshApi: signer.refreshApi,
+ }
+
+ if len(credential.RoleSessionName) > 0 {
+ signer.roleSessionName = credential.RoleSessionName
+ } else {
+ signer.roleSessionName = "aliyun-go-sdk-" + strconv.FormatInt(time.Now().UnixNano()/1000, 10)
+ }
+ if credential.RoleSessionExpiration > 0 {
+ if credential.RoleSessionExpiration >= 900 && credential.RoleSessionExpiration <= 3600 {
+ signer.credentialExpiration = credential.RoleSessionExpiration
+ } else {
+ err = errors.NewClientError(errors.InvalidParamErrorCode, "Assume Role session duration should be in the range of 15min - 1Hr", nil)
+ }
+ } else {
+ signer.credentialExpiration = defaultDurationSeconds
+ }
+ return
+}
+
+func (*RamRoleArnSigner) GetName() string {
+ return "HMAC-SHA1"
+}
+
+func (*RamRoleArnSigner) GetType() string {
+ return ""
+}
+
+func (*RamRoleArnSigner) GetVersion() string {
+ return "1.0"
+}
+
+func (signer *RamRoleArnSigner) GetAccessKeyId() (accessKeyId string, err error) {
+ if signer.sessionCredential == nil || signer.needUpdateCredential() {
+ err = signer.updateCredential()
+ if err != nil {
+ return
+ }
+ }
+
+ if signer.sessionCredential == nil || len(signer.sessionCredential.AccessKeyId) <= 0 {
+ return "", err
+ }
+
+ return signer.sessionCredential.AccessKeyId, nil
+}
+
+func (signer *RamRoleArnSigner) GetExtraParam() map[string]string {
+ if signer.sessionCredential == nil || signer.needUpdateCredential() {
+ signer.updateCredential()
+ }
+ if signer.sessionCredential == nil || len(signer.sessionCredential.StsToken) <= 0 {
+ return make(map[string]string)
+ }
+ return map[string]string{"SecurityToken": signer.sessionCredential.StsToken}
+}
+
+func (signer *RamRoleArnSigner) Sign(stringToSign, secretSuffix string) string {
+ secret := signer.sessionCredential.AccessKeySecret + secretSuffix
+ return ShaHmac1(stringToSign, secret)
+}
+
+func (signer *RamRoleArnSigner) buildCommonRequest() (request *requests.CommonRequest, err error) {
+ request = requests.NewCommonRequest()
+ request.Product = "Sts"
+ request.Version = "2015-04-01"
+ request.ApiName = "AssumeRole"
+ request.Scheme = requests.HTTPS
+ request.QueryParams["RoleArn"] = signer.credential.RoleArn
+ if signer.credential.Policy != "" {
+ request.QueryParams["Policy"] = signer.credential.Policy
+ }
+ request.QueryParams["RoleSessionName"] = signer.credential.RoleSessionName
+ request.QueryParams["DurationSeconds"] = strconv.Itoa(signer.credentialExpiration)
+ return
+}
+
+func (signer *RamRoleArnSigner) refreshApi(request *requests.CommonRequest) (response *responses.CommonResponse, err error) {
+ credential := &credentials.AccessKeyCredential{
+ AccessKeyId: signer.credential.AccessKeyId,
+ AccessKeySecret: signer.credential.AccessKeySecret,
+ }
+ signerV1 := NewAccessKeySigner(credential)
+ return signer.commonApi(request, signerV1)
+}
+
+func (signer *RamRoleArnSigner) refreshCredential(response *responses.CommonResponse) (err error) {
+ if response.GetHttpStatus() != http.StatusOK {
+ message := "refresh session token failed"
+ err = errors.NewServerError(response.GetHttpStatus(), response.GetHttpContentString(), message)
+ return
+ }
+ var data interface{}
+ err = json.Unmarshal(response.GetHttpContentBytes(), &data)
+ if err != nil {
+ return fmt.Errorf("refresh RoleArn sts token err, json.Unmarshal fail: %s", err.Error())
+ }
+ accessKeyId, err := jmespath.Search("Credentials.AccessKeyId", data)
+ if err != nil {
+ return fmt.Errorf("refresh RoleArn sts token err, fail to get AccessKeyId: %s", err.Error())
+ }
+ accessKeySecret, err := jmespath.Search("Credentials.AccessKeySecret", data)
+ if err != nil {
+ return fmt.Errorf("refresh RoleArn sts token err, fail to get AccessKeySecret: %s", err.Error())
+ }
+ securityToken, err := jmespath.Search("Credentials.SecurityToken", data)
+ if err != nil {
+ return fmt.Errorf("refresh RoleArn sts token err, fail to get SecurityToken: %s", err.Error())
+ }
+ if accessKeyId == nil || accessKeySecret == nil || securityToken == nil {
+ return
+ }
+ signer.sessionCredential = &SessionCredential{
+ AccessKeyId: accessKeyId.(string),
+ AccessKeySecret: accessKeySecret.(string),
+ StsToken: securityToken.(string),
+ }
+ return
+}
+
+func (signer *RamRoleArnSigner) GetSessionCredential() *SessionCredential {
+ return signer.sessionCredential
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_sts_token.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_sts_token.go
new file mode 100644
index 000000000..d0ce36c38
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_sts_token.go
@@ -0,0 +1,54 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signers
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+)
+
+type StsTokenSigner struct {
+ credential *credentials.StsTokenCredential
+}
+
+func NewStsTokenSigner(credential *credentials.StsTokenCredential) *StsTokenSigner {
+ return &StsTokenSigner{
+ credential: credential,
+ }
+}
+
+func (*StsTokenSigner) GetName() string {
+ return "HMAC-SHA1"
+}
+
+func (*StsTokenSigner) GetType() string {
+ return ""
+}
+
+func (*StsTokenSigner) GetVersion() string {
+ return "1.0"
+}
+
+func (signer *StsTokenSigner) GetAccessKeyId() (accessKeyId string, err error) {
+ return signer.credential.AccessKeyId, nil
+}
+
+func (signer *StsTokenSigner) GetExtraParam() map[string]string {
+ return map[string]string{"SecurityToken": signer.credential.AccessKeyStsToken}
+}
+
+func (signer *StsTokenSigner) Sign(stringToSign, secretSuffix string) string {
+ secret := signer.credential.AccessKeySecret + secretSuffix
+ return ShaHmac1(stringToSign, secret)
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_v2.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_v2.go
new file mode 100644
index 000000000..973485298
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_v2.go
@@ -0,0 +1,54 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package signers
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+)
+
+type SignerV2 struct {
+ credential *credentials.RsaKeyPairCredential
+}
+
+func (signer *SignerV2) GetExtraParam() map[string]string {
+ return nil
+}
+
+func NewSignerV2(credential *credentials.RsaKeyPairCredential) *SignerV2 {
+ return &SignerV2{
+ credential: credential,
+ }
+}
+
+func (*SignerV2) GetName() string {
+ return "SHA256withRSA"
+}
+
+func (*SignerV2) GetType() string {
+ return "PRIVATEKEY"
+}
+
+func (*SignerV2) GetVersion() string {
+ return "1.0"
+}
+
+func (signer *SignerV2) GetAccessKeyId() (accessKeyId string, err error) {
+ return signer.credential.PublicKeyId, err
+}
+
+func (signer *SignerV2) Sign(stringToSign, secretSuffix string) string {
+ secret := signer.credential.PrivateKey
+ return Sha256WithRsa(stringToSign, secret)
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/client.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/client.go
new file mode 100644
index 000000000..aef8bdc05
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/client.go
@@ -0,0 +1,766 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sdk
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+)
+
+var debug utils.Debug
+
+func init() {
+ debug = utils.Init("sdk")
+}
+
+// Version is replaced at build time via: -ldflags="-X sdk.version=x.x.x"
+var Version = "0.0.1"
+var defaultConnectTimeout = 5 * time.Second
+var defaultReadTimeout = 10 * time.Second
+
+var DefaultUserAgent = fmt.Sprintf("AlibabaCloud (%s; %s) Golang/%s Core/%s", runtime.GOOS, runtime.GOARCH, strings.Trim(runtime.Version(), "go"), Version)
+
+var hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) {
+ return fn
+}
+
+// Client the type Client
+type Client struct {
+ isInsecure bool
+ regionId string
+ config *Config
+ httpProxy string
+ httpsProxy string
+ noProxy string
+ logger *Logger
+ userAgent map[string]string
+ signer auth.Signer
+ httpClient *http.Client
+ asyncTaskQueue chan func()
+ readTimeout time.Duration
+ connectTimeout time.Duration
+ EndpointMap map[string]string
+ EndpointType string
+ Network string
+
+ debug bool
+ isRunning bool
+	// avoid "panic: send on closed channel" caused by addAsync() after Shutdown()
+ asyncChanLock *sync.RWMutex
+}
+
+func (client *Client) Init() (err error) {
+	panic("not supported yet")
+}
+
+func (client *Client) SetEndpointRules(endpointMap map[string]string, endpointType string, network string) {
+	client.EndpointMap = endpointMap
+	client.Network = network
+	client.EndpointType = endpointType
+}
+
+func (client *Client) SetHTTPSInsecure(isInsecure bool) {
+ client.isInsecure = isInsecure
+}
+
+func (client *Client) GetHTTPSInsecure() bool {
+ return client.isInsecure
+}
+
+func (client *Client) SetHttpsProxy(httpsProxy string) {
+ client.httpsProxy = httpsProxy
+}
+
+func (client *Client) GetHttpsProxy() string {
+ return client.httpsProxy
+}
+
+func (client *Client) SetHttpProxy(httpProxy string) {
+ client.httpProxy = httpProxy
+}
+
+func (client *Client) GetHttpProxy() string {
+ return client.httpProxy
+}
+
+func (client *Client) SetNoProxy(noProxy string) {
+ client.noProxy = noProxy
+}
+
+func (client *Client) GetNoProxy() string {
+ return client.noProxy
+}
+
+// InitWithProviderChain resolves a credential from the given provider chain.
+// Note that RsaKeyPairCredential is only applicable to region `ap-northeast-1`;
+// if your provider chain may return a credential of that type,
+// please ensure the regionId you pass here is `ap-northeast-1`.
+func (client *Client) InitWithProviderChain(regionId string, provider provider.Provider) (err error) {
+ config := client.InitClientConfig()
+ credential, err := provider.Resolve()
+ if err != nil {
+ return
+ }
+ return client.InitWithOptions(regionId, config, credential)
+}
+
+func (client *Client) InitWithOptions(regionId string, config *Config, credential auth.Credential) (err error) {
+ client.isRunning = true
+ client.asyncChanLock = new(sync.RWMutex)
+ client.regionId = regionId
+ client.config = config
+ client.httpClient = &http.Client{}
+
+ if config.HttpTransport != nil {
+ client.httpClient.Transport = config.HttpTransport
+ }
+
+ if config.Timeout > 0 {
+ client.httpClient.Timeout = config.Timeout
+ }
+
+ if config.EnableAsync {
+ client.EnableAsync(config.GoRoutinePoolSize, config.MaxTaskQueueSize)
+ }
+
+ client.signer, err = auth.NewSignerWithCredential(credential, client.ProcessCommonRequestWithSigner)
+
+ return
+}
+
+func (client *Client) SetReadTimeout(readTimeout time.Duration) {
+ client.readTimeout = readTimeout
+}
+
+func (client *Client) SetConnectTimeout(connectTimeout time.Duration) {
+ client.connectTimeout = connectTimeout
+}
+
+func (client *Client) GetReadTimeout() time.Duration {
+ return client.readTimeout
+}
+
+func (client *Client) GetConnectTimeout() time.Duration {
+ return client.connectTimeout
+}
+
+func (client *Client) getHttpProxy(scheme string) (proxy *url.URL, err error) {
+ if scheme == "https" {
+ if client.GetHttpsProxy() != "" {
+ proxy, err = url.Parse(client.httpsProxy)
+ } else if rawurl := os.Getenv("HTTPS_PROXY"); rawurl != "" {
+ proxy, err = url.Parse(rawurl)
+ } else if rawurl := os.Getenv("https_proxy"); rawurl != "" {
+ proxy, err = url.Parse(rawurl)
+ }
+ } else {
+ if client.GetHttpProxy() != "" {
+ proxy, err = url.Parse(client.httpProxy)
+ } else if rawurl := os.Getenv("HTTP_PROXY"); rawurl != "" {
+ proxy, err = url.Parse(rawurl)
+ } else if rawurl := os.Getenv("http_proxy"); rawurl != "" {
+ proxy, err = url.Parse(rawurl)
+ }
+ }
+
+ return proxy, err
+}
+
+func (client *Client) getNoProxy(scheme string) []string {
+ var urls []string
+ if client.GetNoProxy() != "" {
+ urls = strings.Split(client.noProxy, ",")
+ } else if rawurl := os.Getenv("NO_PROXY"); rawurl != "" {
+ urls = strings.Split(rawurl, ",")
+ } else if rawurl := os.Getenv("no_proxy"); rawurl != "" {
+ urls = strings.Split(rawurl, ",")
+ }
+
+ return urls
+}
+
+// EnableAsync enables the async task queue
+func (client *Client) EnableAsync(routinePoolSize, maxTaskQueueSize int) {
+	client.asyncTaskQueue = make(chan func(), maxTaskQueueSize)
+	for i := 0; i < routinePoolSize; i++ {
+		go func() {
+			for client.isRunning {
+				// a receive on a closed channel reports ok == false,
+				// so tasks queued before shutdown still run safely
+				task, notClosed := <-client.asyncTaskQueue
+				if notClosed {
+					task()
+				}
+			}
+		}()
+	}
+}
+
+func (client *Client) InitWithAccessKey(regionId, accessKeyId, accessKeySecret string) (err error) {
+ config := client.InitClientConfig()
+ credential := &credentials.BaseCredential{
+ AccessKeyId: accessKeyId,
+ AccessKeySecret: accessKeySecret,
+ }
+ return client.InitWithOptions(regionId, config, credential)
+}
+
+func (client *Client) InitWithStsToken(regionId, accessKeyId, accessKeySecret, securityToken string) (err error) {
+ config := client.InitClientConfig()
+ credential := &credentials.StsTokenCredential{
+ AccessKeyId: accessKeyId,
+ AccessKeySecret: accessKeySecret,
+ AccessKeyStsToken: securityToken,
+ }
+ return client.InitWithOptions(regionId, config, credential)
+}
+
+func (client *Client) InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (err error) {
+ config := client.InitClientConfig()
+ credential := &credentials.RamRoleArnCredential{
+ AccessKeyId: accessKeyId,
+ AccessKeySecret: accessKeySecret,
+ RoleArn: roleArn,
+ RoleSessionName: roleSessionName,
+ }
+ return client.InitWithOptions(regionId, config, credential)
+}
+
+func (client *Client) InitWithRamRoleArnAndPolicy(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string) (err error) {
+ config := client.InitClientConfig()
+ credential := &credentials.RamRoleArnCredential{
+ AccessKeyId: accessKeyId,
+ AccessKeySecret: accessKeySecret,
+ RoleArn: roleArn,
+ RoleSessionName: roleSessionName,
+ Policy: policy,
+ }
+ return client.InitWithOptions(regionId, config, credential)
+}
+
+func (client *Client) InitWithRsaKeyPair(regionId, publicKeyId, privateKey string, sessionExpiration int) (err error) {
+ config := client.InitClientConfig()
+ credential := &credentials.RsaKeyPairCredential{
+ PrivateKey: privateKey,
+ PublicKeyId: publicKeyId,
+ SessionExpiration: sessionExpiration,
+ }
+ return client.InitWithOptions(regionId, config, credential)
+}
+
+func (client *Client) InitWithEcsRamRole(regionId, roleName string) (err error) {
+ config := client.InitClientConfig()
+ credential := &credentials.EcsRamRoleCredential{
+ RoleName: roleName,
+ }
+ return client.InitWithOptions(regionId, config, credential)
+}
+
+func (client *Client) InitWithBearerToken(regionId, bearerToken string) (err error) {
+ config := client.InitClientConfig()
+ credential := &credentials.BearerTokenCredential{
+ BearerToken: bearerToken,
+ }
+ return client.InitWithOptions(regionId, config, credential)
+}
+
+func (client *Client) InitClientConfig() (config *Config) {
+ if client.config != nil {
+ return client.config
+ } else {
+ return NewConfig()
+ }
+}
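
A hedged sketch of initializing a client through one of the constructors above (fake keys; the region is illustrative):

package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk"
)

func main() {
	client := &sdk.Client{}
	if err := client.InitWithAccessKey("cn-hangzhou", "testKeyId", "testSecret"); err != nil {
		fmt.Println("init failed:", err)
		return
	}
	fmt.Println("read timeout:", client.GetReadTimeout())
}
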
+
+func (client *Client) DoAction(request requests.AcsRequest, response responses.AcsResponse) (err error) {
+ return client.DoActionWithSigner(request, response, nil)
+}
+
+func (client *Client) GetEndpointRules(regionId string, product string) (endpointRaw string) {
+	if client.EndpointType == "regional" {
+		endpointRaw = strings.Replace("<product><network>.<region_id>.aliyuncs.com", "<region_id>", regionId, 1)
+	} else {
+		endpointRaw = "<product><network>.aliyuncs.com"
+	}
+	endpointRaw = strings.Replace(endpointRaw, "<product>", strings.ToLower(product), 1)
+	if client.Network == "" || client.Network == "public" {
+		endpointRaw = strings.Replace(endpointRaw, "<network>", "", 1)
+	} else {
+		endpointRaw = strings.Replace(endpointRaw, "<network>", "-"+client.Network, 1)
+	}
+	return endpointRaw
+}
+
+func (client *Client) buildRequestWithSigner(request requests.AcsRequest, signer auth.Signer) (httpRequest *http.Request, err error) {
+ // add clientVersion
+ request.GetHeaders()["x-sdk-core-version"] = Version
+
+ regionId := client.regionId
+ if len(request.GetRegionId()) > 0 {
+ regionId = request.GetRegionId()
+ }
+
+ // resolve endpoint
+ endpoint := request.GetDomain()
+ if endpoint == "" && client.EndpointType != "" {
+		if client.EndpointMap != nil && (client.Network == "" || client.Network == "public") {
+ endpoint = client.EndpointMap[regionId]
+ }
+
+ if endpoint == "" {
+ endpoint = client.GetEndpointRules(regionId, request.GetProduct())
+ }
+ }
+
+ if endpoint == "" {
+ resolveParam := &endpoints.ResolveParam{
+ Domain: request.GetDomain(),
+ Product: request.GetProduct(),
+ RegionId: regionId,
+ LocationProduct: request.GetLocationServiceCode(),
+ LocationEndpointType: request.GetLocationEndpointType(),
+ CommonApi: client.ProcessCommonRequest,
+ }
+ endpoint, err = endpoints.Resolve(resolveParam)
+ if err != nil {
+ return
+ }
+ }
+
+ request.SetDomain(endpoint)
+ if request.GetScheme() == "" {
+ request.SetScheme(client.config.Scheme)
+ }
+ // init request params
+ err = requests.InitParams(request)
+ if err != nil {
+ return
+ }
+
+ // signature
+ var finalSigner auth.Signer
+ if signer != nil {
+ finalSigner = signer
+ } else {
+ finalSigner = client.signer
+ }
+ httpRequest, err = buildHttpRequest(request, finalSigner, regionId)
+ if err == nil {
+ userAgent := DefaultUserAgent + getSendUserAgent(client.config.UserAgent, client.userAgent, request.GetUserAgent())
+ httpRequest.Header.Set("User-Agent", userAgent)
+ }
+
+ return
+}
+
+func getSendUserAgent(configUserAgent string, clientUserAgent, requestUserAgent map[string]string) string {
+ realUserAgent := ""
+ for key1, value1 := range clientUserAgent {
+		for key2 := range requestUserAgent {
+ if key1 == key2 {
+ key1 = ""
+ }
+ }
+ if key1 != "" {
+ realUserAgent += fmt.Sprintf(" %s/%s", key1, value1)
+
+ }
+ }
+ for key, value := range requestUserAgent {
+ realUserAgent += fmt.Sprintf(" %s/%s", key, value)
+ }
+ if configUserAgent != "" {
+ return realUserAgent + fmt.Sprintf(" Extra/%s", configUserAgent)
+ }
+ return realUserAgent
+}
+
+func (client *Client) AppendUserAgent(key, value string) {
+ newkey := true
+
+ if client.userAgent == nil {
+ client.userAgent = make(map[string]string)
+ }
+ if strings.ToLower(key) != "core" && strings.ToLower(key) != "go" {
+		for tag := range client.userAgent {
+ if tag == key {
+ client.userAgent[tag] = value
+ newkey = false
+ }
+ }
+ if newkey {
+ client.userAgent[key] = value
+ }
+ }
+}
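+
+// Usage sketch (key and value are illustrative):
+//
+//	client.AppendUserAgent("harbor", "v1.8.0")
+//
+// adds or updates the tag, except for the reserved keys "core" and "go",
+// which the check above silently ignores.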
+
+func (client *Client) BuildRequestWithSigner(request requests.AcsRequest, signer auth.Signer) (err error) {
+ _, err = client.buildRequestWithSigner(request, signer)
+ return
+}
+
+func (client *Client) getTimeout(request requests.AcsRequest) (time.Duration, time.Duration) {
+ readTimeout := defaultReadTimeout
+ connectTimeout := defaultConnectTimeout
+
+ reqReadTimeout := request.GetReadTimeout()
+ reqConnectTimeout := request.GetConnectTimeout()
+ if reqReadTimeout != 0*time.Millisecond {
+ readTimeout = reqReadTimeout
+ } else if client.readTimeout != 0*time.Millisecond {
+ readTimeout = client.readTimeout
+ } else if client.httpClient.Timeout != 0 {
+ readTimeout = client.httpClient.Timeout
+ } else if timeout, ok := getAPIMaxTimeout(request.GetProduct(), request.GetActionName()); ok {
+ readTimeout = timeout
+ }
+
+ if reqConnectTimeout != 0*time.Millisecond {
+ connectTimeout = reqConnectTimeout
+ } else if client.connectTimeout != 0*time.Millisecond {
+ connectTimeout = client.connectTimeout
+ }
+ return readTimeout, connectTimeout
+}
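+
+// Read timeout precedence above: the request value, then the client value,
+// then any pre-set httpClient.Timeout, then a per-API maximum looked up via
+// getAPIMaxTimeout; the connect timeout only considers request and client.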
+
+func Timeout(connectTimeout time.Duration) func(ctx context.Context, network, address string) (net.Conn, error) {
+ return func(ctx context.Context, network, address string) (net.Conn, error) {
+ return (&net.Dialer{
+ Timeout: connectTimeout,
+ DualStack: true,
+ }).DialContext(ctx, network, address)
+ }
+}
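+
+// Timeout returns a DialContext function so the connect timeout is applied
+// per dial; setTimeout below installs it on the client's http.Transport.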
+
+func (client *Client) setTimeout(request requests.AcsRequest) {
+ readTimeout, connectTimeout := client.getTimeout(request)
+ client.httpClient.Timeout = readTimeout
+ if trans, ok := client.httpClient.Transport.(*http.Transport); ok && trans != nil {
+ trans.DialContext = Timeout(connectTimeout)
+ client.httpClient.Transport = trans
+ } else {
+ client.httpClient.Transport = &http.Transport{
+ DialContext: Timeout(connectTimeout),
+ }
+ }
+}
+
+func (client *Client) getHTTPSInsecure(request requests.AcsRequest) (insecure bool) {
+ if request.GetHTTPSInsecure() != nil {
+ insecure = *request.GetHTTPSInsecure()
+ } else {
+ insecure = client.GetHTTPSInsecure()
+ }
+ return insecure
+}
+
+func (client *Client) DoActionWithSigner(request requests.AcsRequest, response responses.AcsResponse, signer auth.Signer) (err error) {
+	fieldMap := make(map[string]string)
+ initLogMsg(fieldMap)
+ defer func() {
+ client.printLog(fieldMap, err)
+ }()
+ httpRequest, err := client.buildRequestWithSigner(request, signer)
+ if err != nil {
+ return
+ }
+
+ client.setTimeout(request)
+ proxy, err := client.getHttpProxy(httpRequest.URL.Scheme)
+ if err != nil {
+ return err
+ }
+
+ noProxy := client.getNoProxy(httpRequest.URL.Scheme)
+
+ var flag bool
+ for _, value := range noProxy {
+ if value == httpRequest.Host {
+ flag = true
+ break
+ }
+ }
+
+ // Set whether to ignore certificate validation.
+ // Default InsecureSkipVerify is false.
+ if trans, ok := client.httpClient.Transport.(*http.Transport); ok && trans != nil {
+ trans.TLSClientConfig = &tls.Config{
+ InsecureSkipVerify: client.getHTTPSInsecure(request),
+ }
+ if proxy != nil && !flag {
+ trans.Proxy = http.ProxyURL(proxy)
+ }
+ client.httpClient.Transport = trans
+ }
+
+ var httpResponse *http.Response
+ for retryTimes := 0; retryTimes <= client.config.MaxRetryTime; retryTimes++ {
+ if proxy != nil && proxy.User != nil {
+ if password, passwordSet := proxy.User.Password(); passwordSet {
+ httpRequest.SetBasicAuth(proxy.User.Username(), password)
+ }
+ }
+ if retryTimes > 0 {
+ client.printLog(fieldMap, err)
+ initLogMsg(fieldMap)
+ }
+ putMsgToMap(fieldMap, httpRequest)
+ debug("> %s %s %s", httpRequest.Method, httpRequest.URL.RequestURI(), httpRequest.Proto)
+ debug("> Host: %s", httpRequest.Host)
+ for key, value := range httpRequest.Header {
+ debug("> %s: %v", key, strings.Join(value, ""))
+ }
+ debug(">")
+ debug(" Retry Times: %d.", retryTimes)
+
+ startTime := time.Now()
+ fieldMap["{start_time}"] = startTime.Format("2006-01-02 15:04:05")
+ httpResponse, err = hookDo(client.httpClient.Do)(httpRequest)
+		fieldMap["{cost}"] = time.Since(startTime).String()
+ if err == nil {
+ fieldMap["{code}"] = strconv.Itoa(httpResponse.StatusCode)
+ fieldMap["{res_headers}"] = TransToString(httpResponse.Header)
+ debug("< %s %s", httpResponse.Proto, httpResponse.Status)
+ for key, value := range httpResponse.Header {
+ debug("< %s: %v", key, strings.Join(value, ""))
+ }
+ }
+ debug("<")
+ // receive error
+ if err != nil {
+ debug(" Error: %s.", err.Error())
+ if !client.config.AutoRetry {
+ return
+ } else if retryTimes >= client.config.MaxRetryTime {
+ // timeout but reached the max retry times, return
+ times := strconv.Itoa(retryTimes + 1)
+ timeoutErrorMsg := fmt.Sprintf(errors.TimeoutErrorMessage, times, times)
+ if strings.Contains(err.Error(), "Client.Timeout") {
+ timeoutErrorMsg += " Read timeout. Please set a valid ReadTimeout."
+ } else {
+ timeoutErrorMsg += " Connect timeout. Please set a valid ConnectTimeout."
+ }
+ err = errors.NewClientError(errors.TimeoutErrorCode, timeoutErrorMsg, err)
+ return
+ }
+ }
+		// a status code >= 500 or a timeout triggers a retry
+ if client.config.AutoRetry && (err != nil || isServerError(httpResponse)) {
+ client.setTimeout(request)
+ // rewrite signatureNonce and signature
+ httpRequest, err = client.buildRequestWithSigner(request, signer)
+ // buildHttpRequest(request, finalSigner, regionId)
+ if err != nil {
+ return
+ }
+ continue
+ }
+ break
+ }
+
+ err = responses.Unmarshal(response, httpResponse, request.GetAcceptFormat())
+ fieldMap["{res_body}"] = response.GetHttpContentString()
+ debug("%s", response.GetHttpContentString())
+ // wrap server errors
+ if serverErr, ok := err.(*errors.ServerError); ok {
+ var wrapInfo = map[string]string{}
+ wrapInfo["StringToSign"] = request.GetStringToSign()
+ err = errors.WrapServerError(serverErr, wrapInfo)
+ }
+ return
+}
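+
+// Note that the body is unmarshaled from the last response even when the
+// retry budget is exhausted, and server errors are wrapped with the
+// string-to-sign so signature mismatches are easier to debug.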
+
+func putMsgToMap(fieldMap map[string]string, request *http.Request) {
+ fieldMap["{host}"] = request.Host
+ fieldMap["{method}"] = request.Method
+ fieldMap["{uri}"] = request.URL.RequestURI()
+ fieldMap["{pid}"] = strconv.Itoa(os.Getpid())
+ fieldMap["{version}"] = strings.Split(request.Proto, "/")[1]
+ hostname, _ := os.Hostname()
+ fieldMap["{hostname}"] = hostname
+ fieldMap["{req_headers}"] = TransToString(request.Header)
+ fieldMap["{target}"] = request.URL.Path + request.URL.RawQuery
+}
+
+func buildHttpRequest(request requests.AcsRequest, signer auth.Signer, regionId string) (httpRequest *http.Request, err error) {
+	err = auth.Sign(request, signer, regionId)
+ if err != nil {
+ return
+ }
+ requestMethod := request.GetMethod()
+ requestUrl := request.BuildUrl()
+ body := request.GetBodyReader()
+ httpRequest, err = http.NewRequest(requestMethod, requestUrl, body)
+ if err != nil {
+ return
+ }
+ for key, value := range request.GetHeaders() {
+ httpRequest.Header[key] = []string{value}
+ }
+ // host is a special case
+ if host, containsHost := request.GetHeaders()["Host"]; containsHost {
+ httpRequest.Host = host
+ }
+ return
+}
+
+func isServerError(httpResponse *http.Response) bool {
+ return httpResponse.StatusCode >= http.StatusInternalServerError
+}
+
+/**
+AddAsyncTask only blocks when one of the following occurs:
+1. the asyncTaskQueue is full; increase the queue size to avoid this
+2. Shutdown() is in progress and the client is being closed
+**/
+func (client *Client) AddAsyncTask(task func()) (err error) {
+ if client.asyncTaskQueue != nil {
+ client.asyncChanLock.RLock()
+ defer client.asyncChanLock.RUnlock()
+ if client.isRunning {
+ client.asyncTaskQueue <- task
+ }
+ } else {
+ err = errors.NewClientError(errors.AsyncFunctionNotEnabledCode, errors.AsyncFunctionNotEnabledMessage, nil)
+ }
+ return
+}
+
+func (client *Client) GetConfig() *Config {
+ return client.config
+}
+
+func NewClient() (client *Client, err error) {
+ client = &Client{}
+ err = client.Init()
+ return
+}
+
+func NewClientWithProvider(regionId string, providers ...provider.Provider) (client *Client, err error) {
+ client = &Client{}
+ var pc provider.Provider
+ if len(providers) == 0 {
+ pc = provider.DefaultChain
+ } else {
+ pc = provider.NewProviderChain(providers)
+ }
+ err = client.InitWithProviderChain(regionId, pc)
+ return
+}
+
+func NewClientWithOptions(regionId string, config *Config, credential auth.Credential) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithOptions(regionId, config, credential)
+ return
+}
+
+func NewClientWithAccessKey(regionId, accessKeyId, accessKeySecret string) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithAccessKey(regionId, accessKeyId, accessKeySecret)
+ return
+}
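+
+// A minimal usage sketch; the region and credentials are placeholders, and
+// the CommonRequest fields must match a real API before sending:
+//
+//	client, err := NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
+//	if err != nil {
+//		panic(err)
+//	}
+//	request := requests.NewCommonRequest()
+//	request.Product = "Ecs"
+//	request.Version = "2014-05-26"
+//	request.ApiName = "DescribeRegions"
+//	response, err := client.ProcessCommonRequest(request)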
+
+func NewClientWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken string) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken)
+ return
+}
+
+func NewClientWithRamRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName)
+ return
+}
+
+func NewClientWithRamRoleArnAndPolicy(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithRamRoleArnAndPolicy(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy)
+ return
+}
+
+func NewClientWithEcsRamRole(regionId string, roleName string) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithEcsRamRole(regionId, roleName)
+ return
+}
+
+func NewClientWithRsaKeyPair(regionId string, publicKeyId, privateKey string, sessionExpiration int) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithRsaKeyPair(regionId, publicKeyId, privateKey, sessionExpiration)
+ return
+}
+
+func NewClientWithBearerToken(regionId, bearerToken string) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithBearerToken(regionId, bearerToken)
+ return
+}
+
+func (client *Client) ProcessCommonRequest(request *requests.CommonRequest) (response *responses.CommonResponse, err error) {
+ request.TransToAcsRequest()
+ response = responses.NewCommonResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+func (client *Client) ProcessCommonRequestWithSigner(request *requests.CommonRequest, signerInterface interface{}) (response *responses.CommonResponse, err error) {
+ if signer, isSigner := signerInterface.(auth.Signer); isSigner {
+ request.TransToAcsRequest()
+ response = responses.NewCommonResponse()
+ err = client.DoActionWithSigner(request, response, signer)
+ return
+ }
+ panic("should not be here")
+}
+
+func (client *Client) Shutdown() {
+ // lock the addAsync()
+ client.asyncChanLock.Lock()
+ defer client.asyncChanLock.Unlock()
+ if client.asyncTaskQueue != nil {
+ close(client.asyncTaskQueue)
+ }
+ client.isRunning = false
+}
+
+// Deprecated: Use NewClientWithRamRoleArn in this package instead.
+func NewClientWithStsRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) {
+ return NewClientWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName)
+}
+
+// Deprecated: Use NewClientWithEcsRamRole in this package instead.
+func NewClientWithStsRoleNameOnEcs(regionId string, roleName string) (client *Client, err error) {
+ return NewClientWithEcsRamRole(regionId, roleName)
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/config.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/config.go
new file mode 100644
index 000000000..5e50166fc
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/config.go
@@ -0,0 +1,91 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sdk
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+)
+
+type Config struct {
+ AutoRetry bool `default:"true"`
+ MaxRetryTime int `default:"3"`
+ UserAgent string `default:""`
+ Debug bool `default:"false"`
+ HttpTransport *http.Transport `default:""`
+ EnableAsync bool `default:"false"`
+ MaxTaskQueueSize int `default:"1000"`
+ GoRoutinePoolSize int `default:"5"`
+ Scheme string `default:"HTTP"`
+ Timeout time.Duration
+}
+
+func NewConfig() (config *Config) {
+ config = &Config{}
+ utils.InitStructWithDefaultTag(config)
+ return
+}
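+
+// The With* setters below return the receiver, so a config is typically
+// built fluently; a sketch:
+//
+//	config := NewConfig().
+//		WithAutoRetry(true).
+//		WithMaxRetryTime(5).
+//		WithTimeout(30 * time.Second)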
+
+func (c *Config) WithAutoRetry(isAutoRetry bool) *Config {
+ c.AutoRetry = isAutoRetry
+ return c
+}
+
+func (c *Config) WithMaxRetryTime(maxRetryTime int) *Config {
+ c.MaxRetryTime = maxRetryTime
+ return c
+}
+
+func (c *Config) WithUserAgent(userAgent string) *Config {
+ c.UserAgent = userAgent
+ return c
+}
+
+func (c *Config) WithDebug(isDebug bool) *Config {
+ c.Debug = isDebug
+ return c
+}
+
+func (c *Config) WithTimeout(timeout time.Duration) *Config {
+ c.Timeout = timeout
+ return c
+}
+
+func (c *Config) WithHttpTransport(httpTransport *http.Transport) *Config {
+ c.HttpTransport = httpTransport
+ return c
+}
+
+func (c *Config) WithEnableAsync(isEnableAsync bool) *Config {
+ c.EnableAsync = isEnableAsync
+ return c
+}
+
+func (c *Config) WithMaxTaskQueueSize(maxTaskQueueSize int) *Config {
+ c.MaxTaskQueueSize = maxTaskQueueSize
+ return c
+}
+
+func (c *Config) WithGoRoutinePoolSize(goRoutinePoolSize int) *Config {
+ c.GoRoutinePoolSize = goRoutinePoolSize
+ return c
+}
+
+func (c *Config) WithScheme(scheme string) *Config {
+ c.Scheme = scheme
+ return c
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/endpoints_config.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/endpoints_config.go
new file mode 100644
index 000000000..a4fc47098
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/endpoints_config.go
@@ -0,0 +1,4126 @@
+package endpoints
+
+import (
+ "encoding/json"
+ "fmt"
+ "sync"
+)
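+
+// endpointsJson is a static product catalog: each entry carries a product
+// "code", a "document_id", a "location_service_code", a list of
+// "regional_endpoints" ({region, endpoint} pairs, or null), an optional
+// "global_endpoint" fallback, and an optional "regional_endpoint_pattern"
+// template such as "emr.[RegionId].aliyuncs.com".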
+
+const endpointsJson = `{
+ "products": [
+ {
+ "code": "emr",
+ "document_id": "28140",
+ "location_service_code": "emr",
+ "regional_endpoints": [
+ {
+ "region": "cn-qingdao",
+ "endpoint": "emr.cn-qingdao.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "emr.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "emr.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "emr.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "emr.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "emr.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "emr.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "emr.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "emr.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "emr.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "emr.us-east-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "emr.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "emr.cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "emr.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "emr.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "emr.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "emr.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "emr.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "emr.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "emr.aliyuncs.com",
+ "regional_endpoint_pattern": "emr.[RegionId].aliyuncs.com"
+ },
+ {
+ "code": "petadata",
+ "document_id": "",
+ "location_service_code": "petadata",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "petadata.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "petadata.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "petadata.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "petadata.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "petadata.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "petadata.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "petadata.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "petadata.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "petadata.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "petadata.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "petadata.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "petadata.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "petadata.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "petadata.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "petadata.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "petadata.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "dbs",
+ "document_id": "",
+ "location_service_code": "dbs",
+ "regional_endpoints": [
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "dbs-api.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "dbs-api.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "dbs-api.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "dbs-api.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "dbs-api.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "dbs-api.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "dbs-api.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "dbs-api.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "alidnsgtm",
+ "document_id": "",
+ "location_service_code": "alidnsgtm",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "alidns.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "alidns.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "elasticsearch",
+ "document_id": "",
+ "location_service_code": "elasticsearch",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "elasticsearch.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "elasticsearch.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "elasticsearch.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "elasticsearch.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "elasticsearch.cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "elasticsearch.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "elasticsearch.us-west-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "elasticsearch.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "elasticsearch.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "elasticsearch.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "elasticsearch.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "elasticsearch.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "elasticsearch.cn-qingdao.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "elasticsearch.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "elasticsearch.ap-northeast-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "baas",
+ "document_id": "",
+ "location_service_code": "baas",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "baas.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "baas.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "baas.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "baas.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "baas.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "baas.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "cr",
+ "document_id": "60716",
+ "location_service_code": "cr",
+ "regional_endpoints": null,
+ "global_endpoint": "cr.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "cloudap",
+ "document_id": "",
+ "location_service_code": "cloudap",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "cloudwf.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "imagesearch",
+ "document_id": "",
+ "location_service_code": "imagesearch",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "imagesearch.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "imagesearch.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "imagesearch.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "imagesearch.ap-southeast-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "pts",
+ "document_id": "",
+ "location_service_code": "pts",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "pts.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ehs",
+ "document_id": "",
+ "location_service_code": "ehs",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "ehpc.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "ehpc.cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "ehpc.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "ehpc.cn-qingdao.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "ehpc.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "ehpc.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "ehpc.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ehpc.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "ehpc.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "ehpc.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "ehpc.ap-southeast-2.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "polardb",
+ "document_id": "58764",
+ "location_service_code": "polardb",
+ "regional_endpoints": [
+ {
+ "region": "ap-south-1",
+ "endpoint": "polardb.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "polardb.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "polardb.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "polardb.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "polardb.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "polardb.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "polardb.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "polardb.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "polardb.ap-southeast-5.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": "polardb.aliyuncs.com"
+ },
+ {
+ "code": "r-kvstore",
+ "document_id": "60831",
+ "location_service_code": "redisa",
+ "regional_endpoints": [
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "r-kvstore.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "r-kvstore.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "r-kvstore.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "r-kvstore.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "r-kvstore.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "r-kvstore.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "r-kvstore.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "r-kvstore.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "r-kvstore.cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "r-kvstore.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "r-kvstore.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "r-kvstore.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "r-kvstore.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "r-kvstore.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "r-kvstore.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "r-kvstore.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "r-kvstore.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "r-kvstore.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "r-kvstore.ap-southeast-5.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "r-kvstore.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "xianzhi",
+ "document_id": "",
+ "location_service_code": "xianzhi",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "xianzhi.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "pcdn",
+ "document_id": "",
+ "location_service_code": "pcdn",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "pcdn.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "cdn",
+ "document_id": "27148",
+ "location_service_code": "cdn",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "cdn.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "cdn.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "cloudauth",
+ "document_id": "60687",
+ "location_service_code": "cloudauth",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "cloudauth.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "cloudauth.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "nas",
+ "document_id": "62598",
+ "location_service_code": "nas",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "nas.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "nas.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "nas.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "nas.us-west-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "nas.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "nas.cn-qingdao.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "nas.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "nas.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "nas.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "nas.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "nas.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "nas.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "nas.us-east-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "nas.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "nas.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "nas.cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "nas.ap-southeast-3.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "alidns",
+ "document_id": "29739",
+ "location_service_code": "alidns",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "alidns.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "alidns.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "dts",
+ "document_id": "",
+ "location_service_code": "dts",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "dts.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "dts.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "dts.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "dts.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "dts.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "dts.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "dts.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "dts.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "dts.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "emas",
+ "document_id": "",
+ "location_service_code": "emas",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "mhub.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "mhub.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "dysmsapi",
+ "document_id": "",
+ "location_service_code": "dysmsapi",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "dysmsapi.aliyuncs.com"
+ },
+ {
+ "region": "cn-chengdu",
+ "endpoint": "dysmsapi.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "dysmsapi.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "dysmsapi.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "dysmsapi.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "dysmsapi.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "dysmsapi.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "dysmsapi.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "cloudwf",
+ "document_id": "58111",
+ "location_service_code": "cloudwf",
+ "regional_endpoints": null,
+ "global_endpoint": "cloudwf.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "fc",
+ "document_id": "",
+ "location_service_code": "fc",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "cn-beijing.fc.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "ap-southeast-2.fc.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "cn-huhehaote.fc.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "cn-shanghai.fc.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "cn-hangzhou.fc.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "cn-shenzhen.fc.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "saf",
+ "document_id": "",
+ "location_service_code": "saf",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "saf.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "saf.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "saf.cn-shenzhen.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "rds",
+ "document_id": "26223",
+ "location_service_code": "rds",
+ "regional_endpoints": [
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "rds.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "rds.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "rds.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "rds.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "rds.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "rds.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "rds.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "rds.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "rds.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "rds.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "rds.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "rds.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "rds.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "rds.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "rds.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "rds.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "rds.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "rds.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "rds.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "rds.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "vpc",
+ "document_id": "34962",
+ "location_service_code": "vpc",
+ "regional_endpoints": [
+ {
+ "region": "ap-south-1",
+ "endpoint": "vpc.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "vpc.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "vpc.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "vpc.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "vpc.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "vpc.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "vpc.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "vpc.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "vpc.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "vpc.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "vpc.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "vpc.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "vpc.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "vpc.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "vpc.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "vpc.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "vpc.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "vpc.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "vpc.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "vpc.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "gpdb",
+ "document_id": "",
+ "location_service_code": "gpdb",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "gpdb.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "gpdb.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "gpdb.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "gpdb.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "gpdb.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "gpdb.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "gpdb.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "gpdb.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "gpdb.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "gpdb.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "gpdb.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "gpdb.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "gpdb.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "gpdb.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "gpdb.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "gpdb.ap-south-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "gpdb.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "yunmarket",
+ "document_id": "",
+ "location_service_code": "yunmarket",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "market.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "pvtz",
+ "document_id": "",
+ "location_service_code": "pvtz",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "pvtz.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "pvtz.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "oss",
+ "document_id": "",
+ "location_service_code": "oss",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "oss-cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "oss-cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "oss-cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "oss-cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "oss-cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "oss-ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "oss-us-west-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "oss-cn-qingdao.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "foas",
+ "document_id": "",
+ "location_service_code": "foas",
+ "regional_endpoints": [
+ {
+ "region": "cn-qingdao",
+ "endpoint": "foas.cn-qingdao.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "foas.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "foas.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "foas.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "foas.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "foas.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "foas.ap-northeast-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ddos",
+ "document_id": "",
+ "location_service_code": "ddos",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ddospro.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "ddospro.cn-hongkong.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "cbn",
+ "document_id": "",
+ "location_service_code": "cbn",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "cbn.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "cbn.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "cbn.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "nlp",
+ "document_id": "",
+ "location_service_code": "nlp",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "nlp.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "hsm",
+ "document_id": "",
+ "location_service_code": "hsm",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "hsm.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "hsm.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "hsm.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "hsm.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "hsm.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "hsm.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ons",
+ "document_id": "44416",
+ "location_service_code": "ons",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "ons.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "ons.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "ons.us-east-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "ons.cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "ons.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "ons.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ons.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "ons.cn-qingdao.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "ons.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "ons.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "ons.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "ons.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "ons.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "ons.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "ons.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "ons.us-west-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "ons.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "ons.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "kms",
+ "document_id": "",
+ "location_service_code": "kms",
+ "regional_endpoints": [
+ {
+ "region": "cn-hongkong",
+ "endpoint": "kms.cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "kms.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "kms.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "kms.cn-qingdao.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "kms.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "kms.us-east-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "kms.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "kms.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "kms.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "kms.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "kms.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "kms.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "kms.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "kms.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "kms.us-west-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "kms.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "kms.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "kms.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "kms.ap-northeast-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "cps",
+ "document_id": "",
+ "location_service_code": "cps",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "cloudpush.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ensdisk",
+ "document_id": "",
+ "location_service_code": "ensdisk",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ens.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "cloudapi",
+ "document_id": "43590",
+ "location_service_code": "apigateway",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "apigateway.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "apigateway.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "apigateway.us-east-1.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "apigateway.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "apigateway.cn-qingdao.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "apigateway.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "apigateway.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "apigateway.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "apigateway.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "apigateway.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "apigateway.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "apigateway.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "apigateway.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "apigateway.us-west-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "apigateway.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "apigateway.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "apigateway.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "apigateway.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "apigateway.cn-hongkong.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": "apigateway.[RegionId].aliyuncs.com"
+ },
+ {
+ "code": "eci",
+ "document_id": "",
+ "location_service_code": "eci",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "eci.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "eci.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "eci.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "eci.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "eci.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "eci.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "onsvip",
+ "document_id": "",
+ "location_service_code": "onsvip",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "ons.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ons.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "ons.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "ons.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "ons.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "ons.cn-qingdao.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "linkwan",
+ "document_id": "",
+ "location_service_code": "linkwan",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "linkwan.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "linkwan.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ddosdip",
+ "document_id": "",
+ "location_service_code": "ddosdip",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "ddosdip.ap-southeast-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "batchcompute",
+ "document_id": "44717",
+ "location_service_code": "batchcompute",
+ "regional_endpoints": [
+ {
+ "region": "us-west-1",
+ "endpoint": "batchcompute.us-west-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "batchcompute.cn-qingdao.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "batchcompute.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "batchcompute.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "batchcompute.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "batchcompute.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "batchcompute.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "batchcompute.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "batchcompute.ap-southeast-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": "batchcompute.[RegionId].aliyuncs.com"
+ },
+ {
+ "code": "aegis",
+ "document_id": "28449",
+ "location_service_code": "vipaegis",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "aegis.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "aegis.ap-southeast-3.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "aegis.cn-hangzhou.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "arms",
+ "document_id": "42924",
+ "location_service_code": "arms",
+ "regional_endpoints": [
+ {
+ "region": "cn-hongkong",
+ "endpoint": "arms.cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "arms.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "arms.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "arms.cn-qingdao.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "arms.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "arms.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "arms.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": "arms.[RegionId].aliyuncs.com"
+ },
+ {
+ "code": "live",
+ "document_id": "48207",
+ "location_service_code": "live",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "live.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "live.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "live.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "live.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "live.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "live.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "live.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "live.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "alimt",
+ "document_id": "",
+ "location_service_code": "alimt",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "mt.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "actiontrail",
+ "document_id": "",
+ "location_service_code": "actiontrail",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "actiontrail.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "actiontrail.cn-qingdao.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "actiontrail.us-east-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "actiontrail.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "actiontrail.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "actiontrail.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "actiontrail.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "actiontrail.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "actiontrail.cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "actiontrail.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "actiontrail.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "actiontrail.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "actiontrail.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "actiontrail.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "actiontrail.us-west-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "actiontrail.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "actiontrail.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "actiontrail.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "actiontrail.ap-southeast-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "smartag",
+ "document_id": "",
+ "location_service_code": "smartag",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "smartag.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "smartag.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "smartag.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "smartag.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "smartag.cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "smartag.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "smartag.ap-southeast-2.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "vod",
+ "document_id": "60574",
+ "location_service_code": "vod",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "vod.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "vod.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "vod.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "vod.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "vod.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "vod.eu-central-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "domain",
+ "document_id": "42875",
+ "location_service_code": "domain",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "domain-intl.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "domain.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "domain.aliyuncs.com",
+ "regional_endpoint_pattern": "domain.aliyuncs.com"
+ },
+ {
+ "code": "ros",
+ "document_id": "28899",
+ "location_service_code": "ros",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ros.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "ros.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "cloudphoto",
+ "document_id": "59902",
+ "location_service_code": "cloudphoto",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "cloudphoto.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": "cloudphoto.[RegionId].aliyuncs.com"
+ },
+ {
+ "code": "rtc",
+ "document_id": "",
+ "location_service_code": "rtc",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "rtc.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "odpsmayi",
+ "document_id": "",
+ "location_service_code": "odpsmayi",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "bsb.cloud.alipay.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "bsb.cloud.alipay.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ims",
+ "document_id": "",
+ "location_service_code": "ims",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ims.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "csb",
+ "document_id": "64837",
+ "location_service_code": "csb",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "csb.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "csb.cn-beijing.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": "csb.[RegionId].aliyuncs.com"
+ },
+ {
+ "code": "cds",
+ "document_id": "62887",
+ "location_service_code": "codepipeline",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "cds.cn-beijing.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "cds.cn-beijing.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ddosbgp",
+ "document_id": "",
+ "location_service_code": "ddosbgp",
+ "regional_endpoints": [
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "ddosbgp.cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "ddosbgp.us-west-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "dybaseapi",
+ "document_id": "",
+ "location_service_code": "dybaseapi",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "dybaseapi.aliyuncs.com"
+ },
+ {
+ "region": "cn-chengdu",
+ "endpoint": "dybaseapi.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "dybaseapi.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "dybaseapi.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "dybaseapi.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "dybaseapi.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "dybaseapi.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "dybaseapi.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ecs",
+ "document_id": "25484",
+ "location_service_code": "ecs",
+ "regional_endpoints": [
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "ecs.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "ecs.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "ecs.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "ecs.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ecs-cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "ecs-cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "ecs.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "ecs.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "ecs.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "ecs-cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "ecs.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "ecs.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "ecs-cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "ecs-cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "ecs-cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "ecs-cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "ecs.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "ecs-cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "ecs-cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "ecs-cn-hangzhou.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ccc",
+ "document_id": "63027",
+ "location_service_code": "ccc",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ccc.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "ccc.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": "ccc.[RegionId].aliyuncs.com"
+ },
+ {
+ "code": "cs",
+ "document_id": "26043",
+ "location_service_code": "cs",
+ "regional_endpoints": null,
+ "global_endpoint": "cs.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "drdspre",
+ "document_id": "",
+ "location_service_code": "drdspre",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "drds.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "drds.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "drds.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "drds.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "drds.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "drds.cn-qingdao.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "drds.cn-beijing.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "dcdn",
+ "document_id": "",
+ "location_service_code": "dcdn",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "dcdn.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "dcdn.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "linkedmall",
+ "document_id": "",
+ "location_service_code": "linkedmall",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "linkedmall.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "linkedmall.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "trademark",
+ "document_id": "",
+ "location_service_code": "trademark",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "trademark.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "openanalytics",
+ "document_id": "",
+ "location_service_code": "openanalytics",
+ "regional_endpoints": [
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "openanalytics.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "openanalytics.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "openanalytics.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "openanalytics.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "openanalytics.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "openanalytics.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "openanalytics.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "openanalytics.cn-zhangjiakou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "sts",
+ "document_id": "28756",
+ "location_service_code": "sts",
+ "regional_endpoints": null,
+ "global_endpoint": "sts.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "waf",
+ "document_id": "62847",
+ "location_service_code": "waf",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "wafopenapi.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ots",
+ "document_id": "",
+ "location_service_code": "ots",
+ "regional_endpoints": [
+ {
+ "region": "me-east-1",
+ "endpoint": "ots.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "ots.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "ots.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "ots.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "ots.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "ots.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "ots.us-west-1.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "ots.us-east-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "ots.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "ots.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "ots.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ots.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "ots.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "ots.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "ots.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "ots.ap-southeast-3.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "cloudfirewall",
+ "document_id": "",
+ "location_service_code": "cloudfirewall",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "cloudfw.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "dm",
+ "document_id": "29434",
+ "location_service_code": "dm",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "dm.ap-southeast-2.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "dm.aliyuncs.com",
+ "regional_endpoint_pattern": "dm.[RegionId].aliyuncs.com"
+ },
+ {
+ "code": "oas",
+ "document_id": "",
+ "location_service_code": "oas",
+ "regional_endpoints": [
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "cn-shenzhen.oas.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "cn-beijing.oas.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "cn-hangzhou.oas.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ddoscoo",
+ "document_id": "",
+ "location_service_code": "ddoscoo",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ddoscoo.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "jaq",
+ "document_id": "35037",
+ "location_service_code": "jaq",
+ "regional_endpoints": null,
+ "global_endpoint": "jaq.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "iovcc",
+ "document_id": "",
+ "location_service_code": "iovcc",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "iovcc.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "sas-api",
+ "document_id": "28498",
+ "location_service_code": "sas",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "sas.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "chatbot",
+ "document_id": "60760",
+ "location_service_code": "beebot",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "chatbot.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "chatbot.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": "chatbot.[RegionId].aliyuncs.com"
+ },
+ {
+ "code": "airec",
+ "document_id": "",
+ "location_service_code": "airec",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "airec.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "airec.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "airec.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "dmsenterprise",
+ "document_id": "",
+ "location_service_code": "dmsenterprise",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "dms-enterprise.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "dms-enterprise.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "dms-enterprise.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "dms-enterprise.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "dms-enterprise.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "dms-enterprise.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ivision",
+ "document_id": "",
+ "location_service_code": "ivision",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ivision.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "ivision.cn-beijing.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "odpsplusmayi",
+ "document_id": "",
+ "location_service_code": "odpsplusmayi",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "bsb.cloud.alipay.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "bsb.cloud.alipay.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "bsb.cloud.alipay.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "gameshield",
+ "document_id": "",
+ "location_service_code": "gameshield",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "gameshield.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "gameshield.cn-zhangjiakou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "scdn",
+ "document_id": "",
+ "location_service_code": "scdn",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "scdn.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "hitsdb",
+ "document_id": "",
+ "location_service_code": "hitsdb",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "hitsdb.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "hdm",
+ "document_id": "",
+ "location_service_code": "hdm",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "hdm-api.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "slb",
+ "document_id": "27565",
+ "location_service_code": "slb",
+ "regional_endpoints": [
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "slb.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "slb.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "slb.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "slb.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "slb.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "slb.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "slb.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "slb.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "slb.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "slb.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "slb.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "slb.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "slb.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "slb.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "slb.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "slb.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "slb.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "slb.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "slb.cn-zhangjiakou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "slb.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "green",
+ "document_id": "28427",
+ "location_service_code": "green",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "green.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "green.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "green.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "green.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "green.us-west-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "green.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "cccvn",
+ "document_id": "",
+ "location_service_code": "cccvn",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "voicenavigator.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ddosrewards",
+ "document_id": "",
+ "location_service_code": "ddosrewards",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ddosright.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "iot",
+ "document_id": "30557",
+ "location_service_code": "iot",
+ "regional_endpoints": [
+ {
+ "region": "us-east-1",
+ "endpoint": "iot.us-east-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "iot.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "iot.us-west-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "iot.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "iot.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "iot.ap-southeast-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": "iot.[RegionId].aliyuncs.com"
+ },
+ {
+ "code": "bssopenapi",
+ "document_id": "",
+ "location_service_code": "bssopenapi",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "business.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "business.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "business.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "business.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "business.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "business.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "business.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "business.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "business.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "business.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "business.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "business.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "business.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "business.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "business.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "business.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "business.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "business.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "business.ap-southeast-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "sca",
+ "document_id": "",
+ "location_service_code": "sca",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "qualitycheck.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "luban",
+ "document_id": "",
+ "location_service_code": "luban",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "luban.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "luban.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "drdspost",
+ "document_id": "",
+ "location_service_code": "drdspost",
+ "regional_endpoints": [
+ {
+ "region": "cn-shanghai",
+ "endpoint": "drds.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "drds.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "drds.ap-southeast-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "drds",
+ "document_id": "51111",
+ "location_service_code": "drds",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "drds.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "drds.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "drds.aliyuncs.com",
+ "regional_endpoint_pattern": "drds.aliyuncs.com"
+ },
+ {
+ "code": "httpdns",
+ "document_id": "52679",
+ "location_service_code": "httpdns",
+ "regional_endpoints": null,
+ "global_endpoint": "httpdns-api.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "cas",
+ "document_id": "",
+ "location_service_code": "cas",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "cas.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "cas.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "cas.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "cas.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "cas.me-east-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "hpc",
+ "document_id": "35201",
+ "location_service_code": "hpc",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "hpc.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "hpc.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "hpc.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ddosbasic",
+ "document_id": "",
+ "location_service_code": "ddosbasic",
+ "regional_endpoints": [
+ {
+ "region": "ap-south-1",
+ "endpoint": "antiddos-openapi.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "antiddos-openapi.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "antiddos.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "antiddos.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "antiddos-openapi.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "antiddos-openapi.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "antiddos-openapi.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "antiddos-openapi.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "antiddos.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "antiddos.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "antiddos.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "antiddos-openapi.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "antiddos-openapi.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "antiddos.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "antiddos.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "antiddos.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "antiddos-openapi.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "antiddos.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "antiddos-openapi.ap-northeast-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "antiddos.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "clouddesktop",
+ "document_id": "",
+ "location_service_code": "clouddesktop",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "clouddesktop.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "clouddesktop.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "clouddesktop.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "clouddesktop.cn-shenzhen.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "uis",
+ "document_id": "",
+ "location_service_code": "uis",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "uis.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "imm",
+ "document_id": "",
+ "location_service_code": "imm",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "imm.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "imm.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "imm.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "imm.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "imm.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "imm.cn-shenzhen.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ens",
+ "document_id": "",
+ "location_service_code": "ens",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ens.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ram",
+ "document_id": "28672",
+ "location_service_code": "ram",
+ "regional_endpoints": null,
+ "global_endpoint": "ram.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "hcs_mgw",
+ "document_id": "",
+ "location_service_code": "hcs_mgw",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "mgw.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "mgw.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "mgw.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "itaas",
+ "document_id": "55759",
+ "location_service_code": "itaas",
+ "regional_endpoints": null,
+ "global_endpoint": "itaas.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "qualitycheck",
+ "document_id": "50807",
+ "location_service_code": "qualitycheck",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "qualitycheck.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "alikafka",
+ "document_id": "",
+ "location_service_code": "alikafka",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "alikafka.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "alikafka.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "alikafka.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "alikafka.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "alikafka.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "alikafka.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "alikafka.cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "alikafka.cn-qingdao.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "faas",
+ "document_id": "",
+ "location_service_code": "faas",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "faas.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "faas.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "faas.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "faas.cn-beijing.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "alidfs",
+ "document_id": "",
+ "location_service_code": "alidfs",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "dfs.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "dfs.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "cms",
+ "document_id": "28615",
+ "location_service_code": "cms",
+ "regional_endpoints": [
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "metrics.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "metrics.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "metrics.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "metrics.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "metrics.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "metrics.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "metrics.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "metrics.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "metrics.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "metrics.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "metrics.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "metrics.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "metrics.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "metrics.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "metrics.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "metrics.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "metrics.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "metrics.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "metrics.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "metrics.cn-hangzhou.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "domain-intl",
+ "document_id": "",
+ "location_service_code": "domain-intl",
+ "regional_endpoints": null,
+ "global_endpoint": "domain-intl.aliyuncs.com",
+ "regional_endpoint_pattern": "domain-intl.aliyuncs.com"
+ },
+ {
+ "code": "kvstore",
+ "document_id": "",
+ "location_service_code": "kvstore",
+ "regional_endpoints": [
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "r-kvstore.ap-northeast-1.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ccs",
+ "document_id": "",
+ "location_service_code": "ccs",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ccs.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "ess",
+ "document_id": "25925",
+ "location_service_code": "ess",
+ "regional_endpoints": [
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "ess.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "ess.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "ess.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "ess.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "ess.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "ess.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "ess.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "ess.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "ess.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "ess.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "ess.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "ess.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "ess.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "ess.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "ess.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "ess.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "ess.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "ess.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "ess.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "ess.aliyuncs.com",
+ "regional_endpoint_pattern": "ess.[RegionId].aliyuncs.com"
+ },
+ {
+ "code": "dds",
+ "document_id": "61715",
+ "location_service_code": "dds",
+ "regional_endpoints": [
+ {
+ "region": "me-east-1",
+ "endpoint": "mongodb.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "mongodb.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "mongodb.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "mongodb.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "mongodb.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "mongodb.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "mongodb.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "mongodb.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "mongodb.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "mongodb.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "mongodb.aliyuncs.com"
+ },
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "mongodb.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "mongodb.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "mongodb.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "mongodb.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "mongodb.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "mongodb.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "mongodb.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "mongodb.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "mongodb.aliyuncs.com",
+ "regional_endpoint_pattern": "mongodb.[RegionId].aliyuncs.com"
+ },
+ {
+ "code": "mts",
+ "document_id": "29212",
+ "location_service_code": "mts",
+ "regional_endpoints": [
+ {
+ "region": "cn-beijing",
+ "endpoint": "mts.cn-beijing.aliyuncs.com"
+ },
+ {
+ "region": "ap-northeast-1",
+ "endpoint": "mts.ap-northeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "mts.cn-hongkong.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "mts.cn-shenzhen.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "mts.cn-zhangjiakou.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "mts.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "mts.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "mts.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "mts.us-west-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "mts.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-west-1",
+ "endpoint": "mts.eu-west-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "mts.cn-hangzhou.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "push",
+ "document_id": "30074",
+ "location_service_code": "push",
+ "regional_endpoints": null,
+ "global_endpoint": "cloudpush.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "hcs_sgw",
+ "document_id": "",
+ "location_service_code": "hcs_sgw",
+ "regional_endpoints": [
+ {
+ "region": "eu-central-1",
+ "endpoint": "sgw.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "sgw.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-zhangjiakou",
+ "endpoint": "sgw.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "sgw.ap-southeast-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "sgw.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-hongkong",
+ "endpoint": "sgw.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "sgw.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "sgw.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "sgw.cn-shanghai.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "sgw.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "hbase",
+ "document_id": "",
+ "location_service_code": "hbase",
+ "regional_endpoints": [
+ {
+ "region": "cn-huhehaote",
+ "endpoint": "hbase.cn-huhehaote.aliyuncs.com"
+ },
+ {
+ "region": "ap-south-1",
+ "endpoint": "hbase.ap-south-1.aliyuncs.com"
+ },
+ {
+ "region": "us-west-1",
+ "endpoint": "hbase.aliyuncs.com"
+ },
+ {
+ "region": "me-east-1",
+ "endpoint": "hbase.me-east-1.aliyuncs.com"
+ },
+ {
+ "region": "eu-central-1",
+ "endpoint": "hbase.eu-central-1.aliyuncs.com"
+ },
+ {
+ "region": "cn-qingdao",
+ "endpoint": "hbase.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "hbase.aliyuncs.com"
+ },
+ {
+ "region": "cn-shenzhen",
+ "endpoint": "hbase.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-2",
+ "endpoint": "hbase.ap-southeast-2.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-3",
+ "endpoint": "hbase.ap-southeast-3.aliyuncs.com"
+ },
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "hbase.aliyuncs.com"
+ },
+ {
+ "region": "us-east-1",
+ "endpoint": "hbase.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-5",
+ "endpoint": "hbase.ap-southeast-5.aliyuncs.com"
+ },
+ {
+ "region": "cn-beijing",
+ "endpoint": "hbase.aliyuncs.com"
+ },
+ {
+ "region": "ap-southeast-1",
+ "endpoint": "hbase.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "hbase.aliyuncs.com",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "bastionhost",
+ "document_id": "",
+ "location_service_code": "bastionhost",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "yundun-bastionhost.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ },
+ {
+ "code": "vs",
+ "document_id": "",
+ "location_service_code": "vs",
+ "regional_endpoints": [
+ {
+ "region": "cn-hangzhou",
+ "endpoint": "vs.cn-hangzhou.aliyuncs.com"
+ },
+ {
+ "region": "cn-shanghai",
+ "endpoint": "vs.cn-shanghai.aliyuncs.com"
+ }
+ ],
+ "global_endpoint": "",
+ "regional_endpoint_pattern": ""
+ }
+ ]
+}`
+var initOnce sync.Once
+var data interface{}
+
+func getEndpointConfigData() interface{} {
+ initOnce.Do(func() {
+ err := json.Unmarshal([]byte(endpointsJson), &data)
+ if err != nil {
+ panic(fmt.Sprintf("init endpoint config data failed. %s", err))
+ }
+ })
+ return data
+}
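
For orientation: getEndpointConfigData parses the embedded JSON exactly once (guarded by sync.Once) and returns the same parsed tree on every subsequent call. A minimal same-package sketch of walking that tree — the exampleGlobalEndpoint helper name is hypothetical, and "ecs" is just one product code from the blob above:

// A same-package sketch (hypothetical helper): walk the lazily-parsed config
// and print one product's global endpoint. fmt is already imported here.
func exampleGlobalEndpoint() {
	cfg, ok := getEndpointConfigData().(map[string]interface{})
	if !ok {
		return
	}
	products, _ := cfg["products"].([]interface{})
	for _, p := range products {
		if product, ok := p.(map[string]interface{}); ok && product["code"] == "ecs" {
			fmt.Println(product["global_endpoint"]) // ecs-cn-hangzhou.aliyuncs.com
		}
	}
}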
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_global_resolver.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_global_resolver.go
new file mode 100644
index 000000000..160e62cb6
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_global_resolver.go
@@ -0,0 +1,43 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package endpoints
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+type LocalGlobalResolver struct {
+}
+
+func (resolver *LocalGlobalResolver) GetName() (name string) {
+ name = "local global resolver"
+ return
+}
+
+func (resolver *LocalGlobalResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) {
+ // get the global endpoints configs
+ endpointExpression := fmt.Sprintf("products[?code=='%s'].global_endpoint", strings.ToLower(param.Product))
+ endpointData, err := jmespath.Search(endpointExpression, getEndpointConfigData())
+ if err == nil && endpointData != nil && len(endpointData.([]interface{})) > 0 {
+ endpoint = endpointData.([]interface{})[0].(string)
+ support = len(endpoint) > 0
+ return
+ }
+ support = false
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_regional_resolver.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_regional_resolver.go
new file mode 100644
index 000000000..7fee64d42
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_regional_resolver.go
@@ -0,0 +1,48 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package endpoints
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+type LocalRegionalResolver struct {
+}
+
+func (resolver *LocalRegionalResolver) GetName() (name string) {
+ name = "local regional resolver"
+ return
+}
+
+func (resolver *LocalRegionalResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) {
+ // get the regional endpoints configs
+ regionalExpression := fmt.Sprintf("products[?code=='%s'].regional_endpoints", strings.ToLower(param.Product))
+ regionalData, err := jmespath.Search(regionalExpression, getEndpointConfigData())
+ if err == nil && regionalData != nil && len(regionalData.([]interface{})) > 0 {
+ endpointExpression := fmt.Sprintf("[0][?region=='%s'].endpoint", strings.ToLower(param.RegionId))
+ var endpointData interface{}
+ endpointData, err = jmespath.Search(endpointExpression, regionalData)
+ if err == nil && endpointData != nil && len(endpointData.([]interface{})) > 0 {
+ endpoint = endpointData.([]interface{})[0].(string)
+ support = len(endpoint) > 0
+ return
+ }
+ }
+ support = false
+ return
+}
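
The two local resolvers share one pattern: a JMESPath filter picks the product entry out of the parsed config, and, for the regional resolver, a second query picks the region's endpoint out of that result. A standalone sketch of both steps under those assumptions — the seed data is a tiny stand-in for getEndpointConfigData(), with example values taken from the blob above:

package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// Stand-in for getEndpointConfigData(): one product, one region.
	data := map[string]interface{}{
		"products": []interface{}{
			map[string]interface{}{
				"code": "ecs",
				"regional_endpoints": []interface{}{
					map[string]interface{}{"region": "cn-beijing", "endpoint": "ecs-cn-hangzhou.aliyuncs.com"},
				},
			},
		},
	}
	// Step 1: the product filter used by both local resolvers.
	regional, _ := jmespath.Search("products[?code=='ecs'].regional_endpoints", data)
	// Step 2: the per-region filter used by LocalRegionalResolver.
	endpoint, _ := jmespath.Search("[0][?region=='cn-beijing'].endpoint", regional)
	fmt.Println(endpoint) // [ecs-cn-hangzhou.aliyuncs.com]
}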
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/location_resolver.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/location_resolver.go
new file mode 100644
index 000000000..cc354cc4d
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/location_resolver.go
@@ -0,0 +1,176 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package endpoints
+
+import (
+ "encoding/json"
+ "sync"
+ "time"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+)
+
+const (
+	// EndpointCacheExpireTime is the endpoint cache TTL, in seconds.
+	EndpointCacheExpireTime = 3600
+)
+
+// Cache is a concurrency-safe cache of endpoints, keyed per product/region pair.
+type Cache struct {
+ sync.RWMutex
+ cache map[string]interface{}
+}
+
+// Get returns the value cached under k, or nil if absent.
+func (c *Cache) Get(k string) (v interface{}) {
+ c.RLock()
+ v = c.cache[k]
+ c.RUnlock()
+ return
+}
+
+// Set stores v in the cache under k.
+func (c *Cache) Set(k string, v interface{}) {
+ c.Lock()
+ c.cache[k] = v
+ c.Unlock()
+}
+
+var lastClearTimePerProduct = &Cache{cache: make(map[string]interface{})}
+var endpointCache = &Cache{cache: make(map[string]interface{})}
+
+// LocationResolver resolves endpoints by querying the remote Location service, caching results with a TTL.
+type LocationResolver struct {
+}
+
+func (resolver *LocationResolver) GetName() (name string) {
+ name = "location resolver"
+ return
+}
+
+// TryResolve resolves the endpoint for the given product and region, consulting the cache first.
+func (resolver *LocationResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) {
+ if len(param.LocationProduct) <= 0 {
+ support = false
+ return
+ }
+
+	// get from cache
+ cacheKey := param.Product + "#" + param.RegionId
+ var ok bool
+ endpoint, ok = endpointCache.Get(cacheKey).(string)
+
+ if ok && len(endpoint) > 0 && !CheckCacheIsExpire(cacheKey) {
+ support = true
+ return
+ }
+
+	// get from remote
+ getEndpointRequest := requests.NewCommonRequest()
+
+ getEndpointRequest.Product = "Location"
+ getEndpointRequest.Version = "2015-06-12"
+ getEndpointRequest.ApiName = "DescribeEndpoints"
+ getEndpointRequest.Domain = "location-readonly.aliyuncs.com"
+ getEndpointRequest.Method = "GET"
+ getEndpointRequest.Scheme = requests.HTTPS
+
+ getEndpointRequest.QueryParams["Id"] = param.RegionId
+ getEndpointRequest.QueryParams["ServiceCode"] = param.LocationProduct
+ if len(param.LocationEndpointType) > 0 {
+ getEndpointRequest.QueryParams["Type"] = param.LocationEndpointType
+ } else {
+ getEndpointRequest.QueryParams["Type"] = "openAPI"
+ }
+
+ response, err := param.CommonApi(getEndpointRequest)
+ if err != nil {
+ support = false
+ return
+ }
+
+ if !response.IsSuccess() {
+ support = false
+ return
+ }
+
+ var getEndpointResponse GetEndpointResponse
+ err = json.Unmarshal([]byte(response.GetHttpContentString()), &getEndpointResponse)
+ if err != nil {
+ support = false
+ return
+ }
+
+ if !getEndpointResponse.Success || getEndpointResponse.Endpoints == nil {
+ support = false
+ return
+ }
+ if len(getEndpointResponse.Endpoints.Endpoint) <= 0 {
+ support = false
+ return
+ }
+ if len(getEndpointResponse.Endpoints.Endpoint[0].Endpoint) > 0 {
+ endpoint = getEndpointResponse.Endpoints.Endpoint[0].Endpoint
+ endpointCache.Set(cacheKey, endpoint)
+ lastClearTimePerProduct.Set(cacheKey, time.Now().Unix())
+ support = true
+ return
+ }
+
+ support = false
+ return
+}
+
+// CheckCacheIsExpire reports whether the endpoint cached under cacheKey is older than EndpointCacheExpireTime.
+func CheckCacheIsExpire(cacheKey string) bool {
+ lastClearTime, ok := lastClearTimePerProduct.Get(cacheKey).(int64)
+ if !ok {
+ return true
+ }
+
+ if lastClearTime <= 0 {
+ lastClearTime = time.Now().Unix()
+ lastClearTimePerProduct.Set(cacheKey, lastClearTime)
+ }
+
+ now := time.Now().Unix()
+ elapsedTime := now - lastClearTime
+ if elapsedTime > EndpointCacheExpireTime {
+ return true
+ }
+
+ return false
+}
+
+// GetEndpointResponse is the unmarshaled body of a DescribeEndpoints response.
+type GetEndpointResponse struct {
+ Endpoints *EndpointsObj
+ RequestId string
+ Success bool
+}
+
+// EndpointsObj wraps the endpoint list in a DescribeEndpoints response.
+type EndpointsObj struct {
+ Endpoint []EndpointObj
+}
+
+// EndpointObj is a single endpoint entry in a DescribeEndpoints response.
+type EndpointObj struct {
+ // Protocols map[string]string
+ Type string
+ Namespace string
+ Id string
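+	// SerivceCode (sic) presumably mirrors the misspelled key in the API's wire response; renaming it would silently break unmarshaling.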
+ SerivceCode string
+ Endpoint string
+}
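
The response structs at the end of the file mirror the Location service's DescribeEndpoints payload. A minimal sketch of the decode step, assuming a reply of roughly this shape — the JSON literal is illustrative, not a captured response:

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of GetEndpointResponse/EndpointsObj/EndpointObj above,
// trimmed to the fields the resolver actually reads.
type getEndpointResponse struct {
	Endpoints *struct {
		Endpoint []struct{ Endpoint string }
	}
	Success bool
}

func main() {
	raw := `{"Success":true,"Endpoints":{"Endpoint":[{"Endpoint":"ecs.cn-hangzhou.aliyuncs.com"}]}}`
	var resp getEndpointResponse
	if err := json.Unmarshal([]byte(raw), &resp); err == nil &&
		resp.Success && resp.Endpoints != nil && len(resp.Endpoints.Endpoint) > 0 {
		// The resolver would now cache this under "Product#RegionId".
		fmt.Println(resp.Endpoints.Endpoint[0].Endpoint)
	}
}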
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/mapping_resolver.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/mapping_resolver.go
new file mode 100644
index 000000000..e39f53367
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/mapping_resolver.go
@@ -0,0 +1,48 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package endpoints
+
+import (
+ "fmt"
+ "strings"
+)
+
+const keyFormatter = "%s::%s"
+
+var endpointMapping = make(map[string]string)
+
+// AddEndpointMapping stores the endpoint in the inner map, keyed by region id and product id.
+func AddEndpointMapping(regionId, productId, endpoint string) (err error) {
+ key := fmt.Sprintf(keyFormatter, strings.ToLower(regionId), strings.ToLower(productId))
+ endpointMapping[key] = endpoint
+ return nil
+}
+
+// MappingResolver resolves endpoints from the in-memory endpointMapping table.
+type MappingResolver struct {
+}
+
+// GetName returns the resolver name: "mapping resolver"
+func (resolver *MappingResolver) GetName() (name string) {
+ name = "mapping resolver"
+ return
+}
+
+// TryResolve looks up the endpoint in the inner map, keyed by RegionId and Product.
+func (resolver *MappingResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) {
+ key := fmt.Sprintf(keyFormatter, strings.ToLower(param.RegionId), strings.ToLower(param.Product))
+ endpoint, contains := endpointMapping[key]
+ return endpoint, contains, nil
+}
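
AddEndpointMapping is the public hook for pinning an endpoint ahead of time; because the mapping resolver sits before the remote Location lookup in the resolver chain (see resolver.go below), a pinned entry short-circuits any network resolution. A usage sketch with example values:

package main

import "github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints"

func main() {
	// Keys are lower-cased internally, so "Ecs" matches later lookups for "ecs".
	// The error return is currently always nil.
	_ = endpoints.AddEndpointMapping("cn-hangzhou", "Ecs", "ecs-cn-hangzhou.aliyuncs.com")
}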
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/resolver.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/resolver.go
new file mode 100644
index 000000000..5e1e30530
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/resolver.go
@@ -0,0 +1,98 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package endpoints
+
+import (
+ "encoding/json"
+ "fmt"
+ "sync"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+)
+
+var debug utils.Debug
+
+func init() {
+ debug = utils.Init("sdk")
+}
+
+const (
+ ResolveEndpointUserGuideLink = ""
+)
+
+var once sync.Once
+var resolvers []Resolver
+
+type Resolver interface {
+ TryResolve(param *ResolveParam) (endpoint string, support bool, err error)
+ GetName() (name string)
+}
+
+// Resolve resolves an endpoint for the given params.
+// It tries each supported resolver in turn until one of them succeeds.
+func Resolve(param *ResolveParam) (endpoint string, err error) {
+ supportedResolvers := getAllResolvers()
+ var lastErr error
+ for _, resolver := range supportedResolvers {
+ endpoint, supported, resolveErr := resolver.TryResolve(param)
+ if resolveErr != nil {
+ lastErr = resolveErr
+ }
+
+ if supported {
+ debug("resolve endpoint with %s\n", param)
+ debug("\t%s by resolver(%s)\n", endpoint, resolver.GetName())
+ return endpoint, nil
+ }
+ }
+
+	// no resolver supported this request
+ errorMsg := fmt.Sprintf(errors.CanNotResolveEndpointErrorMessage, param, ResolveEndpointUserGuideLink)
+ err = errors.NewClientError(errors.CanNotResolveEndpointErrorCode, errorMsg, lastErr)
+ return
+}
+
+func getAllResolvers() []Resolver {
+ once.Do(func() {
+ resolvers = []Resolver{
+ &SimpleHostResolver{},
+ &MappingResolver{},
+ &LocationResolver{},
+ &LocalRegionalResolver{},
+ &LocalGlobalResolver{},
+ }
+ })
+ return resolvers
+}
+
+type ResolveParam struct {
+ Domain string
+ Product string
+ RegionId string
+ LocationProduct string
+ LocationEndpointType string
+ CommonApi func(request *requests.CommonRequest) (response *responses.CommonResponse, err error) `json:"-"`
+}
+
+func (param *ResolveParam) String() string {
+ jsonBytes, err := json.Marshal(param)
+ if err != nil {
+ return fmt.Sprint("ResolveParam.String() process error:", err)
+ }
+ return string(jsonBytes)
+}
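
Putting the chain together: with neither Domain nor LocationProduct set, Resolve falls through the simple-host, mapping, and location resolvers and lands on the local JSON data. A sketch with example values taken from the embedded blob:

package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints"
)

func main() {
	endpoint, err := endpoints.Resolve(&endpoints.ResolveParam{
		Product:  "ecs",
		RegionId: "cn-beijing",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(endpoint) // ecs-cn-hangzhou.aliyuncs.com, per the embedded data
}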
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/simple_host_resolver.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/simple_host_resolver.go
new file mode 100644
index 000000000..9ba2346c6
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/simple_host_resolver.go
@@ -0,0 +1,33 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package endpoints
+
+// SimpleHostResolver resolves the endpoint directly from ResolveParam.Domain.
+type SimpleHostResolver struct {
+}
+
+// GetName returns the resolver name: "simple host resolver"
+func (resolver *SimpleHostResolver) GetName() (name string) {
+ name = "simple host resolver"
+ return
+}
+
+// TryResolve uses param.Domain as the endpoint whenever it is non-empty.
+func (resolver *SimpleHostResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) {
+ if support = len(param.Domain) > 0; support {
+ endpoint = param.Domain
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/client_error.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/client_error.go
new file mode 100644
index 000000000..1e2d9c004
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/client_error.go
@@ -0,0 +1,92 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package errors
+
+import "fmt"
+
+const (
+ DefaultClientErrorStatus = 400
+ DefaultClientErrorCode = "SDK.ClientError"
+
+ UnsupportedCredentialErrorCode = "SDK.UnsupportedCredential"
+ UnsupportedCredentialErrorMessage = "Specified credential (type = %s) is not supported, please check"
+
+ CanNotResolveEndpointErrorCode = "SDK.CanNotResolveEndpoint"
+ CanNotResolveEndpointErrorMessage = "Can not resolve endpoint(param = %s), please check your accessKey with secret, and read the user guide\n %s"
+
+ UnsupportedParamPositionErrorCode = "SDK.UnsupportedParamPosition"
+ UnsupportedParamPositionErrorMessage = "Specified param position (%s) is not supported, please upgrade sdk and retry"
+
+ AsyncFunctionNotEnabledCode = "SDK.AsyncFunctionNotEnabled"
+ AsyncFunctionNotEnabledMessage = "Async function is not enabled in client, please invoke 'client.EnableAsync' function"
+
+ UnknownRequestTypeErrorCode = "SDK.UnknownRequestType"
+ UnknownRequestTypeErrorMessage = "Unknown Request Type: %s"
+
+ MissingParamErrorCode = "SDK.MissingParam"
+ InvalidParamErrorCode = "SDK.InvalidParam"
+
+ JsonUnmarshalErrorCode = "SDK.JsonUnmarshalError"
+ JsonUnmarshalErrorMessage = "Failed to unmarshal response, but you can get the data via response.GetHttpStatusCode() and response.GetHttpContentString()"
+
+ TimeoutErrorCode = "SDK.TimeoutError"
+ TimeoutErrorMessage = "The request timed out %s times(%s for retry), perhaps we should have the threshold raised a little?"
+)
+
+type ClientError struct {
+ errorCode string
+ message string
+ originError error
+}
+
+func NewClientError(errorCode, message string, originErr error) Error {
+ return &ClientError{
+ errorCode: errorCode,
+ message: message,
+ originError: originErr,
+ }
+}
+
+func (err *ClientError) Error() string {
+ clientErrMsg := fmt.Sprintf("[%s] %s", err.ErrorCode(), err.message)
+ if err.originError != nil {
+ return clientErrMsg + "\ncaused by:\n" + err.originError.Error()
+ }
+ return clientErrMsg
+}
+
+func (err *ClientError) OriginError() error {
+ return err.originError
+}
+
+func (*ClientError) HttpStatus() int {
+ return DefaultClientErrorStatus
+}
+
+func (err *ClientError) ErrorCode() string {
+ if err.errorCode == "" {
+ return DefaultClientErrorCode
+ } else {
+ return err.errorCode
+ }
+}
+
+func (err *ClientError) Message() string {
+ return err.message
+}
+
+func (err *ClientError) String() string {
+ return err.Error()
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/error.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/error.go
new file mode 100644
index 000000000..49962f3b5
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/error.go
@@ -0,0 +1,23 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package errors
+
+type Error interface {
+ error
+ HttpStatus() int
+ ErrorCode() string
+ Message() string
+ OriginError() error
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/server_error.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/server_error.go
new file mode 100644
index 000000000..1b7810414
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/server_error.go
@@ -0,0 +1,123 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package errors
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+var wrapperList = []ServerErrorWrapper{
+ &SignatureDostNotMatchWrapper{},
+}
+
+type ServerError struct {
+ httpStatus int
+ requestId string
+ hostId string
+ errorCode string
+ recommend string
+ message string
+ comment string
+}
+
+type ServerErrorWrapper interface {
+ tryWrap(error *ServerError, wrapInfo map[string]string) bool
+}
+
+func (err *ServerError) Error() string {
+ return fmt.Sprintf("SDK.ServerError\nErrorCode: %s\nRecommend: %s\nRequestId: %s\nMessage: %s",
+ err.errorCode, err.comment+err.recommend, err.requestId, err.message)
+}
+
+func NewServerError(httpStatus int, responseContent, comment string) Error {
+ result := &ServerError{
+ httpStatus: httpStatus,
+ message: responseContent,
+ comment: comment,
+ }
+
+ var data interface{}
+ err := json.Unmarshal([]byte(responseContent), &data)
+ if err == nil {
+ requestId, _ := jmespath.Search("RequestId", data)
+ hostId, _ := jmespath.Search("HostId", data)
+ errorCode, _ := jmespath.Search("Code", data)
+ recommend, _ := jmespath.Search("Recommend", data)
+ message, _ := jmespath.Search("Message", data)
+
+ if requestId != nil {
+ result.requestId = requestId.(string)
+ }
+ if hostId != nil {
+ result.hostId = hostId.(string)
+ }
+ if errorCode != nil {
+ result.errorCode = errorCode.(string)
+ }
+ if recommend != nil {
+ result.recommend = recommend.(string)
+ }
+ if message != nil {
+ result.message = message.(string)
+ }
+ }
+
+ return result
+}
+
+func WrapServerError(originError *ServerError, wrapInfo map[string]string) *ServerError {
+ for _, wrapper := range wrapperList {
+ ok := wrapper.tryWrap(originError, wrapInfo)
+ if ok {
+ return originError
+ }
+ }
+ return originError
+}
+
+func (err *ServerError) HttpStatus() int {
+ return err.httpStatus
+}
+
+func (err *ServerError) ErrorCode() string {
+ return err.errorCode
+}
+
+func (err *ServerError) Message() string {
+ return err.message
+}
+
+func (err *ServerError) OriginError() error {
+ return nil
+}
+
+func (err *ServerError) HostId() string {
+ return err.hostId
+}
+
+func (err *ServerError) RequestId() string {
+ return err.requestId
+}
+
+func (err *ServerError) Recommend() string {
+ return err.recommend
+}
+
+func (err *ServerError) Comment() string {
+ return err.comment
+}
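
A short sketch of how `NewServerError` lifts fields out of a JSON error body via jmespath; the payload below is hypothetical but follows the standard Alibaba Cloud error shape the searches above expect:

```go
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
)

func main() {
	// RequestId/Code/Message are extracted by the jmespath searches;
	// the raw body is kept as the fallback message.
	body := `{"RequestId":"ABCD-1234","Code":"Forbidden","Message":"no permission"}`
	err := errors.NewServerError(403, body, "")
	fmt.Println(err.HttpStatus()) // 403
	fmt.Println(err.ErrorCode())  // Forbidden
	fmt.Println(err.Message())    // no permission
}
```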
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/signature_does_not_match_wrapper.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/signature_does_not_match_wrapper.go
new file mode 100644
index 000000000..4b09d7d71
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/signature_does_not_match_wrapper.go
@@ -0,0 +1,45 @@
+package errors
+
+import (
+ "strings"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+)
+
+const SignatureDostNotMatchErrorCode = "SignatureDoesNotMatch"
+const IncompleteSignatureErrorCode = "IncompleteSignature"
+const MessageContain = "server string to sign is:"
+
+var debug utils.Debug
+
+func init() {
+ debug = utils.Init("sdk")
+}
+
+type SignatureDostNotMatchWrapper struct {
+}
+
+func (*SignatureDostNotMatchWrapper) tryWrap(error *ServerError, wrapInfo map[string]string) (ok bool) {
+ clientStringToSign := wrapInfo["StringToSign"]
+ if (error.errorCode == SignatureDostNotMatchErrorCode || error.errorCode == IncompleteSignatureErrorCode) && clientStringToSign != "" {
+ message := error.message
+ if strings.Contains(message, MessageContain) {
+ str := strings.Split(message, MessageContain)
+ serverStringToSign := str[1]
+
+ if clientStringToSign == serverStringToSign {
+				// the strings to sign match, so the AccessKeySecret itself is wrong
+				error.recommend = "InvalidAccessKeySecret: Please check your AccessKeySecret"
+ } else {
+ debug("Client StringToSign: %s", clientStringToSign)
+ debug("Server StringToSign: %s", serverStringToSign)
+ error.recommend = "This may be a bug with the SDK and we hope you can submit this question in the " +
+ "github issue(https://github.com/aliyun/alibaba-cloud-sdk-go/issues), thanks very much"
+ }
+ }
+ ok = true
+ return
+ }
+ ok = false
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/logger.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/logger.go
new file mode 100644
index 000000000..a01a7bbc9
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/logger.go
@@ -0,0 +1,116 @@
+package sdk
+
+import (
+	"encoding/json"
+	"io"
+	"log"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+)
+
+var logChannel string
+var defaultChannel = "AlibabaCloud"
+
+type Logger struct {
+ *log.Logger
+ formatTemplate string
+ isOpen bool
+ lastLogMsg string
+}
+
+var defaultLoggerTemplate = `{time} {channel}: "{method} {uri} HTTP/{version}" {code} {cost} {hostname}`
+var loggerParam = []string{"{time}", "{start_time}", "{ts}", "{channel}", "{pid}", "{host}", "{method}", "{uri}", "{version}", "{target}", "{hostname}", "{code}", "{error}", "{req_headers}", "{res_body}", "{res_headers}", "{cost}"}
+
+func initLogMsg(fieldMap map[string]string) {
+ for _, value := range loggerParam {
+ fieldMap[value] = ""
+ }
+}
+
+func (client *Client) GetLogger() *Logger {
+ return client.logger
+}
+
+func (client *Client) GetLoggerMsg() string {
+ if client.logger == nil {
+ client.SetLogger("", "", os.Stdout, "")
+ }
+ return client.logger.lastLogMsg
+}
+
+func (client *Client) SetLogger(level string, channel string, out io.Writer, template string) {
+ if level == "" {
+ level = "info"
+ }
+
+ logChannel = "AlibabaCloud"
+ if channel != "" {
+ logChannel = channel
+ }
+ log := log.New(out, "["+strings.ToUpper(level)+"]", log.Lshortfile)
+ if template == "" {
+ template = defaultLoggerTemplate
+ }
+
+ client.logger = &Logger{
+ Logger: log,
+ formatTemplate: template,
+ isOpen: true,
+ }
+}
+
+func (client *Client) OpenLogger() {
+ if client.logger == nil {
+ client.SetLogger("", "", os.Stdout, "")
+ }
+ client.logger.isOpen = true
+}
+
+func (client *Client) CloseLogger() {
+ if client.logger != nil {
+ client.logger.isOpen = false
+ }
+}
+
+func (client *Client) SetTemplate(template string) {
+ if client.logger == nil {
+ client.SetLogger("", "", os.Stdout, "")
+ }
+ client.logger.formatTemplate = template
+}
+
+func (client *Client) GetTemplate() string {
+ if client.logger == nil {
+ client.SetLogger("", "", os.Stdout, "")
+ }
+ return client.logger.formatTemplate
+}
+
+func TransToString(object interface{}) string {
+ byt, err := json.Marshal(object)
+ if err != nil {
+ return ""
+ }
+ return string(byt)
+}
+
+func (client *Client) printLog(fieldMap map[string]string, err error) {
+ if err != nil {
+ fieldMap["{error}"] = err.Error()
+ }
+ fieldMap["{time}"] = time.Now().Format("2006-01-02 15:04:05")
+ fieldMap["{ts}"] = utils.GetTimeInFormatISO8601()
+ fieldMap["{channel}"] = logChannel
+ if client.logger != nil {
+ logMsg := client.logger.formatTemplate
+ for key, value := range fieldMap {
+ logMsg = strings.Replace(logMsg, key, value, -1)
+ }
+ client.logger.lastLogMsg = logMsg
+		if client.logger.isOpen {
+ client.logger.Output(2, logMsg)
+ }
+ }
+}
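
A sketch of wiring this logger up from application code, assuming a client built with the cr constructors that appear later in this diff; the credentials are placeholders, and the template is assembled from the {time}/{method}/{uri}/{code} placeholders defined in loggerParam:

```go
package main

import (
	"os"

	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

func main() {
	client, err := cr.NewClientWithAccessKey("cn-hangzhou", "<access-key-id>", "<access-key-secret>")
	if err != nil {
		panic(err)
	}
	// Each request logs one line to stdout; placeholders with no value
	// are simply replaced by empty strings in printLog.
	client.SetLogger("info", "AlibabaCloud", os.Stdout, "{time} {channel}: {method} {uri} -> {code}")
	client.OpenLogger()
	defer client.CloseLogger()
}
```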
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/acs_request.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/acs_request.go
new file mode 100644
index 000000000..fa22db1f7
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/acs_request.go
@@ -0,0 +1,378 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package requests
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
+)
+
+const (
+ RPC = "RPC"
+ ROA = "ROA"
+
+ HTTP = "HTTP"
+ HTTPS = "HTTPS"
+
+ DefaultHttpPort = "80"
+
+ GET = "GET"
+ PUT = "PUT"
+ POST = "POST"
+ DELETE = "DELETE"
+ HEAD = "HEAD"
+ OPTIONS = "OPTIONS"
+
+ Json = "application/json"
+ Xml = "application/xml"
+ Raw = "application/octet-stream"
+ Form = "application/x-www-form-urlencoded"
+
+ Header = "Header"
+ Query = "Query"
+ Body = "Body"
+ Path = "Path"
+
+ HeaderSeparator = "\n"
+)
+
+// AcsRequest is the interface implemented by all acs request types
+type AcsRequest interface {
+ GetScheme() string
+ GetMethod() string
+ GetDomain() string
+ GetPort() string
+ GetRegionId() string
+ GetHeaders() map[string]string
+ GetQueryParams() map[string]string
+ GetFormParams() map[string]string
+ GetContent() []byte
+ GetBodyReader() io.Reader
+ GetStyle() string
+ GetProduct() string
+ GetVersion() string
+ SetVersion(version string)
+ GetActionName() string
+ GetAcceptFormat() string
+ GetLocationServiceCode() string
+ GetLocationEndpointType() string
+ GetReadTimeout() time.Duration
+ GetConnectTimeout() time.Duration
+ SetReadTimeout(readTimeout time.Duration)
+ SetConnectTimeout(connectTimeout time.Duration)
+ SetHTTPSInsecure(isInsecure bool)
+ GetHTTPSInsecure() *bool
+
+ GetUserAgent() map[string]string
+
+ SetStringToSign(stringToSign string)
+ GetStringToSign() string
+
+ SetDomain(domain string)
+ SetContent(content []byte)
+ SetScheme(scheme string)
+ BuildUrl() string
+ BuildQueries() string
+
+ addHeaderParam(key, value string)
+ addQueryParam(key, value string)
+ addFormParam(key, value string)
+ addPathParam(key, value string)
+}
+
+// baseRequest is the base type embedded by every concrete request
+type baseRequest struct {
+ Scheme string
+ Method string
+ Domain string
+ Port string
+ RegionId string
+ ReadTimeout time.Duration
+ ConnectTimeout time.Duration
+ isInsecure *bool
+
+ userAgent map[string]string
+ product string
+ version string
+
+ actionName string
+
+ AcceptFormat string
+
+ QueryParams map[string]string
+ Headers map[string]string
+ FormParams map[string]string
+ Content []byte
+
+ locationServiceCode string
+ locationEndpointType string
+
+ queries string
+
+ stringToSign string
+}
+
+func (request *baseRequest) GetQueryParams() map[string]string {
+ return request.QueryParams
+}
+
+func (request *baseRequest) GetFormParams() map[string]string {
+ return request.FormParams
+}
+
+func (request *baseRequest) GetReadTimeout() time.Duration {
+ return request.ReadTimeout
+}
+
+func (request *baseRequest) GetConnectTimeout() time.Duration {
+ return request.ConnectTimeout
+}
+
+func (request *baseRequest) SetReadTimeout(readTimeout time.Duration) {
+ request.ReadTimeout = readTimeout
+}
+
+func (request *baseRequest) SetConnectTimeout(connectTimeout time.Duration) {
+ request.ConnectTimeout = connectTimeout
+}
+
+func (request *baseRequest) GetHTTPSInsecure() *bool {
+ return request.isInsecure
+}
+
+func (request *baseRequest) SetHTTPSInsecure(isInsecure bool) {
+ request.isInsecure = &isInsecure
+}
+
+func (request *baseRequest) GetContent() []byte {
+ return request.Content
+}
+
+func (request *baseRequest) SetVersion(version string) {
+ request.version = version
+}
+
+func (request *baseRequest) GetVersion() string {
+ return request.version
+}
+
+func (request *baseRequest) GetActionName() string {
+ return request.actionName
+}
+
+func (request *baseRequest) SetContent(content []byte) {
+ request.Content = content
+}
+
+func (request *baseRequest) GetUserAgent() map[string]string {
+ return request.userAgent
+}
+
+func (request *baseRequest) AppendUserAgent(key, value string) {
+ newkey := true
+ if request.userAgent == nil {
+ request.userAgent = make(map[string]string)
+ }
+ if strings.ToLower(key) != "core" && strings.ToLower(key) != "go" {
+		for tag := range request.userAgent {
+ if tag == key {
+ request.userAgent[tag] = value
+ newkey = false
+ }
+ }
+ if newkey {
+ request.userAgent[key] = value
+ }
+ }
+}
+
+func (request *baseRequest) addHeaderParam(key, value string) {
+ request.Headers[key] = value
+}
+
+func (request *baseRequest) addQueryParam(key, value string) {
+ request.QueryParams[key] = value
+}
+
+func (request *baseRequest) addFormParam(key, value string) {
+ request.FormParams[key] = value
+}
+
+func (request *baseRequest) GetAcceptFormat() string {
+ return request.AcceptFormat
+}
+
+func (request *baseRequest) GetLocationServiceCode() string {
+ return request.locationServiceCode
+}
+
+func (request *baseRequest) GetLocationEndpointType() string {
+ return request.locationEndpointType
+}
+
+func (request *baseRequest) GetProduct() string {
+ return request.product
+}
+
+func (request *baseRequest) GetScheme() string {
+ return request.Scheme
+}
+
+func (request *baseRequest) SetScheme(scheme string) {
+ request.Scheme = scheme
+}
+
+func (request *baseRequest) GetMethod() string {
+ return request.Method
+}
+
+func (request *baseRequest) GetDomain() string {
+ return request.Domain
+}
+
+func (request *baseRequest) SetDomain(host string) {
+ request.Domain = host
+}
+
+func (request *baseRequest) GetPort() string {
+ return request.Port
+}
+
+func (request *baseRequest) GetRegionId() string {
+ return request.RegionId
+}
+
+func (request *baseRequest) GetHeaders() map[string]string {
+ return request.Headers
+}
+
+func (request *baseRequest) SetContentType(contentType string) {
+ request.addHeaderParam("Content-Type", contentType)
+}
+
+func (request *baseRequest) GetContentType() (contentType string, contains bool) {
+ contentType, contains = request.Headers["Content-Type"]
+ return
+}
+
+func (request *baseRequest) SetStringToSign(stringToSign string) {
+ request.stringToSign = stringToSign
+}
+
+func (request *baseRequest) GetStringToSign() string {
+ return request.stringToSign
+}
+
+func defaultBaseRequest() (request *baseRequest) {
+ request = &baseRequest{
+ Scheme: "",
+ AcceptFormat: "JSON",
+ Method: GET,
+ QueryParams: make(map[string]string),
+ Headers: map[string]string{
+ "x-sdk-client": "golang/1.0.0",
+ "x-sdk-invoke-type": "normal",
+ "Accept-Encoding": "identity",
+ },
+ FormParams: make(map[string]string),
+ }
+ return
+}
+
+func InitParams(request AcsRequest) (err error) {
+ requestValue := reflect.ValueOf(request).Elem()
+ err = flatRepeatedList(requestValue, request, "", "")
+ return
+}
+
+func flatRepeatedList(dataValue reflect.Value, request AcsRequest, position, prefix string) (err error) {
+ dataType := dataValue.Type()
+ for i := 0; i < dataType.NumField(); i++ {
+ field := dataType.Field(i)
+ name, containsNameTag := field.Tag.Lookup("name")
+ fieldPosition := position
+ if fieldPosition == "" {
+ fieldPosition, _ = field.Tag.Lookup("position")
+ }
+ typeTag, containsTypeTag := field.Tag.Lookup("type")
+ if containsNameTag {
+ if !containsTypeTag {
+ // simple param
+ key := prefix + name
+ value := dataValue.Field(i).String()
+ if dataValue.Field(i).Kind().String() == "map" {
+ byt, _ := json.Marshal(dataValue.Field(i).Interface())
+ value = string(byt)
+ }
+ err = addParam(request, fieldPosition, key, value)
+ if err != nil {
+ return
+ }
+ } else if typeTag == "Repeated" {
+ // repeated param
+ repeatedFieldValue := dataValue.Field(i)
+ if repeatedFieldValue.Kind() != reflect.Slice {
+ // possible value: {"[]string", "*[]struct"}, we must call Elem() in the last condition
+ repeatedFieldValue = repeatedFieldValue.Elem()
+ }
+ if repeatedFieldValue.IsValid() && !repeatedFieldValue.IsNil() {
+ for m := 0; m < repeatedFieldValue.Len(); m++ {
+ elementValue := repeatedFieldValue.Index(m)
+ key := prefix + name + "." + strconv.Itoa(m+1)
+ if elementValue.Type().Kind().String() == "string" {
+ value := elementValue.String()
+ err = addParam(request, fieldPosition, key, value)
+ if err != nil {
+ return
+ }
+ } else {
+ err = flatRepeatedList(elementValue, request, fieldPosition, key+".")
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return
+}
+
+func addParam(request AcsRequest, position, name, value string) (err error) {
+ if len(value) > 0 {
+ switch position {
+ case Header:
+ request.addHeaderParam(name, value)
+ case Query:
+ request.addQueryParam(name, value)
+ case Path:
+ request.addPathParam(name, value)
+ case Body:
+ request.addFormParam(name, value)
+ default:
+ errMsg := fmt.Sprintf(errors.UnsupportedParamPositionErrorMessage, position)
+ err = errors.NewClientError(errors.UnsupportedParamPositionErrorCode, errMsg, nil)
+ }
+ }
+ return
+}
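
The position/name/type struct tags are what `InitParams` and `flatRepeatedList` walk; a hypothetical request type (not part of this SDK) makes the convention concrete:

```go
package example

import "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"

// ListWidgetsRequest is a hypothetical request type. `name` gives the
// wire name, `position` routes the value to Header/Query/Path/Body via
// addParam, and `type:"Repeated"` flattens the slice into Tag.1, Tag.2,
// ... parameters, exactly as flatRepeatedList does above.
type ListWidgetsRequest struct {
	*requests.RpcRequest
	WidgetName string    `position:"Query" name:"WidgetName"`
	Tag        *[]string `position:"Query" name:"Tag" type:"Repeated"`
}
```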
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/common_request.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/common_request.go
new file mode 100644
index 000000000..80c170097
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/common_request.go
@@ -0,0 +1,108 @@
+package requests
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+)
+
+type CommonRequest struct {
+ *baseRequest
+
+ Version string
+ ApiName string
+ Product string
+ ServiceCode string
+
+ // roa params
+ PathPattern string
+ PathParams map[string]string
+
+ Ontology AcsRequest
+}
+
+func NewCommonRequest() (request *CommonRequest) {
+ request = &CommonRequest{
+ baseRequest: defaultBaseRequest(),
+ }
+ request.Headers["x-sdk-invoke-type"] = "common"
+ request.PathParams = make(map[string]string)
+ return
+}
+
+func (request *CommonRequest) String() string {
+ request.TransToAcsRequest()
+
+ resultBuilder := bytes.Buffer{}
+
+ mapOutput := func(m map[string]string) {
+ if len(m) > 0 {
+ sortedKeys := make([]string, 0)
+ for k := range m {
+ sortedKeys = append(sortedKeys, k)
+ }
+
+ // sort 'string' key in increasing order
+ sort.Strings(sortedKeys)
+
+ for _, key := range sortedKeys {
+ resultBuilder.WriteString(key + ": " + m[key] + "\n")
+ }
+ }
+ }
+
+ // Request Line
+ resultBuilder.WriteString(fmt.Sprintf("%s %s %s/1.1\n", request.Method, request.BuildQueries(), strings.ToUpper(request.Scheme)))
+
+ // Headers
+ resultBuilder.WriteString("Host" + ": " + request.Domain + "\n")
+ mapOutput(request.Headers)
+
+ resultBuilder.WriteString("\n")
+ // Body
+ if len(request.Content) > 0 {
+ resultBuilder.WriteString(string(request.Content) + "\n")
+ } else {
+ mapOutput(request.FormParams)
+ }
+
+ return resultBuilder.String()
+}
+
+func (request *CommonRequest) TransToAcsRequest() {
+ if len(request.PathPattern) > 0 {
+ roaRequest := &RoaRequest{}
+ roaRequest.initWithCommonRequest(request)
+ request.Ontology = roaRequest
+ } else {
+ rpcRequest := &RpcRequest{}
+ rpcRequest.baseRequest = request.baseRequest
+ rpcRequest.product = request.Product
+ rpcRequest.version = request.Version
+ rpcRequest.locationServiceCode = request.ServiceCode
+ rpcRequest.actionName = request.ApiName
+ request.Ontology = rpcRequest
+ }
+}
+
+func (request *CommonRequest) BuildUrl() string {
+ return request.Ontology.BuildUrl()
+}
+
+func (request *CommonRequest) BuildQueries() string {
+ return request.Ontology.BuildQueries()
+}
+
+func (request *CommonRequest) GetBodyReader() io.Reader {
+ return request.Ontology.GetBodyReader()
+}
+
+func (request *CommonRequest) GetStyle() string {
+ return request.Ontology.GetStyle()
+}
+
+func (request *CommonRequest) addPathParam(key, value string) {
+ request.PathParams[key] = value
+}
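
A minimal sketch of an RPC-style CommonRequest; the product, version, and action names are hypothetical. Because PathPattern is empty, `TransToAcsRequest` selects the RpcRequest ontology:

```go
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
)

func main() {
	request := requests.NewCommonRequest()
	request.Scheme = requests.HTTPS
	request.Product = "Cr"          // hypothetical product
	request.Version = "2016-06-07"  // hypothetical API version
	request.ApiName = "GetRepoList" // hypothetical action
	request.QueryParams["PageSize"] = "30"
	// String() calls TransToAcsRequest and renders the request line,
	// headers, and body in HTTP-message form.
	fmt.Println(request.String())
}
```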
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/roa_request.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/roa_request.go
new file mode 100644
index 000000000..70b856e3c
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/roa_request.go
@@ -0,0 +1,152 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package requests
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/url"
+ "sort"
+ "strings"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+)
+
+type RoaRequest struct {
+ *baseRequest
+ pathPattern string
+ PathParams map[string]string
+}
+
+func (*RoaRequest) GetStyle() string {
+ return ROA
+}
+
+func (request *RoaRequest) GetBodyReader() io.Reader {
+ if request.FormParams != nil && len(request.FormParams) > 0 {
+ formString := utils.GetUrlFormedMap(request.FormParams)
+ return strings.NewReader(formString)
+ } else if len(request.Content) > 0 {
+ return bytes.NewReader(request.Content)
+ } else {
+ return nil
+ }
+}
+
+// for the signing step, the queries must not be url-encoded
+func (request *RoaRequest) BuildQueries() string {
+ return request.buildQueries()
+}
+
+func (request *RoaRequest) buildPath() string {
+ path := request.pathPattern
+ for key, value := range request.PathParams {
+ path = strings.Replace(path, "["+key+"]", value, 1)
+ }
+ return path
+}
+
+func (request *RoaRequest) buildQueries() string {
+ // replace path params with value
+ path := request.buildPath()
+ queryParams := request.QueryParams
+ // sort QueryParams by key
+ var queryKeys []string
+ for key := range queryParams {
+ queryKeys = append(queryKeys, key)
+ }
+ sort.Strings(queryKeys)
+
+ // append urlBuilder
+ urlBuilder := bytes.Buffer{}
+ urlBuilder.WriteString(path)
+ if len(queryKeys) > 0 {
+ urlBuilder.WriteString("?")
+ }
+ for i := 0; i < len(queryKeys); i++ {
+ queryKey := queryKeys[i]
+ urlBuilder.WriteString(queryKey)
+ if value := queryParams[queryKey]; len(value) > 0 {
+ urlBuilder.WriteString("=")
+ urlBuilder.WriteString(value)
+ }
+ if i < len(queryKeys)-1 {
+ urlBuilder.WriteString("&")
+ }
+ }
+ result := urlBuilder.String()
+ result = popStandardUrlencode(result)
+ return result
+}
+
+func (request *RoaRequest) buildQueryString() string {
+ queryParams := request.QueryParams
+ // sort QueryParams by key
+ q := url.Values{}
+ for key, value := range queryParams {
+ q.Add(key, value)
+ }
+ return q.Encode()
+}
+
+func popStandardUrlencode(stringToSign string) (result string) {
+ result = strings.Replace(stringToSign, "+", "%20", -1)
+ result = strings.Replace(result, "*", "%2A", -1)
+ result = strings.Replace(result, "%7E", "~", -1)
+ return
+}
+
+func (request *RoaRequest) BuildUrl() string {
+	// for network transmission, the url must be encoded
+ scheme := strings.ToLower(request.Scheme)
+ domain := request.Domain
+ port := request.Port
+ path := request.buildPath()
+ url := fmt.Sprintf("%s://%s:%s%s", scheme, domain, port, path)
+ querystring := request.buildQueryString()
+ if len(querystring) > 0 {
+ url = fmt.Sprintf("%s?%s", url, querystring)
+ }
+ return url
+}
+
+func (request *RoaRequest) addPathParam(key, value string) {
+ request.PathParams[key] = value
+}
+
+func (request *RoaRequest) InitWithApiInfo(product, version, action, uriPattern, serviceCode, endpointType string) {
+ request.baseRequest = defaultBaseRequest()
+ request.PathParams = make(map[string]string)
+ request.Headers["x-acs-version"] = version
+ request.pathPattern = uriPattern
+ request.locationServiceCode = serviceCode
+ request.locationEndpointType = endpointType
+ request.product = product
+ //request.version = version
+ request.actionName = action
+}
+
+func (request *RoaRequest) initWithCommonRequest(commonRequest *CommonRequest) {
+ request.baseRequest = commonRequest.baseRequest
+ request.PathParams = commonRequest.PathParams
+ request.product = commonRequest.Product
+ //request.version = commonRequest.Version
+ request.Headers["x-acs-version"] = commonRequest.Version
+ request.actionName = commonRequest.ApiName
+ request.pathPattern = commonRequest.PathPattern
+ request.locationServiceCode = commonRequest.ServiceCode
+ request.locationEndpointType = ""
+}
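
For the ROA style, PathPattern placeholders in square brackets are substituted from PathParams by `buildPath`; a small sketch with hypothetical values:

```go
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
)

func main() {
	request := requests.NewCommonRequest()
	request.PathPattern = "/repos/[RepoNamespace]/[RepoName]/tags" // hypothetical ROA path
	request.PathParams["RepoNamespace"] = "library"
	request.PathParams["RepoName"] = "nginx"
	request.TransToAcsRequest() // PathPattern set, so a RoaRequest is built
	fmt.Println(request.BuildQueries()) // /repos/library/nginx/tags
}
```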
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/rpc_request.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/rpc_request.go
new file mode 100644
index 000000000..01be6fd04
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/rpc_request.go
@@ -0,0 +1,79 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package requests
+
+import (
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+)
+
+type RpcRequest struct {
+ *baseRequest
+}
+
+func (request *RpcRequest) init() {
+ request.baseRequest = defaultBaseRequest()
+ request.Method = POST
+}
+
+func (*RpcRequest) GetStyle() string {
+ return RPC
+}
+
+func (request *RpcRequest) GetBodyReader() io.Reader {
+ if request.FormParams != nil && len(request.FormParams) > 0 {
+ formString := utils.GetUrlFormedMap(request.FormParams)
+ return strings.NewReader(formString)
+ } else {
+ return strings.NewReader("")
+ }
+}
+
+func (request *RpcRequest) BuildQueries() string {
+ request.queries = "/?" + utils.GetUrlFormedMap(request.QueryParams)
+ return request.queries
+}
+
+func (request *RpcRequest) BuildUrl() string {
+ url := fmt.Sprintf("%s://%s", strings.ToLower(request.Scheme), request.Domain)
+ if len(request.Port) > 0 {
+ url = fmt.Sprintf("%s:%s", url, request.Port)
+ }
+ return url + request.BuildQueries()
+}
+
+func (request *RpcRequest) GetVersion() string {
+ return request.version
+}
+
+func (request *RpcRequest) GetActionName() string {
+ return request.actionName
+}
+
+func (request *RpcRequest) addPathParam(key, value string) {
+	panic("not supported")
+}
+
+func (request *RpcRequest) InitWithApiInfo(product, version, action, serviceCode, endpointType string) {
+ request.init()
+ request.product = product
+ request.version = version
+ request.actionName = action
+ request.locationServiceCode = serviceCode
+ request.locationEndpointType = endpointType
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/types.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/types.go
new file mode 100644
index 000000000..28af63ea1
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/types.go
@@ -0,0 +1,53 @@
+package requests
+
+import "strconv"
+
+type Integer string
+
+func NewInteger(integer int) Integer {
+ return Integer(strconv.Itoa(integer))
+}
+
+func (integer Integer) HasValue() bool {
+ return integer != ""
+}
+
+func (integer Integer) GetValue() (int, error) {
+ return strconv.Atoi(string(integer))
+}
+
+func NewInteger64(integer int64) Integer {
+ return Integer(strconv.FormatInt(integer, 10))
+}
+
+func (integer Integer) GetValue64() (int64, error) {
+ return strconv.ParseInt(string(integer), 10, 0)
+}
+
+type Boolean string
+
+func NewBoolean(b bool) Boolean {
+	return Boolean(strconv.FormatBool(b))
+}
+
+func (boolean Boolean) HasValue() bool {
+ return boolean != ""
+}
+
+func (boolean Boolean) GetValue() (bool, error) {
+ return strconv.ParseBool(string(boolean))
+}
+
+type Float string
+
+func NewFloat(f float64) Float {
+ return Float(strconv.FormatFloat(f, 'f', 6, 64))
+}
+
+func (float Float) HasValue() bool {
+ return float != ""
+}
+
+func (float Float) GetValue() (float64, error) {
+ return strconv.ParseFloat(string(float), 64)
+}
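
These string-backed wrappers exist so that an unset numeric or boolean field stays distinguishable from its zero value; a quick usage sketch:

```go
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
)

func main() {
	pageSize := requests.NewInteger(30)
	fmt.Println(pageSize.HasValue()) // true
	n, _ := pageSize.GetValue()
	fmt.Println(n) // 30

	var unset requests.Boolean   // empty string, i.e. never set
	fmt.Println(unset.HasValue()) // false
}
```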
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/json_parser.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/json_parser.go
new file mode 100644
index 000000000..36604fe80
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/json_parser.go
@@ -0,0 +1,328 @@
+package responses
+
+import (
+ "encoding/json"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+ "unsafe"
+
+ jsoniter "github.com/json-iterator/go"
+)
+
+const maxUint = ^uint(0)
+const maxInt = int(maxUint >> 1)
+const minInt = -maxInt - 1
+
+var jsonParser jsoniter.API
+
+func init() {
+ registerBetterFuzzyDecoder()
+ jsonParser = jsoniter.Config{
+ EscapeHTML: true,
+ SortMapKeys: true,
+ ValidateJsonRawMessage: true,
+ CaseSensitive: true,
+ }.Froze()
+}
+
+func registerBetterFuzzyDecoder() {
+ jsoniter.RegisterTypeDecoder("string", &nullableFuzzyStringDecoder{})
+ jsoniter.RegisterTypeDecoder("bool", &fuzzyBoolDecoder{})
+ jsoniter.RegisterTypeDecoder("float32", &nullableFuzzyFloat32Decoder{})
+ jsoniter.RegisterTypeDecoder("float64", &nullableFuzzyFloat64Decoder{})
+ jsoniter.RegisterTypeDecoder("int", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ if isFloat {
+ val := iter.ReadFloat64()
+ if val > float64(maxInt) || val < float64(minInt) {
+ iter.ReportError("fuzzy decode int", "exceed range")
+ return
+ }
+ *((*int)(ptr)) = int(val)
+ } else {
+ *((*int)(ptr)) = iter.ReadInt()
+ }
+ }})
+ jsoniter.RegisterTypeDecoder("uint", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ if isFloat {
+ val := iter.ReadFloat64()
+ if val > float64(maxUint) || val < 0 {
+ iter.ReportError("fuzzy decode uint", "exceed range")
+ return
+ }
+ *((*uint)(ptr)) = uint(val)
+ } else {
+ *((*uint)(ptr)) = iter.ReadUint()
+ }
+ }})
+ jsoniter.RegisterTypeDecoder("int8", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ if isFloat {
+ val := iter.ReadFloat64()
+ if val > float64(math.MaxInt8) || val < float64(math.MinInt8) {
+ iter.ReportError("fuzzy decode int8", "exceed range")
+ return
+ }
+ *((*int8)(ptr)) = int8(val)
+ } else {
+ *((*int8)(ptr)) = iter.ReadInt8()
+ }
+ }})
+ jsoniter.RegisterTypeDecoder("uint8", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ if isFloat {
+ val := iter.ReadFloat64()
+ if val > float64(math.MaxUint8) || val < 0 {
+ iter.ReportError("fuzzy decode uint8", "exceed range")
+ return
+ }
+ *((*uint8)(ptr)) = uint8(val)
+ } else {
+ *((*uint8)(ptr)) = iter.ReadUint8()
+ }
+ }})
+ jsoniter.RegisterTypeDecoder("int16", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ if isFloat {
+ val := iter.ReadFloat64()
+ if val > float64(math.MaxInt16) || val < float64(math.MinInt16) {
+ iter.ReportError("fuzzy decode int16", "exceed range")
+ return
+ }
+ *((*int16)(ptr)) = int16(val)
+ } else {
+ *((*int16)(ptr)) = iter.ReadInt16()
+ }
+ }})
+ jsoniter.RegisterTypeDecoder("uint16", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ if isFloat {
+ val := iter.ReadFloat64()
+ if val > float64(math.MaxUint16) || val < 0 {
+ iter.ReportError("fuzzy decode uint16", "exceed range")
+ return
+ }
+ *((*uint16)(ptr)) = uint16(val)
+ } else {
+ *((*uint16)(ptr)) = iter.ReadUint16()
+ }
+ }})
+ jsoniter.RegisterTypeDecoder("int32", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ if isFloat {
+ val := iter.ReadFloat64()
+ if val > float64(math.MaxInt32) || val < float64(math.MinInt32) {
+ iter.ReportError("fuzzy decode int32", "exceed range")
+ return
+ }
+ *((*int32)(ptr)) = int32(val)
+ } else {
+ *((*int32)(ptr)) = iter.ReadInt32()
+ }
+ }})
+ jsoniter.RegisterTypeDecoder("uint32", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ if isFloat {
+ val := iter.ReadFloat64()
+ if val > float64(math.MaxUint32) || val < 0 {
+ iter.ReportError("fuzzy decode uint32", "exceed range")
+ return
+ }
+ *((*uint32)(ptr)) = uint32(val)
+ } else {
+ *((*uint32)(ptr)) = iter.ReadUint32()
+ }
+ }})
+ jsoniter.RegisterTypeDecoder("int64", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ if isFloat {
+ val := iter.ReadFloat64()
+ if val > float64(math.MaxInt64) || val < float64(math.MinInt64) {
+ iter.ReportError("fuzzy decode int64", "exceed range")
+ return
+ }
+ *((*int64)(ptr)) = int64(val)
+ } else {
+ *((*int64)(ptr)) = iter.ReadInt64()
+ }
+ }})
+ jsoniter.RegisterTypeDecoder("uint64", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ if isFloat {
+ val := iter.ReadFloat64()
+ if val > float64(math.MaxUint64) || val < 0 {
+ iter.ReportError("fuzzy decode uint64", "exceed range")
+ return
+ }
+ *((*uint64)(ptr)) = uint64(val)
+ } else {
+ *((*uint64)(ptr)) = iter.ReadUint64()
+ }
+ }})
+}
+
+type nullableFuzzyStringDecoder struct {
+}
+
+func (decoder *nullableFuzzyStringDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ valueType := iter.WhatIsNext()
+ switch valueType {
+ case jsoniter.NumberValue:
+ var number json.Number
+ iter.ReadVal(&number)
+ *((*string)(ptr)) = string(number)
+ case jsoniter.StringValue:
+ *((*string)(ptr)) = iter.ReadString()
+ case jsoniter.BoolValue:
+ *((*string)(ptr)) = strconv.FormatBool(iter.ReadBool())
+ case jsoniter.NilValue:
+ iter.ReadNil()
+ *((*string)(ptr)) = ""
+ default:
+ iter.ReportError("fuzzyStringDecoder", "not number or string or bool")
+ }
+}
+
+type fuzzyBoolDecoder struct {
+}
+
+func (decoder *fuzzyBoolDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ valueType := iter.WhatIsNext()
+ switch valueType {
+ case jsoniter.BoolValue:
+ *((*bool)(ptr)) = iter.ReadBool()
+ case jsoniter.NumberValue:
+ var number json.Number
+ iter.ReadVal(&number)
+ num, err := number.Int64()
+ if err != nil {
+ iter.ReportError("fuzzyBoolDecoder", "get value from json.number failed")
+ }
+ if num == 0 {
+ *((*bool)(ptr)) = false
+ } else {
+ *((*bool)(ptr)) = true
+ }
+ case jsoniter.StringValue:
+ strValue := strings.ToLower(iter.ReadString())
+ if strValue == "true" {
+ *((*bool)(ptr)) = true
+ } else if strValue == "false" || strValue == "" {
+ *((*bool)(ptr)) = false
+ } else {
+ iter.ReportError("fuzzyBoolDecoder", "unsupported bool value: "+strValue)
+ }
+ case jsoniter.NilValue:
+ iter.ReadNil()
+ *((*bool)(ptr)) = false
+ default:
+ iter.ReportError("fuzzyBoolDecoder", "not number or string or nil")
+ }
+}
+
+type nullableFuzzyIntegerDecoder struct {
+ fun func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator)
+}
+
+func (decoder *nullableFuzzyIntegerDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ valueType := iter.WhatIsNext()
+ var str string
+ switch valueType {
+ case jsoniter.NumberValue:
+ var number json.Number
+ iter.ReadVal(&number)
+ str = string(number)
+ case jsoniter.StringValue:
+ str = iter.ReadString()
+ // support empty string
+ if str == "" {
+ str = "0"
+ }
+ case jsoniter.BoolValue:
+ if iter.ReadBool() {
+ str = "1"
+ } else {
+ str = "0"
+ }
+ case jsoniter.NilValue:
+ iter.ReadNil()
+ str = "0"
+ default:
+ iter.ReportError("fuzzyIntegerDecoder", "not number or string")
+ }
+ newIter := iter.Pool().BorrowIterator([]byte(str))
+ defer iter.Pool().ReturnIterator(newIter)
+ isFloat := strings.IndexByte(str, '.') != -1
+ decoder.fun(isFloat, ptr, newIter)
+ if newIter.Error != nil && newIter.Error != io.EOF {
+ iter.Error = newIter.Error
+ }
+}
+
+type nullableFuzzyFloat32Decoder struct {
+}
+
+func (decoder *nullableFuzzyFloat32Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ valueType := iter.WhatIsNext()
+ var str string
+ switch valueType {
+ case jsoniter.NumberValue:
+ *((*float32)(ptr)) = iter.ReadFloat32()
+ case jsoniter.StringValue:
+ str = iter.ReadString()
+ // support empty string
+ if str == "" {
+ *((*float32)(ptr)) = 0
+ return
+ }
+ newIter := iter.Pool().BorrowIterator([]byte(str))
+ defer iter.Pool().ReturnIterator(newIter)
+ *((*float32)(ptr)) = newIter.ReadFloat32()
+ if newIter.Error != nil && newIter.Error != io.EOF {
+ iter.Error = newIter.Error
+ }
+ case jsoniter.BoolValue:
+ // support bool to float32
+ if iter.ReadBool() {
+ *((*float32)(ptr)) = 1
+ } else {
+ *((*float32)(ptr)) = 0
+ }
+ case jsoniter.NilValue:
+ iter.ReadNil()
+ *((*float32)(ptr)) = 0
+ default:
+ iter.ReportError("nullableFuzzyFloat32Decoder", "not number or string")
+ }
+}
+
+type nullableFuzzyFloat64Decoder struct {
+}
+
+func (decoder *nullableFuzzyFloat64Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+ valueType := iter.WhatIsNext()
+ var str string
+ switch valueType {
+ case jsoniter.NumberValue:
+ *((*float64)(ptr)) = iter.ReadFloat64()
+ case jsoniter.StringValue:
+ str = iter.ReadString()
+ // support empty string
+ if str == "" {
+ *((*float64)(ptr)) = 0
+ return
+ }
+ newIter := iter.Pool().BorrowIterator([]byte(str))
+ defer iter.Pool().ReturnIterator(newIter)
+ *((*float64)(ptr)) = newIter.ReadFloat64()
+ if newIter.Error != nil && newIter.Error != io.EOF {
+ iter.Error = newIter.Error
+ }
+ case jsoniter.BoolValue:
+ // support bool to float64
+ if iter.ReadBool() {
+ *((*float64)(ptr)) = 1
+ } else {
+ *((*float64)(ptr)) = 0
+ }
+ case jsoniter.NilValue:
+ // support empty string
+ iter.ReadNil()
+ *((*float64)(ptr)) = 0
+ default:
+ iter.ReportError("nullableFuzzyFloat64Decoder", "not number or string")
+ }
+}
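
A sketch of the effect of these fuzzy decoders, assuming the blank import triggers the init() registration above and that jsoniter applies globally registered type decoders to its default config:

```go
package main

import (
	"fmt"

	_ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" // registers the fuzzy decoders
	jsoniter "github.com/json-iterator/go"
)

type record struct {
	Count int    `json:"count"`
	Name  string `json:"name"`
	Done  bool   `json:"done"`
}

func main() {
	// Mixed representations decode cleanly: a quoted number into int,
	// a bare number into string, and the string "true" into bool.
	var r record
	if err := jsoniter.Unmarshal([]byte(`{"count":"42","name":128,"done":"true"}`), &r); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", r) // {Count:42 Name:128 Done:true}
}
```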
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/response.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/response.go
new file mode 100644
index 000000000..53a156b71
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/response.go
@@ -0,0 +1,144 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package responses
+
+import (
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
+)
+
+type AcsResponse interface {
+ IsSuccess() bool
+ GetHttpStatus() int
+ GetHttpHeaders() map[string][]string
+ GetHttpContentString() string
+ GetHttpContentBytes() []byte
+ GetOriginHttpResponse() *http.Response
+ parseFromHttpResponse(httpResponse *http.Response) error
+}
+
+// Unmarshal parses the http response body into the target response according to format
+func Unmarshal(response AcsResponse, httpResponse *http.Response, format string) (err error) {
+ err = response.parseFromHttpResponse(httpResponse)
+ if err != nil {
+ return
+ }
+ if !response.IsSuccess() {
+ err = errors.NewServerError(response.GetHttpStatus(), response.GetHttpContentString(), "")
+ return
+ }
+
+ if _, isCommonResponse := response.(*CommonResponse); isCommonResponse {
+ // common response need not unmarshal
+ return
+ }
+
+ if len(response.GetHttpContentBytes()) == 0 {
+ return
+ }
+
+ if strings.ToUpper(format) == "JSON" {
+ err = jsonParser.Unmarshal(response.GetHttpContentBytes(), response)
+ if err != nil {
+ err = errors.NewClientError(errors.JsonUnmarshalErrorCode, errors.JsonUnmarshalErrorMessage, err)
+ }
+ } else if strings.ToUpper(format) == "XML" {
+ err = xml.Unmarshal(response.GetHttpContentBytes(), response)
+ }
+ return
+}
+
+type BaseResponse struct {
+ httpStatus int
+ httpHeaders map[string][]string
+ httpContentString string
+ httpContentBytes []byte
+ originHttpResponse *http.Response
+}
+
+func (baseResponse *BaseResponse) GetHttpStatus() int {
+ return baseResponse.httpStatus
+}
+
+func (baseResponse *BaseResponse) GetHttpHeaders() map[string][]string {
+ return baseResponse.httpHeaders
+}
+
+func (baseResponse *BaseResponse) GetHttpContentString() string {
+ return baseResponse.httpContentString
+}
+
+func (baseResponse *BaseResponse) GetHttpContentBytes() []byte {
+ return baseResponse.httpContentBytes
+}
+
+func (baseResponse *BaseResponse) GetOriginHttpResponse() *http.Response {
+ return baseResponse.originHttpResponse
+}
+
+func (baseResponse *BaseResponse) IsSuccess() bool {
+ if baseResponse.GetHttpStatus() >= 200 && baseResponse.GetHttpStatus() < 300 {
+ return true
+ }
+
+ return false
+}
+
+func (baseResponse *BaseResponse) parseFromHttpResponse(httpResponse *http.Response) (err error) {
+ defer httpResponse.Body.Close()
+ body, err := ioutil.ReadAll(httpResponse.Body)
+ if err != nil {
+ return
+ }
+ baseResponse.httpStatus = httpResponse.StatusCode
+ baseResponse.httpHeaders = httpResponse.Header
+ baseResponse.httpContentBytes = body
+ baseResponse.httpContentString = string(body)
+ baseResponse.originHttpResponse = httpResponse
+ return
+}
+
+func (baseResponse *BaseResponse) String() string {
+ resultBuilder := bytes.Buffer{}
+ // statusCode
+ // resultBuilder.WriteString("\n")
+ resultBuilder.WriteString(fmt.Sprintf("%s %s\n", baseResponse.originHttpResponse.Proto, baseResponse.originHttpResponse.Status))
+ // httpHeaders
+ //resultBuilder.WriteString("Headers:\n")
+ for key, value := range baseResponse.httpHeaders {
+ resultBuilder.WriteString(key + ": " + strings.Join(value, ";") + "\n")
+ }
+ resultBuilder.WriteString("\n")
+ // content
+ //resultBuilder.WriteString("Content:\n")
+ resultBuilder.WriteString(baseResponse.httpContentString + "\n")
+ return resultBuilder.String()
+}
+
+type CommonResponse struct {
+ *BaseResponse
+}
+
+func NewCommonResponse() (response *CommonResponse) {
+ return &CommonResponse{
+ BaseResponse: &BaseResponse{},
+ }
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/debug.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/debug.go
new file mode 100644
index 000000000..09440d27b
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/debug.go
@@ -0,0 +1,36 @@
+package utils
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+type Debug func(format string, v ...interface{})
+
+var hookGetEnv = func() string {
+ return os.Getenv("DEBUG")
+}
+
+var hookPrint = func(input string) {
+ fmt.Println(input)
+}
+
+func Init(flag string) Debug {
+ enable := false
+
+ env := hookGetEnv()
+ parts := strings.Split(env, ",")
+ for _, part := range parts {
+ if part == flag {
+ enable = true
+ break
+ }
+ }
+
+ return func(format string, v ...interface{}) {
+ if enable {
+ hookPrint(fmt.Sprintf(format, v...))
+ }
+ }
+}
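
The returned function is a no-op unless the flag appears in the comma-separated DEBUG environment variable; this is how the `debug("resolve endpoint ...")` calls earlier in this diff are gated. A small sketch with a hypothetical flag:

```go
package main

import "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"

// Prints only when run with DEBUG=app (or e.g. DEBUG=sdk,app);
// otherwise debug is a no-op.
var debug = utils.Init("app")

func main() {
	debug("connecting to region %s", "cn-hangzhou")
}
```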
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/utils.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/utils.go
new file mode 100644
index 000000000..f8a3ad384
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/utils.go
@@ -0,0 +1,141 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package utils
+
+import (
+ "crypto/md5"
+ "crypto/rand"
+ "encoding/base64"
+ "encoding/hex"
+ "hash"
+ rand2 "math/rand"
+ "net/url"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+type UUID [16]byte
+
+const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+func GetUUID() (uuidHex string) {
+ uuid := NewUUID()
+ uuidHex = hex.EncodeToString(uuid[:])
+ return
+}
+
+func RandStringBytes(n int) string {
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = letterBytes[rand2.Intn(len(letterBytes))]
+ }
+ return string(b)
+}
+
+func GetMD5Base64(bytes []byte) (base64Value string) {
+ md5Ctx := md5.New()
+ md5Ctx.Write(bytes)
+ md5Value := md5Ctx.Sum(nil)
+ base64Value = base64.StdEncoding.EncodeToString(md5Value)
+ return
+}
+
+func GetTimeInFormatISO8601() (timeStr string) {
+ gmt := time.FixedZone("GMT", 0)
+
+ return time.Now().In(gmt).Format("2006-01-02T15:04:05Z")
+}
+
+func GetTimeInFormatRFC2616() (timeStr string) {
+ gmt := time.FixedZone("GMT", 0)
+
+ return time.Now().In(gmt).Format("Mon, 02 Jan 2006 15:04:05 GMT")
+}
+
+func GetUrlFormedMap(source map[string]string) (urlEncoded string) {
+ urlEncoder := url.Values{}
+ for key, value := range source {
+ urlEncoder.Add(key, value)
+ }
+ urlEncoded = urlEncoder.Encode()
+ return
+}
+
+func InitStructWithDefaultTag(bean interface{}) {
+ configType := reflect.TypeOf(bean)
+ for i := 0; i < configType.Elem().NumField(); i++ {
+ field := configType.Elem().Field(i)
+ defaultValue := field.Tag.Get("default")
+ if defaultValue == "" {
+ continue
+ }
+ setter := reflect.ValueOf(bean).Elem().Field(i)
+ switch field.Type.String() {
+ case "int":
+ intValue, _ := strconv.ParseInt(defaultValue, 10, 64)
+ setter.SetInt(intValue)
+ case "time.Duration":
+ intValue, _ := strconv.ParseInt(defaultValue, 10, 64)
+ setter.SetInt(intValue)
+ case "string":
+ setter.SetString(defaultValue)
+ case "bool":
+ boolValue, _ := strconv.ParseBool(defaultValue)
+ setter.SetBool(boolValue)
+ }
+ }
+}
+
+func NewUUID() UUID {
+ ns := UUID{}
+ safeRandom(ns[:])
+ u := newFromHash(md5.New(), ns, RandStringBytes(16))
+ u[6] = (u[6] & 0x0f) | (byte(2) << 4)
+ u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
+
+ return u
+}
+
+func newFromHash(h hash.Hash, ns UUID, name string) UUID {
+ u := UUID{}
+ h.Write(ns[:])
+ h.Write([]byte(name))
+ copy(u[:], h.Sum(nil))
+
+ return u
+}
+
+func safeRandom(dest []byte) {
+ if _, err := rand.Read(dest); err != nil {
+ panic(err)
+ }
+}
+
+func (u UUID) String() string {
+ buf := make([]byte, 36)
+
+ hex.Encode(buf[0:8], u[0:4])
+ buf[8] = '-'
+ hex.Encode(buf[9:13], u[4:6])
+ buf[13] = '-'
+ hex.Encode(buf[14:18], u[6:8])
+ buf[18] = '-'
+ hex.Encode(buf[19:23], u[8:10])
+ buf[23] = '-'
+ hex.Encode(buf[24:], u[10:])
+
+ return string(buf)
+}
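
A quick tour of the helpers above; output values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
)

func main() {
	fmt.Println(utils.GetUUID())                // 32 hex characters
	fmt.Println(utils.GetTimeInFormatISO8601()) // e.g. 2019-01-02T15:04:05Z
	// url.Values.Encode sorts keys, so the output is deterministic.
	fmt.Println(utils.GetUrlFormedMap(map[string]string{"b": "2", "a": "1"})) // a=1&b=2
}
```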
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/cancel_repo_build.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/cancel_repo_build.go
new file mode 100644
index 000000000..19d7194a8
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/cancel_repo_build.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// CancelRepoBuild invokes the cr.CancelRepoBuild API synchronously
+// api document: https://help.aliyun.com/api/cr/cancelrepobuild.html
+func (client *Client) CancelRepoBuild(request *CancelRepoBuildRequest) (response *CancelRepoBuildResponse, err error) {
+ response = CreateCancelRepoBuildResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// CancelRepoBuildWithChan invokes the cr.CancelRepoBuild API asynchronously
+// api document: https://help.aliyun.com/api/cr/cancelrepobuild.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CancelRepoBuildWithChan(request *CancelRepoBuildRequest) (<-chan *CancelRepoBuildResponse, <-chan error) {
+ responseChan := make(chan *CancelRepoBuildResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.CancelRepoBuild(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// CancelRepoBuildWithCallback invokes the cr.CancelRepoBuild API asynchronously
+// api document: https://help.aliyun.com/api/cr/cancelrepobuild.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CancelRepoBuildWithCallback(request *CancelRepoBuildRequest, callback func(response *CancelRepoBuildResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *CancelRepoBuildResponse
+ var err error
+ defer close(result)
+ response, err = client.CancelRepoBuild(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// CancelRepoBuildRequest is the request struct for api CancelRepoBuild
+type CancelRepoBuildRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ BuildId string `position:"Path" name:"BuildId"`
+}
+
+// CancelRepoBuildResponse is the response struct for api CancelRepoBuild
+type CancelRepoBuildResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateCancelRepoBuildRequest creates a request to invoke CancelRepoBuild API
+func CreateCancelRepoBuildRequest() (request *CancelRepoBuildRequest) {
+ request = &CancelRepoBuildRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "CancelRepoBuild", "/repos/[RepoNamespace]/[RepoName]/build/[BuildId]/cancel", "cr", "openAPI")
+ request.Method = requests.POST
+ return
+}
+
+// CreateCancelRepoBuildResponse creates a response to parse from CancelRepoBuild response
+func CreateCancelRepoBuildResponse() (response *CancelRepoBuildResponse) {
+ response = &CancelRepoBuildResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
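
A sketch of the synchronous call path end to end, using the client constructor defined in the next file; credentials and identifiers are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

func main() {
	client, err := cr.NewClientWithAccessKey("cn-hangzhou", "<access-key-id>", "<access-key-secret>")
	if err != nil {
		panic(err)
	}
	request := cr.CreateCancelRepoBuildRequest()
	request.RepoNamespace = "library" // hypothetical namespace
	request.RepoName = "nginx"        // hypothetical repo
	request.BuildId = "12345"         // hypothetical build id
	response, err := client.CancelRepoBuild(request)
	if err != nil {
		fmt.Println("cancel failed:", err)
		return
	}
	fmt.Println("status:", response.GetHttpStatus())
}
```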
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/client.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/client.go
new file mode 100644
index 000000000..59b1729d6
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/client.go
@@ -0,0 +1,81 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+)
+
+// Client is the SDK client struct; each func corresponds to an OpenAPI operation
+type Client struct {
+ sdk.Client
+}
+
+// NewClient creates an SDK client with environment variables
+func NewClient() (client *Client, err error) {
+ client = &Client{}
+ err = client.Init()
+ return
+}
+
+// NewClientWithOptions creates an SDK client with regionId/sdkConfig/credential;
+// this is the common API to create an SDK client
+func NewClientWithOptions(regionId string, config *sdk.Config, credential auth.Credential) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithOptions(regionId, config, credential)
+ return
+}
+
+// NewClientWithAccessKey is a shortcut to create an SDK client with an AccessKey
+// usage: https://help.aliyun.com/document_detail/66217.html
+func NewClientWithAccessKey(regionId, accessKeyId, accessKeySecret string) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithAccessKey(regionId, accessKeyId, accessKeySecret)
+ return
+}
+
+// NewClientWithStsToken is a shortcut to create an SDK client with an STS token
+// usage: https://help.aliyun.com/document_detail/66222.html
+func NewClientWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken string) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken)
+ return
+}
+
+// NewClientWithRamRoleArn is a shortcut to create an SDK client with a RAM RoleArn
+// usage: https://help.aliyun.com/document_detail/66222.html
+func NewClientWithRamRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName)
+ return
+}
+
+// NewClientWithEcsRamRole is a shortcut to create an SDK client with an ECS RAM role
+// usage: https://help.aliyun.com/document_detail/66223.html
+func NewClientWithEcsRamRole(regionId string, roleName string) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithEcsRamRole(regionId, roleName)
+ return
+}
+
+// NewClientWithRsaKeyPair is a shortcut to create an SDK client with an RSA key pair
+// attention: RSA key pair auth is only available in Japan regions
+func NewClientWithRsaKeyPair(regionId string, publicKeyId, privateKey string, sessionExpiration int) (client *Client, err error) {
+ client = &Client{}
+ err = client.InitWithRsaKeyPair(regionId, publicKeyId, privateKey, sessionExpiration)
+ return
+}
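
client.go only wires the cr service into the shared sdk.Client initializers; which constructor applies depends on the credential type. A minimal construction sketch, assuming static AccessKey credentials (region and keys are placeholders):

package main

import (
	"log"

	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

func main() {
	// Placeholder region and credentials for illustration only.
	client, err := cr.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		log.Fatal(err)
	}
	_ = client // ready for any of the generated cr.* calls
}

The STS, RAM role, ECS RAM role, and RSA key pair constructors follow the same shape with their respective credentials.
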
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_collection.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_collection.go
new file mode 100644
index 000000000..72ee07e98
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_collection.go
@@ -0,0 +1,102 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// CreateCollection invokes the cr.CreateCollection API synchronously
+// api document: https://help.aliyun.com/api/cr/createcollection.html
+func (client *Client) CreateCollection(request *CreateCollectionRequest) (response *CreateCollectionResponse, err error) {
+ response = CreateCreateCollectionResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// CreateCollectionWithChan invokes the cr.CreateCollection API asynchronously
+// api document: https://help.aliyun.com/api/cr/createcollection.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateCollectionWithChan(request *CreateCollectionRequest) (<-chan *CreateCollectionResponse, <-chan error) {
+ responseChan := make(chan *CreateCollectionResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.CreateCollection(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// CreateCollectionWithCallback invokes the cr.CreateCollection API asynchronously
+// api document: https://help.aliyun.com/api/cr/createcollection.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateCollectionWithCallback(request *CreateCollectionRequest, callback func(response *CreateCollectionResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *CreateCollectionResponse
+ var err error
+ defer close(result)
+ response, err = client.CreateCollection(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// CreateCollectionRequest is the request struct for api CreateCollection
+type CreateCollectionRequest struct {
+ *requests.RoaRequest
+}
+
+// CreateCollectionResponse is the response struct for api CreateCollection
+type CreateCollectionResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateCreateCollectionRequest creates a request to invoke CreateCollection API
+func CreateCreateCollectionRequest() (request *CreateCollectionRequest) {
+ request = &CreateCollectionRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "CreateCollection", "/collections", "cr", "openAPI")
+ request.Method = requests.PUT
+ return
+}
+
+// CreateCreateCollectionResponse creates a response to parse from CreateCollection response
+func CreateCreateCollectionResponse() (response *CreateCollectionResponse) {
+ response = &CreateCollectionResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
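
The *WithCallback variants complement *WithChan: the callback runs on the SDK's async task pool, and the returned channel delivers 1 after the callback has executed (or 0 if the task could not be queued), so receiving from it blocks until the callback is done either way. A sketch, assuming a constructed client:

package main

import (
	"log"

	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

func createCollection(client *cr.Client) {
	req := cr.CreateCreateCollectionRequest()
	done := client.CreateCollectionWithCallback(req, func(resp *cr.CreateCollectionResponse, err error) {
		if err != nil {
			log.Println("CreateCollection failed:", err)
			return
		}
		log.Println("HTTP status:", resp.GetHttpStatus())
	})
	<-done // blocks until the callback has run
}
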
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_namespace.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_namespace.go
new file mode 100644
index 000000000..c0535d965
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_namespace.go
@@ -0,0 +1,102 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// CreateNamespace invokes the cr.CreateNamespace API synchronously
+// api document: https://help.aliyun.com/api/cr/createnamespace.html
+func (client *Client) CreateNamespace(request *CreateNamespaceRequest) (response *CreateNamespaceResponse, err error) {
+ response = CreateCreateNamespaceResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// CreateNamespaceWithChan invokes the cr.CreateNamespace API asynchronously
+// api document: https://help.aliyun.com/api/cr/createnamespace.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateNamespaceWithChan(request *CreateNamespaceRequest) (<-chan *CreateNamespaceResponse, <-chan error) {
+ responseChan := make(chan *CreateNamespaceResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.CreateNamespace(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// CreateNamespaceWithCallback invokes the cr.CreateNamespace API asynchronously
+// api document: https://help.aliyun.com/api/cr/createnamespace.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateNamespaceWithCallback(request *CreateNamespaceRequest, callback func(response *CreateNamespaceResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *CreateNamespaceResponse
+ var err error
+ defer close(result)
+ response, err = client.CreateNamespace(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// CreateNamespaceRequest is the request struct for api CreateNamespace
+type CreateNamespaceRequest struct {
+ *requests.RoaRequest
+}
+
+// CreateNamespaceResponse is the response struct for api CreateNamespace
+type CreateNamespaceResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateCreateNamespaceRequest creates a request to invoke CreateNamespace API
+func CreateCreateNamespaceRequest() (request *CreateNamespaceRequest) {
+ request = &CreateNamespaceRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "CreateNamespace", "/namespace", "cr", "openAPI")
+ request.Method = requests.PUT
+ return
+}
+
+// CreateCreateNamespaceResponse creates a response to parse from CreateNamespace response
+func CreateCreateNamespaceResponse() (response *CreateNamespaceResponse) {
+ response = &CreateNamespaceResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_namespace_authorization.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_namespace_authorization.go
new file mode 100644
index 000000000..9ca00b7a8
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_namespace_authorization.go
@@ -0,0 +1,103 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// CreateNamespaceAuthorization invokes the cr.CreateNamespaceAuthorization API synchronously
+// api document: https://help.aliyun.com/api/cr/createnamespaceauthorization.html
+func (client *Client) CreateNamespaceAuthorization(request *CreateNamespaceAuthorizationRequest) (response *CreateNamespaceAuthorizationResponse, err error) {
+ response = CreateCreateNamespaceAuthorizationResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// CreateNamespaceAuthorizationWithChan invokes the cr.CreateNamespaceAuthorization API asynchronously
+// api document: https://help.aliyun.com/api/cr/createnamespaceauthorization.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateNamespaceAuthorizationWithChan(request *CreateNamespaceAuthorizationRequest) (<-chan *CreateNamespaceAuthorizationResponse, <-chan error) {
+ responseChan := make(chan *CreateNamespaceAuthorizationResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.CreateNamespaceAuthorization(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// CreateNamespaceAuthorizationWithCallback invokes the cr.CreateNamespaceAuthorization API asynchronously
+// api document: https://help.aliyun.com/api/cr/createnamespaceauthorization.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateNamespaceAuthorizationWithCallback(request *CreateNamespaceAuthorizationRequest, callback func(response *CreateNamespaceAuthorizationResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *CreateNamespaceAuthorizationResponse
+ var err error
+ defer close(result)
+ response, err = client.CreateNamespaceAuthorization(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// CreateNamespaceAuthorizationRequest is the request struct for api CreateNamespaceAuthorization
+type CreateNamespaceAuthorizationRequest struct {
+ *requests.RoaRequest
+ Namespace string `position:"Path" name:"Namespace"`
+}
+
+// CreateNamespaceAuthorizationResponse is the response struct for api CreateNamespaceAuthorization
+type CreateNamespaceAuthorizationResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateCreateNamespaceAuthorizationRequest creates a request to invoke CreateNamespaceAuthorization API
+func CreateCreateNamespaceAuthorizationRequest() (request *CreateNamespaceAuthorizationRequest) {
+ request = &CreateNamespaceAuthorizationRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "CreateNamespaceAuthorization", "/namespace/[Namespace]/authorizations", "cr", "openAPI")
+ request.Method = requests.PUT
+ return
+}
+
+// CreateCreateNamespaceAuthorizationResponse creates a response to parse from CreateNamespaceAuthorization response
+func CreateCreateNamespaceAuthorizationResponse() (response *CreateNamespaceAuthorizationResponse) {
+ response = &CreateNamespaceAuthorizationResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo.go
new file mode 100644
index 000000000..42daebc4b
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo.go
@@ -0,0 +1,102 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// CreateRepo invokes the cr.CreateRepo API synchronously
+// api document: https://help.aliyun.com/api/cr/createrepo.html
+func (client *Client) CreateRepo(request *CreateRepoRequest) (response *CreateRepoResponse, err error) {
+ response = CreateCreateRepoResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// CreateRepoWithChan invokes the cr.CreateRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/createrepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateRepoWithChan(request *CreateRepoRequest) (<-chan *CreateRepoResponse, <-chan error) {
+ responseChan := make(chan *CreateRepoResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.CreateRepo(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// CreateRepoWithCallback invokes the cr.CreateRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/createrepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateRepoWithCallback(request *CreateRepoRequest, callback func(response *CreateRepoResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *CreateRepoResponse
+ var err error
+ defer close(result)
+ response, err = client.CreateRepo(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// CreateRepoRequest is the request struct for api CreateRepo
+type CreateRepoRequest struct {
+ *requests.RoaRequest
+}
+
+// CreateRepoResponse is the response struct for api CreateRepo
+type CreateRepoResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateCreateRepoRequest creates a request to invoke CreateRepo API
+func CreateCreateRepoRequest() (request *CreateRepoRequest) {
+ request = &CreateRepoRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "CreateRepo", "/repos", "cr", "openAPI")
+ request.Method = requests.PUT
+ return
+}
+
+// CreateCreateRepoResponse creates a response to parse from CreateRepo response
+func CreateCreateRepoResponse() (response *CreateRepoResponse) {
+ response = &CreateRepoResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo_authorization.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo_authorization.go
new file mode 100644
index 000000000..02ef97fbf
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo_authorization.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// CreateRepoAuthorization invokes the cr.CreateRepoAuthorization API synchronously
+// api document: https://help.aliyun.com/api/cr/createrepoauthorization.html
+func (client *Client) CreateRepoAuthorization(request *CreateRepoAuthorizationRequest) (response *CreateRepoAuthorizationResponse, err error) {
+ response = CreateCreateRepoAuthorizationResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// CreateRepoAuthorizationWithChan invokes the cr.CreateRepoAuthorization API asynchronously
+// api document: https://help.aliyun.com/api/cr/createrepoauthorization.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateRepoAuthorizationWithChan(request *CreateRepoAuthorizationRequest) (<-chan *CreateRepoAuthorizationResponse, <-chan error) {
+ responseChan := make(chan *CreateRepoAuthorizationResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.CreateRepoAuthorization(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// CreateRepoAuthorizationWithCallback invokes the cr.CreateRepoAuthorization API asynchronously
+// api document: https://help.aliyun.com/api/cr/createrepoauthorization.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateRepoAuthorizationWithCallback(request *CreateRepoAuthorizationRequest, callback func(response *CreateRepoAuthorizationResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *CreateRepoAuthorizationResponse
+ var err error
+ defer close(result)
+ response, err = client.CreateRepoAuthorization(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// CreateRepoAuthorizationRequest is the request struct for api CreateRepoAuthorization
+type CreateRepoAuthorizationRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// CreateRepoAuthorizationResponse is the response struct for api CreateRepoAuthorization
+type CreateRepoAuthorizationResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateCreateRepoAuthorizationRequest creates a request to invoke CreateRepoAuthorization API
+func CreateCreateRepoAuthorizationRequest() (request *CreateRepoAuthorizationRequest) {
+ request = &CreateRepoAuthorizationRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "CreateRepoAuthorization", "/repos/[RepoNamespace]/[RepoName]/authorizations", "cr", "openAPI")
+ request.Method = requests.PUT
+ return
+}
+
+// CreateCreateRepoAuthorizationResponse creates a response to parse from CreateRepoAuthorization response
+func CreateCreateRepoAuthorizationResponse() (response *CreateRepoAuthorizationResponse) {
+ response = &CreateRepoAuthorizationResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo_build_rule.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo_build_rule.go
new file mode 100644
index 000000000..413f30bc0
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo_build_rule.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// CreateRepoBuildRule invokes the cr.CreateRepoBuildRule API synchronously
+// api document: https://help.aliyun.com/api/cr/createrepobuildrule.html
+func (client *Client) CreateRepoBuildRule(request *CreateRepoBuildRuleRequest) (response *CreateRepoBuildRuleResponse, err error) {
+ response = CreateCreateRepoBuildRuleResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// CreateRepoBuildRuleWithChan invokes the cr.CreateRepoBuildRule API asynchronously
+// api document: https://help.aliyun.com/api/cr/createrepobuildrule.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateRepoBuildRuleWithChan(request *CreateRepoBuildRuleRequest) (<-chan *CreateRepoBuildRuleResponse, <-chan error) {
+ responseChan := make(chan *CreateRepoBuildRuleResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.CreateRepoBuildRule(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// CreateRepoBuildRuleWithCallback invokes the cr.CreateRepoBuildRule API asynchronously
+// api document: https://help.aliyun.com/api/cr/createrepobuildrule.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateRepoBuildRuleWithCallback(request *CreateRepoBuildRuleRequest, callback func(response *CreateRepoBuildRuleResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *CreateRepoBuildRuleResponse
+ var err error
+ defer close(result)
+ response, err = client.CreateRepoBuildRule(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// CreateRepoBuildRuleRequest is the request struct for api CreateRepoBuildRule
+type CreateRepoBuildRuleRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// CreateRepoBuildRuleResponse is the response struct for api CreateRepoBuildRule
+type CreateRepoBuildRuleResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateCreateRepoBuildRuleRequest creates a request to invoke CreateRepoBuildRule API
+func CreateCreateRepoBuildRuleRequest() (request *CreateRepoBuildRuleRequest) {
+ request = &CreateRepoBuildRuleRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "CreateRepoBuildRule", "/repos/[RepoNamespace]/[RepoName]/rules", "cr", "openAPI")
+ request.Method = requests.PUT
+ return
+}
+
+// CreateCreateRepoBuildRuleResponse creates a response to parse from CreateRepoBuildRule response
+func CreateCreateRepoBuildRuleResponse() (response *CreateRepoBuildRuleResponse) {
+ response = &CreateRepoBuildRuleResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo_sync_task.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo_sync_task.go
new file mode 100644
index 000000000..abba902cd
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo_sync_task.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// CreateRepoSyncTask invokes the cr.CreateRepoSyncTask API synchronously
+// api document: https://help.aliyun.com/api/cr/createreposynctask.html
+func (client *Client) CreateRepoSyncTask(request *CreateRepoSyncTaskRequest) (response *CreateRepoSyncTaskResponse, err error) {
+ response = CreateCreateRepoSyncTaskResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// CreateRepoSyncTaskWithChan invokes the cr.CreateRepoSyncTask API asynchronously
+// api document: https://help.aliyun.com/api/cr/createreposynctask.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateRepoSyncTaskWithChan(request *CreateRepoSyncTaskRequest) (<-chan *CreateRepoSyncTaskResponse, <-chan error) {
+ responseChan := make(chan *CreateRepoSyncTaskResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.CreateRepoSyncTask(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// CreateRepoSyncTaskWithCallback invokes the cr.CreateRepoSyncTask API asynchronously
+// api document: https://help.aliyun.com/api/cr/createreposynctask.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateRepoSyncTaskWithCallback(request *CreateRepoSyncTaskRequest, callback func(response *CreateRepoSyncTaskResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *CreateRepoSyncTaskResponse
+ var err error
+ defer close(result)
+ response, err = client.CreateRepoSyncTask(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// CreateRepoSyncTaskRequest is the request struct for api CreateRepoSyncTask
+type CreateRepoSyncTaskRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// CreateRepoSyncTaskResponse is the response struct for api CreateRepoSyncTask
+type CreateRepoSyncTaskResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateCreateRepoSyncTaskRequest creates a request to invoke CreateRepoSyncTask API
+func CreateCreateRepoSyncTaskRequest() (request *CreateRepoSyncTaskRequest) {
+ request = &CreateRepoSyncTaskRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "CreateRepoSyncTask", "/repos/[RepoNamespace]/[RepoName]/syncTasks", "cr", "openAPI")
+ request.Method = requests.PUT
+ return
+}
+
+// CreateCreateRepoSyncTaskResponse creates a response to parse from CreateRepoSyncTask response
+func CreateCreateRepoSyncTaskResponse() (response *CreateRepoSyncTaskResponse) {
+ response = &CreateRepoSyncTaskResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo_webhook.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo_webhook.go
new file mode 100644
index 000000000..2badc54e0
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_repo_webhook.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// CreateRepoWebhook invokes the cr.CreateRepoWebhook API synchronously
+// api document: https://help.aliyun.com/api/cr/createrepowebhook.html
+func (client *Client) CreateRepoWebhook(request *CreateRepoWebhookRequest) (response *CreateRepoWebhookResponse, err error) {
+ response = CreateCreateRepoWebhookResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// CreateRepoWebhookWithChan invokes the cr.CreateRepoWebhook API asynchronously
+// api document: https://help.aliyun.com/api/cr/createrepowebhook.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateRepoWebhookWithChan(request *CreateRepoWebhookRequest) (<-chan *CreateRepoWebhookResponse, <-chan error) {
+ responseChan := make(chan *CreateRepoWebhookResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.CreateRepoWebhook(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// CreateRepoWebhookWithCallback invokes the cr.CreateRepoWebhook API asynchronously
+// api document: https://help.aliyun.com/api/cr/createrepowebhook.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateRepoWebhookWithCallback(request *CreateRepoWebhookRequest, callback func(response *CreateRepoWebhookResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *CreateRepoWebhookResponse
+ var err error
+ defer close(result)
+ response, err = client.CreateRepoWebhook(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// CreateRepoWebhookRequest is the request struct for api CreateRepoWebhook
+type CreateRepoWebhookRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// CreateRepoWebhookResponse is the response struct for api CreateRepoWebhook
+type CreateRepoWebhookResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateCreateRepoWebhookRequest creates a request to invoke CreateRepoWebhook API
+func CreateCreateRepoWebhookRequest() (request *CreateRepoWebhookRequest) {
+ request = &CreateRepoWebhookRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "CreateRepoWebhook", "/repos/[RepoNamespace]/[RepoName]/webhooks", "cr", "openAPI")
+ request.Method = requests.PUT
+ return
+}
+
+// CreateCreateRepoWebhookResponse creates a response to parse from CreateRepoWebhook response
+func CreateCreateRepoWebhookResponse() (response *CreateRepoWebhookResponse) {
+ response = &CreateRepoWebhookResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_user_info.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_user_info.go
new file mode 100644
index 000000000..9890656e9
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_user_info.go
@@ -0,0 +1,102 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// CreateUserInfo invokes the cr.CreateUserInfo API synchronously
+// api document: https://help.aliyun.com/api/cr/createuserinfo.html
+func (client *Client) CreateUserInfo(request *CreateUserInfoRequest) (response *CreateUserInfoResponse, err error) {
+ response = CreateCreateUserInfoResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// CreateUserInfoWithChan invokes the cr.CreateUserInfo API asynchronously
+// api document: https://help.aliyun.com/api/cr/createuserinfo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateUserInfoWithChan(request *CreateUserInfoRequest) (<-chan *CreateUserInfoResponse, <-chan error) {
+ responseChan := make(chan *CreateUserInfoResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.CreateUserInfo(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// CreateUserInfoWithCallback invokes the cr.CreateUserInfo API asynchronously
+// api document: https://help.aliyun.com/api/cr/createuserinfo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateUserInfoWithCallback(request *CreateUserInfoRequest, callback func(response *CreateUserInfoResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *CreateUserInfoResponse
+ var err error
+ defer close(result)
+ response, err = client.CreateUserInfo(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// CreateUserInfoRequest is the request struct for api CreateUserInfo
+type CreateUserInfoRequest struct {
+ *requests.RoaRequest
+}
+
+// CreateUserInfoResponse is the response struct for api CreateUserInfo
+type CreateUserInfoResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateCreateUserInfoRequest creates a request to invoke CreateUserInfo API
+func CreateCreateUserInfoRequest() (request *CreateUserInfoRequest) {
+ request = &CreateUserInfoRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "CreateUserInfo", "/users", "cr", "openAPI")
+ request.Method = requests.PUT
+ return
+}
+
+// CreateCreateUserInfoResponse creates a response to parse from CreateUserInfo response
+func CreateCreateUserInfoResponse() (response *CreateUserInfoResponse) {
+ response = &CreateUserInfoResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_user_source_account.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_user_source_account.go
new file mode 100644
index 000000000..21e91e5da
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/create_user_source_account.go
@@ -0,0 +1,102 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// CreateUserSourceAccount invokes the cr.CreateUserSourceAccount API synchronously
+// api document: https://help.aliyun.com/api/cr/createusersourceaccount.html
+func (client *Client) CreateUserSourceAccount(request *CreateUserSourceAccountRequest) (response *CreateUserSourceAccountResponse, err error) {
+ response = CreateCreateUserSourceAccountResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// CreateUserSourceAccountWithChan invokes the cr.CreateUserSourceAccount API asynchronously
+// api document: https://help.aliyun.com/api/cr/createusersourceaccount.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateUserSourceAccountWithChan(request *CreateUserSourceAccountRequest) (<-chan *CreateUserSourceAccountResponse, <-chan error) {
+ responseChan := make(chan *CreateUserSourceAccountResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.CreateUserSourceAccount(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// CreateUserSourceAccountWithCallback invokes the cr.CreateUserSourceAccount API asynchronously
+// api document: https://help.aliyun.com/api/cr/createusersourceaccount.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) CreateUserSourceAccountWithCallback(request *CreateUserSourceAccountRequest, callback func(response *CreateUserSourceAccountResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *CreateUserSourceAccountResponse
+ var err error
+ defer close(result)
+ response, err = client.CreateUserSourceAccount(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// CreateUserSourceAccountRequest is the request struct for api CreateUserSourceAccount
+type CreateUserSourceAccountRequest struct {
+ *requests.RoaRequest
+}
+
+// CreateUserSourceAccountResponse is the response struct for api CreateUserSourceAccount
+type CreateUserSourceAccountResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateCreateUserSourceAccountRequest creates a request to invoke CreateUserSourceAccount API
+func CreateCreateUserSourceAccountRequest() (request *CreateUserSourceAccountRequest) {
+ request = &CreateUserSourceAccountRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "CreateUserSourceAccount", "/users/sourceAccount", "cr", "openAPI")
+ request.Method = requests.PUT
+ return
+}
+
+// CreateCreateUserSourceAccountResponse creates a response to parse from CreateUserSourceAccount response
+func CreateCreateUserSourceAccountResponse() (response *CreateUserSourceAccountResponse) {
+ response = &CreateUserSourceAccountResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_collection.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_collection.go
new file mode 100644
index 000000000..9410c89e6
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_collection.go
@@ -0,0 +1,103 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// DeleteCollection invokes the cr.DeleteCollection API synchronously
+// api document: https://help.aliyun.com/api/cr/deletecollection.html
+func (client *Client) DeleteCollection(request *DeleteCollectionRequest) (response *DeleteCollectionResponse, err error) {
+ response = CreateDeleteCollectionResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// DeleteCollectionWithChan invokes the cr.DeleteCollection API asynchronously
+// api document: https://help.aliyun.com/api/cr/deletecollection.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteCollectionWithChan(request *DeleteCollectionRequest) (<-chan *DeleteCollectionResponse, <-chan error) {
+ responseChan := make(chan *DeleteCollectionResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.DeleteCollection(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// DeleteCollectionWithCallback invokes the cr.DeleteCollection API asynchronously
+// api document: https://help.aliyun.com/api/cr/deletecollection.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteCollectionWithCallback(request *DeleteCollectionRequest, callback func(response *DeleteCollectionResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *DeleteCollectionResponse
+ var err error
+ defer close(result)
+ response, err = client.DeleteCollection(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// DeleteCollectionRequest is the request struct for api DeleteCollection
+type DeleteCollectionRequest struct {
+ *requests.RoaRequest
+ CollectionId requests.Integer `position:"Path" name:"CollectionId"`
+}
+
+// DeleteCollectionResponse is the response struct for api DeleteCollection
+type DeleteCollectionResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateDeleteCollectionRequest creates a request to invoke DeleteCollection API
+func CreateDeleteCollectionRequest() (request *DeleteCollectionRequest) {
+ request = &DeleteCollectionRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "DeleteCollection", "/collections/[CollectionId]", "cr", "openAPI")
+ request.Method = requests.DELETE
+ return
+}
+
+// CreateDeleteCollectionResponse creates a response to parse from DeleteCollection response
+func CreateDeleteCollectionResponse() (response *DeleteCollectionResponse) {
+ response = &DeleteCollectionResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
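
DeleteCollection is the first request in this batch with a numeric path parameter; the generator models it as requests.Integer, a string-backed type. A sketch using the requests package's NewInteger helper to convert an int (client assumed constructed; the id is a placeholder):

package main

import (
	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

func deleteCollection(client *cr.Client, id int) error {
	req := cr.CreateDeleteCollectionRequest()
	// requests.Integer is a string alias; NewInteger formats the int for the path.
	req.CollectionId = requests.NewInteger(id)
	_, err := client.DeleteCollection(req)
	return err
}
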
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_image.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_image.go
new file mode 100644
index 000000000..adae8fc40
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_image.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// DeleteImage invokes the cr.DeleteImage API synchronously
+// api document: https://help.aliyun.com/api/cr/deleteimage.html
+func (client *Client) DeleteImage(request *DeleteImageRequest) (response *DeleteImageResponse, err error) {
+ response = CreateDeleteImageResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// DeleteImageWithChan invokes the cr.DeleteImage API asynchronously
+// api document: https://help.aliyun.com/api/cr/deleteimage.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteImageWithChan(request *DeleteImageRequest) (<-chan *DeleteImageResponse, <-chan error) {
+ responseChan := make(chan *DeleteImageResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.DeleteImage(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// DeleteImageWithCallback invokes the cr.DeleteImage API asynchronously
+// api document: https://help.aliyun.com/api/cr/deleteimage.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteImageWithCallback(request *DeleteImageRequest, callback func(response *DeleteImageResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *DeleteImageResponse
+ var err error
+ defer close(result)
+ response, err = client.DeleteImage(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// DeleteImageRequest is the request struct for api DeleteImage
+type DeleteImageRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ Tag string `position:"Path" name:"Tag"`
+}
+
+// DeleteImageResponse is the response struct for api DeleteImage
+type DeleteImageResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateDeleteImageRequest creates a request to invoke DeleteImage API
+func CreateDeleteImageRequest() (request *DeleteImageRequest) {
+ request = &DeleteImageRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "DeleteImage", "/repos/[RepoNamespace]/[RepoName]/tags/[Tag]", "cr", "openAPI")
+ request.Method = requests.DELETE
+ return
+}
+
+// CreateDeleteImageResponse creates a response to parse from DeleteImage response
+func CreateDeleteImageResponse() (response *DeleteImageResponse) {
+ response = &DeleteImageResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
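
The `position:"Path"` tags are what bind these fields to the URI template handed to InitWithApiInfo: each field value replaces its [Name] placeholder, so the request below resolves to /repos/{namespace}/{repo}/tags/{tag}. A synchronous end-to-end sketch with placeholder values:

package main

import (
	"fmt"
	"log"

	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

func main() {
	client, err := cr.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		log.Fatal(err)
	}

	// Each Path-tagged field fills the matching placeholder in
	// "/repos/[RepoNamespace]/[RepoName]/tags/[Tag]".
	req := cr.CreateDeleteImageRequest()
	req.RepoNamespace = "example-ns"
	req.RepoName = "example-repo"
	req.Tag = "v1"

	resp, err := client.DeleteImage(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted; HTTP status:", resp.GetHttpStatus())
}
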
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_namespace.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_namespace.go
new file mode 100644
index 000000000..2db9cf4ce
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_namespace.go
@@ -0,0 +1,103 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// DeleteNamespace invokes the cr.DeleteNamespace API synchronously
+// api document: https://help.aliyun.com/api/cr/deletenamespace.html
+func (client *Client) DeleteNamespace(request *DeleteNamespaceRequest) (response *DeleteNamespaceResponse, err error) {
+ response = CreateDeleteNamespaceResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// DeleteNamespaceWithChan invokes the cr.DeleteNamespace API asynchronously
+// api document: https://help.aliyun.com/api/cr/deletenamespace.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteNamespaceWithChan(request *DeleteNamespaceRequest) (<-chan *DeleteNamespaceResponse, <-chan error) {
+ responseChan := make(chan *DeleteNamespaceResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.DeleteNamespace(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// DeleteNamespaceWithCallback invokes the cr.DeleteNamespace API asynchronously
+// api document: https://help.aliyun.com/api/cr/deletenamespace.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteNamespaceWithCallback(request *DeleteNamespaceRequest, callback func(response *DeleteNamespaceResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *DeleteNamespaceResponse
+ var err error
+ defer close(result)
+ response, err = client.DeleteNamespace(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// DeleteNamespaceRequest is the request struct for api DeleteNamespace
+type DeleteNamespaceRequest struct {
+ *requests.RoaRequest
+ Namespace string `position:"Path" name:"Namespace"`
+}
+
+// DeleteNamespaceResponse is the response struct for api DeleteNamespace
+type DeleteNamespaceResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateDeleteNamespaceRequest creates a request to invoke DeleteNamespace API
+func CreateDeleteNamespaceRequest() (request *DeleteNamespaceRequest) {
+ request = &DeleteNamespaceRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "DeleteNamespace", "/namespace/[Namespace]", "cr", "openAPI")
+ request.Method = requests.DELETE
+ return
+}
+
+// CreateDeleteNamespaceResponse creates a response to parse from DeleteNamespace response
+func CreateDeleteNamespaceResponse() (response *DeleteNamespaceResponse) {
+ response = &DeleteNamespaceResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_namespace_authorization.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_namespace_authorization.go
new file mode 100644
index 000000000..701138d6f
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_namespace_authorization.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// DeleteNamespaceAuthorization invokes the cr.DeleteNamespaceAuthorization API synchronously
+// api document: https://help.aliyun.com/api/cr/deletenamespaceauthorization.html
+func (client *Client) DeleteNamespaceAuthorization(request *DeleteNamespaceAuthorizationRequest) (response *DeleteNamespaceAuthorizationResponse, err error) {
+ response = CreateDeleteNamespaceAuthorizationResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// DeleteNamespaceAuthorizationWithChan invokes the cr.DeleteNamespaceAuthorization API asynchronously
+// api document: https://help.aliyun.com/api/cr/deletenamespaceauthorization.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteNamespaceAuthorizationWithChan(request *DeleteNamespaceAuthorizationRequest) (<-chan *DeleteNamespaceAuthorizationResponse, <-chan error) {
+ responseChan := make(chan *DeleteNamespaceAuthorizationResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.DeleteNamespaceAuthorization(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// DeleteNamespaceAuthorizationWithCallback invokes the cr.DeleteNamespaceAuthorization API asynchronously
+// api document: https://help.aliyun.com/api/cr/deletenamespaceauthorization.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteNamespaceAuthorizationWithCallback(request *DeleteNamespaceAuthorizationRequest, callback func(response *DeleteNamespaceAuthorizationResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *DeleteNamespaceAuthorizationResponse
+ var err error
+ defer close(result)
+ response, err = client.DeleteNamespaceAuthorization(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// DeleteNamespaceAuthorizationRequest is the request struct for api DeleteNamespaceAuthorization
+type DeleteNamespaceAuthorizationRequest struct {
+ *requests.RoaRequest
+ AuthorizeId requests.Integer `position:"Path" name:"AuthorizeId"`
+ Namespace string `position:"Path" name:"Namespace"`
+}
+
+// DeleteNamespaceAuthorizationResponse is the response struct for api DeleteNamespaceAuthorization
+type DeleteNamespaceAuthorizationResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateDeleteNamespaceAuthorizationRequest creates a request to invoke DeleteNamespaceAuthorization API
+func CreateDeleteNamespaceAuthorizationRequest() (request *DeleteNamespaceAuthorizationRequest) {
+ request = &DeleteNamespaceAuthorizationRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "DeleteNamespaceAuthorization", "/namespace/[Namespace]/authorizations/[AuthorizeId]", "cr", "openAPI")
+ request.Method = requests.DELETE
+ return
+}
+
+// CreateDeleteNamespaceAuthorizationResponse creates a response to parse from DeleteNamespaceAuthorization response
+func CreateDeleteNamespaceAuthorizationResponse() (response *DeleteNamespaceAuthorizationResponse) {
+ response = &DeleteNamespaceAuthorizationResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_repo.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_repo.go
new file mode 100644
index 000000000..f42dd4c9a
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_repo.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// DeleteRepo invokes the cr.DeleteRepo API synchronously
+// api document: https://help.aliyun.com/api/cr/deleterepo.html
+func (client *Client) DeleteRepo(request *DeleteRepoRequest) (response *DeleteRepoResponse, err error) {
+ response = CreateDeleteRepoResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// DeleteRepoWithChan invokes the cr.DeleteRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/deleterepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteRepoWithChan(request *DeleteRepoRequest) (<-chan *DeleteRepoResponse, <-chan error) {
+ responseChan := make(chan *DeleteRepoResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.DeleteRepo(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// DeleteRepoWithCallback invokes the cr.DeleteRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/deleterepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteRepoWithCallback(request *DeleteRepoRequest, callback func(response *DeleteRepoResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *DeleteRepoResponse
+ var err error
+ defer close(result)
+ response, err = client.DeleteRepo(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// DeleteRepoRequest is the request struct for api DeleteRepo
+type DeleteRepoRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// DeleteRepoResponse is the response struct for api DeleteRepo
+type DeleteRepoResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateDeleteRepoRequest creates a request to invoke DeleteRepo API
+func CreateDeleteRepoRequest() (request *DeleteRepoRequest) {
+ request = &DeleteRepoRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "DeleteRepo", "/repos/[RepoNamespace]/[RepoName]", "cr", "openAPI")
+ request.Method = requests.DELETE
+ return
+}
+
+// CreateDeleteRepoResponse creates a response to parse from DeleteRepo response
+func CreateDeleteRepoResponse() (response *DeleteRepoResponse) {
+ response = &DeleteRepoResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
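
Since both channels returned by the WithChan variant are buffered (capacity 1) and always closed once the task finishes, a caller can read them sequentially without blocking forever. A hedged sketch with placeholder credentials and names; note that the async variants go through AddAsyncTask, which requires the client's async task pool to be enabled first:

package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

func main() {
	client, err := cr.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	// Without this, AddAsyncTask fails and the error surfaces on errChan.
	client.EnableAsync(2, 8)

	request := cr.CreateDeleteRepoRequest()
	request.RepoNamespace = "my-namespace"
	request.RepoName = "my-repo"

	responseChan, errChan := client.DeleteRepoWithChan(request)

	// On success the response is buffered before the channel is closed;
	// on failure the channel is closed empty and the error sits in errChan.
	if response, ok := <-responseChan; ok {
		fmt.Println("deleted, status:", response.GetHttpStatus())
	} else {
		fmt.Println("DeleteRepo failed:", <-errChan)
	}
}
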
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_repo_authorization.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_repo_authorization.go
new file mode 100644
index 000000000..4d5b5d46d
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_repo_authorization.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// DeleteRepoAuthorization invokes the cr.DeleteRepoAuthorization API synchronously
+// api document: https://help.aliyun.com/api/cr/deleterepoauthorization.html
+func (client *Client) DeleteRepoAuthorization(request *DeleteRepoAuthorizationRequest) (response *DeleteRepoAuthorizationResponse, err error) {
+ response = CreateDeleteRepoAuthorizationResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// DeleteRepoAuthorizationWithChan invokes the cr.DeleteRepoAuthorization API asynchronously
+// api document: https://help.aliyun.com/api/cr/deleterepoauthorization.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteRepoAuthorizationWithChan(request *DeleteRepoAuthorizationRequest) (<-chan *DeleteRepoAuthorizationResponse, <-chan error) {
+ responseChan := make(chan *DeleteRepoAuthorizationResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.DeleteRepoAuthorization(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// DeleteRepoAuthorizationWithCallback invokes the cr.DeleteRepoAuthorization API asynchronously
+// api document: https://help.aliyun.com/api/cr/deleterepoauthorization.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteRepoAuthorizationWithCallback(request *DeleteRepoAuthorizationRequest, callback func(response *DeleteRepoAuthorizationResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *DeleteRepoAuthorizationResponse
+ var err error
+ defer close(result)
+ response, err = client.DeleteRepoAuthorization(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// DeleteRepoAuthorizationRequest is the request struct for api DeleteRepoAuthorization
+type DeleteRepoAuthorizationRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ AuthorizeId requests.Integer `position:"Path" name:"AuthorizeId"`
+}
+
+// DeleteRepoAuthorizationResponse is the response struct for api DeleteRepoAuthorization
+type DeleteRepoAuthorizationResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateDeleteRepoAuthorizationRequest creates a request to invoke DeleteRepoAuthorization API
+func CreateDeleteRepoAuthorizationRequest() (request *DeleteRepoAuthorizationRequest) {
+ request = &DeleteRepoAuthorizationRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "DeleteRepoAuthorization", "/repos/[RepoNamespace]/[RepoName]/authorizations/[AuthorizeId]", "cr", "openAPI")
+ request.Method = requests.DELETE
+ return
+}
+
+// CreateDeleteRepoAuthorizationResponse creates a response to parse from DeleteRepoAuthorization response
+func CreateDeleteRepoAuthorizationResponse() (response *DeleteRepoAuthorizationResponse) {
+ response = &DeleteRepoAuthorizationResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_repo_build_rule.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_repo_build_rule.go
new file mode 100644
index 000000000..ffce0c5eb
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_repo_build_rule.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// DeleteRepoBuildRule invokes the cr.DeleteRepoBuildRule API synchronously
+// api document: https://help.aliyun.com/api/cr/deleterepobuildrule.html
+func (client *Client) DeleteRepoBuildRule(request *DeleteRepoBuildRuleRequest) (response *DeleteRepoBuildRuleResponse, err error) {
+ response = CreateDeleteRepoBuildRuleResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// DeleteRepoBuildRuleWithChan invokes the cr.DeleteRepoBuildRule API asynchronously
+// api document: https://help.aliyun.com/api/cr/deleterepobuildrule.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteRepoBuildRuleWithChan(request *DeleteRepoBuildRuleRequest) (<-chan *DeleteRepoBuildRuleResponse, <-chan error) {
+ responseChan := make(chan *DeleteRepoBuildRuleResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.DeleteRepoBuildRule(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// DeleteRepoBuildRuleWithCallback invokes the cr.DeleteRepoBuildRule API asynchronously
+// api document: https://help.aliyun.com/api/cr/deleterepobuildrule.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteRepoBuildRuleWithCallback(request *DeleteRepoBuildRuleRequest, callback func(response *DeleteRepoBuildRuleResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *DeleteRepoBuildRuleResponse
+ var err error
+ defer close(result)
+ response, err = client.DeleteRepoBuildRule(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// DeleteRepoBuildRuleRequest is the request struct for api DeleteRepoBuildRule
+type DeleteRepoBuildRuleRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ BuildRuleId requests.Integer `position:"Path" name:"BuildRuleId"`
+}
+
+// DeleteRepoBuildRuleResponse is the response struct for api DeleteRepoBuildRule
+type DeleteRepoBuildRuleResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateDeleteRepoBuildRuleRequest creates a request to invoke DeleteRepoBuildRule API
+func CreateDeleteRepoBuildRuleRequest() (request *DeleteRepoBuildRuleRequest) {
+ request = &DeleteRepoBuildRuleRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "DeleteRepoBuildRule", "/repos/[RepoNamespace]/[RepoName]/rules/[BuildRuleId]", "cr", "openAPI")
+ request.Method = requests.DELETE
+ return
+}
+
+// CreateDeleteRepoBuildRuleResponse creates a response to parse from DeleteRepoBuildRule response
+func CreateDeleteRepoBuildRuleResponse() (response *DeleteRepoBuildRuleResponse) {
+ response = &DeleteRepoBuildRuleResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_repo_webhook.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_repo_webhook.go
new file mode 100644
index 000000000..dd60388e5
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_repo_webhook.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// DeleteRepoWebhook invokes the cr.DeleteRepoWebhook API synchronously
+// api document: https://help.aliyun.com/api/cr/deleterepowebhook.html
+func (client *Client) DeleteRepoWebhook(request *DeleteRepoWebhookRequest) (response *DeleteRepoWebhookResponse, err error) {
+ response = CreateDeleteRepoWebhookResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// DeleteRepoWebhookWithChan invokes the cr.DeleteRepoWebhook API asynchronously
+// api document: https://help.aliyun.com/api/cr/deleterepowebhook.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteRepoWebhookWithChan(request *DeleteRepoWebhookRequest) (<-chan *DeleteRepoWebhookResponse, <-chan error) {
+ responseChan := make(chan *DeleteRepoWebhookResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.DeleteRepoWebhook(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// DeleteRepoWebhookWithCallback invokes the cr.DeleteRepoWebhook API asynchronously
+// api document: https://help.aliyun.com/api/cr/deleterepowebhook.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteRepoWebhookWithCallback(request *DeleteRepoWebhookRequest, callback func(response *DeleteRepoWebhookResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *DeleteRepoWebhookResponse
+ var err error
+ defer close(result)
+ response, err = client.DeleteRepoWebhook(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// DeleteRepoWebhookRequest is the request struct for api DeleteRepoWebhook
+type DeleteRepoWebhookRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ WebhookId requests.Integer `position:"Path" name:"WebhookId"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// DeleteRepoWebhookResponse is the response struct for api DeleteRepoWebhook
+type DeleteRepoWebhookResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateDeleteRepoWebhookRequest creates a request to invoke DeleteRepoWebhook API
+func CreateDeleteRepoWebhookRequest() (request *DeleteRepoWebhookRequest) {
+ request = &DeleteRepoWebhookRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "DeleteRepoWebhook", "/repos/[RepoNamespace]/[RepoName]/webhooks/[WebhookId]", "cr", "openAPI")
+ request.Method = requests.DELETE
+ return
+}
+
+// CreateDeleteRepoWebhookResponse creates a response to parse from DeleteRepoWebhook response
+func CreateDeleteRepoWebhookResponse() (response *DeleteRepoWebhookResponse) {
+ response = &DeleteRepoWebhookResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
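
The WithCallback variant runs the callback on the SDK's async task pool and signals scheduling through the returned channel: 1 if the task ran, 0 if it could not be scheduled, with the channel closed either way. A sketch with placeholder values; requests.NewInteger wraps an int into the string-backed requests.Integer used for the path parameter:

package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

func main() {
	client, err := cr.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	client.EnableAsync(2, 8) // async pool must be enabled for WithCallback

	request := cr.CreateDeleteRepoWebhookRequest()
	request.RepoNamespace = "my-namespace"
	request.RepoName = "my-repo"
	request.WebhookId = requests.NewInteger(42) // placeholder webhook ID

	done := client.DeleteRepoWebhookWithCallback(request, func(response *cr.DeleteRepoWebhookResponse, err error) {
		if err != nil {
			fmt.Println("DeleteRepoWebhook failed:", err)
			return
		}
		fmt.Println("deleted, status:", response.GetHttpStatus())
	})
	<-done // yields 1 (task ran) or 0 (scheduling failed), then closes
}
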
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_user_source_account.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_user_source_account.go
new file mode 100644
index 000000000..1ad7645da
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/delete_user_source_account.go
@@ -0,0 +1,103 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// DeleteUserSourceAccount invokes the cr.DeleteUserSourceAccount API synchronously
+// api document: https://help.aliyun.com/api/cr/deleteusersourceaccount.html
+func (client *Client) DeleteUserSourceAccount(request *DeleteUserSourceAccountRequest) (response *DeleteUserSourceAccountResponse, err error) {
+ response = CreateDeleteUserSourceAccountResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// DeleteUserSourceAccountWithChan invokes the cr.DeleteUserSourceAccount API asynchronously
+// api document: https://help.aliyun.com/api/cr/deleteusersourceaccount.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteUserSourceAccountWithChan(request *DeleteUserSourceAccountRequest) (<-chan *DeleteUserSourceAccountResponse, <-chan error) {
+ responseChan := make(chan *DeleteUserSourceAccountResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.DeleteUserSourceAccount(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// DeleteUserSourceAccountWithCallback invokes the cr.DeleteUserSourceAccount API asynchronously
+// api document: https://help.aliyun.com/api/cr/deleteusersourceaccount.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) DeleteUserSourceAccountWithCallback(request *DeleteUserSourceAccountRequest, callback func(response *DeleteUserSourceAccountResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *DeleteUserSourceAccountResponse
+ var err error
+ defer close(result)
+ response, err = client.DeleteUserSourceAccount(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// DeleteUserSourceAccountRequest is the request struct for api DeleteUserSourceAccount
+type DeleteUserSourceAccountRequest struct {
+ *requests.RoaRequest
+ SourceAccountId requests.Integer `position:"Path" name:"SourceAccountId"`
+}
+
+// DeleteUserSourceAccountResponse is the response struct for api DeleteUserSourceAccount
+type DeleteUserSourceAccountResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateDeleteUserSourceAccountRequest creates a request to invoke DeleteUserSourceAccount API
+func CreateDeleteUserSourceAccountRequest() (request *DeleteUserSourceAccountRequest) {
+ request = &DeleteUserSourceAccountRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "DeleteUserSourceAccount", "/users/sourceAccount/[SourceAccountId]", "cr", "openAPI")
+ request.Method = requests.DELETE
+ return
+}
+
+// CreateDeleteUserSourceAccountResponse creates a response to parse from DeleteUserSourceAccount response
+func CreateDeleteUserSourceAccountResponse() (response *DeleteUserSourceAccountResponse) {
+ response = &DeleteUserSourceAccountResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_authorization_token.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_authorization_token.go
new file mode 100644
index 000000000..221760094
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_authorization_token.go
@@ -0,0 +1,102 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetAuthorizationToken invokes the cr.GetAuthorizationToken API synchronously
+// api document: https://help.aliyun.com/api/cr/getauthorizationtoken.html
+func (client *Client) GetAuthorizationToken(request *GetAuthorizationTokenRequest) (response *GetAuthorizationTokenResponse, err error) {
+ response = CreateGetAuthorizationTokenResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetAuthorizationTokenWithChan invokes the cr.GetAuthorizationToken API asynchronously
+// api document: https://help.aliyun.com/api/cr/getauthorizationtoken.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetAuthorizationTokenWithChan(request *GetAuthorizationTokenRequest) (<-chan *GetAuthorizationTokenResponse, <-chan error) {
+ responseChan := make(chan *GetAuthorizationTokenResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetAuthorizationToken(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetAuthorizationTokenWithCallback invokes the cr.GetAuthorizationToken API asynchronously
+// api document: https://help.aliyun.com/api/cr/getauthorizationtoken.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetAuthorizationTokenWithCallback(request *GetAuthorizationTokenRequest, callback func(response *GetAuthorizationTokenResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetAuthorizationTokenResponse
+ var err error
+ defer close(result)
+ response, err = client.GetAuthorizationToken(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetAuthorizationTokenRequest is the request struct for api GetAuthorizationToken
+type GetAuthorizationTokenRequest struct {
+ *requests.RoaRequest
+}
+
+// GetAuthorizationTokenResponse is the response struct for api GetAuthorizationToken
+type GetAuthorizationTokenResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetAuthorizationTokenRequest creates a request to invoke GetAuthorizationToken API
+func CreateGetAuthorizationTokenRequest() (request *GetAuthorizationTokenRequest) {
+ request = &GetAuthorizationTokenRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetAuthorizationToken", "/tokens", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetAuthorizationTokenResponse creates a response to parse from GetAuthorizationToken response
+func CreateGetAuthorizationTokenResponse() (response *GetAuthorizationTokenResponse) {
+ response = &GetAuthorizationTokenResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
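
Because the generated response type embeds only BaseResponse and declares no typed fields, the JSON body returned by /tokens has to be read by the caller, for example via GetHttpContentString (or GetHttpContentBytes plus json.Unmarshal into a caller-defined struct). A sketch with placeholder credentials:

package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

func main() {
	client, err := cr.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	response, err := client.GetAuthorizationToken(cr.CreateGetAuthorizationTokenRequest())
	if err != nil {
		panic(err)
	}
	// Raw JSON body; the token fields are described in the api document linked above.
	fmt.Println(response.GetHttpContentString())
}
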
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_collection.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_collection.go
new file mode 100644
index 000000000..a7c09f727
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_collection.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetCollection invokes the cr.GetCollection API synchronously
+// api document: https://help.aliyun.com/api/cr/getcollection.html
+func (client *Client) GetCollection(request *GetCollectionRequest) (response *GetCollectionResponse, err error) {
+ response = CreateGetCollectionResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetCollectionWithChan invokes the cr.GetCollection API asynchronously
+// api document: https://help.aliyun.com/api/cr/getcollection.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetCollectionWithChan(request *GetCollectionRequest) (<-chan *GetCollectionResponse, <-chan error) {
+ responseChan := make(chan *GetCollectionResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetCollection(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetCollectionWithCallback invokes the cr.GetCollection API asynchronously
+// api document: https://help.aliyun.com/api/cr/getcollection.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetCollectionWithCallback(request *GetCollectionRequest, callback func(response *GetCollectionResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetCollectionResponse
+ var err error
+ defer close(result)
+ response, err = client.GetCollection(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetCollectionRequest is the request struct for api GetCollection
+type GetCollectionRequest struct {
+ *requests.RoaRequest
+ PageSize requests.Integer `position:"Query" name:"PageSize"`
+ Page requests.Integer `position:"Query" name:"Page"`
+}
+
+// GetCollectionResponse is the response struct for api GetCollection
+type GetCollectionResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetCollectionRequest creates a request to invoke GetCollection API
+func CreateGetCollectionRequest() (request *GetCollectionRequest) {
+ request = &GetCollectionRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetCollection", "/collections", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetCollectionResponse creates a response to parse from GetCollection response
+func CreateGetCollectionResponse() (response *GetCollectionResponse) {
+ response = &GetCollectionResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
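
Unlike the delete calls above, PageSize and Page are tagged position:"Query" rather than position:"Path", so they are set as requests.Integer values and end up on the query string of /collections. A sketch under the same placeholder assumptions as the earlier examples:

package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

func main() {
	client, err := cr.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	request := cr.CreateGetCollectionRequest()
	request.Page = requests.NewInteger(1)      // first page
	request.PageSize = requests.NewInteger(30) // items per page

	response, err := client.GetCollection(request)
	if err != nil {
		panic(err)
	}
	fmt.Println(response.GetHttpContentString()) // untyped JSON body, as with the other cr responses
}
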
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_image_layer.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_image_layer.go
new file mode 100644
index 000000000..41fa6dd83
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_image_layer.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetImageLayer invokes the cr.GetImageLayer API synchronously
+// api document: https://help.aliyun.com/api/cr/getimagelayer.html
+func (client *Client) GetImageLayer(request *GetImageLayerRequest) (response *GetImageLayerResponse, err error) {
+ response = CreateGetImageLayerResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetImageLayerWithChan invokes the cr.GetImageLayer API asynchronously
+// api document: https://help.aliyun.com/api/cr/getimagelayer.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetImageLayerWithChan(request *GetImageLayerRequest) (<-chan *GetImageLayerResponse, <-chan error) {
+ responseChan := make(chan *GetImageLayerResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetImageLayer(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetImageLayerWithCallback invokes the cr.GetImageLayer API asynchronously
+// api document: https://help.aliyun.com/api/cr/getimagelayer.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetImageLayerWithCallback(request *GetImageLayerRequest, callback func(response *GetImageLayerResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetImageLayerResponse
+ var err error
+ defer close(result)
+ response, err = client.GetImageLayer(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetImageLayerRequest is the request struct for api GetImageLayer
+type GetImageLayerRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ Tag string `position:"Path" name:"Tag"`
+}
+
+// GetImageLayerResponse is the response struct for api GetImageLayer
+type GetImageLayerResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetImageLayerRequest creates a request to invoke GetImageLayer API
+func CreateGetImageLayerRequest() (request *GetImageLayerRequest) {
+ request = &GetImageLayerRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetImageLayer", "/repos/[RepoNamespace]/[RepoName]/tags/[Tag]/layers", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetImageLayerResponse creates a response to parse from GetImageLayer response
+func CreateGetImageLayerResponse() (response *GetImageLayerResponse) {
+ response = &GetImageLayerResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_image_manifest.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_image_manifest.go
new file mode 100644
index 000000000..420a63815
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_image_manifest.go
@@ -0,0 +1,106 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetImageManifest invokes the cr.GetImageManifest API synchronously
+// api document: https://help.aliyun.com/api/cr/getimagemanifest.html
+func (client *Client) GetImageManifest(request *GetImageManifestRequest) (response *GetImageManifestResponse, err error) {
+ response = CreateGetImageManifestResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetImageManifestWithChan invokes the cr.GetImageManifest API asynchronously
+// api document: https://help.aliyun.com/api/cr/getimagemanifest.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetImageManifestWithChan(request *GetImageManifestRequest) (<-chan *GetImageManifestResponse, <-chan error) {
+ responseChan := make(chan *GetImageManifestResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetImageManifest(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetImageManifestWithCallback invokes the cr.GetImageManifest API asynchronously
+// api document: https://help.aliyun.com/api/cr/getimagemanifest.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetImageManifestWithCallback(request *GetImageManifestRequest, callback func(response *GetImageManifestResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetImageManifestResponse
+ var err error
+ defer close(result)
+ response, err = client.GetImageManifest(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetImageManifestRequest is the request struct for api GetImageManifest
+type GetImageManifestRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ Tag string `position:"Path" name:"Tag"`
+ SchemaVersion requests.Integer `position:"Query" name:"SchemaVersion"`
+}
+
+// GetImageManifestResponse is the response struct for api GetImageManifest
+type GetImageManifestResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetImageManifestRequest creates a request to invoke GetImageManifest API
+func CreateGetImageManifestRequest() (request *GetImageManifestRequest) {
+ request = &GetImageManifestRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetImageManifest", "/repos/[RepoNamespace]/[RepoName]/tags/[Tag]/manifest", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetImageManifestResponse creates a response to parse from GetImageManifest response
+func CreateGetImageManifestResponse() (response *GetImageManifestResponse) {
+ response = &GetImageManifestResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
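
GetImageManifest mixes both parameter kinds: RepoNamespace, RepoName, and Tag are substituted into the path, while SchemaVersion travels as a query parameter (presumably selecting the Docker manifest schema version; that reading is an assumption here, deferring to the linked document). A sketch:

package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

func main() {
	client, err := cr.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}

	request := cr.CreateGetImageManifestRequest()
	request.RepoNamespace = "my-namespace"
	request.RepoName = "my-repo"
	request.Tag = "latest"
	request.SchemaVersion = requests.NewInteger(2) // assumption: requests a schema-2 manifest

	response, err := client.GetImageManifest(request)
	if err != nil {
		panic(err)
	}
	fmt.Println(response.GetHttpContentString())
}
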
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_image_scan.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_image_scan.go
new file mode 100644
index 000000000..94767523b
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_image_scan.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetImageScan invokes the cr.GetImageScan API synchronously
+// api document: https://help.aliyun.com/api/cr/getimagescan.html
+func (client *Client) GetImageScan(request *GetImageScanRequest) (response *GetImageScanResponse, err error) {
+ response = CreateGetImageScanResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetImageScanWithChan invokes the cr.GetImageScan API asynchronously
+// api document: https://help.aliyun.com/api/cr/getimagescan.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetImageScanWithChan(request *GetImageScanRequest) (<-chan *GetImageScanResponse, <-chan error) {
+ responseChan := make(chan *GetImageScanResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetImageScan(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetImageScanWithCallback invokes the cr.GetImageScan API asynchronously
+// api document: https://help.aliyun.com/api/cr/getimagescan.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetImageScanWithCallback(request *GetImageScanRequest, callback func(response *GetImageScanResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetImageScanResponse
+ var err error
+ defer close(result)
+ response, err = client.GetImageScan(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetImageScanRequest is the request struct for api GetImageScan
+type GetImageScanRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ Tag string `position:"Path" name:"Tag"`
+}
+
+// GetImageScanResponse is the response struct for api GetImageScan
+type GetImageScanResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetImageScanRequest creates a request to invoke GetImageScan API
+func CreateGetImageScanRequest() (request *GetImageScanRequest) {
+ request = &GetImageScanRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetImageScan", "/repos/[RepoNamespace]/[RepoName]/tags/[Tag]/scan", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetImageScanResponse creates a response to parse from GetImageScan response
+func CreateGetImageScanResponse() (response *GetImageScanResponse) {
+ response = &GetImageScanResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_mirror_list.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_mirror_list.go
new file mode 100644
index 000000000..d3af3c94a
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_mirror_list.go
@@ -0,0 +1,102 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetMirrorList invokes the cr.GetMirrorList API synchronously
+// api document: https://help.aliyun.com/api/cr/getmirrorlist.html
+func (client *Client) GetMirrorList(request *GetMirrorListRequest) (response *GetMirrorListResponse, err error) {
+ response = CreateGetMirrorListResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetMirrorListWithChan invokes the cr.GetMirrorList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getmirrorlist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetMirrorListWithChan(request *GetMirrorListRequest) (<-chan *GetMirrorListResponse, <-chan error) {
+ responseChan := make(chan *GetMirrorListResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetMirrorList(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetMirrorListWithCallback invokes the cr.GetMirrorList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getmirrorlist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetMirrorListWithCallback(request *GetMirrorListRequest, callback func(response *GetMirrorListResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetMirrorListResponse
+ var err error
+ defer close(result)
+ response, err = client.GetMirrorList(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetMirrorListRequest is the request struct for api GetMirrorList
+type GetMirrorListRequest struct {
+ *requests.RoaRequest
+}
+
+// GetMirrorListResponse is the response struct for api GetMirrorList
+type GetMirrorListResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetMirrorListRequest creates a request to invoke GetMirrorList API
+func CreateGetMirrorListRequest() (request *GetMirrorListRequest) {
+ request = &GetMirrorListRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetMirrorList", "/mirrors", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetMirrorListResponse creates a response to parse from GetMirrorList response
+func CreateGetMirrorListResponse() (response *GetMirrorListResponse) {
+ response = &GetMirrorListResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_namespace.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_namespace.go
new file mode 100644
index 000000000..d8aad6c04
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_namespace.go
@@ -0,0 +1,103 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetNamespace invokes the cr.GetNamespace API synchronously
+// api document: https://help.aliyun.com/api/cr/getnamespace.html
+func (client *Client) GetNamespace(request *GetNamespaceRequest) (response *GetNamespaceResponse, err error) {
+ response = CreateGetNamespaceResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetNamespaceWithChan invokes the cr.GetNamespace API asynchronously
+// api document: https://help.aliyun.com/api/cr/getnamespace.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetNamespaceWithChan(request *GetNamespaceRequest) (<-chan *GetNamespaceResponse, <-chan error) {
+ responseChan := make(chan *GetNamespaceResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetNamespace(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetNamespaceWithCallback invokes the cr.GetNamespace API asynchronously
+// api document: https://help.aliyun.com/api/cr/getnamespace.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetNamespaceWithCallback(request *GetNamespaceRequest, callback func(response *GetNamespaceResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetNamespaceResponse
+ var err error
+ defer close(result)
+ response, err = client.GetNamespace(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetNamespaceRequest is the request struct for api GetNamespace
+type GetNamespaceRequest struct {
+ *requests.RoaRequest
+ Namespace string `position:"Path" name:"Namespace"`
+}
+
+// GetNamespaceResponse is the response struct for api GetNamespace
+type GetNamespaceResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetNamespaceRequest creates a request to invoke GetNamespace API
+func CreateGetNamespaceRequest() (request *GetNamespaceRequest) {
+ request = &GetNamespaceRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetNamespace", "/namespace/[Namespace]", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetNamespaceResponse creates a response to parse from GetNamespace response
+func CreateGetNamespaceResponse() (response *GetNamespaceResponse) {
+ response = &GetNamespaceResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_namespace_authorization_list.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_namespace_authorization_list.go
new file mode 100644
index 000000000..96a50564e
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_namespace_authorization_list.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetNamespaceAuthorizationList invokes the cr.GetNamespaceAuthorizationList API synchronously
+// api document: https://help.aliyun.com/api/cr/getnamespaceauthorizationlist.html
+func (client *Client) GetNamespaceAuthorizationList(request *GetNamespaceAuthorizationListRequest) (response *GetNamespaceAuthorizationListResponse, err error) {
+ response = CreateGetNamespaceAuthorizationListResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetNamespaceAuthorizationListWithChan invokes the cr.GetNamespaceAuthorizationList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getnamespaceauthorizationlist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetNamespaceAuthorizationListWithChan(request *GetNamespaceAuthorizationListRequest) (<-chan *GetNamespaceAuthorizationListResponse, <-chan error) {
+ responseChan := make(chan *GetNamespaceAuthorizationListResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetNamespaceAuthorizationList(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetNamespaceAuthorizationListWithCallback invokes the cr.GetNamespaceAuthorizationList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getnamespaceauthorizationlist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetNamespaceAuthorizationListWithCallback(request *GetNamespaceAuthorizationListRequest, callback func(response *GetNamespaceAuthorizationListResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetNamespaceAuthorizationListResponse
+ var err error
+ defer close(result)
+ response, err = client.GetNamespaceAuthorizationList(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetNamespaceAuthorizationListRequest is the request struct for api GetNamespaceAuthorizationList
+type GetNamespaceAuthorizationListRequest struct {
+ *requests.RoaRequest
+ Namespace string `position:"Path" name:"Namespace"`
+ Authorize string `position:"Query" name:"Authorize"`
+}
+
+// GetNamespaceAuthorizationListResponse is the response struct for api GetNamespaceAuthorizationList
+type GetNamespaceAuthorizationListResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetNamespaceAuthorizationListRequest creates a request to invoke GetNamespaceAuthorizationList API
+func CreateGetNamespaceAuthorizationListRequest() (request *GetNamespaceAuthorizationListRequest) {
+ request = &GetNamespaceAuthorizationListRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetNamespaceAuthorizationList", "/namespace/[Namespace]/authorizations", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetNamespaceAuthorizationListResponse creates a response to parse from GetNamespaceAuthorizationList response
+func CreateGetNamespaceAuthorizationListResponse() (response *GetNamespaceAuthorizationListResponse) {
+ response = &GetNamespaceAuthorizationListResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_namespace_list.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_namespace_list.go
new file mode 100644
index 000000000..37af7eb69
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_namespace_list.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetNamespaceList invokes the cr.GetNamespaceList API synchronously
+// api document: https://help.aliyun.com/api/cr/getnamespacelist.html
+func (client *Client) GetNamespaceList(request *GetNamespaceListRequest) (response *GetNamespaceListResponse, err error) {
+ response = CreateGetNamespaceListResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetNamespaceListWithChan invokes the cr.GetNamespaceList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getnamespacelist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetNamespaceListWithChan(request *GetNamespaceListRequest) (<-chan *GetNamespaceListResponse, <-chan error) {
+ responseChan := make(chan *GetNamespaceListResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetNamespaceList(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetNamespaceListWithCallback invokes the cr.GetNamespaceList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getnamespacelist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetNamespaceListWithCallback(request *GetNamespaceListRequest, callback func(response *GetNamespaceListResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetNamespaceListResponse
+ var err error
+ defer close(result)
+ response, err = client.GetNamespaceList(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetNamespaceListRequest is the request struct for api GetNamespaceList
+type GetNamespaceListRequest struct {
+ *requests.RoaRequest
+ Authorize string `position:"Query" name:"Authorize"`
+ Status string `position:"Query" name:"Status"`
+}
+
+// GetNamespaceListResponse is the response struct for api GetNamespaceList
+type GetNamespaceListResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetNamespaceListRequest creates a request to invoke GetNamespaceList API
+func CreateGetNamespaceListRequest() (request *GetNamespaceListRequest) {
+ request = &GetNamespaceListRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetNamespaceList", "/namespace", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetNamespaceListResponse creates a response to parse from GetNamespaceList response
+func CreateGetNamespaceListResponse() (response *GetNamespaceListResponse) {
+ response = &GetNamespaceListResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
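
The *WithChan variants run the call on the client's async task pool and hand the result back through buffered channels; the task sends on exactly one channel and then closes both. A consumption sketch, assuming the client from the previous example and that async dispatch has been enabled via `EnableAsync` (a method on the embedded sdk client; without it `AddAsyncTask` returns an error):

    // EnableAsync sizes the goroutine pool and task queue used by AddAsyncTask.
    client.EnableAsync(5, 100)

    request := cr.CreateGetNamespaceListRequest()
    respChan, errChan := client.GetNamespaceListWithChan(request)

    // On success the response is sent before both channels are closed, so a
    // receive on respChan yields nil exactly when the call failed.
    if resp := <-respChan; resp != nil {
        fmt.Println(resp.GetHttpContentString())
    } else {
        fmt.Println(<-errChan)
    }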
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_region.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_region.go
new file mode 100644
index 000000000..dfba6a8f5
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_region.go
@@ -0,0 +1,103 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRegion invokes the cr.GetRegion API synchronously
+// api document: https://help.aliyun.com/api/cr/getregion.html
+func (client *Client) GetRegion(request *GetRegionRequest) (response *GetRegionResponse, err error) {
+ response = CreateGetRegionResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRegionWithChan invokes the cr.GetRegion API asynchronously
+// api document: https://help.aliyun.com/api/cr/getregion.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRegionWithChan(request *GetRegionRequest) (<-chan *GetRegionResponse, <-chan error) {
+ responseChan := make(chan *GetRegionResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRegion(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRegionWithCallback invokes the cr.GetRegion API asynchronously
+// api document: https://help.aliyun.com/api/cr/getregion.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRegionWithCallback(request *GetRegionRequest, callback func(response *GetRegionResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRegionResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRegion(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRegionRequest is the request struct for api GetRegion
+type GetRegionRequest struct {
+ *requests.RoaRequest
+ Domain string `position:"Query" name:"Domain"`
+}
+
+// GetRegionResponse is the response struct for api GetRegion
+type GetRegionResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRegionRequest creates a request to invoke GetRegion API
+func CreateGetRegionRequest() (request *GetRegionRequest) {
+ request = &GetRegionRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRegion", "/regions", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRegionResponse creates a response to parse from GetRegion response
+func CreateGetRegionResponse() (response *GetRegionResponse) {
+ response = &GetRegionResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
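
The *WithCallback variants instead take a completion function and return a `<-chan int` that yields 1 after the callback ran for a dispatched call, or 0 when the task could not be queued; receiving from it is how a caller blocks until completion. A sketch under the same client assumptions, with a placeholder domain:

    request := cr.CreateGetRegionRequest()
    request.Domain = "registry.cn-hangzhou.aliyuncs.com" // placeholder

    done := client.GetRegionWithCallback(request, func(response *cr.GetRegionResponse, err error) {
        if err != nil {
            fmt.Println("GetRegion failed:", err)
            return
        }
        fmt.Println(response.GetHttpContentString())
    })
    <-done // blocks until the callback has run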
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_region_list.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_region_list.go
new file mode 100644
index 000000000..c2e2b183e
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_region_list.go
@@ -0,0 +1,102 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRegionList invokes the cr.GetRegionList API synchronously
+// api document: https://help.aliyun.com/api/cr/getregionlist.html
+func (client *Client) GetRegionList(request *GetRegionListRequest) (response *GetRegionListResponse, err error) {
+ response = CreateGetRegionListResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRegionListWithChan invokes the cr.GetRegionList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getregionlist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRegionListWithChan(request *GetRegionListRequest) (<-chan *GetRegionListResponse, <-chan error) {
+ responseChan := make(chan *GetRegionListResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRegionList(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRegionListWithCallback invokes the cr.GetRegionList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getregionlist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRegionListWithCallback(request *GetRegionListRequest, callback func(response *GetRegionListResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRegionListResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRegionList(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRegionListRequest is the request struct for api GetRegionList
+type GetRegionListRequest struct {
+ *requests.RoaRequest
+}
+
+// GetRegionListResponse is the response struct for api GetRegionList
+type GetRegionListResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRegionListRequest creates a request to invoke GetRegionList API
+func CreateGetRegionListRequest() (request *GetRegionListRequest) {
+ request = &GetRegionListRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRegionList", "/regions", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRegionListResponse creates a response to parse from GetRegionList response
+func CreateGetRegionListResponse() (response *GetRegionListResponse) {
+ response = &GetRegionListResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo.go
new file mode 100644
index 000000000..3868feb2b
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepo invokes the cr.GetRepo API synchronously
+// api document: https://help.aliyun.com/api/cr/getrepo.html
+func (client *Client) GetRepo(request *GetRepoRequest) (response *GetRepoResponse, err error) {
+ response = CreateGetRepoResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoWithChan invokes the cr.GetRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoWithChan(request *GetRepoRequest) (<-chan *GetRepoResponse, <-chan error) {
+ responseChan := make(chan *GetRepoResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepo(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoWithCallback invokes the cr.GetRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoWithCallback(request *GetRepoRequest, callback func(response *GetRepoResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepo(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoRequest is the request struct for api GetRepo
+type GetRepoRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// GetRepoResponse is the response struct for api GetRepo
+type GetRepoResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoRequest creates a request to invoke GetRepo API
+func CreateGetRepoRequest() (request *GetRepoRequest) {
+ request = &GetRepoRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepo", "/repos/[RepoNamespace]/[RepoName]", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoResponse creates a response to parse from GetRepo response
+func CreateGetRepoResponse() (response *GetRepoResponse) {
+ response = &GetRepoResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_authorization_list.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_authorization_list.go
new file mode 100644
index 000000000..c4177f122
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_authorization_list.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoAuthorizationList invokes the cr.GetRepoAuthorizationList API synchronously
+// api document: https://help.aliyun.com/api/cr/getrepoauthorizationlist.html
+func (client *Client) GetRepoAuthorizationList(request *GetRepoAuthorizationListRequest) (response *GetRepoAuthorizationListResponse, err error) {
+ response = CreateGetRepoAuthorizationListResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoAuthorizationListWithChan invokes the cr.GetRepoAuthorizationList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepoauthorizationlist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoAuthorizationListWithChan(request *GetRepoAuthorizationListRequest) (<-chan *GetRepoAuthorizationListResponse, <-chan error) {
+ responseChan := make(chan *GetRepoAuthorizationListResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoAuthorizationList(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoAuthorizationListWithCallback invokes the cr.GetRepoAuthorizationList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepoauthorizationlist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoAuthorizationListWithCallback(request *GetRepoAuthorizationListRequest, callback func(response *GetRepoAuthorizationListResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoAuthorizationListResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoAuthorizationList(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoAuthorizationListRequest is the request struct for api GetRepoAuthorizationList
+type GetRepoAuthorizationListRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ Authorize string `position:"Query" name:"Authorize"`
+}
+
+// GetRepoAuthorizationListResponse is the response struct for api GetRepoAuthorizationList
+type GetRepoAuthorizationListResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoAuthorizationListRequest creates a request to invoke GetRepoAuthorizationList API
+func CreateGetRepoAuthorizationListRequest() (request *GetRepoAuthorizationListRequest) {
+ request = &GetRepoAuthorizationListRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoAuthorizationList", "/repos/[RepoNamespace]/[RepoName]/authorizations", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoAuthorizationListResponse creates a response to parse from GetRepoAuthorizationList response
+func CreateGetRepoAuthorizationListResponse() (response *GetRepoAuthorizationListResponse) {
+ response = &GetRepoAuthorizationListResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_batch.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_batch.go
new file mode 100644
index 000000000..22d30c231
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_batch.go
@@ -0,0 +1,103 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoBatch invokes the cr.GetRepoBatch API synchronously
+// api document: https://help.aliyun.com/api/cr/getrepobatch.html
+func (client *Client) GetRepoBatch(request *GetRepoBatchRequest) (response *GetRepoBatchResponse, err error) {
+ response = CreateGetRepoBatchResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoBatchWithChan invokes the cr.GetRepoBatch API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepobatch.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoBatchWithChan(request *GetRepoBatchRequest) (<-chan *GetRepoBatchResponse, <-chan error) {
+ responseChan := make(chan *GetRepoBatchResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoBatch(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoBatchWithCallback invokes the cr.GetRepoBatch API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepobatch.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoBatchWithCallback(request *GetRepoBatchRequest, callback func(response *GetRepoBatchResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoBatchResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoBatch(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoBatchRequest is the request struct for api GetRepoBatch
+type GetRepoBatchRequest struct {
+ *requests.RoaRequest
+ RepoIds string `position:"Query" name:"RepoIds"`
+}
+
+// GetRepoBatchResponse is the response struct for api GetRepoBatch
+type GetRepoBatchResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoBatchRequest creates a request to invoke GetRepoBatch API
+func CreateGetRepoBatchRequest() (request *GetRepoBatchRequest) {
+ request = &GetRepoBatchRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoBatch", "/batchsearch", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoBatchResponse creates a response to parse from GetRepoBatch response
+func CreateGetRepoBatchResponse() (response *GetRepoBatchResponse) {
+ response = &GetRepoBatchResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_build_list.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_build_list.go
new file mode 100644
index 000000000..cc211903a
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_build_list.go
@@ -0,0 +1,106 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoBuildList invokes the cr.GetRepoBuildList API synchronously
+// api document: https://help.aliyun.com/api/cr/getrepobuildlist.html
+func (client *Client) GetRepoBuildList(request *GetRepoBuildListRequest) (response *GetRepoBuildListResponse, err error) {
+ response = CreateGetRepoBuildListResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoBuildListWithChan invokes the cr.GetRepoBuildList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepobuildlist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoBuildListWithChan(request *GetRepoBuildListRequest) (<-chan *GetRepoBuildListResponse, <-chan error) {
+ responseChan := make(chan *GetRepoBuildListResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoBuildList(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoBuildListWithCallback invokes the cr.GetRepoBuildList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepobuildlist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoBuildListWithCallback(request *GetRepoBuildListRequest, callback func(response *GetRepoBuildListResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoBuildListResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoBuildList(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoBuildListRequest is the request struct for api GetRepoBuildList
+type GetRepoBuildListRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ PageSize requests.Integer `position:"Query" name:"PageSize"`
+ Page requests.Integer `position:"Query" name:"Page"`
+}
+
+// GetRepoBuildListResponse is the response struct for api GetRepoBuildList
+type GetRepoBuildListResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoBuildListRequest creates a request to invoke GetRepoBuildList API
+func CreateGetRepoBuildListRequest() (request *GetRepoBuildListRequest) {
+ request = &GetRepoBuildListRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoBuildList", "/repos/[RepoNamespace]/[RepoName]/build", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoBuildListResponse creates a response to parse from GetRepoBuildList response
+func CreateGetRepoBuildListResponse() (response *GetRepoBuildListResponse) {
+ response = &GetRepoBuildListResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
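
Query parameters typed as `requests.Integer` are string-backed; the SDK's `requests.NewInteger` helper (from `github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests`) converts an int. A paging sketch for the build list, with placeholder repo coordinates:

    request := cr.CreateGetRepoBuildListRequest()
    request.RepoNamespace = "my-namespace"
    request.RepoName = "my-repo"
    request.Page = requests.NewInteger(1)      // NewInteger wraps strconv.Itoa
    request.PageSize = requests.NewInteger(30)

    response, err := client.GetRepoBuildList(request)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(response.GetHttpContentString())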
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_build_logs.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_build_logs.go
new file mode 100644
index 000000000..a9cddeec5
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_build_logs.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoBuildLogs invokes the cr.GetRepoBuildLogs API synchronously
+// api document: https://help.aliyun.com/api/cr/getrepobuildlogs.html
+func (client *Client) GetRepoBuildLogs(request *GetRepoBuildLogsRequest) (response *GetRepoBuildLogsResponse, err error) {
+ response = CreateGetRepoBuildLogsResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoBuildLogsWithChan invokes the cr.GetRepoBuildLogs API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepobuildlogs.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoBuildLogsWithChan(request *GetRepoBuildLogsRequest) (<-chan *GetRepoBuildLogsResponse, <-chan error) {
+ responseChan := make(chan *GetRepoBuildLogsResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoBuildLogs(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoBuildLogsWithCallback invokes the cr.GetRepoBuildLogs API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepobuildlogs.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoBuildLogsWithCallback(request *GetRepoBuildLogsRequest, callback func(response *GetRepoBuildLogsResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoBuildLogsResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoBuildLogs(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoBuildLogsRequest is the request struct for api GetRepoBuildLogs
+type GetRepoBuildLogsRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ BuildId string `position:"Path" name:"BuildId"`
+}
+
+// GetRepoBuildLogsResponse is the response struct for api GetRepoBuildLogs
+type GetRepoBuildLogsResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoBuildLogsRequest creates a request to invoke GetRepoBuildLogs API
+func CreateGetRepoBuildLogsRequest() (request *GetRepoBuildLogsRequest) {
+ request = &GetRepoBuildLogsRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoBuildLogs", "/repos/[RepoNamespace]/[RepoName]/build/[BuildId]/logs", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoBuildLogsResponse creates a response to parse from GetRepoBuildLogs response
+func CreateGetRepoBuildLogsResponse() (response *GetRepoBuildLogsResponse) {
+ response = &GetRepoBuildLogsResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_build_rule_list.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_build_rule_list.go
new file mode 100644
index 000000000..373197cab
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_build_rule_list.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoBuildRuleList invokes the cr.GetRepoBuildRuleList API synchronously
+// api document: https://help.aliyun.com/api/cr/getrepobuildrulelist.html
+func (client *Client) GetRepoBuildRuleList(request *GetRepoBuildRuleListRequest) (response *GetRepoBuildRuleListResponse, err error) {
+ response = CreateGetRepoBuildRuleListResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoBuildRuleListWithChan invokes the cr.GetRepoBuildRuleList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepobuildrulelist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoBuildRuleListWithChan(request *GetRepoBuildRuleListRequest) (<-chan *GetRepoBuildRuleListResponse, <-chan error) {
+ responseChan := make(chan *GetRepoBuildRuleListResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoBuildRuleList(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoBuildRuleListWithCallback invokes the cr.GetRepoBuildRuleList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepobuildrulelist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoBuildRuleListWithCallback(request *GetRepoBuildRuleListRequest, callback func(response *GetRepoBuildRuleListResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoBuildRuleListResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoBuildRuleList(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoBuildRuleListRequest is the request struct for api GetRepoBuildRuleList
+type GetRepoBuildRuleListRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// GetRepoBuildRuleListResponse is the response struct for api GetRepoBuildRuleList
+type GetRepoBuildRuleListResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoBuildRuleListRequest creates a request to invoke GetRepoBuildRuleList API
+func CreateGetRepoBuildRuleListRequest() (request *GetRepoBuildRuleListRequest) {
+ request = &GetRepoBuildRuleListRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoBuildRuleList", "/repos/[RepoNamespace]/[RepoName]/rules", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoBuildRuleListResponse creates a response to parse from GetRepoBuildRuleList response
+func CreateGetRepoBuildRuleListResponse() (response *GetRepoBuildRuleListResponse) {
+ response = &GetRepoBuildRuleListResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_build_status.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_build_status.go
new file mode 100644
index 000000000..b473848a8
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_build_status.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoBuildStatus invokes the cr.GetRepoBuildStatus API synchronously
+// api document: https://help.aliyun.com/api/cr/getrepobuildstatus.html
+func (client *Client) GetRepoBuildStatus(request *GetRepoBuildStatusRequest) (response *GetRepoBuildStatusResponse, err error) {
+ response = CreateGetRepoBuildStatusResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoBuildStatusWithChan invokes the cr.GetRepoBuildStatus API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepobuildstatus.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoBuildStatusWithChan(request *GetRepoBuildStatusRequest) (<-chan *GetRepoBuildStatusResponse, <-chan error) {
+ responseChan := make(chan *GetRepoBuildStatusResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoBuildStatus(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoBuildStatusWithCallback invokes the cr.GetRepoBuildStatus API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepobuildstatus.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoBuildStatusWithCallback(request *GetRepoBuildStatusRequest, callback func(response *GetRepoBuildStatusResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoBuildStatusResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoBuildStatus(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoBuildStatusRequest is the request struct for api GetRepoBuildStatus
+type GetRepoBuildStatusRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ BuildId string `position:"Path" name:"BuildId"`
+}
+
+// GetRepoBuildStatusResponse is the response struct for api GetRepoBuildStatus
+type GetRepoBuildStatusResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoBuildStatusRequest creates a request to invoke GetRepoBuildStatus API
+func CreateGetRepoBuildStatusRequest() (request *GetRepoBuildStatusRequest) {
+ request = &GetRepoBuildStatusRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoBuildStatus", "/repos/[RepoNamespace]/[RepoName]/build/[BuildId]/status", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoBuildStatusResponse creates a response to parse from GetRepoBuildStatus response
+func CreateGetRepoBuildStatusResponse() (response *GetRepoBuildStatusResponse) {
+ response = &GetRepoBuildStatusResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_list.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_list.go
new file mode 100644
index 000000000..420b86e92
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_list.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoList invokes the cr.GetRepoList API synchronously
+// api document: https://help.aliyun.com/api/cr/getrepolist.html
+func (client *Client) GetRepoList(request *GetRepoListRequest) (response *GetRepoListResponse, err error) {
+ response = CreateGetRepoListResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoListWithChan invokes the cr.GetRepoList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepolist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoListWithChan(request *GetRepoListRequest) (<-chan *GetRepoListResponse, <-chan error) {
+ responseChan := make(chan *GetRepoListResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoList(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoListWithCallback invokes the cr.GetRepoList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepolist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoListWithCallback(request *GetRepoListRequest, callback func(response *GetRepoListResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoListResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoList(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoListRequest is the request struct for api GetRepoList
+type GetRepoListRequest struct {
+ *requests.RoaRequest
+ PageSize requests.Integer `position:"Query" name:"PageSize"`
+ Page requests.Integer `position:"Query" name:"Page"`
+ Status string `position:"Query" name:"Status"`
+}
+
+// GetRepoListResponse is the response struct for api GetRepoList
+type GetRepoListResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoListRequest creates a request to invoke GetRepoList API
+func CreateGetRepoListRequest() (request *GetRepoListRequest) {
+ request = &GetRepoListRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoList", "/repos", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoListResponse creates a response to parse from GetRepoList response
+func CreateGetRepoListResponse() (response *GetRepoListResponse) {
+ response = &GetRepoListResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
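
Because these cr responses embed only *responses.BaseResponse, the payload is not unmarshalled for the caller; a consumer defines its own struct for the fields it needs and decodes `GetHttpContentBytes` itself with `encoding/json`. A sketch with a hypothetical partial shape — the field names below are illustrative, not taken from the cr API spec:

    // Illustrative only: the real JSON shape must come from the cr API docs.
    type repoListBody struct {
        Data struct {
            Total int `json:"total"`
        } `json:"data"`
    }

    response, err := client.GetRepoList(cr.CreateGetRepoListRequest())
    if err != nil {
        fmt.Println(err)
        return
    }
    var body repoListBody
    if err := json.Unmarshal(response.GetHttpContentBytes(), &body); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("total repos:", body.Data.Total)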
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_list_by_namespace.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_list_by_namespace.go
new file mode 100644
index 000000000..6aff8dad4
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_list_by_namespace.go
@@ -0,0 +1,106 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoListByNamespace invokes the cr.GetRepoListByNamespace API synchronously
+// api document: https://help.aliyun.com/api/cr/getrepolistbynamespace.html
+func (client *Client) GetRepoListByNamespace(request *GetRepoListByNamespaceRequest) (response *GetRepoListByNamespaceResponse, err error) {
+ response = CreateGetRepoListByNamespaceResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoListByNamespaceWithChan invokes the cr.GetRepoListByNamespace API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepolistbynamespace.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoListByNamespaceWithChan(request *GetRepoListByNamespaceRequest) (<-chan *GetRepoListByNamespaceResponse, <-chan error) {
+ responseChan := make(chan *GetRepoListByNamespaceResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoListByNamespace(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoListByNamespaceWithCallback invokes the cr.GetRepoListByNamespace API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepolistbynamespace.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoListByNamespaceWithCallback(request *GetRepoListByNamespaceRequest, callback func(response *GetRepoListByNamespaceResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoListByNamespaceResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoListByNamespace(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoListByNamespaceRequest is the request struct for api GetRepoListByNamespace
+type GetRepoListByNamespaceRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ PageSize requests.Integer `position:"Query" name:"PageSize"`
+ Page requests.Integer `position:"Query" name:"Page"`
+ Status string `position:"Query" name:"Status"`
+}
+
+// GetRepoListByNamespaceResponse is the response struct for api GetRepoListByNamespace
+type GetRepoListByNamespaceResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoListByNamespaceRequest creates a request to invoke GetRepoListByNamespace API
+func CreateGetRepoListByNamespaceRequest() (request *GetRepoListByNamespaceRequest) {
+ request = &GetRepoListByNamespaceRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoListByNamespace", "/repos/[RepoNamespace]", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoListByNamespaceResponse creates a response to parse from GetRepoListByNamespace response
+func CreateGetRepoListByNamespaceResponse() (response *GetRepoListByNamespaceResponse) {
+ response = &GetRepoListByNamespaceResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_source_repo.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_source_repo.go
new file mode 100644
index 000000000..3a5a74f90
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_source_repo.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoSourceRepo invokes the cr.GetRepoSourceRepo API synchronously
+// api document: https://help.aliyun.com/api/cr/getreposourcerepo.html
+func (client *Client) GetRepoSourceRepo(request *GetRepoSourceRepoRequest) (response *GetRepoSourceRepoResponse, err error) {
+ response = CreateGetRepoSourceRepoResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoSourceRepoWithChan invokes the cr.GetRepoSourceRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/getreposourcerepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoSourceRepoWithChan(request *GetRepoSourceRepoRequest) (<-chan *GetRepoSourceRepoResponse, <-chan error) {
+ responseChan := make(chan *GetRepoSourceRepoResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoSourceRepo(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoSourceRepoWithCallback invokes the cr.GetRepoSourceRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/getreposourcerepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoSourceRepoWithCallback(request *GetRepoSourceRepoRequest, callback func(response *GetRepoSourceRepoResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoSourceRepoResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoSourceRepo(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoSourceRepoRequest is the request struct for api GetRepoSourceRepo
+type GetRepoSourceRepoRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// GetRepoSourceRepoResponse is the response struct for api GetRepoSourceRepo
+type GetRepoSourceRepoResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoSourceRepoRequest creates a request to invoke GetRepoSourceRepo API
+func CreateGetRepoSourceRepoRequest() (request *GetRepoSourceRepoRequest) {
+ request = &GetRepoSourceRepoRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoSourceRepo", "/repos/[RepoNamespace]/[RepoName]/sourceRepo", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoSourceRepoResponse creates a response to parse from GetRepoSourceRepo response
+func CreateGetRepoSourceRepoResponse() (response *GetRepoSourceRepoResponse) {
+ response = &GetRepoSourceRepoResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_sync_task.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_sync_task.go
new file mode 100644
index 000000000..e6347dc2f
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_sync_task.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoSyncTask invokes the cr.GetRepoSyncTask API synchronously
+// api document: https://help.aliyun.com/api/cr/getreposynctask.html
+func (client *Client) GetRepoSyncTask(request *GetRepoSyncTaskRequest) (response *GetRepoSyncTaskResponse, err error) {
+ response = CreateGetRepoSyncTaskResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoSyncTaskWithChan invokes the cr.GetRepoSyncTask API asynchronously
+// api document: https://help.aliyun.com/api/cr/getreposynctask.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoSyncTaskWithChan(request *GetRepoSyncTaskRequest) (<-chan *GetRepoSyncTaskResponse, <-chan error) {
+ responseChan := make(chan *GetRepoSyncTaskResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoSyncTask(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoSyncTaskWithCallback invokes the cr.GetRepoSyncTask API asynchronously
+// api document: https://help.aliyun.com/api/cr/getreposynctask.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoSyncTaskWithCallback(request *GetRepoSyncTaskRequest, callback func(response *GetRepoSyncTaskResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoSyncTaskResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoSyncTask(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoSyncTaskRequest is the request struct for api GetRepoSyncTask
+type GetRepoSyncTaskRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ SyncTaskId string `position:"Path" name:"SyncTaskId"`
+}
+
+// GetRepoSyncTaskResponse is the response struct for api GetRepoSyncTask
+type GetRepoSyncTaskResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoSyncTaskRequest creates a request to invoke GetRepoSyncTask API
+func CreateGetRepoSyncTaskRequest() (request *GetRepoSyncTaskRequest) {
+ request = &GetRepoSyncTaskRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoSyncTask", "/repos/[RepoNamespace]/[RepoName]/syncTasks/[SyncTaskId]", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoSyncTaskResponse creates a response to parse from GetRepoSyncTask response
+func CreateGetRepoSyncTaskResponse() (response *GetRepoSyncTaskResponse) {
+ response = &GetRepoSyncTaskResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_sync_task_list.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_sync_task_list.go
new file mode 100644
index 000000000..3ca17658c
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_sync_task_list.go
@@ -0,0 +1,106 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoSyncTaskList invokes the cr.GetRepoSyncTaskList API synchronously
+// api document: https://help.aliyun.com/api/cr/getreposynctasklist.html
+func (client *Client) GetRepoSyncTaskList(request *GetRepoSyncTaskListRequest) (response *GetRepoSyncTaskListResponse, err error) {
+ response = CreateGetRepoSyncTaskListResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoSyncTaskListWithChan invokes the cr.GetRepoSyncTaskList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getreposynctasklist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoSyncTaskListWithChan(request *GetRepoSyncTaskListRequest) (<-chan *GetRepoSyncTaskListResponse, <-chan error) {
+ responseChan := make(chan *GetRepoSyncTaskListResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoSyncTaskList(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoSyncTaskListWithCallback invokes the cr.GetRepoSyncTaskList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getreposynctasklist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoSyncTaskListWithCallback(request *GetRepoSyncTaskListRequest, callback func(response *GetRepoSyncTaskListResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoSyncTaskListResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoSyncTaskList(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoSyncTaskListRequest is the request struct for api GetRepoSyncTaskList
+type GetRepoSyncTaskListRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ PageSize requests.Integer `position:"Query" name:"PageSize"`
+ Page requests.Integer `position:"Query" name:"Page"`
+}
+
+// GetRepoSyncTaskListResponse is the response struct for api GetRepoSyncTaskList
+type GetRepoSyncTaskListResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoSyncTaskListRequest creates a request to invoke GetRepoSyncTaskList API
+func CreateGetRepoSyncTaskListRequest() (request *GetRepoSyncTaskListRequest) {
+ request = &GetRepoSyncTaskListRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoSyncTaskList", "/repos/[RepoNamespace]/[RepoName]/syncTasks", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoSyncTaskListResponse creates a response to parse from GetRepoSyncTaskList response
+func CreateGetRepoSyncTaskListResponse() (response *GetRepoSyncTaskListResponse) {
+ response = &GetRepoSyncTaskListResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_tags.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_tags.go
new file mode 100644
index 000000000..42fa5cae0
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_tags.go
@@ -0,0 +1,106 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoTags invokes the cr.GetRepoTags API synchronously
+// api document: https://help.aliyun.com/api/cr/getrepotags.html
+func (client *Client) GetRepoTags(request *GetRepoTagsRequest) (response *GetRepoTagsResponse, err error) {
+ response = CreateGetRepoTagsResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoTagsWithChan invokes the cr.GetRepoTags API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepotags.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoTagsWithChan(request *GetRepoTagsRequest) (<-chan *GetRepoTagsResponse, <-chan error) {
+ responseChan := make(chan *GetRepoTagsResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoTags(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoTagsWithCallback invokes the cr.GetRepoTags API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepotags.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoTagsWithCallback(request *GetRepoTagsRequest, callback func(response *GetRepoTagsResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoTagsResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoTags(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoTagsRequest is the request struct for api GetRepoTags
+type GetRepoTagsRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ PageSize requests.Integer `position:"Query" name:"PageSize"`
+ Page requests.Integer `position:"Query" name:"Page"`
+}
+
+// GetRepoTagsResponse is the response struct for api GetRepoTags
+type GetRepoTagsResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoTagsRequest creates a request to invoke GetRepoTags API
+func CreateGetRepoTagsRequest() (request *GetRepoTagsRequest) {
+ request = &GetRepoTagsRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoTags", "/repos/[RepoNamespace]/[RepoName]/tags", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoTagsResponse creates a response to parse from GetRepoTags response
+func CreateGetRepoTagsResponse() (response *GetRepoTagsResponse) {
+ response = &GetRepoTagsResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
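
GetRepoTagsRequest exposes its paging controls as requests.Integer query parameters. A short sketch of setting them, assuming the requests.NewInteger helper from the vendored SDK core and the placeholder client from the earlier sketch (namespace, repository, and page values are likewise placeholders):

    // Assumes the import "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
    // in addition to the cr package and client from the previous sketch.
    request := cr.CreateGetRepoTagsRequest()
    request.RepoNamespace = "my-namespace"     // placeholder
    request.RepoName = "my-repo"               // placeholder
    request.Page = requests.NewInteger(1)      // first page
    request.PageSize = requests.NewInteger(30) // tags per page

    response, err := client.GetRepoTags(request)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(response.GetHttpContentString())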
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_webhook.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_webhook.go
new file mode 100644
index 000000000..830597fd9
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_webhook.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoWebhook invokes the cr.GetRepoWebhook API synchronously
+// api document: https://help.aliyun.com/api/cr/getrepowebhook.html
+func (client *Client) GetRepoWebhook(request *GetRepoWebhookRequest) (response *GetRepoWebhookResponse, err error) {
+ response = CreateGetRepoWebhookResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoWebhookWithChan invokes the cr.GetRepoWebhook API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepowebhook.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoWebhookWithChan(request *GetRepoWebhookRequest) (<-chan *GetRepoWebhookResponse, <-chan error) {
+ responseChan := make(chan *GetRepoWebhookResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoWebhook(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoWebhookWithCallback invokes the cr.GetRepoWebhook API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepowebhook.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoWebhookWithCallback(request *GetRepoWebhookRequest, callback func(response *GetRepoWebhookResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoWebhookResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoWebhook(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoWebhookRequest is the request struct for api GetRepoWebhook
+type GetRepoWebhookRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// GetRepoWebhookResponse is the response struct for api GetRepoWebhook
+type GetRepoWebhookResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoWebhookRequest creates a request to invoke GetRepoWebhook API
+func CreateGetRepoWebhookRequest() (request *GetRepoWebhookRequest) {
+ request = &GetRepoWebhookRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoWebhook", "/repos/[RepoNamespace]/[RepoName]/webhooks", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoWebhookResponse creates a response to parse from GetRepoWebhook response
+func CreateGetRepoWebhookResponse() (response *GetRepoWebhookResponse) {
+ response = &GetRepoWebhookResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_webhook_log_list.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_webhook_log_list.go
new file mode 100644
index 000000000..19c34c6bd
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_repo_webhook_log_list.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetRepoWebhookLogList invokes the cr.GetRepoWebhookLogList API synchronously
+// api document: https://help.aliyun.com/api/cr/getrepowebhookloglist.html
+func (client *Client) GetRepoWebhookLogList(request *GetRepoWebhookLogListRequest) (response *GetRepoWebhookLogListResponse, err error) {
+ response = CreateGetRepoWebhookLogListResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetRepoWebhookLogListWithChan invokes the cr.GetRepoWebhookLogList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepowebhookloglist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoWebhookLogListWithChan(request *GetRepoWebhookLogListRequest) (<-chan *GetRepoWebhookLogListResponse, <-chan error) {
+ responseChan := make(chan *GetRepoWebhookLogListResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetRepoWebhookLogList(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetRepoWebhookLogListWithCallback invokes the cr.GetRepoWebhookLogList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getrepowebhookloglist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetRepoWebhookLogListWithCallback(request *GetRepoWebhookLogListRequest, callback func(response *GetRepoWebhookLogListResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetRepoWebhookLogListResponse
+ var err error
+ defer close(result)
+ response, err = client.GetRepoWebhookLogList(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetRepoWebhookLogListRequest is the request struct for api GetRepoWebhookLogList
+type GetRepoWebhookLogListRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ WebhookId requests.Integer `position:"Path" name:"WebhookId"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// GetRepoWebhookLogListResponse is the response struct for api GetRepoWebhookLogList
+type GetRepoWebhookLogListResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetRepoWebhookLogListRequest creates a request to invoke GetRepoWebhookLogList API
+func CreateGetRepoWebhookLogListRequest() (request *GetRepoWebhookLogListRequest) {
+ request = &GetRepoWebhookLogListRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetRepoWebhookLogList", "/repos/[RepoNamespace]/[RepoName]/webhooks/[WebhookId]/logs", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetRepoWebhookLogListResponse creates a response to parse from GetRepoWebhookLogList response
+func CreateGetRepoWebhookLogListResponse() (response *GetRepoWebhookLogListResponse) {
+ response = &GetRepoWebhookLogListResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_search.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_search.go
new file mode 100644
index 000000000..9889e6be2
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_search.go
@@ -0,0 +1,106 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetSearch invokes the cr.GetSearch API synchronously
+// api document: https://help.aliyun.com/api/cr/getsearch.html
+func (client *Client) GetSearch(request *GetSearchRequest) (response *GetSearchResponse, err error) {
+ response = CreateGetSearchResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetSearchWithChan invokes the cr.GetSearch API asynchronously
+// api document: https://help.aliyun.com/api/cr/getsearch.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetSearchWithChan(request *GetSearchRequest) (<-chan *GetSearchResponse, <-chan error) {
+ responseChan := make(chan *GetSearchResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetSearch(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetSearchWithCallback invokes the cr.GetSearch API asynchronously
+// api document: https://help.aliyun.com/api/cr/getsearch.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetSearchWithCallback(request *GetSearchRequest, callback func(response *GetSearchResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetSearchResponse
+ var err error
+ defer close(result)
+ response, err = client.GetSearch(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetSearchRequest is the request struct for api GetSearch
+type GetSearchRequest struct {
+ *requests.RoaRequest
+ Origin string `position:"Query" name:"Origin"`
+ PageSize requests.Integer `position:"Query" name:"PageSize"`
+ Page requests.Integer `position:"Query" name:"Page"`
+ Keyword string `position:"Query" name:"Keyword"`
+}
+
+// GetSearchResponse is the response struct for api GetSearch
+type GetSearchResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetSearchRequest creates a request to invoke GetSearch API
+func CreateGetSearchRequest() (request *GetSearchRequest) {
+ request = &GetSearchRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetSearch", "/search-delete", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetSearchResponse creates a response to parse from GetSearch response
+func CreateGetSearchResponse() (response *GetSearchResponse) {
+ response = &GetSearchResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_sub_user_list.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_sub_user_list.go
new file mode 100644
index 000000000..31fb8e887
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_sub_user_list.go
@@ -0,0 +1,102 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetSubUserList invokes the cr.GetSubUserList API synchronously
+// api document: https://help.aliyun.com/api/cr/getsubuserlist.html
+func (client *Client) GetSubUserList(request *GetSubUserListRequest) (response *GetSubUserListResponse, err error) {
+ response = CreateGetSubUserListResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetSubUserListWithChan invokes the cr.GetSubUserList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getsubuserlist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetSubUserListWithChan(request *GetSubUserListRequest) (<-chan *GetSubUserListResponse, <-chan error) {
+ responseChan := make(chan *GetSubUserListResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetSubUserList(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetSubUserListWithCallback invokes the cr.GetSubUserList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getsubuserlist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetSubUserListWithCallback(request *GetSubUserListRequest, callback func(response *GetSubUserListResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetSubUserListResponse
+ var err error
+ defer close(result)
+ response, err = client.GetSubUserList(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetSubUserListRequest is the request struct for api GetSubUserList
+type GetSubUserListRequest struct {
+ *requests.RoaRequest
+}
+
+// GetSubUserListResponse is the response struct for api GetSubUserList
+type GetSubUserListResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetSubUserListRequest creates a request to invoke GetSubUserList API
+func CreateGetSubUserListRequest() (request *GetSubUserListRequest) {
+ request = &GetSubUserListRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetSubUserList", "/users/subAccount", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetSubUserListResponse creates a response to parse from GetSubUserList response
+func CreateGetSubUserListResponse() (response *GetSubUserListResponse) {
+ response = &GetSubUserListResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_user_info.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_user_info.go
new file mode 100644
index 000000000..c29b8dd49
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_user_info.go
@@ -0,0 +1,102 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetUserInfo invokes the cr.GetUserInfo API synchronously
+// api document: https://help.aliyun.com/api/cr/getuserinfo.html
+func (client *Client) GetUserInfo(request *GetUserInfoRequest) (response *GetUserInfoResponse, err error) {
+ response = CreateGetUserInfoResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetUserInfoWithChan invokes the cr.GetUserInfo API asynchronously
+// api document: https://help.aliyun.com/api/cr/getuserinfo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetUserInfoWithChan(request *GetUserInfoRequest) (<-chan *GetUserInfoResponse, <-chan error) {
+ responseChan := make(chan *GetUserInfoResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetUserInfo(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetUserInfoWithCallback invokes the cr.GetUserInfo API asynchronously
+// api document: https://help.aliyun.com/api/cr/getuserinfo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetUserInfoWithCallback(request *GetUserInfoRequest, callback func(response *GetUserInfoResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetUserInfoResponse
+ var err error
+ defer close(result)
+ response, err = client.GetUserInfo(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetUserInfoRequest is the request struct for api GetUserInfo
+type GetUserInfoRequest struct {
+ *requests.RoaRequest
+}
+
+// GetUserInfoResponse is the response struct for api GetUserInfo
+type GetUserInfoResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetUserInfoRequest creates a request to invoke GetUserInfo API
+func CreateGetUserInfoRequest() (request *GetUserInfoRequest) {
+ request = &GetUserInfoRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetUserInfo", "/users", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetUserInfoResponse creates a response to parse from GetUserInfo response
+func CreateGetUserInfoResponse() (response *GetUserInfoResponse) {
+ response = &GetUserInfoResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_user_source_account.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_user_source_account.go
new file mode 100644
index 000000000..63e90da43
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_user_source_account.go
@@ -0,0 +1,103 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetUserSourceAccount invokes the cr.GetUserSourceAccount API synchronously
+// api document: https://help.aliyun.com/api/cr/getusersourceaccount.html
+func (client *Client) GetUserSourceAccount(request *GetUserSourceAccountRequest) (response *GetUserSourceAccountResponse, err error) {
+ response = CreateGetUserSourceAccountResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetUserSourceAccountWithChan invokes the cr.GetUserSourceAccount API asynchronously
+// api document: https://help.aliyun.com/api/cr/getusersourceaccount.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetUserSourceAccountWithChan(request *GetUserSourceAccountRequest) (<-chan *GetUserSourceAccountResponse, <-chan error) {
+ responseChan := make(chan *GetUserSourceAccountResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetUserSourceAccount(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetUserSourceAccountWithCallback invokes the cr.GetUserSourceAccount API asynchronously
+// api document: https://help.aliyun.com/api/cr/getusersourceaccount.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetUserSourceAccountWithCallback(request *GetUserSourceAccountRequest, callback func(response *GetUserSourceAccountResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetUserSourceAccountResponse
+ var err error
+ defer close(result)
+ response, err = client.GetUserSourceAccount(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetUserSourceAccountRequest is the request struct for api GetUserSourceAccount
+type GetUserSourceAccountRequest struct {
+ *requests.RoaRequest
+ SourceOriginType string `position:"Query" name:"SourceOriginType"`
+}
+
+// GetUserSourceAccountResponse is the response struct for api GetUserSourceAccount
+type GetUserSourceAccountResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetUserSourceAccountRequest creates a request to invoke GetUserSourceAccount API
+func CreateGetUserSourceAccountRequest() (request *GetUserSourceAccountRequest) {
+ request = &GetUserSourceAccountRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetUserSourceAccount", "/users/sourceAccount", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetUserSourceAccountResponse creates a response to parse from GetUserSourceAccount response
+func CreateGetUserSourceAccountResponse() (response *GetUserSourceAccountResponse) {
+ response = &GetUserSourceAccountResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_user_source_repo_list.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_user_source_repo_list.go
new file mode 100644
index 000000000..98c2622d1
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_user_source_repo_list.go
@@ -0,0 +1,103 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetUserSourceRepoList invokes the cr.GetUserSourceRepoList API synchronously
+// api document: https://help.aliyun.com/api/cr/getusersourcerepolist.html
+func (client *Client) GetUserSourceRepoList(request *GetUserSourceRepoListRequest) (response *GetUserSourceRepoListResponse, err error) {
+ response = CreateGetUserSourceRepoListResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetUserSourceRepoListWithChan invokes the cr.GetUserSourceRepoList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getusersourcerepolist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetUserSourceRepoListWithChan(request *GetUserSourceRepoListRequest) (<-chan *GetUserSourceRepoListResponse, <-chan error) {
+ responseChan := make(chan *GetUserSourceRepoListResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetUserSourceRepoList(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetUserSourceRepoListWithCallback invokes the cr.GetUserSourceRepoList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getusersourcerepolist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetUserSourceRepoListWithCallback(request *GetUserSourceRepoListRequest, callback func(response *GetUserSourceRepoListResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetUserSourceRepoListResponse
+ var err error
+ defer close(result)
+ response, err = client.GetUserSourceRepoList(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetUserSourceRepoListRequest is the request struct for api GetUserSourceRepoList
+type GetUserSourceRepoListRequest struct {
+ *requests.RoaRequest
+ SourceAccountId requests.Integer `position:"Path" name:"SourceAccountId"`
+}
+
+// GetUserSourceRepoListResponse is the response struct for api GetUserSourceRepoList
+type GetUserSourceRepoListResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetUserSourceRepoListRequest creates a request to invoke GetUserSourceRepoList API
+func CreateGetUserSourceRepoListRequest() (request *GetUserSourceRepoListRequest) {
+ request = &GetUserSourceRepoListRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetUserSourceRepoList", "/users/sourceAccount/[SourceAccountId]/repos", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetUserSourceRepoListResponse creates a response to parse from GetUserSourceRepoList response
+func CreateGetUserSourceRepoListResponse() (response *GetUserSourceRepoListResponse) {
+ response = &GetUserSourceRepoListResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_user_source_repo_ref_list.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_user_source_repo_ref_list.go
new file mode 100644
index 000000000..beec4c100
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/get_user_source_repo_ref_list.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// GetUserSourceRepoRefList invokes the cr.GetUserSourceRepoRefList API synchronously
+// api document: https://help.aliyun.com/api/cr/getusersourcereporeflist.html
+func (client *Client) GetUserSourceRepoRefList(request *GetUserSourceRepoRefListRequest) (response *GetUserSourceRepoRefListResponse, err error) {
+ response = CreateGetUserSourceRepoRefListResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// GetUserSourceRepoRefListWithChan invokes the cr.GetUserSourceRepoRefList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getusersourcereporeflist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetUserSourceRepoRefListWithChan(request *GetUserSourceRepoRefListRequest) (<-chan *GetUserSourceRepoRefListResponse, <-chan error) {
+ responseChan := make(chan *GetUserSourceRepoRefListResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.GetUserSourceRepoRefList(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// GetUserSourceRepoRefListWithCallback invokes the cr.GetUserSourceRepoRefList API asynchronously
+// api document: https://help.aliyun.com/api/cr/getusersourcereporeflist.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) GetUserSourceRepoRefListWithCallback(request *GetUserSourceRepoRefListRequest, callback func(response *GetUserSourceRepoRefListResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *GetUserSourceRepoRefListResponse
+ var err error
+ defer close(result)
+ response, err = client.GetUserSourceRepoRefList(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// GetUserSourceRepoRefListRequest is the request struct for api GetUserSourceRepoRefList
+type GetUserSourceRepoRefListRequest struct {
+ *requests.RoaRequest
+ SourceAccountId requests.Integer `position:"Path" name:"SourceAccountId"`
+ SourceRepoName string `position:"Path" name:"SourceRepoName"`
+ SourceRepoNamespace string `position:"Path" name:"SourceRepoNamespace"`
+}
+
+// GetUserSourceRepoRefListResponse is the response struct for api GetUserSourceRepoRefList
+type GetUserSourceRepoRefListResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateGetUserSourceRepoRefListRequest creates a request to invoke GetUserSourceRepoRefList API
+func CreateGetUserSourceRepoRefListRequest() (request *GetUserSourceRepoRefListRequest) {
+ request = &GetUserSourceRepoRefListRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "GetUserSourceRepoRefList", "/users/sourceAccount/[SourceAccountId]/repos/[SourceRepoNamespace]/[SourceRepoName]/refs", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateGetUserSourceRepoRefListResponse creates a response to parse from GetUserSourceRepoRefList response
+func CreateGetUserSourceRepoRefListResponse() (response *GetUserSourceRepoRefListResponse) {
+ response = &GetUserSourceRepoRefListResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/search_repo.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/search_repo.go
new file mode 100644
index 000000000..8ba627140
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/search_repo.go
@@ -0,0 +1,106 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// SearchRepo invokes the cr.SearchRepo API synchronously
+// api document: https://help.aliyun.com/api/cr/searchrepo.html
+func (client *Client) SearchRepo(request *SearchRepoRequest) (response *SearchRepoResponse, err error) {
+ response = CreateSearchRepoResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// SearchRepoWithChan invokes the cr.SearchRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/searchrepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) SearchRepoWithChan(request *SearchRepoRequest) (<-chan *SearchRepoResponse, <-chan error) {
+ responseChan := make(chan *SearchRepoResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.SearchRepo(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// SearchRepoWithCallback invokes the cr.SearchRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/searchrepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) SearchRepoWithCallback(request *SearchRepoRequest, callback func(response *SearchRepoResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *SearchRepoResponse
+ var err error
+ defer close(result)
+ response, err = client.SearchRepo(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// SearchRepoRequest is the request struct for api SearchRepo
+type SearchRepoRequest struct {
+ *requests.RoaRequest
+ Origin string `position:"Query" name:"Origin"`
+ PageSize requests.Integer `position:"Query" name:"PageSize"`
+ Page requests.Integer `position:"Query" name:"Page"`
+ Keyword string `position:"Query" name:"Keyword"`
+}
+
+// SearchRepoResponse is the response struct for api SearchRepo
+type SearchRepoResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateSearchRepoRequest creates a request to invoke SearchRepo API
+func CreateSearchRepoRequest() (request *SearchRepoRequest) {
+ request = &SearchRepoRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "SearchRepo", "/search", "cr", "openAPI")
+ request.Method = requests.GET
+ return
+}
+
+// CreateSearchRepoResponse creates a response to parse from SearchRepo response
+func CreateSearchRepoResponse() (response *SearchRepoResponse) {
+ response = &SearchRepoResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
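
The WithChan variants return response and error channels that are buffered with capacity 1 and closed once the task finishes, so each yields at most one value before reads start returning nil. A hedged sketch of consuming SearchRepoWithChan sequentially (reading the response first, then the error, which avoids racing a select against the already-closed channels), reusing the placeholder client from the sketches above:

    request := cr.CreateSearchRepoRequest()
    request.Keyword = "nginx" // placeholder search term

    responseChan, errChan := client.SearchRepoWithChan(request)
    response := <-responseChan // nil if the call failed; the channel is then closed
    err := <-errChan           // nil on success; carries the error otherwise
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(response.GetHttpContentString())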
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/start_image_scan.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/start_image_scan.go
new file mode 100644
index 000000000..82b4aebe5
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/start_image_scan.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// StartImageScan invokes the cr.StartImageScan API synchronously
+// api document: https://help.aliyun.com/api/cr/startimagescan.html
+func (client *Client) StartImageScan(request *StartImageScanRequest) (response *StartImageScanResponse, err error) {
+ response = CreateStartImageScanResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// StartImageScanWithChan invokes the cr.StartImageScan API asynchronously
+// api document: https://help.aliyun.com/api/cr/startimagescan.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) StartImageScanWithChan(request *StartImageScanRequest) (<-chan *StartImageScanResponse, <-chan error) {
+ responseChan := make(chan *StartImageScanResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.StartImageScan(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// StartImageScanWithCallback invokes the cr.StartImageScan API asynchronously
+// api document: https://help.aliyun.com/api/cr/startimagescan.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) StartImageScanWithCallback(request *StartImageScanRequest, callback func(response *StartImageScanResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *StartImageScanResponse
+ var err error
+ defer close(result)
+ response, err = client.StartImageScan(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// StartImageScanRequest is the request struct for api StartImageScan
+type StartImageScanRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ Tag string `position:"Path" name:"Tag"`
+}
+
+// StartImageScanResponse is the response struct for api StartImageScan
+type StartImageScanResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateStartImageScanRequest creates a request to invoke StartImageScan API
+func CreateStartImageScanRequest() (request *StartImageScanRequest) {
+ request = &StartImageScanRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "StartImageScan", "/repos/[RepoNamespace]/[RepoName]/tags/[Tag]/scan", "cr", "openAPI")
+ request.Method = requests.PUT
+ return
+}
+
+// CreateStartImageScanResponse creates a response to parse from StartImageScan response
+func CreateStartImageScanResponse() (response *StartImageScanResponse) {
+ response = &StartImageScanResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/start_repo_build.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/start_repo_build.go
new file mode 100644
index 000000000..a8cff3503
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/start_repo_build.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// StartRepoBuild invokes the cr.StartRepoBuild API synchronously
+// api document: https://help.aliyun.com/api/cr/startrepobuild.html
+func (client *Client) StartRepoBuild(request *StartRepoBuildRequest) (response *StartRepoBuildResponse, err error) {
+ response = CreateStartRepoBuildResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// StartRepoBuildWithChan invokes the cr.StartRepoBuild API asynchronously
+// api document: https://help.aliyun.com/api/cr/startrepobuild.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) StartRepoBuildWithChan(request *StartRepoBuildRequest) (<-chan *StartRepoBuildResponse, <-chan error) {
+ responseChan := make(chan *StartRepoBuildResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.StartRepoBuild(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// StartRepoBuildWithCallback invokes the cr.StartRepoBuild API asynchronously
+// api document: https://help.aliyun.com/api/cr/startrepobuild.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) StartRepoBuildWithCallback(request *StartRepoBuildRequest, callback func(response *StartRepoBuildResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *StartRepoBuildResponse
+ var err error
+ defer close(result)
+ response, err = client.StartRepoBuild(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// StartRepoBuildRequest is the request struct for api StartRepoBuild
+type StartRepoBuildRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// StartRepoBuildResponse is the response struct for api StartRepoBuild
+type StartRepoBuildResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateStartRepoBuildRequest creates a request to invoke StartRepoBuild API
+func CreateStartRepoBuildRequest() (request *StartRepoBuildRequest) {
+ request = &StartRepoBuildRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "StartRepoBuild", "/repos/[RepoNamespace]/[RepoName]/build", "cr", "openAPI")
+ request.Method = requests.PUT
+ return
+}
+
+// CreateStartRepoBuildResponse creates a response to parse from StartRepoBuild response
+func CreateStartRepoBuildResponse() (response *StartRepoBuildResponse) {
+ response = &StartRepoBuildResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
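
The WithChan variant above returns two buffered (capacity 1) channels that are both closed once the async task finishes, so a nil receive from the response channel is the signal to drain the error channel. A sketch, assuming a *cr.Client built as in the previous example:

```go
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

// startBuild demonstrates the channel-based variant. Both channels are
// closed when the async task completes, so receiving nil from the
// response channel means the error channel holds the failure.
func startBuild(client *cr.Client) {
	request := cr.CreateStartRepoBuildRequest()
	request.RepoNamespace = "mynamespace"
	request.RepoName = "myrepo"

	responseChan, errChan := client.StartRepoBuildWithChan(request)
	if response := <-responseChan; response != nil {
		fmt.Println("build started, HTTP status:", response.GetHttpStatus())
	} else if err := <-errChan; err != nil {
		fmt.Println("build failed:", err)
	}
}
```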
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/start_repo_build_by_rule.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/start_repo_build_by_rule.go
new file mode 100644
index 000000000..401f77544
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/start_repo_build_by_rule.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// StartRepoBuildByRule invokes the cr.StartRepoBuildByRule API synchronously
+// api document: https://help.aliyun.com/api/cr/startrepobuildbyrule.html
+func (client *Client) StartRepoBuildByRule(request *StartRepoBuildByRuleRequest) (response *StartRepoBuildByRuleResponse, err error) {
+ response = CreateStartRepoBuildByRuleResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// StartRepoBuildByRuleWithChan invokes the cr.StartRepoBuildByRule API asynchronously
+// api document: https://help.aliyun.com/api/cr/startrepobuildbyrule.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) StartRepoBuildByRuleWithChan(request *StartRepoBuildByRuleRequest) (<-chan *StartRepoBuildByRuleResponse, <-chan error) {
+ responseChan := make(chan *StartRepoBuildByRuleResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.StartRepoBuildByRule(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// StartRepoBuildByRuleWithCallback invokes the cr.StartRepoBuildByRule API asynchronously
+// api document: https://help.aliyun.com/api/cr/startrepobuildbyrule.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) StartRepoBuildByRuleWithCallback(request *StartRepoBuildByRuleRequest, callback func(response *StartRepoBuildByRuleResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *StartRepoBuildByRuleResponse
+ var err error
+ defer close(result)
+ response, err = client.StartRepoBuildByRule(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// StartRepoBuildByRuleRequest is the request struct for api StartRepoBuildByRule
+type StartRepoBuildByRuleRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ BuildRuleId requests.Integer `position:"Path" name:"BuildRuleId"`
+}
+
+// StartRepoBuildByRuleResponse is the response struct for api StartRepoBuildByRule
+type StartRepoBuildByRuleResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateStartRepoBuildByRuleRequest creates a request to invoke StartRepoBuildByRule API
+func CreateStartRepoBuildByRuleRequest() (request *StartRepoBuildByRuleRequest) {
+ request = &StartRepoBuildByRuleRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "StartRepoBuildByRule", "/repos/[RepoNamespace]/[RepoName]/rules/[BuildRuleId]/build", "cr", "openAPI")
+ request.Method = requests.PUT
+ return
+}
+
+// CreateStartRepoBuildByRuleResponse creates a response to parse from StartRepoBuildByRule response
+func CreateStartRepoBuildByRuleResponse() (response *StartRepoBuildByRuleResponse) {
+ response = &StartRepoBuildByRuleResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
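
The WithCallback variant instead reports completion on a channel of int: 1 after the callback has run inside the async task, 0 if scheduling the task failed. A sketch under the same client assumption; the rule ID is illustrative:

```go
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

// startBuildByRule demonstrates the callback variant. A bare receive on
// the returned channel is enough to wait for the callback to finish.
func startBuildByRule(client *cr.Client) {
	request := cr.CreateStartRepoBuildByRuleRequest()
	request.RepoNamespace = "mynamespace"
	request.RepoName = "myrepo"
	request.BuildRuleId = requests.NewInteger(42) // rule ID is illustrative

	done := client.StartRepoBuildByRuleWithCallback(request,
		func(response *cr.StartRepoBuildByRuleResponse, err error) {
			if err != nil {
				fmt.Println("build-by-rule failed:", err)
				return
			}
			fmt.Println("build triggered, HTTP status:", response.GetHttpStatus())
		})
	<-done // block until the callback has completed
}
```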
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_namespace.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_namespace.go
new file mode 100644
index 000000000..d6dc59b65
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_namespace.go
@@ -0,0 +1,103 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// UpdateNamespace invokes the cr.UpdateNamespace API synchronously
+// api document: https://help.aliyun.com/api/cr/updatenamespace.html
+func (client *Client) UpdateNamespace(request *UpdateNamespaceRequest) (response *UpdateNamespaceResponse, err error) {
+ response = CreateUpdateNamespaceResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// UpdateNamespaceWithChan invokes the cr.UpdateNamespace API asynchronously
+// api document: https://help.aliyun.com/api/cr/updatenamespace.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateNamespaceWithChan(request *UpdateNamespaceRequest) (<-chan *UpdateNamespaceResponse, <-chan error) {
+ responseChan := make(chan *UpdateNamespaceResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.UpdateNamespace(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// UpdateNamespaceWithCallback invokes the cr.UpdateNamespace API asynchronously
+// api document: https://help.aliyun.com/api/cr/updatenamespace.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateNamespaceWithCallback(request *UpdateNamespaceRequest, callback func(response *UpdateNamespaceResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *UpdateNamespaceResponse
+ var err error
+ defer close(result)
+ response, err = client.UpdateNamespace(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// UpdateNamespaceRequest is the request struct for api UpdateNamespace
+type UpdateNamespaceRequest struct {
+ *requests.RoaRequest
+ Namespace string `position:"Path" name:"Namespace"`
+}
+
+// UpdateNamespaceResponse is the response struct for api UpdateNamespace
+type UpdateNamespaceResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateUpdateNamespaceRequest creates a request to invoke UpdateNamespace API
+func CreateUpdateNamespaceRequest() (request *UpdateNamespaceRequest) {
+ request = &UpdateNamespaceRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "UpdateNamespace", "/namespace/[Namespace]", "cr", "openAPI")
+ request.Method = requests.POST
+ return
+}
+
+// CreateUpdateNamespaceResponse creates a response to parse from UpdateNamespace response
+func CreateUpdateNamespaceResponse() (response *UpdateNamespaceResponse) {
+ response = &UpdateNamespaceResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_namespace_authorization.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_namespace_authorization.go
new file mode 100644
index 000000000..8e0f8dcdc
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_namespace_authorization.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// UpdateNamespaceAuthorization invokes the cr.UpdateNamespaceAuthorization API synchronously
+// api document: https://help.aliyun.com/api/cr/updatenamespaceauthorization.html
+func (client *Client) UpdateNamespaceAuthorization(request *UpdateNamespaceAuthorizationRequest) (response *UpdateNamespaceAuthorizationResponse, err error) {
+ response = CreateUpdateNamespaceAuthorizationResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// UpdateNamespaceAuthorizationWithChan invokes the cr.UpdateNamespaceAuthorization API asynchronously
+// api document: https://help.aliyun.com/api/cr/updatenamespaceauthorization.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateNamespaceAuthorizationWithChan(request *UpdateNamespaceAuthorizationRequest) (<-chan *UpdateNamespaceAuthorizationResponse, <-chan error) {
+ responseChan := make(chan *UpdateNamespaceAuthorizationResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.UpdateNamespaceAuthorization(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// UpdateNamespaceAuthorizationWithCallback invokes the cr.UpdateNamespaceAuthorization API asynchronously
+// api document: https://help.aliyun.com/api/cr/updatenamespaceauthorization.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateNamespaceAuthorizationWithCallback(request *UpdateNamespaceAuthorizationRequest, callback func(response *UpdateNamespaceAuthorizationResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *UpdateNamespaceAuthorizationResponse
+ var err error
+ defer close(result)
+ response, err = client.UpdateNamespaceAuthorization(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// UpdateNamespaceAuthorizationRequest is the request struct for api UpdateNamespaceAuthorization
+type UpdateNamespaceAuthorizationRequest struct {
+ *requests.RoaRequest
+ AuthorizeId requests.Integer `position:"Path" name:"AuthorizeId"`
+ Namespace string `position:"Path" name:"Namespace"`
+}
+
+// UpdateNamespaceAuthorizationResponse is the response struct for api UpdateNamespaceAuthorization
+type UpdateNamespaceAuthorizationResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateUpdateNamespaceAuthorizationRequest creates a request to invoke UpdateNamespaceAuthorization API
+func CreateUpdateNamespaceAuthorizationRequest() (request *UpdateNamespaceAuthorizationRequest) {
+ request = &UpdateNamespaceAuthorizationRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "UpdateNamespaceAuthorization", "/namespace/[Namespace]/authorizations/[AuthorizeId]", "cr", "openAPI")
+ request.Method = requests.POST
+ return
+}
+
+// CreateUpdateNamespaceAuthorizationResponse creates a response to parse from UpdateNamespaceAuthorization response
+func CreateUpdateNamespaceAuthorizationResponse() (response *UpdateNamespaceAuthorizationResponse) {
+ response = &UpdateNamespaceAuthorizationResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo.go
new file mode 100644
index 000000000..0ce801a6c
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// UpdateRepo invokes the cr.UpdateRepo API synchronously
+// api document: https://help.aliyun.com/api/cr/updaterepo.html
+func (client *Client) UpdateRepo(request *UpdateRepoRequest) (response *UpdateRepoResponse, err error) {
+ response = CreateUpdateRepoResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// UpdateRepoWithChan invokes the cr.UpdateRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/updaterepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateRepoWithChan(request *UpdateRepoRequest) (<-chan *UpdateRepoResponse, <-chan error) {
+ responseChan := make(chan *UpdateRepoResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.UpdateRepo(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// UpdateRepoWithCallback invokes the cr.UpdateRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/updaterepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateRepoWithCallback(request *UpdateRepoRequest, callback func(response *UpdateRepoResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *UpdateRepoResponse
+ var err error
+ defer close(result)
+ response, err = client.UpdateRepo(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// UpdateRepoRequest is the request struct for api UpdateRepo
+type UpdateRepoRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// UpdateRepoResponse is the response struct for api UpdateRepo
+type UpdateRepoResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateUpdateRepoRequest creates a request to invoke UpdateRepo API
+func CreateUpdateRepoRequest() (request *UpdateRepoRequest) {
+ request = &UpdateRepoRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "UpdateRepo", "/repos/[RepoNamespace]/[RepoName]", "cr", "openAPI")
+ request.Method = requests.POST
+ return
+}
+
+// CreateUpdateRepoResponse creates a response to parse from UpdateRepo response
+func CreateUpdateRepoResponse() (response *UpdateRepoResponse) {
+ response = &UpdateRepoResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo_authorization.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo_authorization.go
new file mode 100644
index 000000000..be3a074c9
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo_authorization.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// UpdateRepoAuthorization invokes the cr.UpdateRepoAuthorization API synchronously
+// api document: https://help.aliyun.com/api/cr/updaterepoauthorization.html
+func (client *Client) UpdateRepoAuthorization(request *UpdateRepoAuthorizationRequest) (response *UpdateRepoAuthorizationResponse, err error) {
+ response = CreateUpdateRepoAuthorizationResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// UpdateRepoAuthorizationWithChan invokes the cr.UpdateRepoAuthorization API asynchronously
+// api document: https://help.aliyun.com/api/cr/updaterepoauthorization.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateRepoAuthorizationWithChan(request *UpdateRepoAuthorizationRequest) (<-chan *UpdateRepoAuthorizationResponse, <-chan error) {
+ responseChan := make(chan *UpdateRepoAuthorizationResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.UpdateRepoAuthorization(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// UpdateRepoAuthorizationWithCallback invokes the cr.UpdateRepoAuthorization API asynchronously
+// api document: https://help.aliyun.com/api/cr/updaterepoauthorization.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateRepoAuthorizationWithCallback(request *UpdateRepoAuthorizationRequest, callback func(response *UpdateRepoAuthorizationResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *UpdateRepoAuthorizationResponse
+ var err error
+ defer close(result)
+ response, err = client.UpdateRepoAuthorization(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// UpdateRepoAuthorizationRequest is the request struct for api UpdateRepoAuthorization
+type UpdateRepoAuthorizationRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ AuthorizeId requests.Integer `position:"Path" name:"AuthorizeId"`
+}
+
+// UpdateRepoAuthorizationResponse is the response struct for api UpdateRepoAuthorization
+type UpdateRepoAuthorizationResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateUpdateRepoAuthorizationRequest creates a request to invoke UpdateRepoAuthorization API
+func CreateUpdateRepoAuthorizationRequest() (request *UpdateRepoAuthorizationRequest) {
+ request = &UpdateRepoAuthorizationRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "UpdateRepoAuthorization", "/repos/[RepoNamespace]/[RepoName]/authorizations/[AuthorizeId]", "cr", "openAPI")
+ request.Method = requests.POST
+ return
+}
+
+// CreateUpdateRepoAuthorizationResponse creates a response to parse from UpdateRepoAuthorization response
+func CreateUpdateRepoAuthorizationResponse() (response *UpdateRepoAuthorizationResponse) {
+ response = &UpdateRepoAuthorizationResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo_build_rule.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo_build_rule.go
new file mode 100644
index 000000000..e0cbebdb0
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo_build_rule.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// UpdateRepoBuildRule invokes the cr.UpdateRepoBuildRule API synchronously
+// api document: https://help.aliyun.com/api/cr/updaterepobuildrule.html
+func (client *Client) UpdateRepoBuildRule(request *UpdateRepoBuildRuleRequest) (response *UpdateRepoBuildRuleResponse, err error) {
+ response = CreateUpdateRepoBuildRuleResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// UpdateRepoBuildRuleWithChan invokes the cr.UpdateRepoBuildRule API asynchronously
+// api document: https://help.aliyun.com/api/cr/updaterepobuildrule.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateRepoBuildRuleWithChan(request *UpdateRepoBuildRuleRequest) (<-chan *UpdateRepoBuildRuleResponse, <-chan error) {
+ responseChan := make(chan *UpdateRepoBuildRuleResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.UpdateRepoBuildRule(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// UpdateRepoBuildRuleWithCallback invokes the cr.UpdateRepoBuildRule API asynchronously
+// api document: https://help.aliyun.com/api/cr/updaterepobuildrule.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateRepoBuildRuleWithCallback(request *UpdateRepoBuildRuleRequest, callback func(response *UpdateRepoBuildRuleResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *UpdateRepoBuildRuleResponse
+ var err error
+ defer close(result)
+ response, err = client.UpdateRepoBuildRule(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// UpdateRepoBuildRuleRequest is the request struct for api UpdateRepoBuildRule
+type UpdateRepoBuildRuleRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+ BuildRuleId requests.Integer `position:"Path" name:"BuildRuleId"`
+}
+
+// UpdateRepoBuildRuleResponse is the response struct for api UpdateRepoBuildRule
+type UpdateRepoBuildRuleResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateUpdateRepoBuildRuleRequest creates a request to invoke UpdateRepoBuildRule API
+func CreateUpdateRepoBuildRuleRequest() (request *UpdateRepoBuildRuleRequest) {
+ request = &UpdateRepoBuildRuleRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "UpdateRepoBuildRule", "/repos/[RepoNamespace]/[RepoName]/rules/[BuildRuleId]", "cr", "openAPI")
+ request.Method = requests.POST
+ return
+}
+
+// CreateUpdateRepoBuildRuleResponse creates a response to parse from UpdateRepoBuildRule response
+func CreateUpdateRepoBuildRuleResponse() (response *UpdateRepoBuildRuleResponse) {
+ response = &UpdateRepoBuildRuleResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo_source_repo.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo_source_repo.go
new file mode 100644
index 000000000..153cd1ca3
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo_source_repo.go
@@ -0,0 +1,104 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// UpdateRepoSourceRepo invokes the cr.UpdateRepoSourceRepo API synchronously
+// api document: https://help.aliyun.com/api/cr/updatereposourcerepo.html
+func (client *Client) UpdateRepoSourceRepo(request *UpdateRepoSourceRepoRequest) (response *UpdateRepoSourceRepoResponse, err error) {
+ response = CreateUpdateRepoSourceRepoResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// UpdateRepoSourceRepoWithChan invokes the cr.UpdateRepoSourceRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/updatereposourcerepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateRepoSourceRepoWithChan(request *UpdateRepoSourceRepoRequest) (<-chan *UpdateRepoSourceRepoResponse, <-chan error) {
+ responseChan := make(chan *UpdateRepoSourceRepoResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.UpdateRepoSourceRepo(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// UpdateRepoSourceRepoWithCallback invokes the cr.UpdateRepoSourceRepo API asynchronously
+// api document: https://help.aliyun.com/api/cr/updatereposourcerepo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateRepoSourceRepoWithCallback(request *UpdateRepoSourceRepoRequest, callback func(response *UpdateRepoSourceRepoResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *UpdateRepoSourceRepoResponse
+ var err error
+ defer close(result)
+ response, err = client.UpdateRepoSourceRepo(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// UpdateRepoSourceRepoRequest is the request struct for api UpdateRepoSourceRepo
+type UpdateRepoSourceRepoRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// UpdateRepoSourceRepoResponse is the response struct for api UpdateRepoSourceRepo
+type UpdateRepoSourceRepoResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateUpdateRepoSourceRepoRequest creates a request to invoke UpdateRepoSourceRepo API
+func CreateUpdateRepoSourceRepoRequest() (request *UpdateRepoSourceRepoRequest) {
+ request = &UpdateRepoSourceRepoRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "UpdateRepoSourceRepo", "/repos/[RepoNamespace]/[RepoName]/sourceRepo", "cr", "openAPI")
+ request.Method = requests.POST
+ return
+}
+
+// CreateUpdateRepoSourceRepoResponse creates a response to parse from UpdateRepoSourceRepo response
+func CreateUpdateRepoSourceRepoResponse() (response *UpdateRepoSourceRepoResponse) {
+ response = &UpdateRepoSourceRepoResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo_webhook.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo_webhook.go
new file mode 100644
index 000000000..dfc022321
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_repo_webhook.go
@@ -0,0 +1,105 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// UpdateRepoWebhook invokes the cr.UpdateRepoWebhook API synchronously
+// api document: https://help.aliyun.com/api/cr/updaterepowebhook.html
+func (client *Client) UpdateRepoWebhook(request *UpdateRepoWebhookRequest) (response *UpdateRepoWebhookResponse, err error) {
+ response = CreateUpdateRepoWebhookResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// UpdateRepoWebhookWithChan invokes the cr.UpdateRepoWebhook API asynchronously
+// api document: https://help.aliyun.com/api/cr/updaterepowebhook.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateRepoWebhookWithChan(request *UpdateRepoWebhookRequest) (<-chan *UpdateRepoWebhookResponse, <-chan error) {
+ responseChan := make(chan *UpdateRepoWebhookResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.UpdateRepoWebhook(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// UpdateRepoWebhookWithCallback invokes the cr.UpdateRepoWebhook API asynchronously
+// api document: https://help.aliyun.com/api/cr/updaterepowebhook.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateRepoWebhookWithCallback(request *UpdateRepoWebhookRequest, callback func(response *UpdateRepoWebhookResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *UpdateRepoWebhookResponse
+ var err error
+ defer close(result)
+ response, err = client.UpdateRepoWebhook(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// UpdateRepoWebhookRequest is the request struct for api UpdateRepoWebhook
+type UpdateRepoWebhookRequest struct {
+ *requests.RoaRequest
+ RepoNamespace string `position:"Path" name:"RepoNamespace"`
+ WebhookId requests.Integer `position:"Path" name:"WebhookId"`
+ RepoName string `position:"Path" name:"RepoName"`
+}
+
+// UpdateRepoWebhookResponse is the response struct for api UpdateRepoWebhook
+type UpdateRepoWebhookResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateUpdateRepoWebhookRequest creates a request to invoke UpdateRepoWebhook API
+func CreateUpdateRepoWebhookRequest() (request *UpdateRepoWebhookRequest) {
+ request = &UpdateRepoWebhookRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "UpdateRepoWebhook", "/repos/[RepoNamespace]/[RepoName]/webhooks/[WebhookId]", "cr", "openAPI")
+ request.Method = requests.POST
+ return
+}
+
+// CreateUpdateRepoWebhookResponse creates a response to parse from UpdateRepoWebhook response
+func CreateUpdateRepoWebhookResponse() (response *UpdateRepoWebhookResponse) {
+ response = &UpdateRepoWebhookResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
diff --git a/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_user_info.go b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_user_info.go
new file mode 100644
index 000000000..688b1c847
--- /dev/null
+++ b/src/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/cr/update_user_info.go
@@ -0,0 +1,102 @@
+package cr
+
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+//
+// Code generated by Alibaba Cloud SDK Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+ "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+)
+
+// UpdateUserInfo invokes the cr.UpdateUserInfo API synchronously
+// api document: https://help.aliyun.com/api/cr/updateuserinfo.html
+func (client *Client) UpdateUserInfo(request *UpdateUserInfoRequest) (response *UpdateUserInfoResponse, err error) {
+ response = CreateUpdateUserInfoResponse()
+ err = client.DoAction(request, response)
+ return
+}
+
+// UpdateUserInfoWithChan invokes the cr.UpdateUserInfo API asynchronously
+// api document: https://help.aliyun.com/api/cr/updateuserinfo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateUserInfoWithChan(request *UpdateUserInfoRequest) (<-chan *UpdateUserInfoResponse, <-chan error) {
+ responseChan := make(chan *UpdateUserInfoResponse, 1)
+ errChan := make(chan error, 1)
+ err := client.AddAsyncTask(func() {
+ defer close(responseChan)
+ defer close(errChan)
+ response, err := client.UpdateUserInfo(request)
+ if err != nil {
+ errChan <- err
+ } else {
+ responseChan <- response
+ }
+ })
+ if err != nil {
+ errChan <- err
+ close(responseChan)
+ close(errChan)
+ }
+ return responseChan, errChan
+}
+
+// UpdateUserInfoWithCallback invokes the cr.UpdateUserInfo API asynchronously
+// api document: https://help.aliyun.com/api/cr/updateuserinfo.html
+// asynchronous document: https://help.aliyun.com/document_detail/66220.html
+func (client *Client) UpdateUserInfoWithCallback(request *UpdateUserInfoRequest, callback func(response *UpdateUserInfoResponse, err error)) <-chan int {
+ result := make(chan int, 1)
+ err := client.AddAsyncTask(func() {
+ var response *UpdateUserInfoResponse
+ var err error
+ defer close(result)
+ response, err = client.UpdateUserInfo(request)
+ callback(response, err)
+ result <- 1
+ })
+ if err != nil {
+ defer close(result)
+ callback(nil, err)
+ result <- 0
+ }
+ return result
+}
+
+// UpdateUserInfoRequest is the request struct for api UpdateUserInfo
+type UpdateUserInfoRequest struct {
+ *requests.RoaRequest
+}
+
+// UpdateUserInfoResponse is the response struct for api UpdateUserInfo
+type UpdateUserInfoResponse struct {
+ *responses.BaseResponse
+}
+
+// CreateUpdateUserInfoRequest creates a request to invoke UpdateUserInfo API
+func CreateUpdateUserInfoRequest() (request *UpdateUserInfoRequest) {
+ request = &UpdateUserInfoRequest{
+ RoaRequest: &requests.RoaRequest{},
+ }
+ request.InitWithApiInfo("cr", "2016-06-07", "UpdateUserInfo", "/users", "cr", "openAPI")
+ request.Method = requests.POST
+ return
+}
+
+// CreateUpdateUserInfoResponse creates a response to parse from UpdateUserInfo response
+func CreateUpdateUserInfoResponse() (response *UpdateUserInfoResponse) {
+ response = &UpdateUserInfoResponse{
+ BaseResponse: &responses.BaseResponse{},
+ }
+ return
+}
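
UpdateUserInfoRequest above declares no path or query fields, which is typical of these Update* endpoints: the mutable attributes travel in the POST body. A hedged sketch of supplying one follows; SetContent and SetContentType are assumed to be inherited from the SDK's base request type in sdk/requests, and the JSON payload shape is purely illustrative:

```go
package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/services/cr"
)

// updateUser sketches an Update* call that carries its data in the HTTP
// body. SetContent/SetContentType are assumed from the embedded base
// request type; the payload schema here is illustrative only.
func updateUser(client *cr.Client) {
	request := cr.CreateUpdateUserInfoRequest()
	request.SetContent([]byte(`{"User":{"Password":"<newPassword>"}}`))
	request.SetContentType("application/json")

	response, err := client.UpdateUserInfo(request)
	if err != nil {
		fmt.Println("update failed:", err)
		return
	}
	fmt.Println("success:", response.IsSuccess())
}
```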
diff --git a/src/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/src/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/src/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
new file mode 100644
index 000000000..899129ecc
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/src/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 000000000..99849c0e1
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,164 @@
+// Package awserr represents API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Get error details
+// log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+// // Prints out full error message, including original error if there was one.
+// log.Println("Error:", awsErr.Error())
+//
+// // Get original error
+// if origErr := awsErr.OrigErr(); origErr != nil {
+// // operate on original error.
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type Error interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+	// Returns the original errors if any were set. A nil slice is returned if not set.
+ OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+ // Satisfy the base Error interface.
+ Error
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
+func New(code, message string, origErr error) Error {
+ var errs []error
+ if origErr != nil {
+ errs = append(errs, origErr)
+ }
+ return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns a BatchedErrors built from the provided code,
+// message, and slice of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+ return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// before reaching the service, such as on a connection error.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if reqerr, ok := err.(RequestFailure); ok {
+// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+// } else {
+// log.Println("Error:", err.Error())
+// }
+// }
+//
+// Combined with awserr.Error:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Generic AWS Error with Code, Message, and original error (if any)
+// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+// if reqErr, ok := err.(awserr.RequestFailure); ok {
+// // A service error occurred
+// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type RequestFailure interface {
+ Error
+
+ // The status code of the HTTP response.
+ StatusCode() int
+
+ // The request ID returned by the service for a request failure. This will
+ // be empty if no request ID is available such as the request failed due
+ // to a connection error.
+ RequestID() string
+}
+
+// NewRequestFailure returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all errors that involve service requests, even if
+// the request failed without a service response but still produced an HTTP
+// status code that may be meaningful.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+ return newRequestError(err, statusCode, reqID)
+}
+
+// UnmarshalError is the interface for errors returned when the SDK fails to unmarshal data.
+type UnmarshalError interface {
+ awsError
+ Bytes() []byte
+}
+
+// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding
+// the bytes that fail to unmarshal to the error.
+func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError {
+ return &unmarshalError{
+ awsError: New("UnmarshalError", msg, err),
+ bytes: bytes,
+ }
+}
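
The constructors in this file compose: New wraps a low-level error with a code and message, and NewRequestFailure layers HTTP status and request-ID details on top. A small self-contained sketch of building and then inspecting such an error through the interfaces defined above (the code, status, and request ID are made-up values):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Wrap a low-level error with a classification code and message.
	base := awserr.New("SerializationError", "failed to decode response", errors.New("unexpected EOF"))

	// Attach HTTP-level request failure details.
	reqErr := awserr.NewRequestFailure(base, 500, "ABC123DEF456")

	// Callers recover the details through the interfaces, as the package
	// comments above suggest.
	var aerr awserr.Error = reqErr
	fmt.Println(aerr.Code(), aerr.Message(), aerr.OrigErr())

	if failure, ok := aerr.(awserr.RequestFailure); ok {
		fmt.Println(failure.StatusCode(), failure.RequestID())
	}
}
```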
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/src/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
new file mode 100644
index 000000000..a2c5817c4
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -0,0 +1,221 @@
+package awserr
+
+import (
+ "encoding/hex"
+ "fmt"
+)
+
+// SprintError returns the formatted error string built from the code, message,
+// extra, and origErr values.
+//
+// Both extra and origErr are optional; their lines are appended only when they
+// are provided.
+func SprintError(code, message, extra string, origErr error) string {
+ msg := fmt.Sprintf("%s: %s", code, message)
+ if extra != "" {
+ msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+ }
+ if origErr != nil {
+ msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+ }
+ return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error. Also
+// for any error which does not fit into a specific error wrapper type.
+type baseError struct {
+ // Classification of error
+ code string
+
+ // Detailed information about error
+ message string
+
+ // Optional original error this error is based off of. Allows building
+ // chained errors.
+ errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short, whitespace-free phrase depicting the classification of
+// the error that is being created.
+//
+// message is a free-form string containing detailed information about the
+// error.
+//
+// origErrs contains the error objects which will be nested under the new
+// error to be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+ b := &baseError{
+ code: code,
+ message: message,
+ errs: origErrs,
+ }
+
+ return b
+}
+
+// Error returns the string representation of the error.
+//
+// See SprintError for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+ size := len(b.errs)
+ if size > 0 {
+ return SprintError(b.code, b.message, "", errorList(b.errs))
+ }
+
+ return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+ return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+ return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+ return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+ switch len(b.errs) {
+ case 0:
+ return nil
+ case 1:
+ return b.errs[0]
+ default:
+ if err, ok := b.errs[0].(Error); ok {
+ return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+ }
+ return NewBatchError("BatchedErrors",
+ "multiple errors occurred", b.errs)
+ }
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (b baseError) OrigErrs() []error {
+ return b.errs
+}
+
+// awsError is an alias so that the Error interface type can be included as an
+// anonymous field in the requestError struct without conflicting with the
+// error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+ awsError
+ statusCode int
+ requestID string
+ bytes []byte
+}
+
+// newRequestError returns a wrapped error with additional information for
+// the request status code and service request ID.
+//
+// Should be used to wrap all requests which involve service requests, even
+// if the request failed without a service response but had an HTTP status
+// code that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+ return &requestError{
+ awsError: err,
+ statusCode: statusCode,
+ requestID: requestID,
+ }
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: %s",
+ r.statusCode, r.requestID)
+ return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+ return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+ return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+ return r.requestID
+}
+
+// OrigErrs returns the original errors if one was set. An empty slice is
+// returned if no error was set.
+func (r requestError) OrigErrs() []error {
+ if b, ok := r.awsError.(BatchedErrors); ok {
+ return b.OrigErrs()
+ }
+ return []error{r.OrigErr()}
+}
+
+type unmarshalError struct {
+ awsError
+ bytes []byte
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (e unmarshalError) Error() string {
+ extra := hex.Dump(e.bytes)
+ return SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (e unmarshalError) String() string {
+ return e.Error()
+}
+
+// Bytes returns the bytes that failed to unmarshal.
+func (e unmarshalError) Bytes() []byte {
+ return e.bytes
+}
+
+// An error list that satisfies the Go error interface.
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+ msg := ""
+	// An empty list yields an empty string.
+	if size := len(e); size > 0 {
+		for i := 0; i < size; i++ {
+			msg += e[i].Error()
+			// Append a newline between errors, but not after the last one;
+			// a trailing '\n' would change the expected message format.
+ if i+1 < size {
+ msg += "\n"
+ }
+ }
+ }
+ return msg
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 000000000..1a3d106d5
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,108 @@
+package awsutil
+
+import (
+ "io"
+ "reflect"
+ "time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs are ignored.
+func Copy(dst, src interface{}) {
+ dstval := reflect.ValueOf(dst)
+ if !dstval.IsValid() {
+ panic("Copy dst cannot be nil")
+ }
+
+ rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will panic.
+func CopyOf(src interface{}) (dst interface{}) {
+ dsti := reflect.New(reflect.TypeOf(src).Elem())
+ dst = dsti.Interface()
+ rcopy(dsti, reflect.ValueOf(src), true)
+ return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+ if !src.IsValid() {
+ return
+ }
+
+ switch src.Kind() {
+ case reflect.Ptr:
+ if _, ok := src.Interface().(io.Reader); ok {
+ if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+ dst.Elem().Set(src)
+ } else if dst.CanSet() {
+ dst.Set(src)
+ }
+ } else {
+ e := src.Type().Elem()
+ if dst.CanSet() && !src.IsNil() {
+ if _, ok := src.Interface().(*time.Time); !ok {
+ dst.Set(reflect.New(e))
+ } else {
+ tempValue := reflect.New(e)
+ tempValue.Elem().Set(src.Elem())
+ // Sets time.Time's unexported values
+ dst.Set(tempValue)
+ }
+ }
+ if src.Elem().IsValid() {
+ // Keep the current root state since the depth hasn't changed
+ rcopy(dst.Elem(), src.Elem(), root)
+ }
+ }
+ case reflect.Struct:
+ t := dst.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ srcVal := src.FieldByName(name)
+ dstVal := dst.FieldByName(name)
+ if srcVal.IsValid() && dstVal.CanSet() {
+ rcopy(dstVal, srcVal, false)
+ }
+ }
+ case reflect.Slice:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ dst.Set(s)
+ for i := 0; i < src.Len(); i++ {
+ rcopy(dst.Index(i), src.Index(i), false)
+ }
+ case reflect.Map:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeMap(src.Type())
+ dst.Set(s)
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ v2 := reflect.New(v.Type()).Elem()
+ rcopy(v2, v, false)
+ dst.SetMapIndex(k, v2)
+ }
+ default:
+ // Assign the value if possible. If its not assignable, the value would
+ // need to be converted and the impact of that may be unexpected, or is
+ // not compatible with the dst type.
+ if src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 000000000..142a7a01c
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+ "reflect"
+)
+
+// DeepEqual reports whether the two values are deeply equal, like
+// reflect.DeepEqual. In addition, this function dereferences the input values
+// when possible so the comparison will not fail just because one parameter is
+// a pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+ ra := reflect.Indirect(reflect.ValueOf(a))
+ rb := reflect.Indirect(reflect.ValueOf(b))
+
+ if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+ // If the elements are both nil, and of the same type they are equal
+ // If they are of different types they are not equal
+ return reflect.TypeOf(a) == reflect.TypeOf(b)
+ } else if raValid != rbValid {
+ // Both values must be valid to be equal
+ return false
+ }
+
+ return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 000000000..11c52c389
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,222 @@
+package awsutil
+
+import (
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
+ pathparts := strings.Split(path, "||")
+ if len(pathparts) > 1 {
+ for _, pathpart := range pathparts {
+ vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+ if len(vals) > 0 {
+ return vals
+ }
+ }
+ return nil
+ }
+
+ values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+ components := strings.Split(path, ".")
+ for len(values) > 0 && len(components) > 0 {
+ var index *int64
+ var indexStar bool
+ c := strings.TrimSpace(components[0])
+ if c == "" { // no actual component, illegal syntax
+ return nil
+ } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+ // TODO normalize case for user
+ return nil // don't support unexported fields
+ }
+
+ // parse this component
+ if m := indexRe.FindStringSubmatch(c); m != nil {
+ c = m[1]
+ if m[2] == "" {
+ index = nil
+ indexStar = true
+ } else {
+ i, _ := strconv.ParseInt(m[2], 10, 32)
+ index = &i
+ indexStar = false
+ }
+ }
+
+ nextvals := []reflect.Value{}
+ for _, value := range values {
+ // pull component name out of struct member
+ if value.Kind() != reflect.Struct {
+ continue
+ }
+
+ if c == "*" { // pull all members
+ for i := 0; i < value.NumField(); i++ {
+ if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+ nextvals = append(nextvals, f)
+ }
+ }
+ continue
+ }
+
+ value = value.FieldByNameFunc(func(name string) bool {
+ if c == name {
+ return true
+ } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+ return true
+ }
+ return false
+ })
+
+ if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+ if !value.IsNil() {
+ value.Set(reflect.Zero(value.Type()))
+ }
+ return []reflect.Value{value}
+ }
+
+ if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+ // TODO if the value is the terminus it should not be created
+ // if the value to be set to its position is nil.
+ value.Set(reflect.New(value.Type().Elem()))
+ value = value.Elem()
+ } else {
+ value = reflect.Indirect(value)
+ }
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+
+ if indexStar || index != nil {
+ nextvals = []reflect.Value{}
+ for _, valItem := range values {
+ value := reflect.Indirect(valItem)
+ if value.Kind() != reflect.Slice {
+ continue
+ }
+
+ if indexStar { // grab all indices
+ for i := 0; i < value.Len(); i++ {
+ idx := reflect.Indirect(value.Index(i))
+ if idx.IsValid() {
+ nextvals = append(nextvals, idx)
+ }
+ }
+ continue
+ }
+
+ // pull out index
+ i := int(*index)
+ if i >= value.Len() { // check out of bounds
+ if createPath {
+ // TODO resize slice
+ } else {
+ continue
+ }
+ } else if i < 0 { // support negative indexing
+ i = value.Len() + i
+ }
+ value = reflect.Indirect(value.Index(i))
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+ }
+
+ components = components[1:]
+ }
+ return values
+}
+
+// ValuesAtPath returns a list of values at the case insensitive lexical
+// path inside of a structure.
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
+ result, err := jmespath.Search(path, i)
+ if err != nil {
+ return nil, err
+ }
+
+ v := reflect.ValueOf(result)
+ if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return nil, nil
+ }
+ if s, ok := result.([]interface{}); ok {
+ return s, err
+ }
+ if v.Kind() == reflect.Map && v.Len() == 0 {
+ return nil, nil
+ }
+ if v.Kind() == reflect.Slice {
+ out := make([]interface{}, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ out[i] = v.Index(i).Interface()
+ }
+ return out, nil
+ }
+
+ return []interface{}{result}, nil
+}
+
+// SetValueAtPath sets a value at the case insensitive lexical path inside
+// of a structure.
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+ if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
+ for _, rval := range rvals {
+ if rval.Kind() == reflect.Ptr && rval.IsNil() {
+ continue
+ }
+ setValue(rval, v)
+ }
+ }
+}
+
+func setValue(dstVal reflect.Value, src interface{}) {
+ if dstVal.Kind() == reflect.Ptr {
+ dstVal = reflect.Indirect(dstVal)
+ }
+ srcVal := reflect.ValueOf(src)
+
+	if !srcVal.IsValid() { // src is literal nil
+		// Zero the destination; for pointer values this sets them to nil.
+		dstVal.Set(reflect.Zero(dstVal.Type()))
+
+ } else if srcVal.Kind() == reflect.Ptr {
+ if srcVal.IsNil() {
+ srcVal = reflect.Zero(dstVal.Type())
+ } else {
+ srcVal = reflect.ValueOf(src).Elem()
+ }
+ dstVal.Set(srcVal)
+ } else {
+ dstVal.Set(srcVal)
+	}
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
new file mode 100644
index 000000000..710eb432f
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
@@ -0,0 +1,113 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// Prettify returns the string representation of a value.
+func Prettify(i interface{}) string {
+ var buf bytes.Buffer
+ prettify(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ strtype := v.Type().String()
+ if strtype == "time.Time" {
+ fmt.Fprintf(buf, "%s", v.Interface())
+ break
+ } else if strings.HasPrefix(strtype, "io.") {
+ buf.WriteString("")
+ break
+ }
+
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ prettify(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ strtype := v.Type().String()
+ if strtype == "[]uint8" {
+ fmt.Fprintf(buf, " len %d", v.Len())
+ break
+ }
+
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ prettify(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ prettify(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ if !v.IsValid() {
+ fmt.Fprint(buf, "")
+ return
+ }
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ case io.ReadSeeker, io.Reader:
+ format = "buffer(%p)"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 000000000..645df2450
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,88 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+ var buf bytes.Buffer
+ stringValue(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ buf.WriteString("{\n")
+
+ for i := 0; i < v.Type().NumField(); i++ {
+ ft := v.Type().Field(i)
+ fv := v.Field(i)
+
+ if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+ continue // ignore unset fields
+ }
+
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(ft.Name + ": ")
+
+ if tag := ft.Tag.Get("sensitive"); tag == "true" {
+ buf.WriteString("")
+ } else {
+ stringValue(fv, indent+2, buf)
+ }
+
+ buf.WriteString(",\n")
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ stringValue(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ stringValue(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/src/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 000000000..709605384
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,96 @@
+package client
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+ Config *aws.Config
+ Handlers request.Handlers
+ Endpoint string
+ SigningRegion string
+ SigningName string
+
+ // States that the signing name did not come from a modeled source but
+ // was derived based on other data. Used by service client constructors
+	// to determine if the signing name can be overridden based on metadata the
+ // service has.
+ SigningNameDerived bool
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+ ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
+// resolve the endpoint automatically. The service client's endpoint must be
+// provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+ ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+ request.Retryer
+ metadata.ClientInfo
+
+ Config aws.Config
+ Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+ svc := &Client{
+ Config: cfg,
+ ClientInfo: info,
+ Handlers: handlers.Copy(),
+ }
+
+ switch retryer, ok := cfg.Retryer.(request.Retryer); {
+ case ok:
+ svc.Retryer = retryer
+ case cfg.Retryer != nil && cfg.Logger != nil:
+ s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+ cfg.Logger.Log(s)
+ fallthrough
+ default:
+ maxRetries := aws.IntValue(cfg.MaxRetries)
+ if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+ maxRetries = 3
+ }
+ svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+ }
+
+ svc.AddDebugHandlers()
+
+ for _, option := range options {
+ option(svc)
+ }
+
+ return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+ return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+ if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+ return
+ }
+
+ c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
+ c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/src/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 000000000..a397b0d04
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,116 @@
+package client
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkrand"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface or create a structure type that composes this
+// struct and override the specific methods. For example, to override only
+// the MaxRetries method:
+//
+// type retryer struct {
+// client.DefaultRetryer
+// }
+//
+// // This implementation always has 100 max retries
+// func (d retryer) MaxRetries() int { return 100 }
+type DefaultRetryer struct {
+ NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will make for
+// an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+ return d.NumMaxRetries
+}
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+ // Set the upper limit of delay in retrying at ~five minutes
+ minTime := 30
+ throttle := d.shouldThrottle(r)
+ if throttle {
+ if delay, ok := getRetryDelay(r); ok {
+ return delay
+ }
+
+ minTime = 500
+ }
+
+ retryCount := r.RetryCount
+ if throttle && retryCount > 8 {
+ retryCount = 8
+ } else if retryCount > 13 {
+ retryCount = 13
+ }
+
+ delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime)
+ return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable != nil {
+ return *r.Retryable
+ }
+
+ if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 {
+ return true
+ }
+ return r.IsErrorRetryable() || d.shouldThrottle(r)
+}
+
+// shouldThrottle returns true if the request should be throttled.
+func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
+ switch r.HTTPResponse.StatusCode {
+ case 429:
+ case 502:
+ case 503:
+ case 504:
+ default:
+ return r.IsErrorThrottle()
+ }
+
+ return true
+}
+
+// getRetryDelay looks in the Retry-After header (RFC 7231) for how long
+// to wait before attempting another request.
+func getRetryDelay(r *request.Request) (time.Duration, bool) {
+ if !canUseRetryAfterHeader(r) {
+ return 0, false
+ }
+
+ delayStr := r.HTTPResponse.Header.Get("Retry-After")
+ if len(delayStr) == 0 {
+ return 0, false
+ }
+
+ delay, err := strconv.Atoi(delayStr)
+ if err != nil {
+ return 0, false
+ }
+
+ return time.Duration(delay) * time.Second, true
+}
+
+// canUseRetryAfterHeader looks at the status code to see if the Retry-After
+// header pertains to the status code.
+func canUseRetryAfterHeader(r *request.Request) bool {
+ switch r.HTTPResponse.StatusCode {
+ case 429:
+ case 503:
+ default:
+ return false
+ }
+
+ return true
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/src/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
new file mode 100644
index 000000000..7b5e1276a
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
@@ -0,0 +1,190 @@
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http/httputil"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
+---[ REQUEST DUMP ERROR ]-----------------------------
+%s
+------------------------------------------------------`
+
+type logWriter struct {
+ // Logger is what we will use to log the payload of a response.
+ Logger aws.Logger
+ // buf stores the contents of what has been read
+ buf *bytes.Buffer
+}
+
+func (logger *logWriter) Write(b []byte) (int, error) {
+ return logger.buf.Write(b)
+}
+
+type teeReaderCloser struct {
+ // io.Reader will be a tee reader that is used during logging.
+ // This structure will read from a body and write the contents to a logger.
+ io.Reader
+ // Source is used just to close when we are done reading.
+ Source io.ReadCloser
+}
+
+func (reader *teeReaderCloser) Close() error {
+ return reader.Source.Close()
+}
+
+// LogHTTPRequestHandler is an SDK request handler to log the HTTP request sent
+// to a service. Will include the HTTP request body if the LogLevel of the
+// request matches LogDebugWithHTTPBody.
+var LogHTTPRequestHandler = request.NamedHandler{
+ Name: "awssdk.client.LogRequest",
+ Fn: logRequest,
+}
+
+func logRequest(r *request.Request) {
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ bodySeekable := aws.IsReaderSeekable(r.Body)
+
+ b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ if logBody {
+ if !bodySeekable {
+ r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
+ }
+		// Reset the request body because DumpRequestOut will re-wrap the
+		// r.HTTPRequest's Body as a NoOpCloser, which would not be reset
+		// after being read by the HTTP client's reader.
+ r.ResetBody()
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
+
+// LogHTTPRequestHeaderHandler is an SDK request handler to log the HTTP request sent
+// to a service. Will only log the HTTP request's headers. The request payload
+// will not be read.
+var LogHTTPRequestHeaderHandler = request.NamedHandler{
+ Name: "awssdk.client.LogRequestHeader",
+ Fn: logRequestHeader,
+}
+
+func logRequestHeader(r *request.Request) {
+ b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
+---[ RESPONSE DUMP ERROR ]-----------------------------
+%s
+-----------------------------------------------------`
+
+// LogHTTPResponseHandler is an SDK request handler to log the HTTP response
+// received from a service. Will include the HTTP response body if the LogLevel
+// of the request matches LogDebugWithHTTPBody.
+var LogHTTPResponseHandler = request.NamedHandler{
+ Name: "awssdk.client.LogResponse",
+ Fn: logResponse,
+}
+
+func logResponse(r *request.Request) {
+ lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
+
+ if r.HTTPResponse == nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil"))
+ return
+ }
+
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ if logBody {
+ r.HTTPResponse.Body = &teeReaderCloser{
+ Reader: io.TeeReader(r.HTTPResponse.Body, lw),
+ Source: r.HTTPResponse.Body,
+ }
+ }
+
+ handlerFn := func(req *request.Request) {
+ b, err := httputil.DumpResponse(req.HTTPResponse, false)
+ if err != nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ req.ClientInfo.ServiceName, req.Operation.Name, err))
+ return
+ }
+
+ lw.Logger.Log(fmt.Sprintf(logRespMsg,
+ req.ClientInfo.ServiceName, req.Operation.Name, string(b)))
+
+ if logBody {
+ b, err := ioutil.ReadAll(lw.buf)
+ if err != nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ req.ClientInfo.ServiceName, req.Operation.Name, err))
+ return
+ }
+
+ lw.Logger.Log(string(b))
+ }
+ }
+
+ const handlerName = "awsdk.client.LogResponse.ResponseBody"
+
+ r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
+ Name: handlerName, Fn: handlerFn,
+ })
+ r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
+ Name: handlerName, Fn: handlerFn,
+ })
+}
+
+// LogHTTPResponseHeaderHandler is an SDK request handler to log the HTTP
+// response received from a service. Will only log the HTTP response's headers.
+// The response payload will not be read.
+var LogHTTPResponseHeaderHandler = request.NamedHandler{
+ Name: "awssdk.client.LogResponseHeader",
+ Fn: logResponseHeader,
+}
+
+func logResponseHeader(r *request.Request) {
+ if r.Config.Logger == nil {
+ return
+ }
+
+ b, err := httputil.DumpResponse(r.HTTPResponse, false)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
+ r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/src/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 000000000..920e9fddf
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,13 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+ ServiceName string
+ ServiceID string
+ APIVersion string
+ Endpoint string
+ SigningName string
+ SigningRegion string
+ JSONVersion string
+ TargetPrefix string
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/config.go b/src/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 000000000..10634d173
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,536 @@
+package aws
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own
+// default number of retries. This is also the default behavior when
+// Config.MaxRetries is nil.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer
+// interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the defaults.DefaultConfig structure.
+//
+// // Create Session with MaxRetry configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(&aws.Config{
+// MaxRetries: aws.Int(3),
+// }))
+//
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, &aws.Config{
+// Region: aws.String("us-west-2"),
+// })
+type Config struct {
+ // Enables verbose error printing of all credential chain errors.
+ // Should be used when wanting to see all errors while attempting to
+ // retrieve credentials.
+ CredentialsChainVerboseErrors *bool
+
+ // The credentials object to use when signing requests. Defaults to a
+ // chain of credential providers to search for credentials in environment
+ // variables, shared credential file, and EC2 Instance Roles.
+ Credentials *credentials.Credentials
+
+ // An optional endpoint URL (hostname only or fully qualified URI)
+ // that overrides the default generated endpoint for a client. Set this
+ // to `""` to use the default generated endpoint.
+ //
+ // Note: You must still provide a `Region` value when specifying an
+ // endpoint for a client.
+ Endpoint *string
+
+ // The resolver to use for looking up endpoints for AWS service clients
+ // to use based on region.
+ EndpointResolver endpoints.Resolver
+
+ // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
+	// ShouldRetry regardless of whether request.Retryable is set. This will
+	// utilize the ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
+ // is not set, then ShouldRetry will only be called if request.Retryable is nil.
+ // Proper handling of the request.Retryable field is important when setting this field.
+ EnforceShouldRetryCheck *bool
+
+ // The region to send requests to. This parameter is required and must
+ // be configured globally or on a per-client basis unless otherwise
+ // noted. A full list of regions is found in the "Regions and Endpoints"
+ // document.
+ //
+ // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
+ // Regions and Endpoints.
+ Region *string
+
+ // Set this to `true` to disable SSL when sending requests. Defaults
+ // to `false`.
+ DisableSSL *bool
+
+ // The HTTP client to use when sending requests. Defaults to
+ // `http.DefaultClient`.
+ HTTPClient *http.Client
+
+ // An integer value representing the logging level. The default log level
+ // is zero (LogOff), which represents no logging. To enable logging set
+ // to a LogLevel Value.
+ LogLevel *LogLevelType
+
+ // The logger writer interface to write logging messages to. Defaults to
+ // standard out.
+ Logger Logger
+
+ // The maximum number of times that a request will be retried for failures.
+ // Defaults to -1, which defers the max retry setting to the service
+ // specific configuration.
+ MaxRetries *int
+
+ // Retryer guides how HTTP requests should be retried in case of
+ // recoverable failures.
+ //
+ // When nil or the value does not implement the request.Retryer interface,
+ // the client.DefaultRetryer will be used.
+ //
+ // When both Retryer and MaxRetries are non-nil, the former is used and
+ // the latter ignored.
+ //
+ // To set the Retryer field in a type-safe manner and with chaining, use
+ // the request.WithRetryer helper function:
+ //
+ // cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+ //
+ Retryer RequestRetryer
+
+ // Disables semantic parameter validation, which validates input for
+ // missing required fields and/or other semantic request input errors.
+ DisableParamValidation *bool
+
+ // Disables the computation of request and response checksums, e.g.,
+ // CRC32 checksums in Amazon DynamoDB.
+ DisableComputeChecksums *bool
+
+ // Set this to `true` to force the request to use path-style addressing,
+ // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
+ // will use virtual hosted bucket addressing when possible
+ // (`http://BUCKET.s3.amazonaws.com/KEY`).
+ //
+ // Note: This configuration option is specific to the Amazon S3 service.
+ //
+ // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+ // for Amazon S3: Virtual Hosting of Buckets
+ S3ForcePathStyle *bool
+
+ // Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+ // header to PUT requests over 2MB of content. 100-Continue instructs the
+ // HTTP client not to send the body until the service responds with a
+ // `continue` status. This is useful to prevent sending the request body
+	// until after the request is authenticated and validated.
+ //
+ // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+ //
+ // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+ // `ExpectContinueTimeout` for information on adjusting the continue wait
+ // timeout. https://golang.org/pkg/net/http/#Transport
+ //
+	// You should use this flag to disable 100-Continue if you experience issues
+ // with proxies or third party S3 compatible services.
+ S3Disable100Continue *bool
+
+	// Set this to `true` to enable the S3 Accelerate feature. All operations
+	// compatible with S3 Accelerate will use the accelerate endpoint for
+	// requests. Requests not compatible will fall back to normal S3 requests.
+	//
+	// The bucket must be enabled for accelerate in order to be used with an
+	// S3 client that has accelerate enabled. If the bucket is not enabled for
+	// accelerate an error will be returned. The bucket name must also be DNS
+	// compatible to work with accelerate.
+ S3UseAccelerate *bool
+
+ // S3DisableContentMD5Validation config option is temporarily disabled,
+ // For S3 GetObject API calls, #1837.
+ //
+ // Set this to `true` to disable the S3 service client from automatically
+ // adding the ContentMD5 to S3 Object Put and Upload API calls. This option
+ // will also disable the SDK from performing object ContentMD5 validation
+ // on GetObject API calls.
+ S3DisableContentMD5Validation *bool
+
+ // Set this to `true` to disable the EC2Metadata client from overriding the
+ // default http.Client's Timeout. This is helpful if you do not want the
+	// EC2Metadata client to create a new http.Client. This option is only
+ // meaningful if you're not already using a custom HTTP client with the
+ // SDK. Enabled by default.
+ //
+ // Must be set and provided to the session.NewSession() in order to disable
+ // the EC2Metadata overriding the timeout for default credentials chain.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(aws.NewConfig()
+	//        .WithEC2MetadataDisableTimeoutOverride(true)))
+ //
+ // svc := s3.New(sess)
+ //
+ EC2MetadataDisableTimeoutOverride *bool
+
+ // Instructs the endpoint to be generated for a service client to
+ // be the dual stack endpoint. The dual stack endpoint will support
+ // both IPv4 and IPv6 addressing.
+ //
+	// Setting this for a service which does not support dual stack will fail
+	// to make requests. It is not recommended to set this value on the session
+	// as it will apply to all service clients created with the session, even
+	// services which don't support dual stack endpoints.
+	//
+	// If the Endpoint config value is also provided the UseDualStack flag
+	// will be ignored.
+	//
+	// Example:
+ //
+ // sess := session.Must(session.NewSession())
+ //
+ // svc := s3.New(sess, &aws.Config{
+ // UseDualStack: aws.Bool(true),
+ // })
+ UseDualStack *bool
+
+ // SleepDelay is an override for the func the SDK will call when sleeping
+ // during the lifecycle of a request. Specifically this will be used for
+ // request delays. This value should only be used for testing. To adjust
+ // the delay of a request see the aws/client.DefaultRetryer and
+ // aws/request.Retryer.
+ //
+ // SleepDelay will prevent any Context from being used for canceling retry
+ // delay of an API operation. It is recommended to not use SleepDelay at all
+ // and specify a Retryer instead.
+ SleepDelay func(time.Duration)
+
+	// DisableRestProtocolURICleaning will disable cleaning of the URL path
+	// when making REST protocol requests. Defaults to false. This is only
+	// needed to preserve empty directory names in S3 requests.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(&aws.Config{
+ // DisableRestProtocolURICleaning: aws.Bool(true),
+ // }))
+ //
+ // svc := s3.New(sess)
+ // out, err := svc.GetObject(&s3.GetObjectInput {
+ // Bucket: aws.String("bucketname"),
+ // Key: aws.String("//foo//bar//moo"),
+ // })
+ DisableRestProtocolURICleaning *bool
+
+	// EnableEndpointDiscovery will allow for endpoint discovery on operations
+	// that have it defined in their model. By default, endpoint discovery is off.
+ //
+ // Example:
+ // sess := session.Must(session.NewSession(&aws.Config{
+ // EnableEndpointDiscovery: aws.Bool(true),
+ // }))
+ //
+ // svc := s3.New(sess)
+ // out, err := svc.GetObject(&s3.GetObjectInput {
+ // Bucket: aws.String("bucketname"),
+ // Key: aws.String("/foo/bar/moo"),
+ // })
+ EnableEndpointDiscovery *bool
+
+ // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
+ // request endpoint hosts with modeled information.
+ //
+ // Disabling this feature is useful when you want to use local endpoints
+ // for testing that do not support the modeled host prefix pattern.
+ DisableEndpointHostPrefix *bool
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+//
+// // Create Session with MaxRetry configuration to be shared by multiple
+// // service clients.
+// sess := session.Must(session.NewSession(aws.NewConfig().
+// WithMaxRetries(3),
+// ))
+//
+// // Create S3 service client with a specific Region.
+// svc := s3.New(sess, aws.NewConfig().
+// WithRegion("us-west-2"),
+// )
+func NewConfig() *Config {
+ return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and
+// returns a Config pointer.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+ c.CredentialsChainVerboseErrors = &verboseErrs
+ return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+ c.Credentials = creds
+ return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+ c.Endpoint = &endpoint
+ return c
+}
+
+// WithEndpointResolver sets a config EndpointResolver value returning a
+// Config pointer for chaining.
+func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
+ c.EndpointResolver = resolver
+ return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+	c.Region = &region
+ return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+ c.DisableSSL = &disable
+ return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+ c.HTTPClient = client
+ return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
+func (c *Config) WithMaxRetries(max int) *Config {
+ c.MaxRetries = &max
+ return c
+}
+
+// WithDisableParamValidation sets a config DisableParamValidation value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableParamValidation(disable bool) *Config {
+ c.DisableParamValidation = &disable
+ return c
+}
+
+// WithDisableComputeChecksums sets a config DisableComputeChecksums value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
+ c.DisableComputeChecksums = &disable
+ return c
+}
+
+// WithLogLevel sets a config LogLevel value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogLevel(level LogLevelType) *Config {
+ c.LogLevel = &level
+ return c
+}
+
+// WithLogger sets a config Logger value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogger(logger Logger) *Config {
+ c.Logger = logger
+ return c
+}
+
+// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3ForcePathStyle(force bool) *Config {
+ c.S3ForcePathStyle = &force
+ return c
+}
+
+// WithS3Disable100Continue sets a config S3Disable100Continue value returning
+// a Config pointer for chaining.
+func (c *Config) WithS3Disable100Continue(disable bool) *Config {
+ c.S3Disable100Continue = &disable
+ return c
+}
+
+// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseAccelerate(enable bool) *Config {
+ c.S3UseAccelerate = &enable
+	return c
+}
+
+// WithS3DisableContentMD5Validation sets a config
+// S3DisableContentMD5Validation value returning a Config pointer for chaining.
+func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
+ c.S3DisableContentMD5Validation = &enable
+	return c
+}
+
+// WithUseDualStack sets a config UseDualStack value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseDualStack(enable bool) *Config {
+ c.UseDualStack = &enable
+ return c
+}
+
+// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
+ c.EC2MetadataDisableTimeoutOverride = &enable
+ return c
+}
+
+// WithSleepDelay overrides the function used to sleep while waiting for the
+// next retry. Defaults to time.Sleep.
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
+ c.SleepDelay = fn
+ return c
+}
+
+// WithEndpointDiscovery will set whether or not to use endpoint discovery.
+func (c *Config) WithEndpointDiscovery(t bool) *Config {
+ c.EnableEndpointDiscovery = &t
+ return c
+}
+
+// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
+// when making requests.
+func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
+ c.DisableEndpointHostPrefix = &t
+ return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
+func (c *Config) MergeIn(cfgs ...*Config) {
+ for _, other := range cfgs {
+ mergeInConfig(c, other)
+ }
+}
+
+func mergeInConfig(dst *Config, other *Config) {
+ if other == nil {
+ return
+ }
+
+ if other.CredentialsChainVerboseErrors != nil {
+ dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
+ }
+
+ if other.Credentials != nil {
+ dst.Credentials = other.Credentials
+ }
+
+ if other.Endpoint != nil {
+ dst.Endpoint = other.Endpoint
+ }
+
+ if other.EndpointResolver != nil {
+ dst.EndpointResolver = other.EndpointResolver
+ }
+
+ if other.Region != nil {
+ dst.Region = other.Region
+ }
+
+ if other.DisableSSL != nil {
+ dst.DisableSSL = other.DisableSSL
+ }
+
+ if other.HTTPClient != nil {
+ dst.HTTPClient = other.HTTPClient
+ }
+
+ if other.LogLevel != nil {
+ dst.LogLevel = other.LogLevel
+ }
+
+ if other.Logger != nil {
+ dst.Logger = other.Logger
+ }
+
+ if other.MaxRetries != nil {
+ dst.MaxRetries = other.MaxRetries
+ }
+
+ if other.Retryer != nil {
+ dst.Retryer = other.Retryer
+ }
+
+ if other.DisableParamValidation != nil {
+ dst.DisableParamValidation = other.DisableParamValidation
+ }
+
+ if other.DisableComputeChecksums != nil {
+ dst.DisableComputeChecksums = other.DisableComputeChecksums
+ }
+
+ if other.S3ForcePathStyle != nil {
+ dst.S3ForcePathStyle = other.S3ForcePathStyle
+ }
+
+ if other.S3Disable100Continue != nil {
+ dst.S3Disable100Continue = other.S3Disable100Continue
+ }
+
+ if other.S3UseAccelerate != nil {
+ dst.S3UseAccelerate = other.S3UseAccelerate
+ }
+
+ if other.S3DisableContentMD5Validation != nil {
+ dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
+ }
+
+ if other.UseDualStack != nil {
+ dst.UseDualStack = other.UseDualStack
+ }
+
+ if other.EC2MetadataDisableTimeoutOverride != nil {
+ dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+ }
+
+ if other.SleepDelay != nil {
+ dst.SleepDelay = other.SleepDelay
+ }
+
+ if other.DisableRestProtocolURICleaning != nil {
+ dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
+ }
+
+ if other.EnforceShouldRetryCheck != nil {
+ dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
+ }
+
+ if other.EnableEndpointDiscovery != nil {
+ dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
+ }
+
+ if other.DisableEndpointHostPrefix != nil {
+ dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
+ }
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+ dst := &Config{}
+ dst.MergeIn(c)
+
+ for _, cfg := range cfgs {
+ dst.MergeIn(cfg)
+ }
+
+ return dst
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/src/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
new file mode 100644
index 000000000..2866f9a7f
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
@@ -0,0 +1,37 @@
+// +build !go1.9
+
+package aws
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+	// the same key return the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ Value(key interface{}) interface{}
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/src/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
new file mode 100644
index 000000000..3718b26e1
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
@@ -0,0 +1,11 @@
+// +build go1.9
+
+package aws
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/src/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
new file mode 100644
index 000000000..66c5945db
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
@@ -0,0 +1,56 @@
+// +build !go1.7
+
+package aws
+
+import "time"
+
+// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
+// provide a 1.6 and 1.5 safe version of context that is compatible with Go
+// 1.7's Context.
+//
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case backgroundCtx:
+ return "aws.BackgroundContext"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ backgroundCtx = new(emptyCtx)
+)
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+ return backgroundCtx
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/src/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
new file mode 100644
index 000000000..9c29f29af
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
@@ -0,0 +1,20 @@
+// +build go1.7
+
+package aws
+
+import "context"
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+ return context.Background()
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/src/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
new file mode 100644
index 000000000..304fd1561
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
@@ -0,0 +1,24 @@
+package aws
+
+import (
+ "time"
+)
+
+// SleepWithContext will wait until the timer duration expires or the context
+// is canceled, whichever happens first. If the context is canceled, the
+// context's error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
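+//
+// Illustrative usage (not part of the upstream source; ctx is assumed to be
+// a live Context):
+//
+//     if err := aws.SleepWithContext(ctx, 3*time.Second); err != nil {
+//         return err // ctx was canceled before the delay elapsed
+//     }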
+func SleepWithContext(ctx Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/src/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100644
index 000000000..ff5d58e06
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -0,0 +1,387 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+ return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+ if v != nil {
+ return *v
+ }
+ return ""
+}
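+
+// Illustrative usage of the pointer helpers above (not part of the upstream
+// source); the conversions are symmetric and the Value variants are nil-safe:
+//
+//     s := aws.String("bucket") // *string
+//     _ = aws.StringValue(s)    // "bucket"
+//     _ = aws.StringValue(nil)  // ""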
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+ dst := make([]*string, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+ dst := make([]string, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+ dst := make(map[string]*string)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+ dst := make(map[string]string)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+ if v != nil {
+ return *v
+ }
+ return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+ dst := make([]*bool, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+ dst := make([]bool, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+ dst := make(map[string]*bool)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+ dst := make(map[string]bool)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+ dst := make([]*int, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+ dst := make([]int, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+ dst := make(map[string]*int)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+ dst := make(map[string]int)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+ dst := make([]*int64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+ dst := make([]int64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+ dst := make(map[string]*int64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+ dst := make(map[string]int64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+ dst := make([]*float64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+ dst := make([]float64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+ dst := make(map[string]*float64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+ dst := make(map[string]float64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+ if v != nil {
+ return *v
+ }
+ return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+// Note that the implementation divides the input by 1000 before passing it
+// to time.Unix, so the value is effectively interpreted as milliseconds.
+func SecondsTimeValue(v *int64) time.Time {
+ if v != nil {
+ return time.Unix((*v / 1000), 0)
+ }
+ return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+ if v != nil {
+ return time.Unix(0, (*v * 1000000))
+ }
+ return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64.
+// This includes calling TimeUnixMilli on a zero Time, which is likewise
+// undefined.
+//
+// This utility is useful for service APIs such as CloudWatch Logs, which
+// require their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
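+//
+// Illustrative example (not part of the upstream source):
+//
+//     t := time.Unix(1, 500000000) // 1.5s after the epoch
+//     _ = TimeUnixMilli(t)         // 1500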
+func TimeUnixMilli(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+ dst := make([]*time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+ dst := make([]time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+ dst := make(map[string]*time.Time)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+ dst := make(map[string]time.Time)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/src/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 000000000..f8853d78a
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,228 @@
+package corehandlers
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// lener is an interface matching types that also have a Len method.
+type lener interface {
+ Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine the request body length and no "Content-Length" was specified,
+// an error is set on the request.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+ var length int64
+
+ if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+ length, _ = strconv.ParseInt(slength, 10, 64)
+ } else {
+ if r.Body != nil {
+ var err error
+ length, err = aws.SeekerLen(r.Body)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
+ return
+ }
+ }
+ }
+
+ if length > 0 {
+ r.HTTPRequest.ContentLength = length
+ r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+ } else {
+ r.HTTPRequest.ContentLength = 0
+ r.HTTPRequest.Header.Del("Content-Length")
+ }
+}}
+
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// ValidateReqSigHandler is a request handler to ensure that the request's
+// signature doesn't expire before it is sent. This can happen when a request
+// is built and signed significantly before it is sent, or when significant
+// delays occur while retrying requests, causing the signature to expire.
+var ValidateReqSigHandler = request.NamedHandler{
+ Name: "core.ValidateReqSigHandler",
+ Fn: func(r *request.Request) {
+		// Requests using anonymous credentials are not signed, so skip validation.
+ if r.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ signedTime := r.Time
+ if !r.LastSignedAt.IsZero() {
+ signedTime = r.LastSignedAt
+ }
+
+ // 5 minutes to allow for some clock skew/delays in transmission.
+ // Would be improved with aws/aws-sdk-go#423
+ if signedTime.Add(5 * time.Minute).After(time.Now()) {
+ return
+ }
+
+ fmt.Println("request expired, resigning")
+ r.Sign()
+ },
+}
+
+// SendHandler is a request handler to send service request using HTTP client.
+var SendHandler = request.NamedHandler{
+ Name: "core.SendHandler",
+ Fn: func(r *request.Request) {
+ sender := sendFollowRedirects
+ if r.DisableFollowRedirects {
+ sender = sendWithoutFollowRedirects
+ }
+
+ if request.NoBody == r.HTTPRequest.Body {
+ // Strip off the request body if the NoBody reader was used as a
+			// placeholder for a request body. This prevents the SDK from
+ // making requests with a request body when it would be invalid
+ // to do so.
+ //
+			// Use a shallow copy of the http.Request to ensure that a race
+			// condition between the transport and the Body is not triggered.
+ reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
+ reqCopy.Body = nil
+ r.HTTPRequest = &reqCopy
+ defer func() {
+ r.HTTPRequest = reqOrig
+ }()
+ }
+
+ var err error
+ r.HTTPResponse, err = sender(r)
+ if err != nil {
+ handleSendError(r, err)
+ }
+ },
+}
+
+func sendFollowRedirects(r *request.Request) (*http.Response, error) {
+ return r.Config.HTTPClient.Do(r.HTTPRequest)
+}
+
+func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
+ transport := r.Config.HTTPClient.Transport
+ if transport == nil {
+ transport = http.DefaultTransport
+ }
+
+ return transport.RoundTrip(r.HTTPRequest)
+}
+
+func handleSendError(r *request.Request, err error) {
+ // Prevent leaking if an HTTPResponse was returned. Clean up
+ // the body.
+ if r.HTTPResponse != nil {
+ r.HTTPResponse.Body.Close()
+ }
+	// Capture the case where url.Error is returned while processing the
+	// response. e.g. a 301 without a Location header comes back as a string
+	// error and r.HTTPResponse is nil. Other URL redirect errors will
+	// come back in a similar manner.
+ if e, ok := err.(*url.Error); ok && e.Err != nil {
+ if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
+ code, _ := strconv.ParseInt(s[1], 10, 64)
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(code),
+ Status: http.StatusText(int(code)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ return
+ }
+ }
+ if r.HTTPResponse == nil {
+ // Add a dummy request response object to ensure the HTTPResponse
+ // value is consistent.
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(0),
+ Status: http.StatusText(int(0)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ }
+ // Catch all other request errors.
+ r.Error = awserr.New("RequestError", "send request failed", err)
+ r.Retryable = aws.Bool(true) // network errors are retryable
+
+	// Override the error with a context canceled error, if the context was canceled.
+ ctx := r.Context()
+ select {
+ case <-ctx.Done():
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", ctx.Err())
+ r.Retryable = aws.Bool(false)
+ default:
+ }
+}
+
+// ValidateResponseHandler is a request handler to validate service response.
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+ // this may be replaced by an UnmarshalError handler
+ r.Error = awserr.New("UnknownError", "unknown error", nil)
+ }
+}}
+
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
+ r.Retryable = aws.Bool(r.ShouldRetry(r))
+ }
+
+ if r.WillRetry() {
+ r.RetryDelay = r.RetryRules(r)
+
+ if sleepFn := r.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(r.RetryDelay)
+ } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", err)
+ r.Retryable = aws.Bool(false)
+ return
+ }
+
+ // when the expired token exception occurs the credentials
+ // need to be expired locally so that the next request to
+ // get credentials will trigger a credentials refresh.
+ if r.IsErrorExpired() {
+ r.Config.Credentials.Expire()
+ }
+
+ r.RetryCount++
+ r.Error = nil
+ }
+}}
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+ if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
+ r.Error = aws.ErrMissingRegion
+ } else if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/src/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644
index 000000000..7d50b1557
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,17 @@
+package corehandlers
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+ if !r.ParamsFilled() {
+ return
+ }
+
+ if v, ok := r.Params.(request.Validator); ok {
+ if err := v.Validate(); err != nil {
+ r.Error = err
+ }
+ }
+}}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/src/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
new file mode 100644
index 000000000..ab69c7a6f
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
@@ -0,0 +1,37 @@
+package corehandlers
+
+import (
+ "os"
+ "runtime"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
+// to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+ Name: "core.SDKVersionUserAgentHandler",
+ Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+ runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+const execEnvUAKey = `exec-env`
+
+// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
+// execution environment to the user agent.
+//
+// If the environment variable AWS_EXECUTION_ENV is set, its value will be
+// appended to the user agent string.
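+//
+// For example (illustrative, not part of the upstream source): with
+// AWS_EXECUTION_ENV=AWS_Lambda_go1.x the user agent gains the token
+// "exec-env/AWS_Lambda_go1.x".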
+var AddHostExecEnvUserAgentHander = request.NamedHandler{
+ Name: "core.AddHostExecEnvUserAgentHander",
+ Fn: func(r *request.Request) {
+ v := os.Getenv(execEnvVar)
+ if len(v) == 0 {
+ return
+ }
+
+ request.AddToUserAgent(r, execEnvUAKey+"/"+v)
+ },
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 000000000..3ad1e798d
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+	// ErrNoValidProvidersFoundInChain is returned when there are no valid
+ // providers in the ChainProvider.
+ //
+ // This has been deprecated. For verbose error messaging set
+ // aws.Config.CredentialsChainVerboseErrors to true.
+ ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+ `no valid providers in chain. Deprecated.
+ For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+ nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+// creds := credentials.NewChainCredentials(
+// []credentials.Provider{
+// &credentials.EnvProvider{},
+// &ec2rolecreds.EC2RoleProvider{
+// Client: ec2metadata.New(sess),
+// },
+// })
+//
+// // Usage of ChainCredentials with aws.Config
+// svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: creds,
+// })))
+//
+type ChainProvider struct {
+ Providers []Provider
+ curr Provider
+ VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return NewCredentials(&ChainProvider{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value, or an error if no provider
+// returned without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+ var errs []error
+ for _, p := range c.Providers {
+ creds, err := p.Retrieve()
+ if err == nil {
+ c.curr = p
+ return creds, nil
+ }
+ errs = append(errs, err)
+ }
+ c.curr = nil
+
+ var err error
+ err = ErrNoValidProvidersFoundInChain
+ if c.VerboseErrors {
+ err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+ }
+ return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 000000000..894bbc7f8
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,292 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials is the primary method of getting access to and managing
+// credentials Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true. At which
+// point Credentials will call Provider's Retrieve() to get new credential Value.
+//
+// The Provider is responsible for determining when credentials Value have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+// creds := credentials.NewEnvCredentials()
+//
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := credentials.NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials
+
+import (
+ "fmt"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "sync"
+ "time"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+// svc := s3.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: credentials.AnonymousCredentials,
+// })))
+// // Access public S3 buckets.
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Provider used to get credentials
+ ProviderName string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+ // Retrieve returns nil if it successfully retrieved the value.
+	// An error is returned if the value was not obtainable, or is empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// An Expirer is an interface that Providers can implement to expose the expiration
+// time, if known. If the Provider cannot accurately provide this info,
+// it should not implement this interface.
+type Expirer interface {
+ // The time at which the credentials are no longer valid
+ ExpiresAt() time.Time
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an
+// error. It is used by the SDK when construction of a known provider is not
+// possible due to an error.
+type ErrorProvider struct {
+ // The error to be returned from Retrieve
+ Err error
+
+	// The provider name to set on the Value returned by Retrieve.
+ ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+ return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+ return false
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+// type EC2RoleProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+ // The date/time when to expire on
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set. Available for testing
+ // to be able to mock out the current time.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
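+//
+// Illustrative example (not part of the upstream source): credentials valid
+// for 15 minutes are treated as expired 10 seconds early.
+//
+//     e.SetExpiration(time.Now().Add(15*time.Minute), 10*time.Second)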
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ e.expiration = expiration
+ if window > 0 {
+ e.expiration = e.expiration.Add(-window)
+ }
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ curTime := e.CurrentTime
+ if curTime == nil {
+ curTime = time.Now
+ }
+ return e.expiration.Before(curTime())
+}
+
+// ExpiresAt returns the expiration time of the credential
+func (e *Expiry) ExpiresAt() time.Time {
+ return e.expiration
+}
+
+// A Credentials provides concurrency safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ creds Value
+ forceRefresh bool
+
+ m sync.RWMutex
+
+ provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ // Check the cached credentials first with just the read lock.
+ c.m.RLock()
+ if !c.isExpired() {
+ creds := c.creds
+ c.m.RUnlock()
+ return creds, nil
+ }
+ c.m.RUnlock()
+
+	// Credentials are expired; retrieve new credentials while holding the
+	// full (write) lock.
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
+
+// ExpiresAt provides access to the functionality of the Expirer interface of
+// the underlying Provider, if it supports that interface. Otherwise, it returns
+// an error.
+func (c *Credentials) ExpiresAt() (time.Time, error) {
+ c.m.RLock()
+ defer c.m.RUnlock()
+
+ expirer, ok := c.provider.(Expirer)
+ if !ok {
+ return time.Time{}, awserr.New("ProviderNotExpirer",
+ fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName),
+ nil)
+ }
+ if c.forceRefresh {
+ // set expiration time to the distant past
+ return time.Time{}, nil
+ }
+ return expirer.ExpiresAt(), nil
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
new file mode 100644
index 000000000..43d4ed386
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -0,0 +1,180 @@
+package ec2rolecreds
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// ProviderName is the name of the EC2Role provider.
+const ProviderName = "EC2RoleProvider"
+
+// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps
+// track of whether those credentials are expired.
+//
+// Example of how to configure the EC2RoleProvider with a custom http Client,
+// Endpoint, or ExpiryWindow:
+//
+// p := &ec2rolecreds.EC2RoleProvider{
+// // Pass in a custom timeout to be used when requesting
+// // IAM EC2 Role credentials.
+// Client: ec2metadata.New(sess, aws.Config{
+// HTTPClient: &http.Client{Timeout: 10 * time.Second},
+// }),
+//
+//          // Do not use early expiry of credentials. If a non-zero value is
+//          // specified, the credentials will be expired early.
+// ExpiryWindow: 0,
+// }
+type EC2RoleProvider struct {
+ credentials.Expiry
+
+ // Required EC2Metadata client to use when connecting to EC2 metadata service.
+ Client *ec2metadata.EC2Metadata
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: ec2metadata.New(c),
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to EC2
+// metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: client,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or if the desired
+// credentials cannot be extracted.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+ credsList, err := requestCredList(m.Client)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ if len(credsList) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+ }
+ credsName := credsList[0]
+
+ roleCreds, err := requestCred(m.Client, credsName)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ ProviderName: ProviderName,
+ }, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+}
+
+const iamSecurityCredsPath = "iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service.
+// If there are no credentials, or there is an error making or receiving
+// the request, an error is returned.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+ resp, err := client.GetMetadata(iamSecurityCredsPath)
+ if err != nil {
+ return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+ }
+
+ credsList := []string{}
+ s := bufio.NewScanner(strings.NewReader(resp))
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, awserr.New(request.ErrCodeSerialization,
+ "failed to read EC2 instance role from metadata service", err)
+ }
+
+ return credsList, nil
+}
+
+// requestCred requests a specific set of credentials from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the
+// response, an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+ resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName))
+ if err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("EC2RoleRequestError",
+ fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New(request.ErrCodeSerialization,
+ fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ if respCreds.Code != "Success" {
+ // If an error code was returned something failed requesting the role.
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+ }
+
+ return respCreds, nil
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
new file mode 100644
index 000000000..c2b2c5d65
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -0,0 +1,203 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// }
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+// {
+// "AccessKeyId" : "MUA...",
+// "SecretAccessKey" : "/7PC5om....",
+// "Token" : "AQoDY....=",
+// "Expiration" : "2016-02-25T06:03:31Z"
+// }
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+// {
+// "code": "ErrorCode",
+// "message": "Helpful error message."
+// }
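+//
+// Illustrative usage (not part of the upstream source; the endpoint URL is
+// hypothetical):
+//
+//     creds := endpointcreds.NewCredentialsClient(
+//         *sess.Config, sess.Handlers, "http://127.0.0.1:8080/creds")
+//     svc := s3.New(sess, &aws.Config{Credentials: creds})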
+package endpointcreds
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+// Provider satisfies the credentials.Provider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+ staticCreds bool
+ credentials.Expiry
+
+	// Requires an AWS Client to make HTTP requests to the endpoint with.
+	// The Endpoint the request will be made to is provided by the aws.Config's
+ // Endpoint value.
+ Client *client.Client
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+	// Optional authorization token value; if set, it will be used as the value
+	// of the Authorization header of the endpoint credential request.
+ AuthorizationToken string
+}
+
+// NewProviderClient returns a credentials Provider for retrieving AWS credentials
+// from arbitrary endpoint.
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
+ p := &Provider{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: "CredentialsEndpoint",
+ Endpoint: endpoint,
+ },
+ handlers,
+ ),
+ }
+
+ p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+ p.Client.Handlers.Validate.Clear()
+ p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return p
+}
+
+// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
+// from an arbitrary endpoint concurrently. The client will request the
+// credentials from the endpoint whenever the cached value has expired.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+ return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+ if p.staticCreds {
+ return false
+ }
+ return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+ resp, err := p.getCredentials()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName},
+ awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+ }
+
+ if resp.Expiration != nil {
+ p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+ } else {
+ p.staticCreds = true
+ }
+
+ return credentials.Value{
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.Token,
+ ProviderName: ProviderName,
+ }, nil
+}
+
+type getCredentialsOutput struct {
+ Expiration *time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+}
+
+type errorOutput struct {
+ Code string `json:"code"`
+ Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+ op := &request.Operation{
+ Name: "GetCredentials",
+ HTTPMethod: "GET",
+ }
+
+ out := &getCredentialsOutput{}
+ req := p.Client.NewRequest(op, nil, out)
+ req.HTTPRequest.Header.Set("Accept", "application/json")
+ if authToken := p.AuthorizationToken; len(authToken) != 0 {
+ req.HTTPRequest.Header.Set("Authorization", authToken)
+ }
+
+ return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if len(r.ClientInfo.Endpoint) == 0 {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ out := r.Data.(*getCredentialsOutput)
+ if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization,
+ "failed to decode endpoint credentials",
+ err,
+ )
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ var errOut errorOutput
+ err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to decode error message", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New(errOut.Code, errOut.Message, nil)
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 000000000..54c5cf733
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,74 @@
+package credentials
+
+import (
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName is the name of the Env provider.
+const EnvProviderName = "EnvProvider"
+
+var (
+ // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+ // found in the process's environment.
+ ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+ // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+ // can't be found in the process's environment.
+ ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+//
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
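+//
+// Illustrative usage (not part of the upstream source):
+//
+//     creds := credentials.NewEnvCredentials()
+//     v, err := creds.Get() // fails if the variables above are unset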
+type EnvProvider struct {
+ retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+ return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ if id == "" {
+ return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+ }
+
+ if secret == "" {
+ return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ ProviderName: EnvProviderName,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+ return !e.retrieved
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
new file mode 100644
index 000000000..7fc91d9d2
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
new file mode 100644
index 000000000..1980c8c14
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
@@ -0,0 +1,425 @@
+/*
+Package processcreds is a credential Provider to retrieve `credential_process`
+credentials.
+
+WARNING: The following describes a method of sourcing credentials from an external
+process. This can potentially be dangerous, so proceed with caution. Other
+credential providers should be preferred if at all possible. If using this
+option, you should make sure that the config file is as locked down as possible
+using security best practices for your operating system.
+
+You can use credentials from a `credential_process` in a variety of ways.
+
+One way is to setup your shared config file, located in the default
+location, with the `credential_process` key and the command you want to be
+called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+
+ [default]
+ credential_process = /command/to/call
+
+Creating a new session will use the credential process to retrieve credentials.
+NOTE: If there are credentials in the profile you are using, the credential
+process will not be used.
+
+ // Initialize a session to load credentials.
+ sess, _ := session.NewSession(&aws.Config{
+ Region: aws.String("us-east-1")},
+ )
+
+ // Create S3 service client to use the credentials.
+ svc := s3.New(sess)
+
+Another way to use the `credential_process` method is by using
+`processcreds.NewCredentials()` and providing a command to be executed to
+retrieve credentials:
+
+ // Create credentials using the ProcessProvider.
+ creds := processcreds.NewCredentials("/path/to/command")
+
+ // Create service client value configured for credentials.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+You can set a non-default timeout for the `credential_process` with another
+constructor, `processcreds.NewCredentialsTimeout()`, providing the timeout. To
+set a one minute timeout:
+
+ // Create credentials using the ProcessProvider.
+ creds := processcreds.NewCredentialsTimeout(
+ "/path/to/command",
+		time.Duration(1) * time.Minute)
+
+If you need more control, you can set any configurable options in the
+credentials using one or more option functions. For example, you can set a two
+minute timeout, a credential duration of 60 minutes, and a maximum stdout
+buffer size of 2k.
+
+ creds := processcreds.NewCredentials(
+ "/path/to/command",
+ func(opt *ProcessProvider) {
+ opt.Timeout = time.Duration(2) * time.Minute
+ opt.Duration = time.Duration(60) * time.Minute
+ opt.MaxBufSize = 2048
+ })
+
+You can also use your own `exec.Cmd`:
+
+ // Create an exec.Cmd
+ myCommand := exec.Command("/path/to/command")
+
+ // Create credentials using your exec.Cmd and custom timeout
+ creds := processcreds.NewCredentialsCommand(
+ myCommand,
+ func(opt *processcreds.ProcessProvider) {
+ opt.Timeout = time.Duration(1) * time.Second
+ })
+*/
+package processcreds
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+const (
+ // ProviderName is the name this credentials provider will label any
+ // returned credentials Value with.
+ ProviderName = `ProcessProvider`
+
+ // ErrCodeProcessProviderParse error parsing process output
+ ErrCodeProcessProviderParse = "ProcessProviderParseError"
+
+ // ErrCodeProcessProviderVersion version error in output
+ ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
+
+ // ErrCodeProcessProviderRequired required attribute missing in output
+ ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
+
+ // ErrCodeProcessProviderExecution execution of command failed
+ ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
+
+ // errMsgProcessProviderTimeout process took longer than allowed
+ errMsgProcessProviderTimeout = "credential process timed out"
+
+ // errMsgProcessProviderProcess process error
+ errMsgProcessProviderProcess = "error in credential_process"
+
+ // errMsgProcessProviderParse problem parsing output
+ errMsgProcessProviderParse = "parse failed of credential_process output"
+
+ // errMsgProcessProviderVersion version error in output
+ errMsgProcessProviderVersion = "wrong version in process output (not 1)"
+
+ // errMsgProcessProviderMissKey missing access key id in output
+ errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
+
+	// errMsgProcessProviderMissSecret missing secret access key in output
+ errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
+
+ // errMsgProcessProviderPrepareCmd prepare of command failed
+ errMsgProcessProviderPrepareCmd = "failed to prepare command"
+
+ // errMsgProcessProviderEmptyCmd command must not be empty
+ errMsgProcessProviderEmptyCmd = "command must not be empty"
+
+ // errMsgProcessProviderPipe failed to initialize pipe
+ errMsgProcessProviderPipe = "failed to initialize pipe"
+
+ // DefaultDuration is the default amount of time in minutes that the
+ // credentials will be valid for.
+ DefaultDuration = time.Duration(15) * time.Minute
+
+ // DefaultBufSize limits buffer size from growing to an enormous
+ // amount due to a faulty process.
+ DefaultBufSize = 1024
+
+ // DefaultTimeout default limit on time a process can run.
+ DefaultTimeout = time.Duration(1) * time.Minute
+)
+
+// ProcessProvider satisfies the credentials.Provider interface, and is a
+// client to retrieve credentials from a process.
+type ProcessProvider struct {
+ staticCreds bool
+ credentials.Expiry
+ originalCommand []string
+
+ // Expiry duration of the credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+	// The command (an exec.Cmd, built from the user-supplied string) that
+	// should return JSON with credential information.
+ command *exec.Cmd
+
+ // MaxBufSize limits memory usage from growing to an enormous
+ // amount due to a faulty process.
+ MaxBufSize int
+
+ // Timeout limits the time a process can run.
+ Timeout time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// ProcessProvider. The credentials will expire every 15 minutes by default.
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
+ p := &ProcessProvider{
+ command: exec.Command(command),
+ Duration: DefaultDuration,
+ Timeout: DefaultTimeout,
+ MaxBufSize: DefaultBufSize,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsTimeout returns a pointer to a new Credentials object with
+// the specified command and timeout, and default duration and max buffer size.
+func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
+ p := NewCredentials(command, func(opt *ProcessProvider) {
+ opt.Timeout = timeout
+ })
+
+ return p
+}
+
+// NewCredentialsCommand returns a pointer to a new Credentials object with
+// the specified command, and default timeout, duration and max buffer size.
+func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
+ p := &ProcessProvider{
+ command: command,
+ Duration: DefaultDuration,
+ Timeout: DefaultTimeout,
+ MaxBufSize: DefaultBufSize,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+type credentialProcessResponse struct {
+ Version int
+ AccessKeyID string `json:"AccessKeyId"`
+ SecretAccessKey string
+ SessionToken string
+ Expiration *time.Time
+}
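+
+// A conforming credential_process output consumed by Retrieve might look like
+// the following (field values are illustrative):
+//
+//	{
+//	    "Version": 1,
+//	    "AccessKeyId": "AKID",
+//	    "SecretAccessKey": "SECRET",
+//	    "SessionToken": "TOKEN",
+//	    "Expiration": "2019-01-01T00:00:00Z"
+//	}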
+
+// Retrieve executes the 'credential_process' and returns the credentials.
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
+ out, err := p.executeCredentialProcess()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ // Serialize and validate response
+ resp := &credentialProcessResponse{}
+ if err = json.Unmarshal(out, resp); err != nil {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderParse,
+ fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
+ err)
+ }
+
+ if resp.Version != 1 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderVersion,
+ errMsgProcessProviderVersion,
+ nil)
+ }
+
+ if len(resp.AccessKeyID) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderRequired,
+ errMsgProcessProviderMissKey,
+ nil)
+ }
+
+ if len(resp.SecretAccessKey) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New(
+ ErrCodeProcessProviderRequired,
+ errMsgProcessProviderMissSecret,
+ nil)
+ }
+
+ // Handle expiration
+ p.staticCreds = resp.Expiration == nil
+ if resp.Expiration != nil {
+ p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+ }
+
+ return credentials.Value{
+ ProviderName: ProviderName,
+ AccessKeyID: resp.AccessKeyID,
+ SecretAccessKey: resp.SecretAccessKey,
+ SessionToken: resp.SessionToken,
+ }, nil
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *ProcessProvider) IsExpired() bool {
+ if p.staticCreds {
+ return false
+ }
+ return p.Expiry.IsExpired()
+}
+
+// prepareCommand prepares the command to be executed.
+func (p *ProcessProvider) prepareCommand() error {
+
+ var cmdArgs []string
+ if runtime.GOOS == "windows" {
+ cmdArgs = []string{"cmd.exe", "/C"}
+ } else {
+ cmdArgs = []string{"sh", "-c"}
+ }
+
+ if len(p.originalCommand) == 0 {
+ p.originalCommand = make([]string, len(p.command.Args))
+ copy(p.originalCommand, p.command.Args)
+
+ // check explicitly for an empty command, because running `sh -c` with an
+ // empty string would succeed with no output
+ if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
+ return awserr.New(
+ ErrCodeProcessProviderExecution,
+ fmt.Sprintf(
+ "%s: %s",
+ errMsgProcessProviderPrepareCmd,
+ errMsgProcessProviderEmptyCmd),
+ nil)
+ }
+ }
+
+ cmdArgs = append(cmdArgs, p.originalCommand...)
+ p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
+ p.command.Env = os.Environ()
+
+ return nil
+}
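+
+// As an illustrative note: a provider created with NewCredentials("getcreds")
+// is executed as `sh -c getcreds` on Linux/macOS and as `cmd.exe /C getcreds`
+// on Windows ("getcreds" is a hypothetical command name).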
+
+// executeCredentialProcess starts the credential process on the OS and
+// returns the results or an error.
+func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
+
+ if err := p.prepareCommand(); err != nil {
+ return nil, err
+ }
+
+ // Setup the pipes
+ outReadPipe, outWritePipe, err := os.Pipe()
+ if err != nil {
+ return nil, awserr.New(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderPipe,
+ err)
+ }
+
+ p.command.Stderr = os.Stderr // display stderr on console for MFA
+ p.command.Stdout = outWritePipe // get creds json on process's stdout
+ p.command.Stdin = os.Stdin // enable stdin for MFA
+
+ output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
+
+ stdoutCh := make(chan error, 1)
+ go readInput(
+ io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
+ output,
+ stdoutCh)
+
+ execCh := make(chan error, 1)
+ go executeCommand(*p.command, execCh)
+
+ finished := false
+ var errors []error
+ for !finished {
+ select {
+ case readError := <-stdoutCh:
+ errors = appendError(errors, readError)
+ finished = true
+ case execError := <-execCh:
+ err := outWritePipe.Close()
+ errors = appendError(errors, err)
+ errors = appendError(errors, execError)
+ if errors != nil {
+ return output.Bytes(), awserr.NewBatchError(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderProcess,
+ errors)
+ }
+ case <-time.After(p.Timeout):
+ finished = true
+ return output.Bytes(), awserr.NewBatchError(
+ ErrCodeProcessProviderExecution,
+ errMsgProcessProviderTimeout,
+ errors) // errors can be nil
+ }
+ }
+
+ out := output.Bytes()
+
+ if runtime.GOOS == "windows" {
+ // windows adds slashes to quotes
+ out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
+ }
+
+ return out, nil
+}
+
+// appendError conveniently checks for nil before appending to the slice
+func appendError(errors []error, err error) []error {
+ if err != nil {
+ return append(errors, err)
+ }
+ return errors
+}
+
+func executeCommand(cmd exec.Cmd, exec chan error) {
+ // Start the command
+ err := cmd.Start()
+ if err == nil {
+ err = cmd.Wait()
+ }
+
+ exec <- err
+}
+
+func readInput(r io.Reader, w io.Writer, read chan error) {
+ tee := io.TeeReader(r, w)
+
+ _, err := ioutil.ReadAll(tee)
+
+ if err == io.EOF {
+ err = nil
+ }
+
+ read <- err // will only arrive here when write end of pipe is closed
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 000000000..e15514958
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,150 @@
+package credentials
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/internal/ini"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredsProviderName provides a name of SharedCreds provider
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+ // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+ ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves credentials from the current user's home
+// directory, and keeps track of whether those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.aws/credentials"
+ // Windows: "%USERPROFILE%\.aws\credentials"
+ Filename string
+
+ // AWS Profile to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "AWS_PROFILE" or "default" if
+ // environment variable is also not set.
+ Profile string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+ return NewCredentials(&SharedCredentialsProvider{
+ Filename: filename,
+ Profile: profile,
+ })
+}
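+
+// For example, an empty filename falls back to the AWS_SHARED_CREDENTIALS_FILE
+// environment variable, then the home directory default (the profile name
+// below is illustrative):
+//
+//	creds := NewSharedCredentials("", "dev-profile")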
+
+// Retrieve reads and extracts the shared credentials from the current
+// users home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+ p.retrieved = false
+
+ filename, err := p.filename()
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ creds, err := loadProfile(filename, p.profile())
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ p.retrieved = true
+ return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+ return !p.retrieved
+}
+
+// loadProfile loads from the file pointed to by the shared credentials filename
+// for the given profile. The credentials retrieved from the profile will be
+// returned, or an error if reading the file fails or the data is invalid.
+func loadProfile(filename, profile string) (Value, error) {
+ config, err := ini.OpenFile(filename)
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+ }
+
+ iniProfile, ok := config.GetSection(profile)
+ if !ok {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
+ }
+
+ id := iniProfile.String("aws_access_key_id")
+ if len(id) == 0 {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+ nil)
+ }
+
+ secret := iniProfile.String("aws_secret_access_key")
+ if len(secret) == 0 {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+ nil)
+ }
+
+ // Default to empty string if not found
+ token := iniProfile.String("aws_session_token")
+
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ ProviderName: SharedCredsProviderName,
+ }, nil
+}
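+
+// An example credentials file section read by loadProfile (values illustrative):
+//
+//	[default]
+//	aws_access_key_id = AKID
+//	aws_secret_access_key = SECRET
+//	aws_session_token = TOKEN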
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+ if len(p.Filename) != 0 {
+ return p.Filename, nil
+ }
+
+ if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+ return p.Filename, nil
+ }
+
+ if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+ // Backwards compatibility of the home directory not found error being
+ // returned. This error is too verbose; a failure when opening the file
+ // would have been a better error to return.
+ return "", ErrSharedCredentialsHomeNotFound
+ }
+
+ p.Filename = shareddefaults.SharedCredentialsFilename()
+
+ return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+ if p.Profile == "" {
+ p.Profile = os.Getenv("AWS_PROFILE")
+ }
+ if p.Profile == "" {
+ p.Profile = "default"
+ }
+
+ return p.Profile
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 000000000..531139e39
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,55 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName provides a name of Static provider
+const StaticProviderName = "StaticProvider"
+
+var (
+ // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+ ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+ Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+ return NewCredentials(&StaticProvider{Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ }})
+}
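+
+// For example (values are illustrative):
+//
+//	creds := NewStaticCredentials("AKID", "SECRET", "TOKEN")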
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provider. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+ return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+ }
+
+ if len(s.Value.ProviderName) == 0 {
+ s.Value.ProviderName = StaticProviderName
+ }
+ return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+ return false
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 000000000..b6dbfd246
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,313 @@
+/*
+Package stscreds are credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+ // Initial credentials loaded from SDK's default credential chain. Such as
+ // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+ // Role. These credentials will be used to make the STS Assume Role API call.
+ sess := session.Must(session.NewSession())
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN.
+ creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with a MFA token you can either specify a MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short-lived operations that will not need to be refreshed, and when you do
+not want direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN using the MFA token code provided.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenCode = aws.String("00000000")
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenProvider = stscreds.StdinTokenProvider
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/internal/sdkrand"
+ "github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no
+// attempt to make prompts from stdin atomic across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely
+//
+// Will wait forever until something is provided on stdin.
+func StdinTokenProvider() (string, error) {
+ var v string
+ fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
+ _, err := fmt.Scanln(&v)
+
+ return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+ AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time in minutes that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+ credentials.Expiry
+
+ // STS client to make assume role request with.
+ Client AssumeRoler
+
+ // Role to be assumed.
+ RoleARN string
+
+ // Session name, if you wish to reuse the credentials elsewhere.
+ RoleSessionName string
+
+ // Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+ Duration time.Duration
+
+ // Optional ExternalID to pass along, defaults to nil if not set.
+ ExternalID *string
+
+ // The policy plain text must be 2048 bytes or shorter. However, an internal
+ // conversion compresses it into a packed binary format with a separate limit.
+ // The PackedPolicySize response element indicates by percentage how close to
+ // the upper size limit the policy is, with 100% equaling the maximum allowed
+ // size.
+ Policy *string
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ SerialNumber *string
+
+ // The value provided by the MFA device, if the trust policy of the role being
+ // assumed requires MFA (that is, if the policy includes a condition that tests
+ // for MFA). If the role being assumed requires MFA and if the TokenCode value
+ // is missing or expired, the AssumeRole call returns an "access denied" error.
+ //
+ // If SerialNumber is set and neither TokenCode nor TokenProvider are also
+ // set an error will be returned.
+ TokenCode *string
+
+ // Async method of providing MFA token code for assuming an IAM role with MFA.
+ // The value returned by the function will be used as the TokenCode in the Retrieve
+ // call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+ //
+ // This token provider will be called whenever the assumed role's
+ // credentials need to be refreshed when SerialNumber is also set and
+ // TokenCode is not set.
+ //
+ // If both TokenCode and TokenProvider are set, TokenProvider will be used and
+ // TokenCode is ignored.
+ TokenProvider func() (string, error)
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause request to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+
+ // MaxJitterFrac reduces the effective Duration of each credential requested
+ // by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
+ // have a value between 0 and 1. Any other value may lead to unexpected behavior.
+ // With a MaxJitterFrac value of 0 (the default), no jitter will be used.
+ //
+ // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
+ // AssumeRole call will be made with an arbitrary Duration between 27m and
+ // 30m.
+ //
+ // MaxJitterFrac should not be negative.
+ MaxJitterFrac float64
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: sts.New(c),
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation.
+//
+// Takes an AssumeRoler which can be satisfied by the STS client.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+ p := &AssumeRoleProvider{
+ Client: svc,
+ RoleARN: roleARN,
+ Duration: DefaultDuration,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+
+ // Apply defaults where parameters are not set.
+ if p.RoleSessionName == "" {
+ // Try to work out a role name that will hopefully end up unique.
+ p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
+ }
+ if p.Duration == 0 {
+ // Expire as often as AWS permits.
+ p.Duration = DefaultDuration
+ }
+ jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration))
+ input := &sts.AssumeRoleInput{
+ DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)),
+ RoleArn: aws.String(p.RoleARN),
+ RoleSessionName: aws.String(p.RoleSessionName),
+ ExternalId: p.ExternalID,
+ }
+ if p.Policy != nil {
+ input.Policy = p.Policy
+ }
+ if p.SerialNumber != nil {
+ if p.TokenCode != nil {
+ input.SerialNumber = p.SerialNumber
+ input.TokenCode = p.TokenCode
+ } else if p.TokenProvider != nil {
+ input.SerialNumber = p.SerialNumber
+ code, err := p.TokenProvider()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+ input.TokenCode = aws.String(code)
+ } else {
+ return credentials.Value{ProviderName: ProviderName},
+ awserr.New("AssumeRoleTokenNotAvailable",
+ "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
+ }
+ }
+
+ roleOutput, err := p.Client.AssumeRole(input)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ // We will proactively generate new credentials before they expire.
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: *roleOutput.Credentials.AccessKeyId,
+ SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+ SessionToken: *roleOutput.Credentials.SessionToken,
+ ProviderName: ProviderName,
+ }, nil
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/src/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
new file mode 100644
index 000000000..152d785b3
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -0,0 +1,46 @@
+// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
+// via UDP connection. Using the Start function will enable the reporting of
+// metrics on a given port. If Start is called again with different parameters,
+// a panic will occur.
+//
+// Pause can be called to pause any metrics publishing on a given port. Sessions
+// that have had their handlers modified via InjectHandlers may still be used.
+// However, the handlers will act as a no-op meaning no metrics will be published.
+//
+// Example:
+// r, err := csm.Start("clientID", ":31000")
+// if err != nil {
+// panic(fmt.Errorf("failed starting CSM: %v", err))
+// }
+//
+// sess, err := session.NewSession(&aws.Config{})
+// if err != nil {
+// panic(fmt.Errorf("failed loading session: %v", err))
+// }
+//
+// r.InjectHandlers(&sess.Handlers)
+//
+// client := s3.New(sess)
+// resp, err := client.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucket"),
+// Key: aws.String("key"),
+// })
+//
+// // Will pause monitoring
+// r.Pause()
+// resp, err = client.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucket"),
+// Key: aws.String("key"),
+// })
+//
+// // Resume monitoring
+// r.Continue()
+//
+// Start returns a Reporter that is used to enable or disable monitoring. If
+// access to the Reporter is required later, calling Get will return the Reporter
+// singleton.
+//
+// Example:
+// r := csm.Get()
+// r.Continue()
+package csm
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/src/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 000000000..2f0c6eac9
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,67 @@
+package csm
+
+import (
+ "fmt"
+ "sync"
+)
+
+var (
+ lock sync.Mutex
+)
+
+// Client side metric handler names
+const (
+ APICallMetricHandlerName = "awscsm.SendAPICallMetric"
+ APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
+// Start will start a long-running goroutine to capture
+// client side metrics. Calling Start multiple times will only
+// start the metric listener once and will panic if a different
+// client ID or port is passed in.
+//
+// Example:
+// r, err := csm.Start("clientID", "127.0.0.1:8094")
+// if err != nil {
+// panic(fmt.Errorf("expected no error, but received %v", err))
+// }
+// sess := session.NewSession()
+// r.InjectHandlers(sess.Handlers)
+//
+// svc := s3.New(sess)
+// out, err := svc.GetObject(&s3.GetObjectInput{
+// Bucket: aws.String("bucket"),
+// Key: aws.String("key"),
+// })
+func Start(clientID string, url string) (*Reporter, error) {
+ lock.Lock()
+ defer lock.Unlock()
+
+ if sender == nil {
+ sender = newReporter(clientID, url)
+ } else {
+ if sender.clientID != clientID {
+ panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
+ }
+
+ if sender.url != url {
+ panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
+ }
+ }
+
+ if err := connect(url); err != nil {
+ sender = nil
+ return nil, err
+ }
+
+ return sender, nil
+}
+
+// Get will return a reporter if one exists; if one does not exist, nil will
+// be returned.
+func Get() *Reporter {
+ lock.Lock()
+ defer lock.Unlock()
+
+ return sender
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/src/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
new file mode 100644
index 000000000..5bacc791a
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
@@ -0,0 +1,109 @@
+package csm
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+type metricTime time.Time
+
+func (t metricTime) MarshalJSON() ([]byte, error) {
+ ns := time.Duration(time.Time(t).UnixNano())
+ return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil
+}
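+
+// For example, 2019-01-01T00:00:00Z marshals to 1546300800000 (milliseconds
+// since the Unix epoch).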
+
+type metric struct {
+ ClientID *string `json:"ClientId,omitempty"`
+ API *string `json:"Api,omitempty"`
+ Service *string `json:"Service,omitempty"`
+ Timestamp *metricTime `json:"Timestamp,omitempty"`
+ Type *string `json:"Type,omitempty"`
+ Version *int `json:"Version,omitempty"`
+
+ AttemptCount *int `json:"AttemptCount,omitempty"`
+ Latency *int `json:"Latency,omitempty"`
+
+ Fqdn *string `json:"Fqdn,omitempty"`
+ UserAgent *string `json:"UserAgent,omitempty"`
+ AttemptLatency *int `json:"AttemptLatency,omitempty"`
+
+ SessionToken *string `json:"SessionToken,omitempty"`
+ Region *string `json:"Region,omitempty"`
+ AccessKey *string `json:"AccessKey,omitempty"`
+ HTTPStatusCode *int `json:"HttpStatusCode,omitempty"`
+ XAmzID2 *string `json:"XAmzId2,omitempty"`
+ XAmzRequestID *string `json:"XAmznRequestId,omitempty"`
+
+ AWSException *string `json:"AwsException,omitempty"`
+ AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"`
+ SDKException *string `json:"SdkException,omitempty"`
+ SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
+
+ FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"`
+ FinalAWSException *string `json:"FinalAwsException,omitempty"`
+ FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"`
+ FinalSDKException *string `json:"FinalSdkException,omitempty"`
+ FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"`
+
+ DestinationIP *string `json:"DestinationIp,omitempty"`
+ ConnectionReused *int `json:"ConnectionReused,omitempty"`
+
+ AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"`
+ ConnectLatency *int `json:"ConnectLatency,omitempty"`
+ RequestLatency *int `json:"RequestLatency,omitempty"`
+ DNSLatency *int `json:"DnsLatency,omitempty"`
+ TCPLatency *int `json:"TcpLatency,omitempty"`
+ SSLLatency *int `json:"SslLatency,omitempty"`
+
+ MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
+}
+
+func (m *metric) TruncateFields() {
+ m.ClientID = truncateString(m.ClientID, 255)
+ m.UserAgent = truncateString(m.UserAgent, 256)
+
+ m.AWSException = truncateString(m.AWSException, 128)
+ m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512)
+
+ m.SDKException = truncateString(m.SDKException, 128)
+ m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512)
+
+ m.FinalAWSException = truncateString(m.FinalAWSException, 128)
+ m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512)
+
+ m.FinalSDKException = truncateString(m.FinalSDKException, 128)
+ m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512)
+}
+
+func truncateString(v *string, l int) *string {
+ if v != nil && len(*v) > l {
+ nv := (*v)[:l]
+ return &nv
+ }
+
+ return v
+}
+
+func (m *metric) SetException(e metricException) {
+ switch te := e.(type) {
+ case awsException:
+ m.AWSException = aws.String(te.exception)
+ m.AWSExceptionMessage = aws.String(te.message)
+ case sdkException:
+ m.SDKException = aws.String(te.exception)
+ m.SDKExceptionMessage = aws.String(te.message)
+ }
+}
+
+func (m *metric) SetFinalException(e metricException) {
+ switch te := e.(type) {
+ case awsException:
+ m.FinalAWSException = aws.String(te.exception)
+ m.FinalAWSExceptionMessage = aws.String(te.message)
+ case sdkException:
+ m.FinalSDKException = aws.String(te.exception)
+ m.FinalSDKExceptionMessage = aws.String(te.message)
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/src/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
new file mode 100644
index 000000000..514fc3739
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
@@ -0,0 +1,54 @@
+package csm
+
+import (
+ "sync/atomic"
+)
+
+const (
+ runningEnum = iota
+ pausedEnum
+)
+
+var (
+ // MetricsChannelSize of metrics to hold in the channel
+ MetricsChannelSize = 100
+)
+
+type metricChan struct {
+ ch chan metric
+ paused int64
+}
+
+func newMetricChan(size int) metricChan {
+ return metricChan{
+ ch: make(chan metric, size),
+ }
+}
+
+func (ch *metricChan) Pause() {
+ atomic.StoreInt64(&ch.paused, pausedEnum)
+}
+
+func (ch *metricChan) Continue() {
+ atomic.StoreInt64(&ch.paused, runningEnum)
+}
+
+func (ch *metricChan) IsPaused() bool {
+ v := atomic.LoadInt64(&ch.paused)
+ return v == pausedEnum
+}
+
+// Push will push metrics to the metric channel if the channel
+// is not paused
+func (ch *metricChan) Push(m metric) bool {
+ if ch.IsPaused() {
+ return false
+ }
+
+ select {
+ case ch.ch <- m:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/src/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
new file mode 100644
index 000000000..54a99280c
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
@@ -0,0 +1,26 @@
+package csm
+
+type metricException interface {
+ Exception() string
+ Message() string
+}
+
+type requestException struct {
+ exception string
+ message string
+}
+
+func (e requestException) Exception() string {
+ return e.exception
+}
+func (e requestException) Message() string {
+ return e.message
+}
+
+type awsException struct {
+ requestException
+}
+
+type sdkException struct {
+ requestException
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/src/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
new file mode 100644
index 000000000..d9aa5b062
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
@@ -0,0 +1,260 @@
+package csm
+
+import (
+ "encoding/json"
+ "net"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const (
+ // DefaultPort is used when no port is specified
+ DefaultPort = "31000"
+)
+
+// Reporter will gather metrics of API requests made and
+// send those metrics to the CSM endpoint.
+type Reporter struct {
+ clientID string
+ url string
+ conn net.Conn
+ metricsCh metricChan
+ done chan struct{}
+}
+
+var (
+ sender *Reporter
+)
+
+func connect(url string) error {
+ const network = "udp"
+ if err := sender.connect(network, url); err != nil {
+ return err
+ }
+
+ if sender.done == nil {
+ sender.done = make(chan struct{})
+ go sender.start()
+ }
+
+ return nil
+}
+
+func newReporter(clientID, url string) *Reporter {
+ return &Reporter{
+ clientID: clientID,
+ url: url,
+ metricsCh: newMetricChan(MetricsChannelSize),
+ }
+}
+
+func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
+ if rep == nil {
+ return
+ }
+
+ now := time.Now()
+ creds, _ := r.Config.Credentials.Get()
+
+ m := metric{
+ ClientID: aws.String(rep.clientID),
+ API: aws.String(r.Operation.Name),
+ Service: aws.String(r.ClientInfo.ServiceID),
+ Timestamp: (*metricTime)(&now),
+ UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
+ Region: r.Config.Region,
+ Type: aws.String("ApiCallAttempt"),
+ Version: aws.Int(1),
+
+ XAmzRequestID: aws.String(r.RequestID),
+
+ AttemptCount: aws.Int(r.RetryCount + 1),
+ AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
+ AccessKey: aws.String(creds.AccessKeyID),
+ }
+
+ if r.HTTPResponse != nil {
+ m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
+ }
+
+ if r.Error != nil {
+ if awserr, ok := r.Error.(awserr.Error); ok {
+ m.SetException(getMetricException(awserr))
+ }
+ }
+
+ m.TruncateFields()
+ rep.metricsCh.Push(m)
+}
+
+func getMetricException(err awserr.Error) metricException {
+ msg := err.Error()
+ code := err.Code()
+
+ switch code {
+ case "RequestError",
+ request.ErrCodeSerialization,
+ request.CanceledErrorCode:
+ return sdkException{
+ requestException{exception: code, message: msg},
+ }
+ default:
+ return awsException{
+ requestException{exception: code, message: msg},
+ }
+ }
+}
+
+func (rep *Reporter) sendAPICallMetric(r *request.Request) {
+ if rep == nil {
+ return
+ }
+
+ now := time.Now()
+ m := metric{
+ ClientID: aws.String(rep.clientID),
+ API: aws.String(r.Operation.Name),
+ Service: aws.String(r.ClientInfo.ServiceID),
+ Timestamp: (*metricTime)(&now),
+ UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
+ Type: aws.String("ApiCall"),
+ AttemptCount: aws.Int(r.RetryCount + 1),
+ Region: r.Config.Region,
+ Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
+ XAmzRequestID: aws.String(r.RequestID),
+ MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
+ }
+
+ if r.HTTPResponse != nil {
+ m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
+ }
+
+ if r.Error != nil {
+ if awserr, ok := r.Error.(awserr.Error); ok {
+ m.SetFinalException(getMetricException(awserr))
+ }
+ }
+
+ m.TruncateFields()
+
+ // TODO: Probably want to figure something out for logging dropped
+ // metrics
+ rep.metricsCh.Push(m)
+}
+
+func (rep *Reporter) connect(network, url string) error {
+ if rep.conn != nil {
+ rep.conn.Close()
+ }
+
+ conn, err := net.Dial(network, url)
+ if err != nil {
+ return awserr.New("UDPError", "Could not connect", err)
+ }
+
+ rep.conn = conn
+
+ return nil
+}
+
+func (rep *Reporter) close() {
+ if rep.done != nil {
+ close(rep.done)
+ }
+
+ rep.metricsCh.Pause()
+}
+
+func (rep *Reporter) start() {
+ defer func() {
+ rep.metricsCh.Pause()
+ }()
+
+ for {
+ select {
+ case <-rep.done:
+ rep.done = nil
+ return
+ case m := <-rep.metricsCh.ch:
+ // TODO: What to do with this error? Probably should just log
+ b, err := json.Marshal(m)
+ if err != nil {
+ continue
+ }
+
+ rep.conn.Write(b)
+ }
+ }
+}
+
+// Pause will pause the metric channel preventing any new metrics from
+// being added.
+func (rep *Reporter) Pause() {
+ lock.Lock()
+ defer lock.Unlock()
+
+ if rep == nil {
+ return
+ }
+
+ rep.close()
+}
+
+// Continue will reopen the metric channel and allow for monitoring
+// to be resumed.
+func (rep *Reporter) Continue() {
+ lock.Lock()
+ defer lock.Unlock()
+ if rep == nil {
+ return
+ }
+
+ if !rep.metricsCh.IsPaused() {
+ return
+ }
+
+ rep.metricsCh.Continue()
+}
+
+// InjectHandlers will enable client side metrics and inject the proper
+// handlers to handle how metrics are sent.
+//
+// Example:
+// // Start must be called in order to inject the correct handlers
+// r, err := csm.Start("clientID", "127.0.0.1:8094")
+// if err != nil {
+// panic(fmt.Errorf("expected no error, but received %v", err))
+// }
+//
+// sess := session.NewSession()
+// r.InjectHandlers(&sess.Handlers)
+//
+// // create a new service client with our client side metric session
+// svc := s3.New(sess)
+func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
+ if rep == nil {
+ return
+ }
+
+ handlers.Complete.PushFrontNamed(request.NamedHandler{
+ Name: APICallMetricHandlerName,
+ Fn: rep.sendAPICallMetric,
+ })
+
+ handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
+ Name: APICallAttemptMetricHandlerName,
+ Fn: rep.sendAPICallAttemptMetric,
+ })
+}
+
+// boolIntValue return 1 for true and 0 for false.
+func boolIntValue(b bool) int {
+ if b {
+ return 1
+ }
+
+ return 0
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/src/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 000000000..23bb639e0
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,207 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; use session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+ cfg := Config()
+ handlers := Handlers()
+ cfg.Credentials = CredChain(cfg, handlers)
+
+ return Defaults{
+ Config: cfg,
+ Handlers: handlers,
+ }
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+ return aws.NewConfig().
+ WithCredentials(credentials.AnonymousCredentials).
+ WithRegion(os.Getenv("AWS_REGION")).
+ WithHTTPClient(http.DefaultClient).
+ WithMaxRetries(aws.UseServiceDefaultRetries).
+ WithLogger(aws.NewDefaultLogger()).
+ WithLogLevel(aws.LogOff).
+ WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+ var handlers request.Handlers
+
+ handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+ handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+ handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+ handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
+ handlers.Build.AfterEachFn = request.HandlerListStopOnError
+ handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+ handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+ handlers.Send.PushBackNamed(corehandlers.SendHandler)
+ handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+ handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+ return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but
+// it is available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+ return credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: CredProviders(cfg, handlers),
+ })
+}
+
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// For applications that need to use some other provider (for example use
+// different environment variables for legacy reasons) but still fall back
+// on the default chain of providers. This allows the default chain to be
+// automatically updated.
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+ return []credentials.Provider{
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ RemoteCredProvider(*cfg, handlers),
+ }
+}
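+
+// For example, to prepend a custom provider while keeping the default fallbacks
+// (a sketch; myProvider is a hypothetical credentials.Provider):
+//
+//	providers := append([]credentials.Provider{myProvider}, CredProviders(cfg, handlers)...)
+//	creds := credentials.NewCredentials(&credentials.ChainProvider{Providers: providers})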
+
+const (
+ httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+ httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+ if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
+ return localHTTPCredProvider(cfg, handlers, u)
+ }
+
+ if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
+ u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
+ return httpCredProvider(cfg, handlers, u)
+ }
+
+ return ec2RoleProvider(cfg, handlers)
+}
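+
+// For example (a sketch, assuming shareddefaults.ECSCredsProviderEnvVar names
+// the AWS_CONTAINER_CREDENTIALS_RELATIVE_URI variable): a relative URI of
+// "/v2/credentials/abcd" is joined to shareddefaults.ECSContainerCredentialsURI
+// and served by the HTTP credentials provider above.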
+
+var lookupHostFn = net.LookupHost
+
+func isLoopbackHost(host string) (bool, error) {
+ ip := net.ParseIP(host)
+ if ip != nil {
+ return ip.IsLoopback(), nil
+ }
+
+ // Host is not an ip, perform lookup
+ addrs, err := lookupHostFn(host)
+ if err != nil {
+ return false, err
+ }
+ for _, addr := range addrs {
+ if !net.ParseIP(addr).IsLoopback() {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+ var errMsg string
+
+ parsed, err := url.Parse(u)
+ if err != nil {
+ errMsg = fmt.Sprintf("invalid URL, %v", err)
+ } else {
+ host := aws.URLHostname(parsed)
+ if len(host) == 0 {
+ errMsg = "unable to parse host from local HTTP cred provider URL"
+ } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
+ errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr)
+ } else if !isLoopback {
+ errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host)
+ }
+ }
+
+ if len(errMsg) > 0 {
+ if cfg.Logger != nil {
+ cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
+ }
+ return credentials.ErrorProvider{
+ Err: awserr.New("CredentialsEndpointError", errMsg, err),
+ ProviderName: endpointcreds.ProviderName,
+ }
+ }
+
+ return httpCredProvider(cfg, handlers, u)
+}
+
+func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+ return endpointcreds.NewProviderClient(cfg, handlers, u,
+ func(p *endpointcreds.Provider) {
+ p.ExpiryWindow = 5 * time.Minute
+ p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
+ },
+ )
+}
+
+func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+ resolver := cfg.EndpointResolver
+ if resolver == nil {
+ resolver = endpoints.DefaultResolver()
+ }
+
+ e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
+ return &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
+ ExpiryWindow: 5 * time.Minute,
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/src/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
new file mode 100644
index 000000000..ca0ee1dcc
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
@@ -0,0 +1,27 @@
+package defaults
+
+import (
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return shareddefaults.SharedCredentialsFilename()
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return shareddefaults.SharedConfigFilename()
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/src/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 000000000..4fcb61618
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operation parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// API use. These utilities make getting a pointer of the scalar, and dereferencing
+// a pointer easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So to get
+// a *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+// var strPtr *string
+//
+// // Without the SDK's conversion functions
+// str := "my string"
+// strPtr = &str
+//
+// // With the SDK's conversion functions
+// strPtr = aws.String("my string")
+//
+// // Convert *string to string value
+// str = aws.StringValue(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// map and slice for commonly used types in API parameters. The map and slice
+// conversion functions use a similar naming pattern to the scalar conversion
+// functions.
+//
+// var strPtrs []*string
+// var strs []string = []string{"Go", "Gophers", "Go"}
+//
+// // Convert []string to []*string
+// strPtrs = aws.StringSlice(strs)
+//
+// // Convert []*string to []string
+// strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 000000000..2c8d5f56d
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,169 @@
+package ec2metadata
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetMetadata",
+ HTTPMethod: "GET",
+ HTTPPath: sdkuri.PathJoin("/meta-data", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ err := req.Send()
+
+ return output.Content, err
+}
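+
+// For example (illustrative; svc is an *EC2Metadata client):
+//
+//	instanceID, err := svc.GetMetadata("instance-id")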
+
+// GetUserData returns the userdata that was configured for the instance. If
+// no user-data was set up for the EC2 instance, a "NotFoundError" error
+// code will be returned.
+func (c *EC2Metadata) GetUserData() (string, error) {
+ op := &request.Operation{
+ Name: "GetUserData",
+ HTTPMethod: "GET",
+ HTTPPath: "/user-data",
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == http.StatusNotFound {
+ r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
+ }
+ })
+ err := req.Send()
+
+ return output.Content, err
+}
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetDynamicData",
+ HTTPMethod: "GET",
+ HTTPPath: sdkuri.PathJoin("/dynamic", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+ err := req.Send()
+
+ return output.Content, err
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. Error is returned if the request fails or is unable to parse
+// the response.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+ resp, err := c.GetDynamicData("instance-identity/document")
+ if err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 instance identity document", err)
+ }
+
+ doc := EC2InstanceIdentityDocument{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New(request.ErrCodeSerialization,
+ "failed to decode EC2 instance identity document", err)
+ }
+
+ return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+ resp, err := c.GetMetadata("iam/info")
+ if err != nil {
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 IAM info", err)
+ }
+
+ info := EC2IAMInfo{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+ return EC2IAMInfo{},
+ awserr.New(request.ErrCodeSerialization,
+ "failed to decode EC2 IAM info", err)
+ }
+
+ if info.Code != "Success" {
+ errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataError", errMsg, nil)
+ }
+
+ return info, nil
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+ resp, err := c.GetMetadata("placement/availability-zone")
+ if err != nil {
+ return "", err
+ }
+
+ if len(resp) == 0 {
+ return "", awserr.New("EC2MetadataError", "invalid Region response", nil)
+ }
+
+ // returns region without the suffix. Eg: us-west-2a becomes us-west-2
+ return resp[:len(resp)-1], nil
+}
+
+// Available returns if the application has access to the EC2 Metadata service.
+// Can be used to determine if the application is running within an EC2 Instance and
+// the metadata service is available.
+func (c *EC2Metadata) Available() bool {
+ if _, err := c.GetMetadata("instance-id"); err != nil {
+ return false
+ }
+
+ return true
+}
+
+// An EC2IAMInfo provides the shape for unmarshaling
+// an IAM info from the metadata API
+type EC2IAMInfo struct {
+ Code string
+ LastUpdated time.Time
+ InstanceProfileArn string
+ InstanceProfileID string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+ DevpayProductCodes []string `json:"devpayProductCodes"`
+ AvailabilityZone string `json:"availabilityZone"`
+ PrivateIP string `json:"privateIp"`
+ Version string `json:"version"`
+ Region string `json:"region"`
+ InstanceID string `json:"instanceId"`
+ BillingProducts []string `json:"billingProducts"`
+ InstanceType string `json:"instanceType"`
+ AccountID string `json:"accountId"`
+ PendingTime time.Time `json:"pendingTime"`
+ ImageID string `json:"imageId"`
+ KernelID string `json:"kernelId"`
+ RamdiskID string `json:"ramdiskId"`
+ Architecture string `json:"architecture"`
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 000000000..f0c1d31e7
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,152 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+//
+// This package's client can be disabled completely by setting the environment
+// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
+// true instructs the SDK to disable the EC2 Metadata client. The client cannot
+// be used while the environment variable is set to true, (case insensitive).
+package ec2metadata
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
+
+// An EC2Metadata is an EC2 Metadata service client.
+type EC2Metadata struct {
+ *client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+// // Create a EC2Metadata client from just a session.
+// svc := ec2metadata.New(mySession)
+//
+// // Create a EC2Metadata client with additional configuration
+// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If an unmodified HTTP client from the stdlib default, or no client at all,
+// is provided, the EC2Metadata HTTP client's timeout will be shortened.
+// To disable this behavior set Config.EC2MetadataDisableTimeoutOverride to
+// true; the override is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+ if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+ // If the http client is unmodified and this feature is not disabled
+ // set custom timeouts for EC2Metadata requests.
+ cfg.HTTPClient = &http.Client{
+ // use a shorter timeout than default because the metadata
+ // service is local if it is running, and to fail faster
+ // if not running on an ec2 instance.
+ Timeout: 5 * time.Second,
+ }
+ }
+
+ svc := &EC2Metadata{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceName,
+ Endpoint: endpoint,
+ APIVersion: "latest",
+ },
+ handlers,
+ ),
+ }
+
+ svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+ svc.Handlers.Validate.Clear()
+ svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	// Disable the EC2 Metadata client if the environment variable is set.
+	// This short-circuits the client so that every request fails to send.
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
+ svc.Handlers.Send.SwapNamed(request.NamedHandler{
+ Name: corehandlers.SendHandler.Name,
+ Fn: func(r *request.Request) {
+ r.HTTPResponse = &http.Response{
+ Header: http.Header{},
+ }
+ r.Error = awserr.New(
+ request.CanceledErrorCode,
+ "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
+ nil)
+ },
+ })
+ }
+
+ // Add additional options to the service config
+ for _, option := range opts {
+ option(svc.Client)
+ }
+
+ return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+ return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+ Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err)
+ return
+ }
+
+ if data, ok := r.Data.(*metadataOutput); ok {
+ data.Content = b.String()
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err)
+ return
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 000000000..87b9ff3ff
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,188 @@
+package endpoints
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// DecodeModelOptions are the options for how the endpoints model definition
+// is decoded.
+type DecodeModelOptions struct {
+ SkipCustomizations bool
+}
+
+// Set combines all of the option functions together.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// an endpoint Resolver. If the file format is not supported, or an error
+// occurs while unmarshaling the model, an error will be returned.
+//
+// Casting the return value of this func to an EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+// resolver, err := endpoints.DecodeModel(reader)
+//
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+ var opts DecodeModelOptions
+ opts.Set(optFns...)
+
+ // Get the version of the partition file to determine what
+ // unmarshaling model to use.
+ modelDef := modelDefinition{}
+ if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ var version string
+ if b, ok := modelDef["version"]; ok {
+ version = string(b)
+ } else {
+ return nil, newDecodeModelError("endpoints version not found in model", nil)
+ }
+
+ if version == "3" {
+ return decodeV3Endpoints(modelDef, opts)
+ }
+
+ return nil, newDecodeModelError(
+ fmt.Sprintf("endpoints version %s, not supported", version), nil)
+}
+
+func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
+ b, ok := modelDef["partitions"]
+ if !ok {
+ return nil, newDecodeModelError("endpoints model missing partitions", nil)
+ }
+
+ ps := partitions{}
+ if err := json.Unmarshal(b, &ps); err != nil {
+ return nil, newDecodeModelError("failed to decode endpoints model", err)
+ }
+
+ if opts.SkipCustomizations {
+ return ps, nil
+ }
+
+ // Customization
+ for i := 0; i < len(ps); i++ {
+ p := &ps[i]
+ custAddEC2Metadata(p)
+ custAddS3DualStack(p)
+ custRmIotDataService(p)
+ custFixAppAutoscalingChina(p)
+ custFixAppAutoscalingUsGov(p)
+ }
+
+ return ps, nil
+}
+
+func custAddS3DualStack(p *partition) {
+ if p.ID != "aws" {
+ return
+ }
+
+ custAddDualstack(p, "s3")
+ custAddDualstack(p, "s3-control")
+}
+
+func custAddDualstack(p *partition, svcName string) {
+ s, ok := p.Services[svcName]
+ if !ok {
+ return
+ }
+
+ s.Defaults.HasDualStack = boxedTrue
+ s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
+
+ p.Services[svcName] = s
+}
+
+func custAddEC2Metadata(p *partition) {
+ p.Services["ec2metadata"] = service{
+ IsRegionalized: boxedFalse,
+ PartitionEndpoint: "aws-global",
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ }
+}
+
+func custRmIotDataService(p *partition) {
+ delete(p.Services, "data.iot")
+}
+
+func custFixAppAutoscalingChina(p *partition) {
+ if p.ID != "aws-cn" {
+ return
+ }
+
+ const serviceName = "application-autoscaling"
+ s, ok := p.Services[serviceName]
+ if !ok {
+ return
+ }
+
+ const expectHostname = `autoscaling.{region}.amazonaws.com`
+	if e, a := expectHostname, s.Defaults.Hostname; e != a {
+ fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
+ return
+ }
+
+ s.Defaults.Hostname = expectHostname + ".cn"
+ p.Services[serviceName] = s
+}
+
+func custFixAppAutoscalingUsGov(p *partition) {
+ if p.ID != "aws-us-gov" {
+ return
+ }
+
+ const serviceName = "application-autoscaling"
+ s, ok := p.Services[serviceName]
+ if !ok {
+ return
+ }
+
+ if a := s.Defaults.CredentialScope.Service; a != "" {
+ fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
+ return
+ }
+
+ if a := s.Defaults.Hostname; a != "" {
+ fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
+ return
+ }
+
+ s.Defaults.CredentialScope.Service = "application-autoscaling"
+ s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com"
+
+ p.Services[serviceName] = s
+}
+
+type decodeModelError struct {
+ awsError
+}
+
+func newDecodeModelError(msg string, err error) decodeModelError {
+ return decodeModelError{
+ awsError: awserr.New("DecodeEndpointsModelError", msg, err),
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
new file mode 100644
index 000000000..6975dc4ba
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -0,0 +1,4460 @@
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+// Partition identifiers
+const (
+ AwsPartitionID = "aws" // AWS Standard partition.
+ AwsCnPartitionID = "aws-cn" // AWS China partition.
+ AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
+)
+
+// AWS Standard partition's regions.
+const (
+ ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong).
+ ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
+ ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
+ ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai).
+ ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
+ ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
+ CaCentral1RegionID = "ca-central-1" // Canada (Central).
+ EuCentral1RegionID = "eu-central-1" // EU (Frankfurt).
+ EuNorth1RegionID = "eu-north-1" // EU (Stockholm).
+ EuWest1RegionID = "eu-west-1" // EU (Ireland).
+ EuWest2RegionID = "eu-west-2" // EU (London).
+ EuWest3RegionID = "eu-west-3" // EU (Paris).
+ SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
+ UsEast1RegionID = "us-east-1" // US East (N. Virginia).
+ UsEast2RegionID = "us-east-2" // US East (Ohio).
+ UsWest1RegionID = "us-west-1" // US West (N. California).
+ UsWest2RegionID = "us-west-2" // US West (Oregon).
+)
+
+// AWS China partition's regions.
+const (
+ CnNorth1RegionID = "cn-north-1" // China (Beijing).
+ CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia).
+)
+
+// AWS GovCloud (US) partition's regions.
+const (
+ UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East).
+ UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US).
+)
+
+// DefaultResolver returns an Endpoint resolver that will be able
+// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// Use DefaultPartitions() to get the list of the default partitions.
+func DefaultResolver() Resolver {
+ return defaultPartitions
+}
+
+// DefaultPartitions returns a list of the partitions the SDK is bundled
+// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// partitions := endpoints.DefaultPartitions
+// for _, p := range partitions {
+// // ... inspect partitions
+// }
+func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
+}
+
+var defaultPartitions = partitions{
+ awsPartition,
+ awscnPartition,
+ awsusgovPartition,
+}
+
+// AwsPartition returns the Resolver for AWS Standard.
+func AwsPartition() Partition {
+ return awsPartition.Partition()
+}
+
+var awsPartition = partition{
+ ID: "aws",
+ Name: "AWS Standard",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "ap-east-1": region{
+ Description: "Asia Pacific (Hong Kong)",
+ },
+ "ap-northeast-1": region{
+ Description: "Asia Pacific (Tokyo)",
+ },
+ "ap-northeast-2": region{
+ Description: "Asia Pacific (Seoul)",
+ },
+ "ap-south-1": region{
+ Description: "Asia Pacific (Mumbai)",
+ },
+ "ap-southeast-1": region{
+ Description: "Asia Pacific (Singapore)",
+ },
+ "ap-southeast-2": region{
+ Description: "Asia Pacific (Sydney)",
+ },
+ "ca-central-1": region{
+ Description: "Canada (Central)",
+ },
+ "eu-central-1": region{
+ Description: "EU (Frankfurt)",
+ },
+ "eu-north-1": region{
+ Description: "EU (Stockholm)",
+ },
+ "eu-west-1": region{
+ Description: "EU (Ireland)",
+ },
+ "eu-west-2": region{
+ Description: "EU (London)",
+ },
+ "eu-west-3": region{
+ Description: "EU (Paris)",
+ },
+ "sa-east-1": region{
+ Description: "South America (Sao Paulo)",
+ },
+ "us-east-1": region{
+ Description: "US East (N. Virginia)",
+ },
+ "us-east-2": region{
+ Description: "US East (Ohio)",
+ },
+ "us-west-1": region{
+ Description: "US West (N. California)",
+ },
+ "us-west-2": region{
+ Description: "US West (Oregon)",
+ },
+ },
+ Services: services{
+ "a4b": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "acm": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "acm-pca": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{
+ Hostname: "api.ecr.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ "ap-northeast-1": endpoint{
+ Hostname: "api.ecr.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "api.ecr.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "api.ecr.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "api.ecr.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "api.ecr.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "ca-central-1": endpoint{
+ Hostname: "api.ecr.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "api.ecr.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-north-1": endpoint{
+ Hostname: "api.ecr.eu-north-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "api.ecr.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "api.ecr.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "eu-west-3": endpoint{
+ Hostname: "api.ecr.eu-west-3.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "api.ecr.sa-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "api.ecr.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "api.ecr.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{
+ Hostname: "api.ecr.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "api.ecr.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "api.mediatailor": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "api.pricing": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "pricing",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-south-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "api.sagemaker": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appmesh": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appstream2": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Service: "appstream",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "appsync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "athena": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "autoscaling-plans": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "autoscaling-plans",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "backup": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "batch": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "budgets": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "budgets.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "ce": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "ce.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "chime": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Defaults: endpoint{
+ SSLCommonName: "service.chime.aws.amazon.com",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "service.chime.aws.amazon.com",
+ Protocols: []string{"https"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloud9": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "clouddirectory": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudfront": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "cloudfront.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudhsmv2": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "cloudhsm",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudsearch": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codebuild": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "codebuild-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "codebuild-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "codebuild-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "codebuild-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "codecommit": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips": endpoint{
+ Hostname: "codecommit-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "codedeploy-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "codedeploy-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "codepipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codestar": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-identity": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-idp": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cognito-sync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "comprehend": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "comprehendmedical": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "cur": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "data.mediastore": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "datapipeline": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "datasync": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "dax": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "devicefarm": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "discovery": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "docdb": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "rds.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "rds.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "rds.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "rds.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "rds.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "rds.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "rds.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips": endpoint{
+ Hostname: "elasticache-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticfilesystem": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.{service}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{region}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "elastictranscoder": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "email": service{
+
+ Endpoints: endpoints{
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "entitlement.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips": endpoint{
+ Hostname: "es-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "fms": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "fsx": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "gamelift": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "glue": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "greengrass": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "groundstation": service{
+
+ Endpoints: endpoints{
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "health": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "iam.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "importexport": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "importexport.amazonaws.com",
+ SignatureVersions: []string{"v2", "v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ Service: "IngestionService",
+ },
+ },
+ },
+ },
+ "inspector": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iotanalytics": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "iotthingsgraph": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "iotthingsgraph",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kafka": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesisanalytics": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kinesisvideo": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ProdFips": endpoint{
+ Hostname: "kms-fips.ca-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "license-manager": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "lightsail": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "machinelearning": service{
+
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ },
+ },
+ "marketplacecommerceanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "mediaconnect": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediaconvert": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "medialive": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediapackage": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mediastore": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mgh": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
+ "mobileanalytics": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "models.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mq": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "mturk-requester": service{
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "sandbox": endpoint{
+ Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
+ },
+ "us-east-1": endpoint{},
+ },
+ },
+ "neptune": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "rds.ap-northeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "rds.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "rds.ap-south-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "rds.ap-southeast-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "rds.ap-southeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "rds.eu-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "rds.eu-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "rds.eu-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "rds.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "rds.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "rds.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "opsworks": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "opsworks-cm": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "organizations": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "organizations.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "pinpoint": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "mobiletargeting",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "projects.iot1click": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ram": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "{service}.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "rekognition": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "resource-groups": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "robomaker": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "route53": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "route53.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "route53domains": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "route53resolver": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "runtime.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "runtime.sagemaker": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "s3": service{
+ PartitionEndpoint: "us-east-1",
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{
+ Hostname: "s3.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{
+ Hostname: "s3.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "s3.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{
+ Hostname: "s3.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "s3-external-1": endpoint{
+ Hostname: "s3-external-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "s3.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-1": endpoint{
+ Hostname: "s3.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{
+ Hostname: "s3.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ "us-west-2": endpoint{
+ Hostname: "s3.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+
+ HasDualStack: boxedTrue,
+ DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Hostname: "s3-control.ap-northeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-1",
+ },
+ },
+ "ap-northeast-2": endpoint{
+ Hostname: "s3-control.ap-northeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{
+ Hostname: "s3-control.ap-south-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-south-1",
+ },
+ },
+ "ap-southeast-1": endpoint{
+ Hostname: "s3-control.ap-southeast-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-1",
+ },
+ },
+ "ap-southeast-2": endpoint{
+ Hostname: "s3-control.ap-southeast-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ap-southeast-2",
+ },
+ },
+ "ca-central-1": endpoint{
+ Hostname: "s3-control.ca-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "ca-central-1",
+ },
+ },
+ "eu-central-1": endpoint{
+ Hostname: "s3-control.eu-central-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-central-1",
+ },
+ },
+ "eu-north-1": endpoint{
+ Hostname: "s3-control.eu-north-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-north-1",
+ },
+ },
+ "eu-west-1": endpoint{
+ Hostname: "s3-control.eu-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-1",
+ },
+ },
+ "eu-west-2": endpoint{
+ Hostname: "s3-control.eu-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-2",
+ },
+ },
+ "eu-west-3": endpoint{
+ Hostname: "s3-control.eu-west-3.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "eu-west-3",
+ },
+ },
+ "sa-east-1": endpoint{
+ Hostname: "s3-control.sa-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "sa-east-1",
+ },
+ },
+ "us-east-1": endpoint{
+ Hostname: "s3-control.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{
+ Hostname: "s3-control.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-east-2-fips": endpoint{
+ Hostname: "s3-control-fips.us-east-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{
+ Hostname: "s3-control.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{
+ Hostname: "s3-control.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "us-west-2-fips": endpoint{
+ Hostname: "s3-control-fips.us-west-2.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "sdb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v2"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ Hostname: "sdb.amazonaws.com",
+ },
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "secretsmanager": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "secretsmanager-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "securityhub": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "serverlessrepo": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-northeast-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-south-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-southeast-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ap-southeast-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "ca-central-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-central-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-north-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "eu-west-3": endpoint{
+ Protocols: []string{"https"},
+ },
+ "sa-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-east-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-east-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ "us-west-2": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "servicecatalog": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "servicecatalog-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "servicediscovery": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "shield": service{
+ IsRegionalized: boxedFalse,
+ Defaults: endpoint{
+ SSLCommonName: "shield.us-east-1.amazonaws.com",
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "fips-us-east-1": endpoint{
+ Hostname: "sqs-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "fips-us-east-2": endpoint{
+ Hostname: "sqs-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "fips-us-west-1": endpoint{
+ Hostname: "sqs-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "fips-us-west-2": endpoint{
+ Hostname: "sqs-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{
+ SSLCommonName: "queue.{dnsSuffix}",
+ },
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "local": endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "sts": service{
+ PartitionEndpoint: "aws-global",
+ Defaults: endpoint{
+ Hostname: "sts.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{
+ Hostname: "sts.ap-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-east-1",
+ },
+ },
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{
+ Hostname: "sts.ap-northeast-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "ap-northeast-2",
+ },
+ },
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "aws-global": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "sts-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "sts-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "sts-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "sts-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "support": service{
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "ap-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "transfer": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "translate-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "translate-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "translate-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
+ "waf": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "waf.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
+ "waf-regional": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workdocs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workmail": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "workspaces": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "xray": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-north-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "eu-west-3": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsCnPartition returns the Resolver for AWS China.
+func AwsCnPartition() Partition {
+ return awscnPartition.Partition()
+}
+
+var awscnPartition = partition{
+ ID: "aws-cn",
+ Name: "AWS China",
+ DNSSuffix: "amazonaws.com.cn",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "cn-north-1": region{
+ Description: "China (Beijing)",
+ },
+ "cn-northwest-1": region{
+ Description: "China (Ningxia)",
+ },
+ },
+ Services: services{
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Hostname: "api.ecr.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "cn-northwest-1": endpoint{
+ Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "autoscaling": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cloudfront": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "codebuild": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "cognito-identity": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ec2": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "elasticmapreduce": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "gamelift": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "glacier": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-cn-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-cn-global": endpoint{
+ Hostname: "iam.cn-north-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ProdFips": endpoint{
+ Hostname: "kms-fips.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "mediaconvert": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{
+ Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "monitoring": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "s3-control": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{
+ Hostname: "s3-control.cn-north-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ "cn-northwest-1": endpoint{
+ Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "sns": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "sqs": service{
+ Defaults: endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ "cn-northwest-1": endpoint{},
+ },
+ },
+ },
+}
+
+// AwsUsGovPartition returns the Resolver for AWS GovCloud (US).
+func AwsUsGovPartition() Partition {
+ return awsusgovPartition.Partition()
+}
+
+var awsusgovPartition = partition{
+ ID: "aws-us-gov",
+ Name: "AWS GovCloud (US)",
+ DNSSuffix: "amazonaws.com",
+ RegionRegex: regionRegex{
+ Regexp: func() *regexp.Regexp {
+ reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$")
+ return reg
+ }(),
+ },
+ Defaults: endpoint{
+ Hostname: "{service}.{region}.{dnsSuffix}",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ Regions: regions{
+ "us-gov-east-1": region{
+ Description: "AWS GovCloud (US-East)",
+ },
+ "us-gov-west-1": region{
+ Description: "AWS GovCloud (US)",
+ },
+ },
+ Services: services{
+ "acm": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "acm-pca": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "api.ecr": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Hostname: "api.ecr.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "api.ecr.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "api.sagemaker": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "athena": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "autoscaling": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "clouddirectory": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudformation": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudhsm": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudhsmv2": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "cloudhsm",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "cloudtrail": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "codecommit": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "comprehend": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "config": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "directconnect": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "dms": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ds": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "dynamodb": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "ec2": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "ec2metadata": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "169.254.169.254/latest",
+ Protocols: []string{"http"},
+ },
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticache": service{
+
+ Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticbeanstalk": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticfilesystem": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "elasticloadbalancing": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "elasticmapreduce": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"https"},
+ },
+ },
+ },
+ "es": service{
+
+ Endpoints: endpoints{
+ "fips": endpoint{
+ Hostname: "es-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "events": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "firehose": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "glacier": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "glue": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "guardduty": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "iam": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-us-gov-global": endpoint{
+ Hostname: "iam.us-gov.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "inspector": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "kms": service{
+
+ Endpoints: endpoints{
+ "ProdFips": endpoint{
+ Hostname: "kms-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "license-manager": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "logs": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "mediaconvert": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "metering.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "monitoring": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "organizations": service{
+ PartitionEndpoint: "aws-us-gov-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-us-gov-global": endpoint{
+ Hostname: "organizations.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "polly": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "rds": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "redshift": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "rekognition": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "runtime.sagemaker": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "s3": service{
+ Defaults: endpoint{
+ SignatureVersions: []string{"s3", "s3v4"},
+ },
+ Endpoints: endpoints{
+ "fips-us-gov-west-1": endpoint{
+ Hostname: "s3-fips-us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-east-1": endpoint{
+ Hostname: "s3.us-gov-east-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "s3.us-gov-west-1.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "s3-control": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"s3v4"},
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{
+ Hostname: "s3-control.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-east-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-east-1",
+ },
+ },
+ "us-gov-west-1": endpoint{
+ Hostname: "s3-control.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com",
+ SignatureVersions: []string{"s3v4"},
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "sms": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "sns": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "sqs": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{
+ SSLCommonName: "{region}.queue.{dnsSuffix}",
+ Protocols: []string{"http", "https"},
+ },
+ },
+ },
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "states": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "storagegateway": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "streams.dynamodb": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "dynamodb",
+ },
+ },
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "sts": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "swf": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "us-gov-east-1": endpoint{},
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "translate": service{
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ "us-gov-west-1-fips": endpoint{
+ Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-gov-west-1",
+ },
+ },
+ },
+ },
+ "waf-regional": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "workspaces": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ },
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
new file mode 100644
index 000000000..ca8fc828e
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
@@ -0,0 +1,141 @@
+package endpoints
+
+// Service identifiers
+//
+// Deprecated: Use client package's EndpointsID value instead of these
+// ServiceIDs. These IDs are not maintained, and are out of date.
+const (
+ A4bServiceID = "a4b" // A4b.
+ AcmServiceID = "acm" // Acm.
+ AcmPcaServiceID = "acm-pca" // AcmPca.
+ ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
+ ApiPricingServiceID = "api.pricing" // ApiPricing.
+ ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
+ ApigatewayServiceID = "apigateway" // Apigateway.
+ ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
+ Appstream2ServiceID = "appstream2" // Appstream2.
+ AppsyncServiceID = "appsync" // Appsync.
+ AthenaServiceID = "athena" // Athena.
+ AutoscalingServiceID = "autoscaling" // Autoscaling.
+ AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
+ BatchServiceID = "batch" // Batch.
+ BudgetsServiceID = "budgets" // Budgets.
+ CeServiceID = "ce" // Ce.
+ ChimeServiceID = "chime" // Chime.
+ Cloud9ServiceID = "cloud9" // Cloud9.
+ ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
+ CloudformationServiceID = "cloudformation" // Cloudformation.
+ CloudfrontServiceID = "cloudfront" // Cloudfront.
+ CloudhsmServiceID = "cloudhsm" // Cloudhsm.
+ Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
+ CloudsearchServiceID = "cloudsearch" // Cloudsearch.
+ CloudtrailServiceID = "cloudtrail" // Cloudtrail.
+ CodebuildServiceID = "codebuild" // Codebuild.
+ CodecommitServiceID = "codecommit" // Codecommit.
+ CodedeployServiceID = "codedeploy" // Codedeploy.
+ CodepipelineServiceID = "codepipeline" // Codepipeline.
+ CodestarServiceID = "codestar" // Codestar.
+ CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
+ CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
+ CognitoSyncServiceID = "cognito-sync" // CognitoSync.
+ ComprehendServiceID = "comprehend" // Comprehend.
+ ConfigServiceID = "config" // Config.
+ CurServiceID = "cur" // Cur.
+ DatapipelineServiceID = "datapipeline" // Datapipeline.
+ DaxServiceID = "dax" // Dax.
+ DevicefarmServiceID = "devicefarm" // Devicefarm.
+ DirectconnectServiceID = "directconnect" // Directconnect.
+ DiscoveryServiceID = "discovery" // Discovery.
+ DmsServiceID = "dms" // Dms.
+ DsServiceID = "ds" // Ds.
+ DynamodbServiceID = "dynamodb" // Dynamodb.
+ Ec2ServiceID = "ec2" // Ec2.
+ Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
+ EcrServiceID = "ecr" // Ecr.
+ EcsServiceID = "ecs" // Ecs.
+ ElasticacheServiceID = "elasticache" // Elasticache.
+ ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
+ ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
+ ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
+ ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
+ ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
+ EmailServiceID = "email" // Email.
+ EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
+ EsServiceID = "es" // Es.
+ EventsServiceID = "events" // Events.
+ FirehoseServiceID = "firehose" // Firehose.
+ FmsServiceID = "fms" // Fms.
+ GameliftServiceID = "gamelift" // Gamelift.
+ GlacierServiceID = "glacier" // Glacier.
+ GlueServiceID = "glue" // Glue.
+ GreengrassServiceID = "greengrass" // Greengrass.
+ GuarddutyServiceID = "guardduty" // Guardduty.
+ HealthServiceID = "health" // Health.
+ IamServiceID = "iam" // Iam.
+ ImportexportServiceID = "importexport" // Importexport.
+ InspectorServiceID = "inspector" // Inspector.
+ IotServiceID = "iot" // Iot.
+ IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
+ KinesisServiceID = "kinesis" // Kinesis.
+ KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
+ KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
+ KmsServiceID = "kms" // Kms.
+ LambdaServiceID = "lambda" // Lambda.
+ LightsailServiceID = "lightsail" // Lightsail.
+ LogsServiceID = "logs" // Logs.
+ MachinelearningServiceID = "machinelearning" // Machinelearning.
+ MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
+ MediaconvertServiceID = "mediaconvert" // Mediaconvert.
+ MedialiveServiceID = "medialive" // Medialive.
+ MediapackageServiceID = "mediapackage" // Mediapackage.
+ MediastoreServiceID = "mediastore" // Mediastore.
+ MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
+ MghServiceID = "mgh" // Mgh.
+ MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
+ ModelsLexServiceID = "models.lex" // ModelsLex.
+ MonitoringServiceID = "monitoring" // Monitoring.
+ MturkRequesterServiceID = "mturk-requester" // MturkRequester.
+ NeptuneServiceID = "neptune" // Neptune.
+ OpsworksServiceID = "opsworks" // Opsworks.
+ OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
+ OrganizationsServiceID = "organizations" // Organizations.
+ PinpointServiceID = "pinpoint" // Pinpoint.
+ PollyServiceID = "polly" // Polly.
+ RdsServiceID = "rds" // Rds.
+ RedshiftServiceID = "redshift" // Redshift.
+ RekognitionServiceID = "rekognition" // Rekognition.
+ ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
+ Route53ServiceID = "route53" // Route53.
+ Route53domainsServiceID = "route53domains" // Route53domains.
+ RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
+ RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
+ S3ServiceID = "s3" // S3.
+ S3ControlServiceID = "s3-control" // S3Control.
+ SagemakerServiceID = "api.sagemaker" // Sagemaker.
+ SdbServiceID = "sdb" // Sdb.
+ SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
+ ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
+ ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
+ ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
+ ShieldServiceID = "shield" // Shield.
+ SmsServiceID = "sms" // Sms.
+ SnowballServiceID = "snowball" // Snowball.
+ SnsServiceID = "sns" // Sns.
+ SqsServiceID = "sqs" // Sqs.
+ SsmServiceID = "ssm" // Ssm.
+ StatesServiceID = "states" // States.
+ StoragegatewayServiceID = "storagegateway" // Storagegateway.
+ StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
+ StsServiceID = "sts" // Sts.
+ SupportServiceID = "support" // Support.
+ SwfServiceID = "swf" // Swf.
+ TaggingServiceID = "tagging" // Tagging.
+ TransferServiceID = "transfer" // Transfer.
+ TranslateServiceID = "translate" // Translate.
+ WafServiceID = "waf" // Waf.
+ WafRegionalServiceID = "waf-regional" // WafRegional.
+ WorkdocsServiceID = "workdocs" // Workdocs.
+ WorkmailServiceID = "workmail" // Workmail.
+ WorkspacesServiceID = "workspaces" // Workspaces.
+ XrayServiceID = "xray" // Xray.
+)
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
new file mode 100644
index 000000000..84316b92c
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -0,0 +1,66 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the DefaultResolver function. This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn),
+// and AWS GovCloud (US) (aws-us-gov).
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface
+// will allow you to get access to the list of underlying Partitions with the
+// Partitions method. This is helpful if you want to limit the SDK's endpoint
+// resolving to a single partition, or enumerate regions, services, and endpoints
+// in the partition.
+//
+// resolver := endpoints.DefaultResolver()
+// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+// for _, p := range partitions {
+// fmt.Println("Regions for", p.ID())
+// for id, _ := range p.Regions() {
+// fmt.Println("*", id)
+// }
+//
+// fmt.Println("Services for", p.ID())
+// for id, _ := range p.Services() {
+// fmt.Println("*", id)
+// }
+// }
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic for how
+// endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config, set the value
+// of the type to the EndpointResolver field of aws.Config when initializing
+// the session or service client.
+//
+// In addition, ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+//
+// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+// if service == endpoints.S3ServiceID {
+// return endpoints.ResolvedEndpoint{
+// URL: "s3.custom.endpoint.com",
+// SigningRegion: "custom-signing-region",
+// }, nil
+// }
+//
+// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+// }
+//
+// sess := session.Must(session.NewSession(&aws.Config{
+// Region: aws.String("us-west-2"),
+// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+// }))
+package endpoints
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 000000000..f82babf6f
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,449 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Options provide the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+ // DisableSSL forces the endpoint to be resolved as HTTP
+ // instead of HTTPS if the service supports it.
+ DisableSSL bool
+
+ // UseDualStack sets the resolver to resolve the endpoint as a dualstack
+ // endpoint for the service. If dualstack support for a service is not
+ // known and StrictMatching is not enabled, a dualstack endpoint for the
+ // service will be returned. This endpoint may not be valid. If
+ // StrictMatching is enabled, only services that are known to support
+ // dualstack will return dualstack endpoints.
+ UseDualStack bool
+
+ // StrictMatching enables strict matching of services and regions when
+ // resolving endpoints. If the partition doesn't enumerate the exact service
+ // and region, an error will be returned. This option will prevent returning
+ // endpoints that look valid, but may not resolve to any real endpoint.
+ StrictMatching bool
+
+ // ResolveUnknownService enables resolving a service endpoint based on the
+ // region provided if the service does not exist. The service endpoint ID
+ // will be used as the service domain name prefix. By default the endpoint
+ // resolver requires the service to be known when resolving endpoints.
+ //
+ // If resolving an endpoint on the partition list, the provided region will
+ // be used to determine which partition's domain name pattern to combine
+ // with the service endpoint ID. If both the service and region are unknown
+ // and the endpoint is resolved on the partition list, an UnknownEndpointError
+ // will be returned.
+ //
+ // If resolving an endpoint on a partition-specific resolver, that partition's
+ // domain name pattern will be used with the service endpoint ID. If both
+ // region and service do not exist when resolving an endpoint on a specific
+ // partition, the partition's domain pattern will be used to combine the
+ // endpoint and region together.
+ //
+ // This option is ignored if StrictMatching is enabled.
+ ResolveUnknownService bool
+}
+
+// Set combines all of the option functions together.
+func (o *Options) Set(optFns ...func(*Options)) {
+ for _, fn := range optFns {
+ fn(o)
+ }
+}
+
+// DisableSSLOption sets the DisableSSL options. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+ o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackOption(o *Options) {
+ o.UseDualStack = true
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+ o.StrictMatching = true
+}
+
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
+func ResolveUnknownServiceOption(o *Options) {
+ o.ResolveUnknownService = true
+}
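+
+// As an illustrative sketch (not part of the SDK's documented examples),
+// the functional options above can be combined when resolving an endpoint:
+//
+//	resolver := endpoints.DefaultResolver()
+//	ep, err := resolver.EndpointFor(
+//		endpoints.S3ServiceID, "us-west-2",
+//		endpoints.StrictMatchingOption,
+//	)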
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+ EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP will be used instead of the default HTTPS.
+//
+// If disableSSL is set, it will only set the URL's scheme if the URL does not
+// contain a scheme.
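+//
+// For example (illustrative):
+//
+//	AddScheme("example.com", false) // returns "https://example.com"
+//	AddScheme("example.com", true)  // returns "http://example.com"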
+func AddScheme(endpoint string, disableSSL bool) string {
+ if !schemeRE.MatchString(endpoint) {
+ scheme := "https"
+ if disableSSL {
+ scheme = "http"
+ }
+ endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+ }
+
+ return endpoint
+}
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+ Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist, false will be returned
+// as the second return value.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+ for _, p := range ps {
+ if p.ID() != partitionID {
+ continue
+ }
+ if _, ok := p.p.Services[serviceID]; !ok {
+ break
+ }
+
+ s := Service{
+ id: serviceID,
+ p: p.p,
+ }
+ return s.Regions(), true
+ }
+
+ return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+ for _, p := range ps {
+ if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+ return p, true
+ }
+ }
+
+ return Partition{}, false
+}
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata an UnknownServiceError
+// will be returned. This validation occurs regardless of whether
+// StrictMatching is enabled. To enable resolving unknown services, set the
+// ResolveUnknownService option to true. When StrictMatching is disabled
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled, the endpoint returned may look valid but may not
+// work. StrictMatching requires the SDK to be updated if you want to take
+// advantage of new region and service expansions.
+//
+// Errors that can be returned.
+// * UnknownServiceError
+// * UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id, r := range p.p.Regions {
+ rs[id] = Region{
+ id: id,
+ desc: r.Description,
+ p: p.p,
+ }
+ }
+
+ return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id := range p.p.Services {
+ ss[id] = Service{
+ id: id,
+ p: p.p,
+ }
+ }
+
+ return ss
+}
+
+// A Region provides information about a region, and the ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+ id, desc string
+ p *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// Description returns the region's description. The region description
+// is free text, it can be empty, and it may change between SDK releases.
+func (r Region) Description() string { return r.desc }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id, s := range r.p.Services {
+ if _, ok := s.Endpoints[r.id]; ok {
+ ss[id] = Service{
+ id: id,
+ p: r.p,
+ }
+ }
+ }
+
+ return ss
+}
+
+// A Service provides information about a service, and the ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id := range s.p.Services[s.id].Endpoints {
+ if r, ok := s.p.Regions[id]; ok {
+ rs[id] = Region{
+ id: id,
+ desc: r.Description,
+ p: s.p,
+ }
+ }
+ }
+
+ return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+ es := map[string]Endpoint{}
+ for id := range s.p.Services[s.id].Endpoints {
+ es[id] = Endpoint{
+ id: id,
+ serviceID: s.id,
+ p: s.p,
+ }
+ }
+
+ return es
+}
+
+// An Endpoint provides information about endpoints, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+ id string
+ serviceID string
+ p *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a partition,
+// service, and region.
+type ResolvedEndpoint struct {
+ // The endpoint URL
+ URL string
+
+ // The region that should be used for signing requests.
+ SigningRegion string
+
+ // The service name that should be used for signing requests.
+ SigningName string
+
+ // States that the signing name for this endpoint was derived from metadata
+ // passed in, but was not explicitly modeled.
+ SigningNameDerived bool
+
+ // The signing method that should be used for signing requests.
+ SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when, in StrictMatching mode, the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+ awsError
+ Partition string
+ Service string
+ Known []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+ return UnknownServiceError{
+ awsError: awserr.New("UnknownServiceError",
+ "could not resolve endpoint for unknown service", nil),
+ Partition: p,
+ Service: s,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q",
+ e.Partition, e.Service)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+ return e.Error()
+}
+
+// An UnknownEndpointError is returned when, in StrictMatching mode, the
+// service is valid, but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+ Known []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+ return UnknownEndpointError{
+ awsError: awserr.New("UnknownEndpointError",
+ "could not resolve endpoint", nil),
+ Partition: p,
+ Service: s,
+ Region: r,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+ e.Partition, e.Service, e.Region)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+ return e.Error()
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 000000000..ff6f76db6
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,307 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type partitions []partition
+
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ var opt Options
+ opt.Set(opts...)
+
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
+ continue
+ }
+
+ return ps[i].EndpointFor(service, region, opts...)
+ }
+
+ // If loose matching, fall back to the first partition's format to use
+ // when resolving the endpoint.
+ if !opt.StrictMatching && len(ps) > 0 {
+ return ps[0].EndpointFor(service, region, opts...)
+ }
+
+ return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions representing each partition in the SDK's
+// endpoints model.
+func (ps partitions) Partitions() []Partition {
+ parts := make([]Partition, 0, len(ps))
+ for i := 0; i < len(ps); i++ {
+ parts = append(parts, ps[i].Partition())
+ }
+
+ return parts
+}
+
+type partition struct {
+ ID string `json:"partition"`
+ Name string `json:"partitionName"`
+ DNSSuffix string `json:"dnsSuffix"`
+ RegionRegex regionRegex `json:"regionRegex"`
+ Defaults endpoint `json:"defaults"`
+ Regions regions `json:"regions"`
+ Services services `json:"services"`
+}
+
+func (p partition) Partition() Partition {
+ return Partition{
+ id: p.ID,
+ p: &p,
+ }
+}
+
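+// canResolveEndpoint reports whether the partition knows the exact service
+// and region endpoint or, when strictMatch is false, whether the region
+// matches the partition's region pattern.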
+func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
+ s, hasService := p.Services[service]
+ _, hasEndpoint := s.Endpoints[region]
+
+ if hasEndpoint && hasService {
+ return true
+ }
+
+ if strictMatch {
+ return false
+ }
+
+ return p.RegionRegex.MatchString(region)
+}
+
+func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
+ var opt Options
+ opt.Set(opts...)
+
+ s, hasService := p.Services[service]
+ if !(hasService || opt.ResolveUnknownService) {
+ // Only return error if the resolver will not fallback to creating
+ // endpoint based on service endpoint ID passed in.
+ return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
+ }
+
+ e, hasEndpoint := s.endpointForRegion(region)
+ if !hasEndpoint && opt.StrictMatching {
+ return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
+ }
+
+ defs := []endpoint{p.Defaults, s.Defaults}
+ return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
+}
+
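+// serviceList returns the IDs of all services in ss. It is used to populate
+// the Known field of UnknownServiceError.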
+func serviceList(ss services) []string {
+ list := make([]string, 0, len(ss))
+ for k := range ss {
+ list = append(list, k)
+ }
+ return list
+}
+func endpointList(es endpoints) []string {
+ list := make([]string, 0, len(es))
+ for k := range es {
+ list = append(list, k)
+ }
+ return list
+}
+
+type regionRegex struct {
+ *regexp.Regexp
+}
+
+func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
+ // Strip leading and trailing quotes
+ regex, err := strconv.Unquote(string(b))
+ if err != nil {
+ return fmt.Errorf("unable to strip quotes from regex, %v", err)
+ }
+
+ rr.Regexp, err = regexp.Compile(regex)
+ if err != nil {
+ return fmt.Errorf("unable to unmarshal region regex, %v", err)
+ }
+ return nil
+}
+
+type regions map[string]region
+
+type region struct {
+ Description string `json:"description"`
+}
+
+type services map[string]service
+
+type service struct {
+ PartitionEndpoint string `json:"partitionEndpoint"`
+ IsRegionalized boxedBool `json:"isRegionalized,omitempty"`
+ Defaults endpoint `json:"defaults"`
+ Endpoints endpoints `json:"endpoints"`
+}
+
+func (s *service) endpointForRegion(region string) (endpoint, bool) {
+ if s.IsRegionalized == boxedFalse {
+ return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
+ }
+
+ if e, ok := s.Endpoints[region]; ok {
+ return e, true
+ }
+
+ // Unable to find any matching endpoint; return a blank endpoint that
+ // will be used for generic endpoint creation.
+ return endpoint{}, false
+}
+
+type endpoints map[string]endpoint
+
+type endpoint struct {
+ Hostname string `json:"hostname"`
+ Protocols []string `json:"protocols"`
+ CredentialScope credentialScope `json:"credentialScope"`
+
+ // Custom fields not modeled
+ HasDualStack boxedBool `json:"-"`
+ DualStackHostname string `json:"-"`
+
+ // Signature Version not used
+ SignatureVersions []string `json:"signatureVersions"`
+
+ // SSLCommonName not used.
+ SSLCommonName string `json:"sslCommonName"`
+}
+
+const (
+ defaultProtocol = "https"
+ defaultSigner = "v4"
+)
+
+var (
+ protocolPriority = []string{"https", "http"}
+ signerPriority = []string{"v4", "v2"}
+)
+
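+// getByPriority returns the entry of s matching the highest-priority entry
+// of p that is present in s. If no entry of s appears in p, the first entry
+// of s is returned; if s is empty, def is returned.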
+func getByPriority(s []string, p []string, def string) string {
+ if len(s) == 0 {
+ return def
+ }
+
+ for i := 0; i < len(p); i++ {
+ for j := 0; j < len(s); j++ {
+ if s[j] == p[i] {
+ return s[j]
+ }
+ }
+ }
+
+ return s[0]
+}
+
+func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
+ var merged endpoint
+ for _, def := range defs {
+ merged.mergeIn(def)
+ }
+ merged.mergeIn(e)
+ e = merged
+
+ hostname := e.Hostname
+
+ // Offset the hostname for dualstack if enabled
+ if opts.UseDualStack && e.HasDualStack == boxedTrue {
+ hostname = e.DualStackHostname
+ }
+
+ u := strings.Replace(hostname, "{service}", service, 1)
+ u = strings.Replace(u, "{region}", region, 1)
+ u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
+
+ scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
+ u = fmt.Sprintf("%s://%s", scheme, u)
+
+ signingRegion := e.CredentialScope.Region
+ if len(signingRegion) == 0 {
+ signingRegion = region
+ }
+
+ signingName := e.CredentialScope.Service
+ var signingNameDerived bool
+ if len(signingName) == 0 {
+ signingName = service
+ signingNameDerived = true
+ }
+
+ return ResolvedEndpoint{
+ URL: u,
+ SigningRegion: signingRegion,
+ SigningName: signingName,
+ SigningNameDerived: signingNameDerived,
+ SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+ }
+}
+
+func getEndpointScheme(protocols []string, disableSSL bool) string {
+ if disableSSL {
+ return "http"
+ }
+
+ return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func (e *endpoint) mergeIn(other endpoint) {
+ if len(other.Hostname) > 0 {
+ e.Hostname = other.Hostname
+ }
+ if len(other.Protocols) > 0 {
+ e.Protocols = other.Protocols
+ }
+ if len(other.SignatureVersions) > 0 {
+ e.SignatureVersions = other.SignatureVersions
+ }
+ if len(other.CredentialScope.Region) > 0 {
+ e.CredentialScope.Region = other.CredentialScope.Region
+ }
+ if len(other.CredentialScope.Service) > 0 {
+ e.CredentialScope.Service = other.CredentialScope.Service
+ }
+ if len(other.SSLCommonName) > 0 {
+ e.SSLCommonName = other.SSLCommonName
+ }
+ if other.HasDualStack != boxedBoolUnset {
+ e.HasDualStack = other.HasDualStack
+ }
+ if len(other.DualStackHostname) > 0 {
+ e.DualStackHostname = other.DualStackHostname
+ }
+}
+
+type credentialScope struct {
+ Region string `json:"region"`
+ Service string `json:"service"`
+}
+
+type boxedBool int
+
+func (b *boxedBool) UnmarshalJSON(buf []byte) error {
+ v, err := strconv.ParseBool(string(buf))
+ if err != nil {
+ return err
+ }
+
+ if v {
+ *b = boxedTrue
+ } else {
+ *b = boxedFalse
+ }
+
+ return nil
+}
+
+const (
+ boxedBoolUnset boxedBool = iota
+ boxedFalse
+ boxedTrue
+)
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
new file mode 100644
index 000000000..0fdfcc56e
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
@@ -0,0 +1,351 @@
+// +build codegen
+
+package endpoints
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "text/template"
+ "unicode"
+)
+
+// CodeGenOptions are the options for code generating the endpoints into
+// Go code from the endpoints model definition.
+type CodeGenOptions struct {
+ // Options for how the model will be decoded.
+ DecodeModelOptions DecodeModelOptions
+
+ // Disables code generation of the service endpoint prefix IDs defined in
+ // the model.
+ DisableGenerateServiceIDs bool
+}
+
+// Set combines all of the option functions together
+func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
+ for _, fn := range optFns {
+ fn(d)
+ }
+}
+
+// CodeGenModel, given an endpoints model file, will decode it and attempt to
+// generate Go code from the model definition. An error will be returned if
+// the code cannot be generated or decoded.
+func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
+ var opts CodeGenOptions
+ opts.Set(optFns...)
+
+ resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
+ *d = opts.DecodeModelOptions
+ })
+ if err != nil {
+ return err
+ }
+
+ v := struct {
+ Resolver
+ CodeGenOptions
+ }{
+ Resolver: resolver,
+ CodeGenOptions: opts,
+ }
+
+ tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
+ if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil {
+ return fmt.Errorf("failed to execute template, %v", err)
+ }
+
+ return nil
+}
+
+func toSymbol(v string) string {
+ out := []rune{}
+ for _, c := range strings.Title(v) {
+ if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
+ continue
+ }
+
+ out = append(out, c)
+ }
+
+ return string(out)
+}
+
+func quoteString(v string) string {
+ return fmt.Sprintf("%q", v)
+}
+
+func regionConstName(p, r string) string {
+ return toSymbol(p) + toSymbol(r)
+}
+
+func partitionGetter(id string) string {
+ return fmt.Sprintf("%sPartition", toSymbol(id))
+}
+
+func partitionVarName(id string) string {
+ return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
+}
+
+func listPartitionNames(ps partitions) string {
+ names := []string{}
+ switch len(ps) {
+ case 1:
+ return ps[0].Name
+ case 2:
+ return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
+ default:
+ for i, p := range ps {
+ if i == len(ps)-1 {
+ names = append(names, "and "+p.Name)
+ } else {
+ names = append(names, p.Name)
+ }
+ }
+ return strings.Join(names, ", ")
+ }
+}
+
+func boxedBoolIfSet(msg string, v boxedBool) string {
+ switch v {
+ case boxedTrue:
+ return fmt.Sprintf(msg, "boxedTrue")
+ case boxedFalse:
+ return fmt.Sprintf(msg, "boxedFalse")
+ default:
+ return ""
+ }
+}
+
+func stringIfSet(msg, v string) string {
+ if len(v) == 0 {
+ return ""
+ }
+
+ return fmt.Sprintf(msg, v)
+}
+
+func stringSliceIfSet(msg string, vs []string) string {
+ if len(vs) == 0 {
+ return ""
+ }
+
+ names := []string{}
+ for _, v := range vs {
+ names = append(names, `"`+v+`"`)
+ }
+
+ return fmt.Sprintf(msg, strings.Join(names, ","))
+}
+
+func endpointIsSet(v endpoint) bool {
+ return !reflect.DeepEqual(v, endpoint{})
+}
+
+func serviceSet(ps partitions) map[string]struct{} {
+ set := map[string]struct{}{}
+ for _, p := range ps {
+ for id := range p.Services {
+ set[id] = struct{}{}
+ }
+ }
+
+ return set
+}
+
+var funcMap = template.FuncMap{
+ "ToSymbol": toSymbol,
+ "QuoteString": quoteString,
+ "RegionConst": regionConstName,
+ "PartitionGetter": partitionGetter,
+ "PartitionVarName": partitionVarName,
+ "ListPartitionNames": listPartitionNames,
+ "BoxedBoolIfSet": boxedBoolIfSet,
+ "StringIfSet": stringIfSet,
+ "StringSliceIfSet": stringSliceIfSet,
+ "EndpointIsSet": endpointIsSet,
+ "ServicesSet": serviceSet,
+}
+
+const v3Tmpl = `
+{{ define "defaults" -}}
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+ "regexp"
+)
+
+ {{ template "partition consts" $.Resolver }}
+
+ {{ range $_, $partition := $.Resolver }}
+ {{ template "partition region consts" $partition }}
+ {{ end }}
+
+ {{ if not $.DisableGenerateServiceIDs -}}
+ {{ template "service consts" $.Resolver }}
+ {{- end }}
+
+ {{ template "endpoint resolvers" $.Resolver }}
+{{- end }}
+
+{{ define "partition consts" }}
+ // Partition identifiers
+ const (
+ {{ range $_, $p := . -}}
+ {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "partition region consts" }}
+ // {{ .Name }} partition's regions.
+ const (
+ {{ range $id, $region := .Regions -}}
+ {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "service consts" }}
+ // Service identifiers
+ const (
+ {{ $serviceSet := ServicesSet . -}}
+ {{ range $id, $_ := $serviceSet -}}
+ {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
+ {{ end -}}
+ )
+{{- end }}
+
+{{ define "endpoint resolvers" }}
+ // DefaultResolver returns an Endpoint resolver that will be able
+ // to resolve endpoints for: {{ ListPartitionNames . }}.
+ //
+ // Use DefaultPartitions() to get the list of the default partitions.
+ func DefaultResolver() Resolver {
+ return defaultPartitions
+ }
+
+ // DefaultPartitions returns a list of the partitions the SDK is bundled
+ // with. The available partitions are: {{ ListPartitionNames . }}.
+ //
+ // partitions := endpoints.DefaultPartitions
+ // for _, p := range partitions {
+ // // ... inspect partitions
+ // }
+ func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
+ }
+
+ var defaultPartitions = partitions{
+ {{ range $_, $partition := . -}}
+ {{ PartitionVarName $partition.ID }},
+ {{ end }}
+ }
+
+ {{ range $_, $partition := . -}}
+ {{ $name := PartitionGetter $partition.ID -}}
+ // {{ $name }} returns the Resolver for {{ $partition.Name }}.
+ func {{ $name }}() Partition {
+ return {{ PartitionVarName $partition.ID }}.Partition()
+ }
+ var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
+ {{ end }}
+{{ end }}
+
+{{ define "default partitions" }}
+ func DefaultPartitions() []Partition {
+ return []partition{
+ {{ range $_, $partition := . -}}
+ // {{ ToSymbol $partition.ID}}Partition(),
+ {{ end }}
+ }
+ }
+{{ end }}
+
+{{ define "gocode Partition" -}}
+partition{
+ {{ StringIfSet "ID: %q,\n" .ID -}}
+ {{ StringIfSet "Name: %q,\n" .Name -}}
+ {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
+ RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults }},
+ {{- end }}
+ Regions: {{ template "gocode Regions" .Regions }},
+ Services: {{ template "gocode Services" .Services }},
+}
+{{- end }}
+
+{{ define "gocode RegionRegex" -}}
+regionRegex{
+ Regexp: func() *regexp.Regexp{
+ reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
+ return reg
+ }(),
+}
+{{- end }}
+
+{{ define "gocode Regions" -}}
+regions{
+ {{ range $id, $region := . -}}
+ "{{ $id }}": {{ template "gocode Region" $region }},
+ {{ end -}}
+}
+{{- end }}
+
+{{ define "gocode Region" -}}
+region{
+ {{ StringIfSet "Description: %q,\n" .Description -}}
+}
+{{- end }}
+
+{{ define "gocode Services" -}}
+services{
+ {{ range $id, $service := . -}}
+ "{{ $id }}": {{ template "gocode Service" $service }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Service" -}}
+service{
+ {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
+ {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
+ {{ if EndpointIsSet .Defaults -}}
+ Defaults: {{ template "gocode Endpoint" .Defaults -}},
+ {{- end }}
+ {{ if .Endpoints -}}
+ Endpoints: {{ template "gocode Endpoints" .Endpoints }},
+ {{- end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoints" -}}
+endpoints{
+ {{ range $id, $endpoint := . -}}
+ "{{ $id }}": {{ template "gocode Endpoint" $endpoint }},
+ {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoint" -}}
+endpoint{
+ {{ StringIfSet "Hostname: %q,\n" .Hostname -}}
+ {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
+ {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
+ {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
+ {{ if or .CredentialScope.Region .CredentialScope.Service -}}
+ CredentialScope: credentialScope{
+ {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
+ {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
+ },
+ {{- end }}
+ {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}}
+ {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}}
+
+}
+{{- end }}
+`
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/src/vendor/github.com/aws/aws-sdk-go/aws/errors.go
new file mode 100644
index 000000000..fa06f7a8f
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/errors.go
@@ -0,0 +1,13 @@
+package aws
+
+import "github.com/aws/aws-sdk-go/aws/awserr"
+
+var (
+ // ErrMissingRegion is an error that is returned if region configuration is
+ // not found.
+ ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+ // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+ // resolved for a service.
+ ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/src/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
new file mode 100644
index 000000000..91a6f277a
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
@@ -0,0 +1,12 @@
+package aws
+
+// JSONValue is a representation of a grab bag type that will be marshaled
+// into a json string. This type can be used just like any other map.
+//
+// Example:
+//
+// values := aws.JSONValue{
+// "Foo": "Bar",
+// }
+// values["Baz"] = "Qux"
+type JSONValue map[string]interface{}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/src/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 000000000..6ed15b2ec
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,118 @@
+package aws
+
+import (
+ "log"
+ "os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns the pointer to a LogLevel. Should be used to work around
+// not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+ return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+ if l != nil {
+ return *l
+ }
+ return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+ c := l.Value()
+ return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+ c := l.Value()
+ return c >= v
+}
+
+const (
+ // LogOff states that no logging should be performed by the SDK. This is the
+ // default state of the SDK, and should be used to disable all logging.
+ LogOff LogLevelType = iota * 0x1000
+
+ // LogDebug states that debug output should be logged by the SDK. This should
+ // be used to inspect requests made and responses received.
+ LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+ // LogDebugWithSigning states that the SDK should log request signing and
+ // presigning events. This should be used to log the signing details of
+ // requests for debugging. Will also enable LogDebug.
+ LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+ // LogDebugWithHTTPBody states the SDK should log HTTP request and response
+ // HTTP bodies in addition to the headers and path. This should be used to
+ // see the body content of requests and responses made while using the SDK.
+ // Will also enable LogDebug.
+ LogDebugWithHTTPBody
+
+ // LogDebugWithRequestRetries states the SDK should log when service requests
+ // will be retried. This should be used when you want to log service
+ // requests being retried. Will also enable LogDebug.
+ LogDebugWithRequestRetries
+
+ // LogDebugWithRequestErrors states the SDK should log when service requests fail
+ // to build, send, validate, or unmarshal.
+ LogDebugWithRequestErrors
+
+ // LogDebugWithEventStreamBody states the SDK should log EventStream
+ // request and response bodies. This should be used to log the EventStream
+ // wire unmarshaled message content of requests and responses made while
+ // using the SDK. Will also enable LogDebug.
+ LogDebugWithEventStreamBody
+)
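+
+// As an illustrative sketch (not from the SDK's own docs), debug sub levels
+// can be combined when configuring the SDK's log level:
+//
+//	cfg := aws.NewConfig().
+//		WithLogLevel(aws.LogDebugWithHTTPBody | aws.LogDebugWithRequestRetries)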
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+ Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+// fmt.Fprintln(os.Stdout, args...)
+// })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided
+func (f LoggerFunc) Log(args ...interface{}) {
+ f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout, and
+// use the same formatting runes as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+ return &defaultLogger{
+ logger: log.New(os.Stdout, "", log.LstdFlags),
+ }
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+ logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+ l.logger.Println(args...)
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
new file mode 100644
index 000000000..2d13754cf
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,9 @@
+package request
+
+import (
+ "strings"
+)
+
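+// isErrConnectionReset reports whether the error's text contains
+// "connection reset", indicating the underlying connection was reset
+// by the peer.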
+func isErrConnectionReset(err error) bool {
+ return strings.Contains(err.Error(), "connection reset")
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 000000000..8ef8548a9
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,277 @@
+package request
+
+import (
+ "fmt"
+ "strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+ Validate HandlerList
+ Build HandlerList
+ Sign HandlerList
+ Send HandlerList
+ ValidateResponse HandlerList
+ Unmarshal HandlerList
+ UnmarshalStream HandlerList
+ UnmarshalMeta HandlerList
+ UnmarshalError HandlerList
+ Retry HandlerList
+ AfterRetry HandlerList
+ CompleteAttempt HandlerList
+ Complete HandlerList
+}
+
+// Copy returns a copy of this handler's lists.
+func (h *Handlers) Copy() Handlers {
+ return Handlers{
+ Validate: h.Validate.copy(),
+ Build: h.Build.copy(),
+ Sign: h.Sign.copy(),
+ Send: h.Send.copy(),
+ ValidateResponse: h.ValidateResponse.copy(),
+ Unmarshal: h.Unmarshal.copy(),
+ UnmarshalStream: h.UnmarshalStream.copy(),
+ UnmarshalError: h.UnmarshalError.copy(),
+ UnmarshalMeta: h.UnmarshalMeta.copy(),
+ Retry: h.Retry.copy(),
+ AfterRetry: h.AfterRetry.copy(),
+ CompleteAttempt: h.CompleteAttempt.copy(),
+ Complete: h.Complete.copy(),
+ }
+}
+
+// Clear removes callback functions for all handlers
+func (h *Handlers) Clear() {
+ h.Validate.Clear()
+ h.Build.Clear()
+ h.Send.Clear()
+ h.Sign.Clear()
+ h.Unmarshal.Clear()
+ h.UnmarshalStream.Clear()
+ h.UnmarshalMeta.Clear()
+ h.UnmarshalError.Clear()
+ h.ValidateResponse.Clear()
+ h.Retry.Clear()
+ h.AfterRetry.Clear()
+ h.CompleteAttempt.Clear()
+ h.Complete.Clear()
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+ Index int
+ Handler NamedHandler
+ Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+ list []NamedHandler
+
+ // Called after each request handler in the list is called. If set
+ // and the func returns true the HandlerList will continue to iterate
+ // over the request handlers. If false is returned the HandlerList
+ // will stop iterating.
+ //
+ // Should be used if extra logic is to be performed between each handler
+ // in the list. This can be used to terminate a list's iteration
+ // based on a condition, such as an error, like HandlerListStopOnError,
+ // or for logging, like HandlerListLogItem.
+ AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+ Name string
+ Fn func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+ n := HandlerList{
+ AfterEachFn: l.AfterEachFn,
+ }
+ if len(l.list) == 0 {
+ return n
+ }
+
+ n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+ return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+ l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+ return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+ l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushBackNamed pushes named handler f to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+ if cap(l.list) == 0 {
+ l.list = make([]NamedHandler, 0, 5)
+ }
+ l.list = append(l.list, n)
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+ l.PushFrontNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushFrontNamed pushes named handler f to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+ if cap(l.list) == len(l.list) {
+ // Allocating new list required
+ l.list = append([]NamedHandler{n}, l.list...)
+ } else {
+ // Enough room to prepend into list.
+ l.list = append(l.list, NamedHandler{})
+ copy(l.list[1:], l.list)
+ l.list[0] = n
+ }
+}
+
+// Remove removes a NamedHandler n
+func (l *HandlerList) Remove(n NamedHandler) {
+ l.RemoveByName(n.Name)
+}
+
+// RemoveByName removes a NamedHandler by name.
+func (l *HandlerList) RemoveByName(name string) {
+ for i := 0; i < len(l.list); i++ {
+ m := l.list[i]
+ if m.Name == name {
+ // Shift the array in place to avoid allocating a new one
+ copy(l.list[i:], l.list[i+1:])
+ l.list[len(l.list)-1] = NamedHandler{}
+ l.list = l.list[:len(l.list)-1]
+
+ // decrement i so the next length check is correct
+ i--
+ }
+ }
+}
+
+// SwapNamed will swap out any existing handlers with the same name as the
+// passed-in NamedHandler, returning true if handlers were swapped. False is
+// returned otherwise.
+func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
+ for i := 0; i < len(l.list); i++ {
+ if l.list[i].Name == n.Name {
+ l.list[i].Fn = n.Fn
+ swapped = true
+ }
+ }
+
+ return swapped
+}
+
+// Swap will swap out all handlers matching the name passed in. The matched
+// handlers will be replaced with the given handler. True is returned if the handlers were swapped.
+func (l *HandlerList) Swap(name string, replace NamedHandler) bool {
+ var swapped bool
+
+ for i := 0; i < len(l.list); i++ {
+ if l.list[i].Name == name {
+ l.list[i] = replace
+ swapped = true
+ }
+ }
+
+ return swapped
+}
+
+// SetBackNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the end of the list.
+func (l *HandlerList) SetBackNamed(n NamedHandler) {
+ if !l.SwapNamed(n) {
+ l.PushBackNamed(n)
+ }
+}
+
+// SetFrontNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the beginning of
+// the list.
+func (l *HandlerList) SetFrontNamed(n NamedHandler) {
+ if !l.SwapNamed(n) {
+ l.PushFrontNamed(n)
+ }
+}
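+
+// As an illustrative sketch (the handler name and functions below are
+// hypothetical), named handlers allow targeted replacement within a list:
+//
+//	var l HandlerList
+//	l.PushBackNamed(NamedHandler{Name: "example.Send", Fn: realSendFn})
+//	l.SetBackNamed(NamedHandler{Name: "example.Send", Fn: stubSendFn}) // replaces in place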
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+ for i, h := range l.list {
+ h.Fn(r)
+ item := HandlerListRunItem{
+ Index: i, Handler: h, Request: r,
+ }
+ if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+ return
+ }
+ }
+}
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+ if item.Request.Config.Logger == nil {
+ return true
+ }
+ item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+ item.Index, item.Handler.Name, item.Request.Error)
+
+ return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList from iterating
+// over request handlers if Request.Error is not nil, and true otherwise
+// to continue iterating.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+ return item.Request.Error == nil
+}
+
+// WithAppendUserAgent will add a string to the user agent prefixed with a
+// single white space.
+func WithAppendUserAgent(s string) Option {
+ return func(r *Request) {
+ r.Handlers.Build.PushBack(func(r2 *Request) {
+ AddToUserAgent(r, s)
+ })
+ }
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided they will be added as metadata to the
+// name/version pair resulting in the following format.
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+ ua := fmt.Sprintf("%s/%s", name, version)
+ if len(extra) > 0 {
+ ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+ }
+ return func(r *Request) {
+ AddToUserAgent(r, ua)
+ }
+}
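+
+// For example (illustrative):
+//
+//	h := MakeAddToUserAgentHandler("myapp", "1.0", "beta")
+//	// when run, h appends "myapp/1.0 (beta)" to the request's User-Agent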
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+ return func(r *Request) {
+ AddToUserAgent(r, s)
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
new file mode 100644
index 000000000..79f79602b
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
@@ -0,0 +1,24 @@
+package request
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+)
+
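+// copyHTTPRequest returns a shallow copy of r with its URL and Header
+// deep copied and its Body replaced by the provided body, so the copy
+// can be modified without mutating the original request.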
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+ req := new(http.Request)
+ *req = *r
+ req.URL = &url.URL{}
+ *req.URL = *r.URL
+ req.Body = body
+
+ req.Header = http.Header{}
+ for k, v := range r.Header {
+ for _, vv := range v {
+ req.Header.Add(k, vv)
+ }
+ }
+
+ return req
+}
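
A quick illustration (not part of the patch) of why copyHTTPRequest deep-copies the Header map rather than relying on a plain struct copy; the URL is a placeholder:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	orig, _ := http.NewRequest("GET", "https://example.invalid/", nil)
	orig.Header.Set("X-Attempt", "1")

	shallow := *orig                     // the struct copy shares the Header map
	shallow.Header.Set("X-Attempt", "2") // so this mutates orig's header too

	fmt.Println(orig.Header.Get("X-Attempt")) // prints "2"
}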
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
new file mode 100644
index 000000000..b0c2ef4fe
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
@@ -0,0 +1,60 @@
+package request
+
+import (
+ "io"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// offsetReader is a thread-safe io.ReadCloser to prevent racing
+// with retrying requests
+type offsetReader struct {
+ buf io.ReadSeeker
+ lock sync.Mutex
+ closed bool
+}
+
+func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
+ reader := &offsetReader{}
+ buf.Seek(offset, sdkio.SeekStart)
+
+ reader.buf = buf
+ return reader
+}
+
+// Close will close the instance of the offset reader's access to
+// the underlying io.ReadSeeker.
+func (o *offsetReader) Close() error {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+ o.closed = true
+ return nil
+}
+
+// Read is a thread-safe read of the underlying io.ReadSeeker
+func (o *offsetReader) Read(p []byte) (int, error) {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+
+ if o.closed {
+ return 0, io.EOF
+ }
+
+ return o.buf.Read(p)
+}
+
+// Seek is a thread-safe seeking operation.
+func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+
+ return o.buf.Seek(offset, whence)
+}
+
+// CloseAndCopy will return a new offsetReader with a copy of the old buffer
+// and close the old buffer.
+func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
+ o.Close()
+ return newOffsetReader(o.buf, offset)
+}
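
The rewind-before-retry pattern that offsetReader automates can be sketched with the standard library alone (offsetReader itself is unexported); this hypothetical example is not part of the patch:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

func main() {
	body := bytes.NewReader([]byte("payload"))

	// A first send attempt drains the body.
	io.Copy(ioutil.Discard, body)

	// Before a retry the SDK seeks back to the recorded start offset; this
	// is what CloseAndCopy automates, additionally swapping in a fresh
	// mutex-guarded wrapper so a lingering Transport read observes io.EOF
	// instead of racing with the new attempt.
	body.Seek(0, io.SeekStart)
	b, _ := ioutil.ReadAll(body)
	fmt.Println(string(b)) // "payload" again
}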
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
new file mode 100644
index 000000000..19da3fcd8
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -0,0 +1,673 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+const (
+ // ErrCodeSerialization is the serialization error code that is received
+ // during protocol unmarshaling.
+ ErrCodeSerialization = "SerializationError"
+
+ // ErrCodeRead is an error that is returned during HTTP reads.
+ ErrCodeRead = "ReadError"
+
+ // ErrCodeResponseTimeout is the connection timeout error that is received
+ // during body reads.
+ ErrCodeResponseTimeout = "ResponseTimeout"
+
+ // ErrCodeInvalidPresignExpire is returned when the expire time provided to
+ // presign is invalid
+ ErrCodeInvalidPresignExpire = "InvalidPresignExpireError"
+
+ // CanceledErrorCode is the error code that will be returned by an
+ // API request that was canceled. Requests given an aws.Context may
+ // return this error when canceled.
+ CanceledErrorCode = "RequestCanceled"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+ Config aws.Config
+ ClientInfo metadata.ClientInfo
+ Handlers Handlers
+
+ Retryer
+ AttemptTime time.Time
+ Time time.Time
+ Operation *Operation
+ HTTPRequest *http.Request
+ HTTPResponse *http.Response
+ Body io.ReadSeeker
+ BodyStart int64 // offset from beginning of Body that the request body starts
+ Params interface{}
+ Error error
+ Data interface{}
+ RequestID string
+ RetryCount int
+ Retryable *bool
+ RetryDelay time.Duration
+ NotHoist bool
+ SignedHeaderVals http.Header
+ LastSignedAt time.Time
+ DisableFollowRedirects bool
+
+ // A value greater than 0 instructs the request to be signed as a Presigned
+ // URL. You should not set this field directly. Instead use Request's
+ // Presign or PresignRequest methods.
+ ExpireTime time.Duration
+
+ context aws.Context
+
+ built bool
+
+ // Need to persist an intermediate body between the input Body and HTTP
+ // request body because the HTTP Client's transport can maintain a reference
+ // to the HTTP request's body after the client has returned. This value is
+ // safe to use concurrently and wraps the input Body for each HTTP request.
+ safeBody *offsetReader
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+ Name string
+ HTTPMethod string
+ HTTPPath string
+ *Paginator
+
+ BeforePresignFn func(r *Request) error
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is pointer value to an object which the request's response
+// payload will be deserialized to.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+ retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+ method := operation.HTTPMethod
+ if method == "" {
+ method = "POST"
+ }
+
+ httpReq, _ := http.NewRequest(method, "", nil)
+
+ var err error
+ httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+ if err != nil {
+ httpReq.URL = &url.URL{}
+ err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+ }
+
+ SanitizeHostForHeader(httpReq)
+
+ r := &Request{
+ Config: cfg,
+ ClientInfo: clientInfo,
+ Handlers: handlers.Copy(),
+
+ Retryer: retryer,
+ Time: time.Now(),
+ ExpireTime: 0,
+ Operation: operation,
+ HTTPRequest: httpReq,
+ Body: nil,
+ Params: params,
+ Error: err,
+ Data: data,
+ }
+ r.SetBufferBody([]byte{})
+
+ return r
+}
+
+// An Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+// var id2, versionID string
+// svc.PutObjectWithContext(ctx, params,
+// request.WithGetResponseHeader("x-amz-id-2", &id2),
+// request.WithGetResponseHeader("x-amz-version-id", &versionID),
+// )
+func WithGetResponseHeader(key string, val *string) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *val = req.HTTPResponse.Header.Get(key)
+ })
+ }
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+// var headers http.Header
+// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *headers = req.HTTPResponse.Header
+ })
+ }
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//    svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+ return func(r *Request) {
+ r.Config.LogLevel = aws.LogLevel(l)
+ }
+}
+
+// ApplyOptions will apply each option to the request, calling them in the
+// order they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+ for _, opt := range opts {
+ opt(r)
+ }
+}
+
+// Context will always return a non-nil context. If Request does not have a
+// context aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+ if r.context != nil {
+ return r.context
+ }
+ return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+ if ctx == nil {
+ panic("context cannot be nil")
+ }
+ setRequestContext(r, ctx)
+}
+
+// WillRetry returns whether the request can be retried.
+func (r *Request) WillRetry() bool {
+ if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
+ return false
+ }
+ return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns whether the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or invalid.
+func (r *Request) ParamsFilled() bool {
+ return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+ return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+ r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+ r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+ r.Body = reader
+ r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Body's current offset.
+ r.ResetBody()
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails. The expire parameter is only used for presigned Amazon
+// S3 API requests. All other AWS services will use a fixed expiration
+// time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+func (r *Request) Presign(expire time.Duration) (string, error) {
+ r = r.copy()
+
+ // Presign requires all headers be hoisted. There is no way to retrieve
+ // the signed headers not hoisted without this. Making the presigned URL
+ // useless.
+ r.NotHoist = false
+
+ u, _, err := getPresignedURL(r, expire)
+ return u, err
+}
+
+// PresignRequest behaves just like presign, with the addition of returning a
+// set of headers that were signed. The expire parameter is only used for
+// presigned Amazon S3 API requests. All other AWS services will use a fixed
+// expiration time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or
+// less. An error is returned if the expire duration is 0 or less.
+//
+// Returns the URL string for the API operation with signature in the query string,
+// and the HTTP headers that were included in the signature. These headers must
+// be included in any HTTP request made with the presigned URL.
+//
+// To prevent hoisting any headers to the query string set NotHoist to true on
+// this Request value prior to calling PresignRequest.
+func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) {
+ r = r.copy()
+ return getPresignedURL(r, expire)
+}
+
+// IsPresigned returns true if the request represents a presigned API URL.
+func (r *Request) IsPresigned() bool {
+ return r.ExpireTime != 0
+}
+
+func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) {
+ if expire <= 0 {
+ return "", nil, awserr.New(
+ ErrCodeInvalidPresignExpire,
+ "presigned URL requires an expire duration greater than 0",
+ nil,
+ )
+ }
+
+ r.ExpireTime = expire
+
+ if r.Operation.BeforePresignFn != nil {
+ if err := r.Operation.BeforePresignFn(r); err != nil {
+ return "", nil, err
+ }
+ }
+
+ if err := r.Sign(); err != nil {
+ return "", nil, err
+ }
+
+ return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+ if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+ return
+ }
+
+ retryStr := "not retrying"
+ if retrying {
+ retryStr = "will retry"
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+ stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+ if !r.built {
+ r.Handlers.Validate.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Request", false, r.Error)
+ return r.Error
+ }
+ r.Handlers.Build.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+ r.built = true
+ }
+
+ return r.Error
+}
+
+// Sign will sign the request, returning error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+ r.Build()
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Sign.Run(r)
+ return r.Error
+}
+
+func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
+ if r.safeBody != nil {
+ r.safeBody.Close()
+ }
+
+ r.safeBody = newOffsetReader(r.Body, r.BodyStart)
+
+ // Go 1.8 tightened and clarified the rules code needs to use when building
+ // requests with the http package. Go 1.8 removed the automatic detection
+ // of if the Request.Body was empty, or actually had bytes in it. The SDK
+ // always sets the Request.Body even if it is empty and should not actually
+ // be sent. This is incorrect.
+ //
+ // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
+ // client that the request really should be sent without a body. The
+ // Request.Body cannot be set to nil, which is preferable, because the
+ // field is exported and could introduce nil pointer dereferences for users
+ // of the SDK if they used that field.
+ //
+ // Related golang/go#18257
+ l, err := aws.SeekerLen(r.Body)
+ if err != nil {
+ return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
+ }
+
+ var body io.ReadCloser
+ if l == 0 {
+ body = NoBody
+ } else if l > 0 {
+ body = r.safeBody
+ } else {
+ // Hack to prevent sending bodies for methods where the body
+ // should be ignored by the server. Sending bodies on these
+ // methods without an associated ContentLength will cause the
+ // request to socket timeout because the server does not handle
+ // Transfer-Encoding: chunked bodies for these methods.
+ //
+ // This would only happen if an aws.ReaderSeekerCloser was used with
+ // an io.Reader that was not also an io.Seeker, or did not implement
+ // the Len() method.
+ switch r.Operation.HTTPMethod {
+ case "GET", "HEAD", "DELETE":
+ body = NoBody
+ default:
+ body = r.safeBody
+ }
+ }
+
+ return body, nil
+}
+
+// GetBody will return an io.ReadSeeker of the Request's underlying
+// input body with a concurrency safe wrapper.
+func (r *Request) GetBody() io.ReadSeeker {
+ return r.safeBody
+}
+
+// Send will send the request, returning error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+//
+// Canceling a request is non-deterministic. If a request has been canceled,
+// then the transport will choose, randomly, one of the state channels during
+// reads or getting the connection.
+//
+// readLoop() and getConn(req *Request, cm connectMethod)
+// https://github.com/golang/go/blob/master/src/net/http/transport.go
+//
+// Send will not close the request.Request's body.
+func (r *Request) Send() error {
+ defer func() {
+ // Regardless of success or failure of the request trigger the Complete
+ // request handlers.
+ r.Handlers.Complete.Run(r)
+ }()
+
+ if err := r.Error; err != nil {
+ return err
+ }
+
+ for {
+ r.Error = nil
+ r.AttemptTime = time.Now()
+
+ if err := r.Sign(); err != nil {
+ debugLogReqError(r, "Sign Request", false, err)
+ return err
+ }
+
+ if err := r.sendRequest(); err == nil {
+ return nil
+ } else if !shouldRetryCancel(r.Error) {
+ return err
+ } else {
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+
+ if r.Error != nil || !aws.BoolValue(r.Retryable) {
+ return r.Error
+ }
+
+ r.prepareRetry()
+ continue
+ }
+ }
+}
+
+func (r *Request) prepareRetry() {
+ if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+ r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+ }
+
+ // The previous http.Request will have a reference to the r.Body
+ // and the HTTP Client's Transport may still be reading from
+ // the request's body even though the Client's Do returned.
+ r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
+ r.ResetBody()
+
+ // Closing response body to ensure that no response body is leaked
+ // between retry attempts.
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
+ r.HTTPResponse.Body.Close()
+ }
+}
+
+func (r *Request) sendRequest() (sendErr error) {
+ defer r.Handlers.CompleteAttempt.Run(r)
+
+ r.Retryable = nil
+ r.Handlers.Send.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Send Request", r.WillRetry(), r.Error)
+ return r.Error
+ }
+
+ r.Handlers.UnmarshalMeta.Run(r)
+ r.Handlers.ValidateResponse.Run(r)
+ if r.Error != nil {
+ r.Handlers.UnmarshalError.Run(r)
+ debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Unmarshal.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error)
+ return r.Error
+ }
+
+ return nil
+}
+
+// copy will copy a request which will allow for local manipulation of the
+// request.
+func (r *Request) copy() *Request {
+ req := &Request{}
+ *req = *r
+ req.Handlers = r.Handlers.Copy()
+ op := *r.Operation
+ req.Operation = &op
+ return req
+}
+
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+ curUA := r.HTTPRequest.Header.Get("User-Agent")
+ if len(curUA) > 0 {
+ s = curUA + " " + s
+ }
+ r.HTTPRequest.Header.Set("User-Agent", s)
+}
+
+type temporary interface {
+ Temporary() bool
+}
+
+func shouldRetryCancel(err error) bool {
+ switch err := err.(type) {
+ case awserr.Error:
+ if err.Code() == CanceledErrorCode {
+ return false
+ }
+ return shouldRetryCancel(err.OrigErr())
+ case *url.Error:
+ if strings.Contains(err.Error(), "connection refused") {
+ // Refused connections should be retried as the service may not yet
+ // be running on the port. Go TCP dial considers refused
+ // connections as not temporary.
+ return true
+ }
+ // *url.Error only implements Temporary after Go 1.6, and since
+ // url.Error only wraps the underlying error, recurse on it:
+ return shouldRetryCancel(err.Err)
+ case temporary:
+ // If the error is temporary, we want to allow continuation of the
+ // retry process
+ return err.Temporary()
+ case nil:
+ // `awserr.Error.OrigErr()` can be nil, meaning there was an error but
+ // because we don't know the cause, it is marked as retryable. See
+ // TestRequest4xxUnretryable for an example.
+ return true
+ default:
+ switch err.Error() {
+ case "net/http: request canceled",
+ "net/http: request canceled while waiting for connection":
+ // known Go 1.5 error case when an HTTP request is canceled
+ return false
+ }
+ // here we don't know the error; so we allow a retry.
+ return true
+ }
+}
+
+// SanitizeHostForHeader removes default port from host and updates request.Host
+func SanitizeHostForHeader(r *http.Request) {
+ host := getHost(r)
+ port := portOnly(host)
+ if port != "" && isDefaultPort(r.URL.Scheme, port) {
+ r.Host = stripPort(host)
+ }
+}
+
+// Returns host from request
+func getHost(r *http.Request) string {
+ if r.Host != "" {
+ return r.Host
+ }
+
+ return r.URL.Host
+}
+
+// Hostname returns u.Host, without any port number.
+//
+// If Host is an IPv6 literal with a port number, Hostname returns the
+// IPv6 literal without the square brackets. IPv6 literals may include
+// a zone identifier.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
+
+// Port returns the port part of u.Host, without the leading colon.
+// If u.Host doesn't contain a port, Port returns an empty string.
+//
+// Copied from the Go 1.8 standard library (net/url)
+func portOnly(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return ""
+ }
+ if i := strings.Index(hostport, "]:"); i != -1 {
+ return hostport[i+len("]:"):]
+ }
+ if strings.Contains(hostport, "]") {
+ return ""
+ }
+ return hostport[colon+len(":"):]
+}
+
+// Returns true if the specified URI is using the standard port
+// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
+func isDefaultPort(scheme, port string) bool {
+ if port == "" {
+ return true
+ }
+
+ lowerCaseScheme := strings.ToLower(scheme)
+ if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
+ return true
+ }
+
+ return false
+}
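
From the caller's side, the presign flow above looks roughly like the following sketch, assuming the aws-sdk-go S3 service package; the bucket and key are placeholders and this example is not part of the patch:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Build the request without sending it. Presign copies the request
	// internally, so the original value stays usable.
	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	url, err := req.Presign(15 * time.Minute)
	if err != nil {
		fmt.Println("presign failed:", err)
		return
	}
	fmt.Println(url)
}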
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
new file mode 100644
index 000000000..e36e468b7
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
@@ -0,0 +1,39 @@
+// +build !go1.8
+
+package request
+
+import "io"
+
+// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
+// and Close always returns nil. It can be used in an outgoing client
+// request to explicitly signal that a request has zero bytes.
+// An alternative, however, is to simply set Request.Body to nil.
+//
+// Copy of Go 1.8 NoBody type from net/http/http.go
+type noBody struct{}
+
+func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
+func (noBody) Close() error { return nil }
+func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
+
+// NoBody is an empty reader that will trigger the Go HTTP client to not include
+// a body in the HTTP request.
+var NoBody = noBody{}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ r.HTTPRequest.Body = body
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
new file mode 100644
index 000000000..7c6a8000f
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -0,0 +1,33 @@
+// +build go1.8
+
+package request
+
+import (
+ "net/http"
+)
+
+// NoBody is the http.NoBody reader instructing the Go HTTP client to not
+// include a body in the HTTP request.
+var NoBody = http.NoBody
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+//
+// Will also set the Go 1.8's http.Request.GetBody member to allow retrying
+// PUT/POST redirects.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ r.HTTPRequest.Body = body
+ r.HTTPRequest.GetBody = r.getNextRequestBody
+}
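
The GetBody hook that ResetBody installs mirrors what net/http already does for well-known body types; a standard-library-only sketch (URL and payload are placeholders, not part of the patch):

package main

import (
	"bytes"
	"io/ioutil"
	"net/http"
)

func main() {
	payload := []byte("hello")
	req, _ := http.NewRequest("PUT", "https://example.invalid/upload", bytes.NewReader(payload))

	// For *bytes.Reader, net/http populates req.GetBody so the client can
	// replay the body on 307/308 redirects; ResetBody wires the SDK's
	// getNextRequestBody into the same hook for retries.
	body2, _ := req.GetBody()
	b, _ := ioutil.ReadAll(body2)
	_ = b // a fresh copy of the payload

	// An explicitly empty body is expressed as http.NoBody, matching the
	// l == 0 branch in getNextRequestBody above.
	req.Body = http.NoBody
}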
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
new file mode 100644
index 000000000..a7365cd1e
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
@@ -0,0 +1,14 @@
+// +build go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+ r.context = ctx
+ r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
new file mode 100644
index 000000000..307fa0705
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+ r.context = ctx
+ r.HTTPRequest.Cancel = ctx.Done()
+}
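
Callers normally reach setRequestContext through the generated WithContext operation variants; a hedged sketch with the S3 package (bucket and key are placeholders, not part of the patch):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// The WithContext variants call SetContext internally, so cancellation
	// and retry delays both honor the deadline.
	_, err := svc.GetObjectWithContext(ctx, &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.CanceledErrorCode {
		fmt.Println("request canceled:", aerr.Message())
	}
}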
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 000000000..a633ed5ac
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,264 @@
+package request
+
+import (
+ "reflect"
+ "sync/atomic"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// A Pagination provides pagination of SDK API operations that are paginatable.
+// Generally you should not use this type directly, but use the "Pages" API
+// operations method to automatically perform pagination for you. Such as,
+// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods.
+//
+// Pagination differs from a Paginator type in that pagination is the type that
+// does the pagination between API operations, and Paginator defines the
+// configuration that will be used per page request.
+//
+// cont := true
+// for p.Next() && cont {
+// data := p.Page().(*s3.ListObjectsOutput)
+// // process the page's data
+// }
+// return p.Err()
+//
+// See service client API operation Pages methods for examples how the SDK will
+// use the Pagination type.
+type Pagination struct {
+ // Function to return a Request value for each pagination request.
+ // Any configuration or handlers that need to be applied to the request
+ // prior to getting the next page should be done here before the request
+ // returned.
+ //
+ // NewRequest should always be built from the same API operations. It is
+ // undefined if different API operations are returned on subsequent calls.
+ NewRequest func() (*Request, error)
+ // EndPageOnSameToken, when enabled, will allow the paginator to stop on
+ // tokens that are the same as its previous tokens.
+ EndPageOnSameToken bool
+
+ started bool
+ prevTokens []interface{}
+ nextTokens []interface{}
+
+ err error
+ curPage interface{}
+}
+
+// HasNextPage will return true if Pagination is able to determine that the API
+// operation has additional pages. False will be returned if there are no more
+// pages remaining.
+//
+// Will always return true if Next has not been called yet.
+func (p *Pagination) HasNextPage() bool {
+ if !p.started {
+ return true
+ }
+
+ hasNextPage := len(p.nextTokens) != 0
+ if p.EndPageOnSameToken {
+ return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens)
+ }
+ return hasNextPage
+}
+
+// Err returns the error Pagination encountered when retrieving the next page.
+func (p *Pagination) Err() error {
+ return p.err
+}
+
+// Page returns the current page. Page should only be called after a successful
+// call to Next. It is undefined what Page will return if Page is called after
+// Next returns false.
+func (p *Pagination) Page() interface{} {
+ return p.curPage
+}
+
+// Next will attempt to retrieve the next page for the API operation. When a page
+// is retrieved true will be returned. If the page cannot be retrieved, or there
+// are no more pages false will be returned.
+//
+// Use the Page method to retrieve the current page data. The data will need
+// to be cast to the API operation's output type.
+//
+// Use the Err method to determine if an error occurred if Page returns false.
+func (p *Pagination) Next() bool {
+ if !p.HasNextPage() {
+ return false
+ }
+
+ req, err := p.NewRequest()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ if p.started {
+ for i, intok := range req.Operation.InputTokens {
+ awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
+ }
+ }
+ p.started = true
+
+ err = req.Send()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ p.prevTokens = p.nextTokens
+ p.nextTokens = req.nextPageTokens()
+ p.curPage = req.Data
+
+ return true
+}
+
+// A Paginator is the configuration data that defines how an API operation
+// should be paginated. This type is used by the API service models to define
+// the generated pagination config for service APIs.
+//
+// The Pagination type is what provides iterating between pages of an API. It
+// is only used to store the token metadata the SDK should use for performing
+// pagination.
+type Paginator struct {
+ InputTokens []string
+ OutputTokens []string
+ LimitToken string
+ TruncationToken string
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of data.
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+ if r.Operation.TruncationToken != "" {
+ tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+ if len(tr) == 0 {
+ return nil
+ }
+
+ switch v := tr[0].(type) {
+ case *bool:
+ if !aws.BoolValue(v) {
+ return nil
+ }
+ case bool:
+ if !v {
+ return nil
+ }
+ }
+ }
+
+ tokens := []interface{}{}
+ tokenAdded := false
+ for _, outToken := range r.Operation.OutputTokens {
+ vs, _ := awsutil.ValuesAtPath(r.Data, outToken)
+ if len(vs) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ v := vs[0]
+
+ switch tv := v.(type) {
+ case *string:
+ if len(aws.StringValue(tv)) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ case string:
+ if len(tv) == 0 {
+ tokens = append(tokens, nil)
+ continue
+ }
+ }
+
+ tokenAdded = true
+ tokens = append(tokens, v)
+ }
+ if !tokenAdded {
+ return nil
+ }
+
+ return tokens
+}
+
+// Ensure a deprecated item is only logged once instead of each time it is used.
+func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
+ if logger == nil {
+ return
+ }
+ if atomic.CompareAndSwapInt32(flag, 0, 1) {
+ logger.Log(msg)
+ }
+}
+
+var (
+ logDeprecatedHasNextPage int32
+ logDeprecatedNextPage int32
+ logDeprecatedEachPage int32
+)
+
+// HasNextPage returns true if this request has more pages of data available.
+//
+// Deprecated: Use Pagination type for configurable pagination of API operations
+func (r *Request) HasNextPage() bool {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
+ "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ return len(r.nextPageTokens()) > 0
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+//
+// Deprecated: Use Pagination type for configurable pagination of API operations
+func (r *Request) NextPage() *Request {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
+ "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ tokens := r.nextPageTokens()
+ if len(tokens) == 0 {
+ return nil
+ }
+
+ data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+ nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+ for i, intok := range nr.Operation.InputTokens {
+ awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+ }
+ return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+// func(page *T, lastPage bool) bool {
+// return true // return false to stop iterating
+// }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+//
+// Deprecated: Use Pagination type for configurable pagination of API operations
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
+ "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ for page := r; page != nil; page = page.NextPage() {
+ if err := page.Send(); err != nil {
+ return err
+ }
+ if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+ return page.Error
+ }
+ }
+
+ return nil
+}
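
The generated Pages helpers drive a Pagination value internally; a minimal consumer-side sketch with the S3 package (bucket is a placeholder, not part of the patch):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Each page's output tokens are fed into the next request's input,
	// exactly as Pagination.Next does above.
	err := svc.ListObjectsPages(&s3.ListObjectsInput{
		Bucket: aws.String("example-bucket"), // placeholder
	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		fmt.Println("objects in page:", len(page.Contents))
		return true // returning false stops the iteration early
	})
	if err != nil {
		fmt.Println("pagination failed:", err)
	}
}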
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100644
index 000000000..d0aa54c6d
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -0,0 +1,163 @@
+package request
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Retryer is an interface to control retry logic for a given service.
+// The default implementation used by most services is the client.DefaultRetryer
+// structure, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+ RetryRules(*Request) time.Duration
+ ShouldRetry(*Request) bool
+ MaxRetries() int
+}
+
+// WithRetryer sets a config Retryer value to the given Config returning it
+// for chaining.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+ cfg.Retryer = retryer
+ return cfg
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+ "RequestError": {},
+ "RequestTimeout": {},
+ ErrCodeResponseTimeout: {},
+ "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
+}
+
+var throttleCodes = map[string]struct{}{
+ "ProvisionedThroughputExceededException": {},
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "RequestThrottledException": {},
+ "TooManyRequestsException": {}, // Lambda functions
+ "PriorRequestNotComplete": {}, // Route53
+ "TransactionInProgressException": {},
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// re-signing before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "RequestExpired": {}, // EC2 Only
+}
+
+func isCodeThrottle(code string) bool {
+ _, ok := throttleCodes[code]
+ return ok
+}
+
+func isCodeRetryable(code string) bool {
+ if _, ok := retryableCodes[code]; ok {
+ return true
+ }
+
+ return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+ _, ok := credsExpiredCodes[code]
+ return ok
+}
+
+var validParentCodes = map[string]struct{}{
+ ErrCodeSerialization: {},
+ ErrCodeRead: {},
+}
+
+type temporaryError interface {
+ Temporary() bool
+}
+
+func isNestedErrorRetryable(parentErr awserr.Error) bool {
+ if parentErr == nil {
+ return false
+ }
+
+ if _, ok := validParentCodes[parentErr.Code()]; !ok {
+ return false
+ }
+
+ err := parentErr.OrigErr()
+ if err == nil {
+ return false
+ }
+
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code())
+ }
+
+ if t, ok := err.(temporaryError); ok {
+ return t.Temporary() || isErrConnectionReset(err)
+ }
+
+ return isErrConnectionReset(err)
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if error is nil.
+func IsErrorRetryable(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
+ }
+ }
+ return false
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if error is nil.
+func IsErrorThrottle(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeThrottle(aerr.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
+// Returns false if error is nil.
+func IsErrorExpiredCreds(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeExpiredCreds(aerr.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorRetryable
+func (r *Request) IsErrorRetryable() bool {
+ return IsErrorRetryable(r.Error)
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if the request has no Error set
+//
+// Alias for the utility function IsErrorThrottle
+func (r *Request) IsErrorThrottle() bool {
+ return IsErrorThrottle(r.Error)
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorExpiredCreds
+func (r *Request) IsErrorExpired() bool {
+ return IsErrorExpiredCreds(r.Error)
+}
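
A custom Retryer usually embeds client.DefaultRetryer and overrides one method; this hypothetical variant, which refuses to retry throttling errors, is a sketch and not part of the patch:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

// noThrottleRetryer keeps the default backoff rules but never retries
// errors whose codes appear in the throttleCodes table above.
type noThrottleRetryer struct {
	client.DefaultRetryer
}

func (r noThrottleRetryer) ShouldRetry(req *request.Request) bool {
	if req.IsErrorThrottle() {
		return false
	}
	return r.DefaultRetryer.ShouldRetry(req)
}

func main() {
	retryer := noThrottleRetryer{client.DefaultRetryer{NumMaxRetries: 3}}
	sess := session.Must(session.NewSession(
		request.WithRetryer(aws.NewConfig(), retryer),
	))
	_ = sess
}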
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
new file mode 100644
index 000000000..09a44eb98
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
@@ -0,0 +1,94 @@
+package request
+
+import (
+ "io"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var timeoutErr = awserr.New(
+ ErrCodeResponseTimeout,
+ "read on body has reached the timeout limit",
+ nil,
+)
+
+type readResult struct {
+ n int
+ err error
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return an ErrCodeResponseTimeout error if a timeout occurs.
+type timeoutReadCloser struct {
+ reader io.ReadCloser
+ duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+ timer := time.NewTimer(r.duration)
+ c := make(chan readResult, 1)
+
+ go func() {
+ n, err := r.reader.Read(b)
+ timer.Stop()
+ c <- readResult{n: n, err: err}
+ }()
+
+ select {
+ case data := <-c:
+ return data.n, data.err
+ case <-timer.C:
+ return 0, timeoutErr
+ }
+}
+
+func (r *timeoutReadCloser) Close() error {
+ return r.reader.Close()
+}
+
+const (
+ // HandlerResponseTimeout is what we use to signify the name of the
+ // response timeout handler.
+ HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that will replace any top level
+// error with an ErrCodeResponseTimeout error, if the nested error has that code.
+func adaptToResponseTimeoutError(req *Request) {
+ if err, ok := req.Error.(awserr.Error); ok {
+ aerr, ok := err.OrigErr().(awserr.Error)
+ if ok && aerr.Code() == ErrCodeResponseTimeout {
+ req.Error = aerr
+ }
+ }
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
+// This will allow for per read timeouts. If a timeout occurred, we will return the
+// ErrCodeResponseTimeout.
+//
+//     svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30 * time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+ return func(r *Request) {
+
+ var timeoutHandler = NamedHandler{
+ HandlerResponseTimeout,
+ func(req *Request) {
+ req.HTTPResponse.Body = &timeoutReadCloser{
+ reader: req.HTTPResponse.Body,
+ duration: duration,
+ }
+ }}
+
+ // remove the handler so we are not stomping over any new durations.
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+ r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+ r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+ r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+ }
+}
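
Applying the read-timeout option per operation looks roughly like this sketch (S3 service package assumed; bucket and key are placeholders, not part of the patch):

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Every Read on the response body now races a 30-second timer; a slow
	// read surfaces as an ErrCodeResponseTimeout error, which the
	// retryableCodes table above marks as retryable.
	_, _ = svc.GetObjectWithContext(aws.BackgroundContext(),
		&s3.GetObjectInput{
			Bucket: aws.String("example-bucket"), // placeholder
			Key:    aws.String("example-key"),    // placeholder
		},
		request.WithResponseReadTimeout(30*time.Second),
	)
}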
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100644
index 000000000..8630683f3
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -0,0 +1,286 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+ // InvalidParameterErrCode is the error code for invalid parameters errors
+ InvalidParameterErrCode = "InvalidParameter"
+ // ParamRequiredErrCode is the error code for required parameter errors
+ ParamRequiredErrCode = "ParamRequiredError"
+ // ParamMinValueErrCode is the error code for fields with too low of a
+ // number value.
+ ParamMinValueErrCode = "ParamMinValueError"
+ // ParamMinLenErrCode is the error code for fields without enough elements.
+ ParamMinLenErrCode = "ParamMinLenError"
+ // ParamMaxLenErrCode is the error code for a value being too long.
+ ParamMaxLenErrCode = "ParamMaxLenError"
+
+ // ParamFormatErrCode is the error code for a field with invalid
+ // format or characters.
+ ParamFormatErrCode = "ParamFormatInvalidError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
+type Validator interface {
+ Validate() error
+}
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+ // Context is the base context of the invalid parameter group.
+ Context string
+ errs []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+ err.SetContext(e.Context)
+ e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested context
+// updated and base context to reflect the merging.
+//
+// Use for nested validations errors.
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+ for _, err := range nested.errs {
+ err.SetContext(e.Context)
+ err.AddNestedContext(nestedCtx)
+ e.errs = append(e.errs, err)
+ }
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+ return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+ return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+ return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+ w := &bytes.Buffer{}
+ fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+ for _, err := range e.errs {
+ fmt.Fprintf(w, "- %s\n", err.Message())
+ }
+
+ return w.String()
+}
+
+// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+ return awserr.NewBatchError(
+ InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+ errs := make([]error, len(e.errs))
+ for i := 0; i < len(errs); i++ {
+ errs[i] = e.errs[i]
+ }
+
+ return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+ awserr.Error
+
+ // Field name the error occurred on.
+ Field() string
+
+ // SetContext updates the context of the error.
+ SetContext(string)
+
+ // AddNestedContext updates the error's context to include a nested level.
+ AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+ context string
+ nestedContext string
+ field string
+ code string
+ msg string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+ return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+ return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+ return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil. Implemented to satisfy the awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+ return nil
+}
+
+// Field returns the field and context in which the error occurred.
+func (e *errInvalidParam) Field() string {
+ field := e.context
+ if len(field) > 0 {
+ field += "."
+ }
+ if len(e.nestedContext) > 0 {
+ field += fmt.Sprintf("%s.", e.nestedContext)
+ }
+ field += e.field
+
+ return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+ e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+ if len(e.nestedContext) == 0 {
+ e.nestedContext = ctx
+ } else {
+ e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+ }
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+ errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+ return &ErrParamRequired{
+ errInvalidParam{
+ code: ParamRequiredErrCode,
+ field: field,
+ msg: "missing required field",
+ },
+ }
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+ errInvalidParam
+ min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+ return &ErrParamMinValue{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinValueErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field value of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+ return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+ errInvalidParam
+ min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+ return &ErrParamMinLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field size of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+ return e.min
+}
+
+// An ErrParamMaxLen represents a maximum length parameter error.
+type ErrParamMaxLen struct {
+ errInvalidParam
+ max int
+}
+
+// NewErrParamMaxLen creates a new maximum length parameter error.
+func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
+ return &ErrParamMaxLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMaxLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("maximum size of %v, %v", max, value),
+ },
+ max: max,
+ }
+}
+
+// MaxLen returns the field's required maximum length.
+func (e *ErrParamMaxLen) MaxLen() int {
+ return e.max
+}
+
+// An ErrParamFormat represents an invalid format parameter error.
+type ErrParamFormat struct {
+ errInvalidParam
+ format string
+}
+
+// NewErrParamFormat creates a new invalid format parameter error.
+func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
+ return &ErrParamFormat{
+ errInvalidParam: errInvalidParam{
+ code: ParamFormatErrCode,
+ field: field,
+ msg: fmt.Sprintf("format %v, %v", format, value),
+ },
+ format: format,
+ }
+}
+
+// Format returns the field's required format.
+func (e *ErrParamFormat) Format() string {
+ return e.format
+}
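
Generated operation inputs use these types in their Validate methods; a hedged sketch with a hypothetical input type (not part of the patch):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

// exampleInput is a hypothetical operation input following the pattern of
// the SDK's generated code.
type exampleInput struct {
	Bucket *string
	Key    *string
}

func (i *exampleInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "exampleInput"}
	if i.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
	if i.Key != nil && len(*i.Key) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

func main() {
	err := (&exampleInput{}).Validate()
	fmt.Println(err) // InvalidParameter: 1 validation error(s) found.
}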
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/src/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
new file mode 100644
index 000000000..4601f883c
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -0,0 +1,295 @@
+package request
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a waiter option which sets the maximum number
+// of times the waiter should attempt to check the resource for the target state.
+func WithWaiterMaxAttempts(max int) WaiterOption {
+ return func(w *Waiter) {
+ w.MaxAttempts = max
+ }
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state. The passed in attempt is the number of times the
+// Waiter has checked the resource state.
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts. It ignores the number of
+// attempts made.
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
+ return func(attempt int) time.Duration {
+ return delay
+ }
+}
+
+// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
+ return func(w *Waiter) {
+ w.Delay = delayer
+ }
+}
+
+// WithWaiterLogger returns a waiter option to set the logger a waiter
+// should use to log warnings and errors to.
+func WithWaiterLogger(logger aws.Logger) WaiterOption {
+ return func(w *Waiter) {
+ w.Logger = logger
+ }
+}
+
+// WithWaiterRequestOptions returns a waiter option setting the request
+// options for each request the waiter makes. Appends to the waiter's request
+// options already set.
+func WithWaiterRequestOptions(opts ...Option) WaiterOption {
+ return func(w *Waiter) {
+ w.RequestOptions = append(w.RequestOptions, opts...)
+ }
+}
+
+// A Waiter provides the functionality to perform a blocking call which will
+// wait for a resource state to be satisfied by a service.
+//
+// This type should not be used directly. The API operations provided in the
+// service packages prefixed with "WaitUntil" should be used instead.
+type Waiter struct {
+ Name string
+ Acceptors []WaiterAcceptor
+ Logger aws.Logger
+
+ MaxAttempts int
+ Delay WaiterDelay
+
+ RequestOptions []Option
+ NewRequest func([]Option) (*Request, error)
+ SleepWithContext func(aws.Context, time.Duration) error
+}
+
+// ApplyOptions updates the waiter with the list of waiter options provided.
+func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
+ for _, fn := range opts {
+ fn(w)
+ }
+}
+
+// WaiterState are states the waiter uses based on WaiterAcceptor definitions
+// to identify if the resource state the waiter is waiting on has occurred.
+type WaiterState int
+
+// String returns the string representation of the waiter state.
+func (s WaiterState) String() string {
+ switch s {
+ case SuccessWaiterState:
+ return "success"
+ case FailureWaiterState:
+ return "failure"
+ case RetryWaiterState:
+ return "retry"
+ default:
+ return "unknown waiter state"
+ }
+}
+
+// States the waiter acceptors will use to identify target resource states.
+const (
+ SuccessWaiterState WaiterState = iota // waiter successful
+ FailureWaiterState // waiter failed
+ RetryWaiterState // waiter needs to be retried
+)
+
+// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
+// definition's Expected attribute.
+type WaiterMatchMode int
+
+// Modes the waiter will use when inspecting API response to identify target
+// resource states.
+const (
+ PathAllWaiterMatch WaiterMatchMode = iota // match on all paths
+ PathWaiterMatch // match on specific path
+ PathAnyWaiterMatch // match on any path
+ PathListWaiterMatch // match on list of paths
+ StatusWaiterMatch // match on status code
+ ErrorWaiterMatch // match on error
+)
+
+// String returns the string representation of the waiter match mode.
+func (m WaiterMatchMode) String() string {
+ switch m {
+ case PathAllWaiterMatch:
+ return "pathAll"
+ case PathWaiterMatch:
+ return "path"
+ case PathAnyWaiterMatch:
+ return "pathAny"
+ case PathListWaiterMatch:
+ return "pathList"
+ case StatusWaiterMatch:
+ return "status"
+ case ErrorWaiterMatch:
+ return "error"
+ default:
+ return "unknown waiter match mode"
+ }
+}
+
+// WaitWithContext will make requests for the API operation using NewRequest to
+// build API requests. The request's response will be compared against the
+// Waiter's Acceptors to determine the successful state of the resource the
+// waiter is inspecting.
+//
+// The passed in context must not be nil. If it is nil a panic will occur. The
+// Context will be used to cancel the waiter's pending requests and retry delays.
+// Use aws.BackgroundContext if no context is available.
+//
+// The waiter will continue until the target state defined by the Acceptors,
+// or the max attempts expires.
+//
+// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
+// retryer ShouldRetry returns false. This normally will happen when the max
+// wait attempts expires.
+func (w Waiter) WaitWithContext(ctx aws.Context) error {
+ for attempt := 1; ; attempt++ {
+ req, err := w.NewRequest(w.RequestOptions)
+ if err != nil {
+ waiterLogf(w.Logger, "unable to create request %v", err)
+ return err
+ }
+ req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
+ err = req.Send()
+
+ // See if any of the acceptors match the request's response, or error
+ for _, a := range w.Acceptors {
+ if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
+ return matchErr
+ }
+ }
+
+ // The Waiter should only check the resource state MaxAttempts times.
+ // This is here instead of in the for loop above to prevent delaying
+ // unnecessarily when the waiter will not retry.
+ if attempt == w.MaxAttempts {
+ break
+ }
+
+ // Delay to wait before inspecting the resource again
+ delay := w.Delay(attempt)
+ if sleepFn := req.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(delay)
+ } else {
+ sleepCtxFn := w.SleepWithContext
+ if sleepCtxFn == nil {
+ sleepCtxFn = aws.SleepWithContext
+ }
+
+ if err := sleepCtxFn(ctx, delay); err != nil {
+ return awserr.New(CanceledErrorCode, "waiter context canceled", err)
+ }
+ }
+ }
+
+ return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
+}
+
+// A WaiterAcceptor provides the information needed to wait for an API operation
+// to complete.
+type WaiterAcceptor struct {
+ State WaiterState
+ Matcher WaiterMatchMode
+ Argument string
+ Expected interface{}
+}
+
+// match returns whether the acceptor found a match with the passed in request
+// or error. True is returned if the acceptor made a match, and an error is
+// returned if there was an error attempting to perform the match.
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
+ result := false
+ var vals []interface{}
+
+ switch a.Matcher {
+ case PathAllWaiterMatch, PathWaiterMatch:
+ // Require all matches to be equal for result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ if len(vals) == 0 {
+ break
+ }
+ result = true
+ for _, val := range vals {
+ if !awsutil.DeepEqual(val, a.Expected) {
+ result = false
+ break
+ }
+ }
+ case PathAnyWaiterMatch:
+ // Only a single match needs to equal for the result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ for _, val := range vals {
+ if awsutil.DeepEqual(val, a.Expected) {
+ result = true
+ break
+ }
+ }
+ case PathListWaiterMatch:
+ // ignored matcher
+ case StatusWaiterMatch:
+ s := a.Expected.(int)
+ result = s == req.HTTPResponse.StatusCode
+ case ErrorWaiterMatch:
+ if aerr, ok := err.(awserr.Error); ok {
+ result = aerr.Code() == a.Expected.(string)
+ }
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
+ name, a.Matcher)
+ }
+
+ if !result {
+ // If there was no matching result found there is nothing more to do
+ // for this response, retry the request.
+ return false, nil
+ }
+
+ switch a.State {
+ case SuccessWaiterState:
+ // waiter completed
+ return true, nil
+ case FailureWaiterState:
+ // Waiter failure state triggered
+ return true, awserr.New(WaiterResourceNotReadyErrorCode,
+ "failed waiting for successful resource state", err)
+ case RetryWaiterState:
+ // clear the error and retry the operation
+ return false, nil
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
+ name, a.State)
+ return false, nil
+ }
+}
+
+func waiterLogf(logger aws.Logger, msg string, args ...interface{}) {
+ if logger != nil {
+ logger.Log(fmt.Sprintf(msg, args...))
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/src/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go
new file mode 100644
index 000000000..ea9ebb6f6
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go
@@ -0,0 +1,26 @@
+// +build go1.7
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCABundleTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/src/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
new file mode 100644
index 000000000..fec39dfc1
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go
@@ -0,0 +1,22 @@
+// +build !go1.6,go1.5
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCABundleTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/src/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
new file mode 100644
index 000000000..1c5a5391e
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go
@@ -0,0 +1,23 @@
+// +build !go1.7,go1.6
+
+package session
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCABundleTransport() *http.Transport {
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/src/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
new file mode 100644
index 000000000..38a7b05a6
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -0,0 +1,273 @@
+/*
+Package session provides configuration for the SDK's service clients.
+
+Sessions can be shared across all service clients that share the same base
+configuration. The Session is built from the SDK's default configuration and
+request handlers.
+
+Sessions should be cached when possible, because creating a new Session will
+load all configuration values from the environment and config files each time
+the Session is created. Sharing the Session value across all of your service
+clients ensures the configuration is loaded the fewest number of times possible.
+
+Concurrency
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. The SDK will not modify the Session once the Session has been created.
+Creating service clients concurrently from a shared Session is safe.
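+
+For example, a single shared Session used across goroutines (a sketch; the s3
+and sqs clients are stand-ins for any service clients):
+
+	sess := session.Must(session.NewSession())
+
+	// Creating service clients concurrently from the shared Session is safe.
+	go func() { _ = s3.New(sess) }()
+	go func() { _ = sqs.New(sess) }()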
+
+Sessions from Shared Config
+
+Sessions can be created using NewSession, which will only load the additional
+config if the AWS_SDK_LOAD_CONFIG environment variable is set.
+Alternatively you can explicitly create a Session with shared config enabled.
+To do this you can use NewSessionWithOptions to configure how the Session will
+be created. Using the NewSessionWithOptions with SharedConfigState set to
+SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
+environment variable was set.
+
+Creating Sessions
+
+When creating Sessions, optional aws.Config values can be passed in that will
+override the default or loaded config values the Session is being created
+with. This allows you to provide additional, or case-based, configuration
+as needed.
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
+set to a truthy value the Session will be created from the configuration
+values from the shared config (~/.aws/config) and shared credentials
+(~/.aws/credentials) files. See the section Sessions from Shared Config for
+more information.
+
+Create a Session with the default config and request handlers, with the
+credentials, region, and profile loaded from the environment and shared config
+automatically. Requires AWS_PROFILE to be set, or "default" is used.
+
+ // Create Session
+ sess := session.Must(session.NewSession())
+
+ // Create a Session with a custom region
+ sess := session.Must(session.NewSession(&aws.Config{
+ Region: aws.String("us-east-1"),
+ }))
+
+	// Create an S3 client instance from a session
+ sess := session.Must(session.NewSession())
+
+ svc := s3.New(sess)
+
+Create Session With Option Overrides
+
+In addition to NewSession, Sessions can be created using NewSessionWithOptions.
+This func allows you to control and override how the Session will be created
+through code instead of being driven by environment variables only.
+
+Use NewSessionWithOptions when you want to provide the config profile, or
+override the shared config state (AWS_SDK_LOAD_CONFIG).
+
+ // Equivalent to session.NewSession()
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ // Options
+ }))
+
+ // Specify profile to load for the session's config
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ Profile: "profile_name",
+ }))
+
+ // Specify profile for config and region for requests
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ Config: aws.Config{Region: aws.String("us-east-1")},
+ Profile: "profile_name",
+ }))
+
+ // Force enable Shared Config support
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ SharedConfigState: session.SharedConfigEnable,
+ }))
+
+Adding Handlers
+
+You can add handlers to a session for processing HTTP requests. All service
+clients that use the session inherit the handlers. For example, the following
+handler logs every request and its payload made by a service client:
+
+ // Create a session, and add additional handlers for all service
+ // clients created with the Session to inherit. Adds logging handler.
+ sess := session.Must(session.NewSession())
+
+ sess.Handlers.Send.PushFront(func(r *request.Request) {
+ // Log every request made and its payload
+ logger.Printf("Request: %s/%s, Payload: %s",
+ r.ClientInfo.ServiceName, r.Operation, r.Params)
+ })
+
+Deprecated "New" function
+
+The New session function has been deprecated because it does not provide a good
+way to return errors that occur while loading the configuration files and values.
+Because of this, NewSession was created so errors can be retrieved when
+creating a session fails.
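+
+A sketch of the difference:
+
+	// Deprecated: a configuration error surfaces only once a request is made.
+	sess := session.New()
+
+	// Preferred: the error is returned when the Session is created.
+	sess, err := session.NewSession()
+	if err != nil {
+		// handle the configuration load error
+	}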
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's (~/.aws/credentials)
+credentials values, and all other config is provided by the environment variables,
+SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or the SharedConfigEnable
+option is used to create the Session, the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
+
+Credentials are the values the SDK uses to authenticate requests with AWS
+services. When loaded from a configuration file, both aws_access_key_id and
+aws_secret_access_key must be provided together in the same file to be
+considered valid. The values will be ignored if they are not a complete group.
+aws_session_token is an optional field that can be provided if both of the
+other two fields are also provided.
+
+ aws_access_key_id = AKID
+ aws_secret_access_key = SECRET
+ aws_session_token = TOKEN
+
+Assume Role values allow you to configure the SDK to assume an IAM role using
+a set of credentials provided in a config file via the source_profile field.
+Both "role_arn" and "source_profile" are required. The SDK supports assuming
+a role with MFA token if the session option AssumeRoleTokenProvider
+is set.
+
+ role_arn = arn:aws:iam:::role/
+ source_profile = profile_with_creds
+ external_id = 1234
+ mfa_serial =
+ role_session_name = session_name
+
+Region is the region the SDK should use for looking up AWS service endpoints
+and signing requests.
+
+ region = us-east-1
+
+Assume Role with MFA token
+
+To create a session with support for assuming an IAM role with MFA set the
+session option AssumeRoleTokenProvider to a function that will prompt for the
+MFA token code when the SDK assumes the role and refreshes the role's credentials.
+This allows you to configure the SDK via the shared config to assume a role
+with MFA tokens.
+
+In order for the SDK to assume a role with MFA the SharedConfigState
+session option must be set to SharedConfigEnable, or AWS_SDK_LOAD_CONFIG
+environment variable set.
+
+The shared configuration instructs the SDK to assume an IAM role with MFA
+when the mfa_serial configuration field is set in the shared config
+(~/.aws/config) or shared credentials (~/.aws/credentials) file.
+
+If mfa_serial is set in the configuration, but the AssumeRoleTokenProvider
+session option is not set, an error will be returned when creating the
+session.
+
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+ }))
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess)
+
+To set up assume role outside of a session, see the stscreds.AssumeRoleProvider
+documentation.
+
+Environment Variables
+
+When a Session is created, several environment variables can be set to adjust
+how the SDK functions and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values, like
+credentials, require multiple of the values to be set or the partial values
+will be ignored. All environment variable values are strings unless otherwise
+noted.
+
+Environment configuration values. If set, both Access Key ID and Secret Access
+Key must be provided. Session Token can optionally also be provided, but is
+not required.
+
+ # Access Key ID
+ AWS_ACCESS_KEY_ID=AKID
+ AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+ # Secret Access Key
+ AWS_SECRET_ACCESS_KEY=SECRET
+	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+ # Session Token
+ AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If
+it is not provided in the environment, the region must be provided before a
+service client request is made.
+
+ AWS_REGION=us-east-1
+
+ # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+ # and AWS_REGION is not also set.
+ AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided, "default" will be used as the profile name.
+
+ AWS_PROFILE=my_profile
+
+ # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+ # and AWS_PROFILE is not also set.
+ AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded so the shared
+credentials will have parity with the shared config file. This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+ AWS_SDK_LOAD_CONFIG=1
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials. If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+ AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+ AWS_CONFIG_FILE=$HOME/my_shared_config
+
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the system's default root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+ AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not an http.Transport, an error will be
+returned. If the Transport's TLS config is set, this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates, all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to enable this feature. The CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
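+
+For example, a sketch of providing the bundle through the session option
+instead of the environment variable (the file name is hypothetical):
+
+	f, err := os.Open("my_custom_ca_bundle.pem")
+	if err != nil {
+		// handle error
+	}
+	defer f.Close()
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		CustomCABundle: f,
+	})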
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option with a custom HTTP client, the HTTP client needs to be
+provided when creating the session, not the service client.
+*/
+package session
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/src/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644
index 000000000..e3959b959
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,236 @@
+package session
+
+import (
+ "os"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+)
+
+// EnvProviderName provides the name of the provider when config is loaded from the environment.
+const EnvProviderName = "EnvConfigCredentials"
+
+// envConfig is a collection of environment values the SDK will read its
+// setup config from. All environment values are optional, but some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+	// Environment configuration values. If set, both Access Key ID and Secret Access
+	// Key must be provided. Session Token can optionally also be provided, but is
+	// not required.
+ //
+ // # Access Key ID
+ // AWS_ACCESS_KEY_ID=AKID
+ // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+ //
+ // # Secret Access Key
+ // AWS_SECRET_ACCESS_KEY=SECRET
+	// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+ //
+ // # Session Token
+ // AWS_SESSION_TOKEN=TOKEN
+ Creds credentials.Value
+
+	// Region value will instruct the SDK where to make service API requests to.
+	// If it is not provided in the environment, the region must be provided before
+	// a service client request is made.
+ //
+ // AWS_REGION=us-east-1
+ //
+ // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+ // # and AWS_REGION is not also set.
+ // AWS_DEFAULT_REGION=us-east-1
+ Region string
+
+	// Profile name the SDK should use when loading shared configuration from the
+	// shared configuration files. If not provided, "default" will be used as the
+	// profile name.
+ //
+ // AWS_PROFILE=my_profile
+ //
+ // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+ // # and AWS_PROFILE is not also set.
+ // AWS_DEFAULT_PROFILE=my_profile
+ Profile string
+
+ // SDK load config instructs the SDK to load the shared config in addition to
+ // shared credentials. This also expands the configuration loaded from the shared
+ // credentials to have parity with the shared config file. This also enables
+ // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+ // env values as well.
+ //
+ // AWS_SDK_LOAD_CONFIG=1
+ EnableSharedConfig bool
+
+ // Shared credentials file path can be set to instruct the SDK to use an alternate
+ // file for the shared credentials. If not set the file will be loaded from
+ // $HOME/.aws/credentials on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\credentials on Windows.
+ //
+ // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+ SharedCredentialsFile string
+
+ // Shared config file path can be set to instruct the SDK to use an alternate
+ // file for the shared config. If not set the file will be loaded from
+ // $HOME/.aws/config on Linux/Unix based systems, and
+ // %USERPROFILE%\.aws\config on Windows.
+ //
+ // AWS_CONFIG_FILE=$HOME/my_shared_config
+ SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) bundle PEM file
+ // that the SDK will use instead of the system's root CA bundle.
+ // Only use this if you want to configure the SDK to use a custom set
+ // of CAs.
+ //
+ // Enabling this option will attempt to merge the Transport
+ // into the SDK's HTTP client. If the client's Transport is
+	// not an http.Transport, an error will be returned. If the
+ // Transport's TLS config is set this option will cause the
+ // SDK to overwrite the Transport's TLS config's RootCAs value.
+ //
+ // Setting a custom HTTPClient in the aws.Config options will override this setting.
+ // To use this option and custom HTTP client, the HTTP client needs to be provided
+ // when creating the session. Not the service client.
+ //
+ // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+ CustomCABundle string
+
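+	// Client Side Monitoring (CSM) values, read from the AWS_CSM_ENABLED,
+	// AWS_CSM_PORT, and AWS_CSM_CLIENT_ID environment variables.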
+ csmEnabled string
+ CSMEnabled bool
+ CSMPort string
+ CSMClientID string
+
+ enableEndpointDiscovery string
+ // Enables endpoint discovery via environment variables.
+ //
+ // AWS_ENABLE_ENDPOINT_DISCOVERY=true
+ EnableEndpointDiscovery *bool
+}
+
+var (
+ csmEnabledEnvKey = []string{
+ "AWS_CSM_ENABLED",
+ }
+ csmPortEnvKey = []string{
+ "AWS_CSM_PORT",
+ }
+ csmClientIDEnvKey = []string{
+ "AWS_CSM_CLIENT_ID",
+ }
+ credAccessEnvKey = []string{
+ "AWS_ACCESS_KEY_ID",
+ "AWS_ACCESS_KEY",
+ }
+ credSecretEnvKey = []string{
+ "AWS_SECRET_ACCESS_KEY",
+ "AWS_SECRET_KEY",
+ }
+ credSessionEnvKey = []string{
+ "AWS_SESSION_TOKEN",
+ }
+
+ enableEndpointDiscoveryEnvKey = []string{
+ "AWS_ENABLE_ENDPOINT_DISCOVERY",
+ }
+
+ regionEnvKeys = []string{
+ "AWS_REGION",
+ "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
+ }
+ profileEnvKeys = []string{
+ "AWS_PROFILE",
+ "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
+ }
+ sharedCredsFileEnvKey = []string{
+ "AWS_SHARED_CREDENTIALS_FILE",
+ }
+ sharedConfigFileEnvKey = []string{
+ "AWS_CONFIG_FILE",
+ }
+)
+
+// loadEnvConfig retrieves the SDK's environment configuration.
+// See `envConfig` for the values that will be retrieved.
+//
+// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
+// the shared SDK config will be loaded in addition to the SDK's specific
+// configuration values.
+func loadEnvConfig() envConfig {
+ enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
+ return envConfigLoad(enableSharedConfig)
+}
+
+// loadSharedEnvConfig retrieves the SDK's environment configuration, and the
+// SDK shared config. See `envConfig` for the values that will be retrieved.
+//
+// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
+// environment variable is set.
+func loadSharedEnvConfig() envConfig {
+ return envConfigLoad(true)
+}
+
+func envConfigLoad(enableSharedConfig bool) envConfig {
+ cfg := envConfig{}
+
+ cfg.EnableSharedConfig = enableSharedConfig
+
+ setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
+ setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
+ setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
+
+ // CSM environment variables
+ setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
+ setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
+ setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
+ cfg.CSMEnabled = len(cfg.csmEnabled) > 0
+
+ // Require logical grouping of credentials
+ if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
+ cfg.Creds = credentials.Value{}
+ } else {
+ cfg.Creds.ProviderName = EnvProviderName
+ }
+
+ regionKeys := regionEnvKeys
+ profileKeys := profileEnvKeys
+ if !cfg.EnableSharedConfig {
+ regionKeys = regionKeys[:1]
+ profileKeys = profileKeys[:1]
+ }
+
+ setFromEnvVal(&cfg.Region, regionKeys)
+ setFromEnvVal(&cfg.Profile, profileKeys)
+
+	// Endpoint discovery is considered enabled if the env value is set to
+	// anything other than "false".
+ setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey)
+ if len(cfg.enableEndpointDiscovery) > 0 {
+ cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false")
+ }
+
+ setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
+ setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
+
+ if len(cfg.SharedCredentialsFile) == 0 {
+ cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
+ }
+ if len(cfg.SharedConfigFile) == 0 {
+ cfg.SharedConfigFile = defaults.SharedConfigFilename()
+ }
+
+ cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
+
+ return cfg
+}
+
+func setFromEnvVal(dst *string, keys []string) {
+ for _, k := range keys {
+ if v := os.Getenv(k); len(v) > 0 {
+ *dst = v
+ break
+ }
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/src/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 000000000..be4b5f077
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,719 @@
+package session
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
+ "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+ "github.com/aws/aws-sdk-go/aws/csm"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/endpoints"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+const (
+ // ErrCodeSharedConfig represents an error that occurs in the shared
+ // configuration logic
+ ErrCodeSharedConfig = "SharedConfigErr"
+)
+
+// ErrSharedConfigSourceCollision will be returned if a section contains both
+// source_profile and credential_source
+var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil)
+
+// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
+// variables are empty and Environment was set as the credential source
+var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)
+
+// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
+var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ConfigProvider.
+type Session struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// New creates a new instance of the Session, merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value, the
+// New method could now encounter an error when loading the configuration. When
+// the environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session. Use NewSession to get the error when creating the
+// session.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
+//
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+func New(cfgs ...*aws.Config) *Session {
+ // load initial config from environment
+ envCfg := loadEnvConfig()
+
+ if envCfg.EnableSharedConfig {
+ var cfg aws.Config
+ cfg.MergeIn(cfgs...)
+ s, err := NewSessionWithOptions(Options{
+ Config: cfg,
+ SharedConfigState: SharedConfigEnable,
+ })
+ if err != nil {
+ // Old session.New expected all errors to be discovered when
+ // a request is made, and would report the errors then. This
+ // needs to be replicated if an error occurs while creating
+ // the session.
+ msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
+ "Use session.NewSession to handle errors occurring during session creation."
+
+ // Session creation failed, need to report the error and prevent
+ // any requests from succeeding.
+ s = &Session{Config: defaults.Config()}
+ s.Config.MergeIn(cfgs...)
+ s.Config.Logger.Log("ERROR:", msg, "Error:", err)
+ s.Handlers.Validate.PushBack(func(r *request.Request) {
+ r.Error = err
+ })
+ }
+
+ return s
+ }
+
+ s := deprecatedNewSession(cfgs...)
+ if envCfg.CSMEnabled {
+ enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
+ }
+
+ return s
+}
+
+// NewSession returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. Once the Session is created
+// it can be mutated to modify the Config or Handlers. The Session is safe to
+// be read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created. Such as specifying the
+// config profile, and controlling if shared config is enabled or not.
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+ opts := Options{}
+ opts.Config.MergeIn(cfgs...)
+
+ return NewSessionWithOptions(opts)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+ // SharedConfigStateFromEnv does not override any state of the
+ // AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+ // SharedConfigState type.
+ SharedConfigStateFromEnv SharedConfigState = iota
+
+ // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and disables the shared config functionality.
+ SharedConfigDisable
+
+ // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+ // and enables the shared config functionality.
+ SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+type Options struct {
+	// Provides config values for the SDK to use when creating service clients
+	// and making API requests to services. Any value set in this field
+	// will override the associated value provided by the SDK defaults,
+	// environment, or config files where relevant.
+	//
+	// If not set, configuration values from SDK defaults, environment, and
+	// config files will be used.
+ Config aws.Config
+
+ // Overrides the config profile the Session should be created from. If not
+ // set the value of the environment variable will be loaded (AWS_PROFILE,
+ // or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+ //
+ // If not set and environment variables are not set the "default"
+ // (DefaultSharedConfigProfile) will be used as the profile to load the
+ // session config from.
+ Profile string
+
+ // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+ // environment variable. By default a Session will be created using the
+ // value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+ //
+ // Setting this value to SharedConfigEnable or SharedConfigDisable
+ // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+ // and enable or disable the shared config functionality.
+ SharedConfigState SharedConfigState
+
+ // Ordered list of files the session will load configuration from.
+	// It overrides the AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE environment variables.
+ SharedConfigFiles []string
+
+ // When the SDK's shared config is configured to assume a role with MFA
+ // this option is required in order to provide the mechanism that will
+ // retrieve the MFA token. There is no default value for this field. If
+ // it is not set an error will be returned when creating the session.
+ //
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed. Within the context of service clients
+	// all sharing the same session the SDK will ensure calls to the token
+	// provider are atomic. When sharing a token provider across multiple
+	// sessions additional synchronization logic is needed to ensure the
+	// token providers do not introduce race conditions. It is recommended to
+	// share the session where possible.
+ //
+ // stscreds.StdinTokenProvider is a basic implementation that will prompt
+ // from stdin for the MFA token code.
+ //
+	// This field is only used if the shared configuration is enabled, and
+	// the config enables assume role with MFA via the mfa_serial field.
+ AssumeRoleTokenProvider func() (string, error)
+
+	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
+ // the SDK will use instead of the default system's root CA bundle. Use this
+ // only if you want to replace the CA bundle the SDK uses for TLS requests.
+ //
+ // Enabling this option will attempt to merge the Transport into the SDK's HTTP
+ // client. If the client's Transport is not a http.Transport an error will be
+	// client. If the client's Transport is not an http.Transport, an error will be
+ // to overwrite the Transport's TLS config's RootCAs value. If the CA
+ // bundle reader contains multiple certificates all of them will be loaded.
+ //
+ // The Session option CustomCABundle is also available when creating sessions
+ // to also enable this feature. CustomCABundle session option field has priority
+ // over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+ CustomCABundle io.Reader
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// // Equivalent to session.New
+// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
+//
+// // Specify profile to load for the session's config
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Profile: "profile_name",
+// }))
+//
+// // Specify profile for config and region for requests
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// Config: aws.Config{Region: aws.String("us-east-1")},
+// Profile: "profile_name",
+// }))
+//
+// // Force enable Shared Config support
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
+// SharedConfigState: session.SharedConfigEnable,
+// }))
+func NewSessionWithOptions(opts Options) (*Session, error) {
+ var envCfg envConfig
+ if opts.SharedConfigState == SharedConfigEnable {
+ envCfg = loadSharedEnvConfig()
+ } else {
+ envCfg = loadEnvConfig()
+ }
+
+ if len(opts.Profile) > 0 {
+ envCfg.Profile = opts.Profile
+ }
+
+ switch opts.SharedConfigState {
+ case SharedConfigDisable:
+ envCfg.EnableSharedConfig = false
+ case SharedConfigEnable:
+ envCfg.EnableSharedConfig = true
+ }
+
+ // Only use AWS_CA_BUNDLE if session option is not provided.
+ if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
+ f, err := os.Open(envCfg.CustomCABundle)
+ if err != nil {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to open custom CA bundle PEM file", err)
+ }
+ defer f.Close()
+ opts.CustomCABundle = f
+ }
+
+ return newSession(opts, envCfg, &opts.Config)
+}
+
+// Must is a helper function to ensure the Session is valid and there was no
+// error when calling a NewSession function.
+//
+// This helper is intended to be used in variable initialization to load the
+// Session and configuration at startup. Such as:
+//
+// var sess = session.Must(session.NewSession())
+func Must(sess *Session, err error) *Session {
+ if err != nil {
+ panic(err)
+ }
+
+ return sess
+}
+
+func deprecatedNewSession(cfgs ...*aws.Config) *Session {
+ cfg := defaults.Config()
+ handlers := defaults.Handlers()
+
+ // Apply the passed in configs so the configuration can be applied to the
+ // default credential chain
+ cfg.MergeIn(cfgs...)
+ if cfg.EndpointResolver == nil {
+ // An endpoint resolver is required for a session to be able to provide
+ // endpoints for service client configurations.
+ cfg.EndpointResolver = endpoints.DefaultResolver()
+ }
+ cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+ // Reapply any passed in configs to override credentials if set
+ cfg.MergeIn(cfgs...)
+
+ s := &Session{
+ Config: cfg,
+ Handlers: handlers,
+ }
+
+ initHandlers(s)
+ return s
+}
+
+func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) {
+ logger.Log("Enabling CSM")
+ if len(port) == 0 {
+ port = csm.DefaultPort
+ }
+
+ r, err := csm.Start(clientID, "127.0.0.1:"+port)
+ if err != nil {
+ return
+ }
+ r.InjectHandlers(handlers)
+}
+
+func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+ cfg := defaults.Config()
+ handlers := defaults.Handlers()
+
+	// Get a merged version of the user provided config to determine if
+	// credentials were set.
+ userCfg := &aws.Config{}
+ userCfg.MergeIn(cfgs...)
+
+ // Ordered config files will be loaded in with later files overwriting
+ // previous config file values.
+ var cfgFiles []string
+ if opts.SharedConfigFiles != nil {
+ cfgFiles = opts.SharedConfigFiles
+ } else {
+ cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+ if !envCfg.EnableSharedConfig {
+ // The shared config file (~/.aws/config) is only loaded if instructed
+ // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+ cfgFiles = cfgFiles[1:]
+ }
+ }
+
+ // Load additional config from file(s)
+ sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+ return nil, err
+ }
+
+ s := &Session{
+ Config: cfg,
+ Handlers: handlers,
+ }
+
+ initHandlers(s)
+ if envCfg.CSMEnabled {
+ enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
+ }
+
+ // Setup HTTP client with custom cert bundle if enabled
+ if opts.CustomCABundle != nil {
+ if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
+ return nil, err
+ }
+ }
+
+ return s, nil
+}
+
+func loadCustomCABundle(s *Session, bundle io.Reader) error {
+ var t *http.Transport
+ switch v := s.Config.HTTPClient.Transport.(type) {
+ case *http.Transport:
+ t = v
+ default:
+ if s.Config.HTTPClient.Transport != nil {
+ return awserr.New("LoadCustomCABundleError",
+ "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
+ }
+ }
+ if t == nil {
+		// Nil transport implies `http.DefaultTransport` should be used. Since
+		// the SDK cannot modify or copy the `DefaultTransport`, specifying
+		// equivalent values is the next closest behavior.
+ t = getCABundleTransport()
+ }
+
+ p, err := loadCertPool(bundle)
+ if err != nil {
+ return err
+ }
+ if t.TLSClientConfig == nil {
+ t.TLSClientConfig = &tls.Config{}
+ }
+ t.TLSClientConfig.RootCAs = p
+
+ s.Config.HTTPClient.Transport = t
+
+ return nil
+}
+
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to read custom CA bundle PEM file", err)
+ }
+
+ p := x509.NewCertPool()
+ if !p.AppendCertsFromPEM(b) {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to load custom CA bundle PEM file", err)
+ }
+
+ return p, nil
+}
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error {
+ // Merge in user provided configuration
+ cfg.MergeIn(userCfg)
+
+ // Region if not already set by user
+ if len(aws.StringValue(cfg.Region)) == 0 {
+ if len(envCfg.Region) > 0 {
+ cfg.WithRegion(envCfg.Region)
+ } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+ cfg.WithRegion(sharedCfg.Region)
+ }
+ }
+
+ if cfg.EnableEndpointDiscovery == nil {
+ if envCfg.EnableEndpointDiscovery != nil {
+ cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
+ } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
+ cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
+ }
+ }
+
+ // Configure credentials if not already set
+ if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
+
+ // inspect the profile to see if a credential source has been specified.
+ if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 {
+
+ // if both credential_source and source_profile have been set, return an error
+ // as this is undefined behavior.
+ if len(sharedCfg.AssumeRole.SourceProfile) > 0 {
+ return ErrSharedConfigSourceCollision
+ }
+
+ // valid credential source values
+ const (
+ credSourceEc2Metadata = "Ec2InstanceMetadata"
+ credSourceEnvironment = "Environment"
+ credSourceECSContainer = "EcsContainer"
+ )
+
+ switch sharedCfg.AssumeRole.CredentialSource {
+ case credSourceEc2Metadata:
+ cfgCp := *cfg
+ p := defaults.RemoteCredProvider(cfgCp, handlers)
+ cfgCp.Credentials = credentials.NewCredentials(p)
+
+ if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
+ // AssumeRole Token provider is required if doing Assume Role
+ // with MFA.
+ return AssumeRoleTokenProviderNotSetError{}
+ }
+
+ cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
+ case credSourceEnvironment:
+ cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+ envCfg.Creds,
+ )
+ case credSourceECSContainer:
+ if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
+ return ErrSharedConfigECSContainerEnvVarEmpty
+ }
+
+ cfgCp := *cfg
+ p := defaults.RemoteCredProvider(cfgCp, handlers)
+ creds := credentials.NewCredentials(p)
+
+ cfg.Credentials = creds
+ default:
+ return ErrSharedConfigInvalidCredSource
+ }
+
+ return nil
+ }
+
+ if len(envCfg.Creds.AccessKeyID) > 0 {
+ cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+ envCfg.Creds,
+ )
+ } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
+ cfgCp := *cfg
+ cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
+ sharedCfg.AssumeRoleSource.Creds,
+ )
+
+ if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
+ // AssumeRole Token provider is required if doing Assume Role
+ // with MFA.
+ return AssumeRoleTokenProviderNotSetError{}
+ }
+
+ cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
+ } else if len(sharedCfg.Creds.AccessKeyID) > 0 {
+ cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+ sharedCfg.Creds,
+ )
+ } else if len(sharedCfg.CredentialProcess) > 0 {
+ cfg.Credentials = processcreds.NewCredentials(
+ sharedCfg.CredentialProcess,
+ )
+ } else {
+ // Fallback to default credentials provider, include mock errors
+ // for the credential chain so user can identify why credentials
+ // failed to be retrieved.
+ cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: []credentials.Provider{
+ &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
+ &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
+ defaults.RemoteCredProvider(*cfg, handlers),
+ },
+ })
+ }
+ }
+
+ return nil
+}
+
+func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials {
+ return stscreds.NewCredentials(
+ &Session{
+ Config: &cfg,
+ Handlers: handlers.Copy(),
+ },
+ sharedCfg.AssumeRole.RoleARN,
+ func(opt *stscreds.AssumeRoleProvider) {
+ opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
+
+ // Assume role with external ID
+ if len(sharedCfg.AssumeRole.ExternalID) > 0 {
+ opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
+ }
+
+ // Assume role with MFA
+ if len(sharedCfg.AssumeRole.MFASerial) > 0 {
+ opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
+ opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+ }
+ },
+ )
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session without the AssumeRoleTokenProvider option set, while the shared
+// config is configured to assume a role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+ return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+	return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+ return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
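+// credProviderError is a stub credentials.Provider that always fails with the
+// wrapped error. The default credential chain fallback above uses it to report
+// descriptive errors for credential sources that could not be loaded.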
+type credProviderError struct {
+ Err error
+}
+
+var emptyCreds = credentials.Value{}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+ return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+ return true
+}
+
+func initHandlers(s *Session) {
+ // Add the Validate parameter handler if it is not disabled.
+ s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+ if !aws.BoolValue(s.Config.DisableParamValidation) {
+ s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+ }
+}
+
+// Copy creates and returns a copy of the current Session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the Session's copied config.
+//
+// // Create a copy of the current Session, configured for the us-west-2 region.
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+ newSession := &Session{
+ Config: s.Config.Copy(cfgs...),
+ Handlers: s.Handlers.Copy(),
+ }
+
+ initHandlers(newSession)
+
+ return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
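+//
+// A usage sketch (s3 is illustrative; any service client constructor works
+// the same way):
+//
+//	sess := session.Must(session.NewSession())
+//	svc := s3.New(sess, &aws.Config{Region: aws.String("us-west-2")})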
+func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+	// Backwards compatibility: the error will be eaten if the user calls
+	// ClientConfig directly. All SDK services will use clientConfigWithErr.
+ cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
+
+ return cfg
+}
+
+func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
+ s = s.Copy(cfgs...)
+
+ var resolved endpoints.ResolvedEndpoint
+ var err error
+
+ region := aws.StringValue(s.Config.Region)
+
+ if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
+ resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
+ resolved.SigningRegion = region
+ } else {
+ resolved, err = s.Config.EndpointResolver.EndpointFor(
+ serviceName, region,
+ func(opt *endpoints.Options) {
+ opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
+ opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
+
+ // Support the condition where the service is modeled but its
+ // endpoint metadata is not available.
+ opt.ResolveUnknownService = true
+ },
+ )
+ }
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: resolved.URL,
+ SigningRegion: resolved.SigningRegion,
+ SigningNameDerived: resolved.SigningNameDerived,
+ SigningName: resolved.SigningName,
+ }, err
+}
+
+// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
+// that the EndpointResolver will not be used to resolve the endpoint. The only
+// endpoint set must come from the aws.Config.Endpoint field.
+func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config {
+ s = s.Copy(cfgs...)
+
+ var resolved endpoints.ResolvedEndpoint
+
+ region := aws.StringValue(s.Config.Region)
+
+ if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
+ resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
+ resolved.SigningRegion = region
+ }
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: resolved.URL,
+ SigningRegion: resolved.SigningRegion,
+ SigningNameDerived: resolved.SigningNameDerived,
+ SigningName: resolved.SigningName,
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/src/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
new file mode 100644
index 000000000..7cb44021b
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -0,0 +1,329 @@
+package session
+
+import (
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+
+ "github.com/aws/aws-sdk-go/internal/ini"
+)
+
+const (
+ // Static Credentials group
+ accessKeyIDKey = `aws_access_key_id` // group required
+ secretAccessKey = `aws_secret_access_key` // group required
+ sessionTokenKey = `aws_session_token` // optional
+
+ // Assume Role Credentials group
+ roleArnKey = `role_arn` // group required
+ sourceProfileKey = `source_profile` // group required (or credential_source)
+ credentialSourceKey = `credential_source` // group required (or source_profile)
+ externalIDKey = `external_id` // optional
+ mfaSerialKey = `mfa_serial` // optional
+ roleSessionNameKey = `role_session_name` // optional
+
+ // Additional Config fields
+ regionKey = `region`
+
+ // endpoint discovery group
+ enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
+ // External Credential Process
+ credentialProcessKey = `credential_process`
+
+ // DefaultSharedConfigProfile is the default profile to be used when
+ // loading configuration from the config files if another profile name
+ // is not provided.
+ DefaultSharedConfigProfile = `default`
+)
+
+type assumeRoleConfig struct {
+ RoleARN string
+ SourceProfile string
+ CredentialSource string
+ ExternalID string
+ MFASerial string
+ RoleSessionName string
+}
+
+// sharedConfig represents the configuration fields of the SDK config files.
+type sharedConfig struct {
+ // Credentials values from the config file. Both aws_access_key_id
+ // and aws_secret_access_key must be provided together in the same file
+ // to be considered valid. The values will be ignored if not a complete group.
+ // aws_session_token is an optional field that can be provided if both of the
+ // other two fields are also provided.
+ //
+ // aws_access_key_id
+ // aws_secret_access_key
+ // aws_session_token
+ Creds credentials.Value
+
+ AssumeRole assumeRoleConfig
+ AssumeRoleSource *sharedConfig
+
+ // An external process to request credentials
+ CredentialProcess string
+
+ // Region is the region the SDK should use for looking up AWS service endpoints
+ // and signing requests.
+ //
+ // region
+ Region string
+
+ // EnableEndpointDiscovery can be enabled in the shared config by setting
+ // endpoint_discovery_enabled to true
+ //
+ // endpoint_discovery_enabled = true
+ EnableEndpointDiscovery *bool
+}
+
+type sharedConfigFile struct {
+ Filename string
+ IniData ini.Sections
+}
+
+// loadSharedConfig retrieves the configuration from the list of files
+// using the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B. Both define credentials. If the order
+// of the files are A then B, B's credential values will be used instead of A's.
+//
+// See sharedConfig.setFromIniFile for information on how the config files
+// will be loaded.
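+//
+// Illustrative file content (the values are assumptions; in the shared config
+// file, non-default profiles use the "profile <name>" section form):
+//
+//	[profile dev]
+//	role_arn = arn:aws:iam::123456789012:role/dev_role
+//	source_profile = default
+//
+//	[default]
+//	aws_access_key_id = AKID
+//	aws_secret_access_key = SECRET
+//	region = us-east-1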
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
+ if len(profile) == 0 {
+ profile = DefaultSharedConfigProfile
+ }
+
+ files, err := loadSharedConfigIniFiles(filenames)
+ if err != nil {
+ return sharedConfig{}, err
+ }
+
+ cfg := sharedConfig{}
+ if err = cfg.setFromIniFiles(profile, files); err != nil {
+ return sharedConfig{}, err
+ }
+
+ if len(cfg.AssumeRole.SourceProfile) > 0 {
+ if err := cfg.setAssumeRoleSource(profile, files); err != nil {
+ return sharedConfig{}, err
+ }
+ }
+
+ return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+ files := make([]sharedConfigFile, 0, len(filenames))
+
+ for _, filename := range filenames {
+ sections, err := ini.OpenFile(filename)
+ if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
+ // Skip files which can't be opened and read for whatever reason
+ continue
+ } else if err != nil {
+ return nil, SharedConfigLoadError{Filename: filename, Err: err}
+ }
+
+ files = append(files, sharedConfigFile{
+ Filename: filename, IniData: sections,
+ })
+ }
+
+ return files, nil
+}
+
+func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
+ var assumeRoleSrc sharedConfig
+
+ if len(cfg.AssumeRole.CredentialSource) > 0 {
+ // setAssumeRoleSource is only called when source_profile is found.
+ // If both source_profile and credential_source are set, then
+ // ErrSharedConfigSourceCollision will be returned
+ return ErrSharedConfigSourceCollision
+ }
+
+	// Multiple level assume role chains are not supported
+ if cfg.AssumeRole.SourceProfile == origProfile {
+ assumeRoleSrc = *cfg
+ assumeRoleSrc.AssumeRole = assumeRoleConfig{}
+ } else {
+ err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
+ if err != nil {
+ return err
+ }
+ }
+
+ if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
+ return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
+ }
+
+ cfg.AssumeRoleSource = &assumeRoleSrc
+
+ return nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
+	// Apply config from each file, ignoring files where the profile does not exist.
+ for _, f := range files {
+ if err := cfg.setFromIniFile(profile, f); err != nil {
+ if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+				// Ignore missing profiles
+ continue
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+// setFromIniFile loads the configuration from the file using
+// the profile provided. A sharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only complete, logically grouped values are loaded, and fields in cfg will
+// not be set for incomplete grouped values in the config, such as credentials.
+// For example, if a config file only includes aws_access_key_id but no
+// aws_secret_access_key, the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
+ section, ok := file.IniData.GetSection(profile)
+ if !ok {
+		// Fall back to the alternate profile name: "profile <name>"
+ section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+ if !ok {
+ return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
+ }
+ }
+
+ // Shared Credentials
+ akid := section.String(accessKeyIDKey)
+ secret := section.String(secretAccessKey)
+ if len(akid) > 0 && len(secret) > 0 {
+ cfg.Creds = credentials.Value{
+ AccessKeyID: akid,
+ SecretAccessKey: secret,
+ SessionToken: section.String(sessionTokenKey),
+ ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+ }
+ }
+
+ // Assume Role
+ roleArn := section.String(roleArnKey)
+ srcProfile := section.String(sourceProfileKey)
+ credentialSource := section.String(credentialSourceKey)
+ hasSource := len(srcProfile) > 0 || len(credentialSource) > 0
+ if len(roleArn) > 0 && hasSource {
+ cfg.AssumeRole = assumeRoleConfig{
+ RoleARN: roleArn,
+ SourceProfile: srcProfile,
+ CredentialSource: credentialSource,
+ ExternalID: section.String(externalIDKey),
+ MFASerial: section.String(mfaSerialKey),
+ RoleSessionName: section.String(roleSessionNameKey),
+ }
+ }
+
+ // `credential_process`
+ if credProc := section.String(credentialProcessKey); len(credProc) > 0 {
+ cfg.CredentialProcess = credProc
+ }
+
+ // Region
+ if v := section.String(regionKey); len(v) > 0 {
+ cfg.Region = v
+ }
+
+ // Endpoint discovery
+ if section.Has(enableEndpointDiscoveryKey) {
+ v := section.Bool(enableEndpointDiscoveryKey)
+ cfg.EnableEndpointDiscovery = &v
+ }
+
+ return nil
+}
+
+// SharedConfigLoadError is an error for when the shared config file fails to load.
+type SharedConfigLoadError struct {
+ Filename string
+ Err error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+ return "SharedConfigLoadError"
+}
+
+// Message is the description of the error
+func (e SharedConfigLoadError) Message() string {
+ return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+ return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+ Profile string
+ Err error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+ return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error
+func (e SharedConfigProfileNotExistsError) Message() string {
+ return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+ return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+ RoleARN string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+ return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error
+func (e SharedConfigAssumeRoleError) Message() string {
+ return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
+ e.RoleARN)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error {
+ return nil
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigAssumeRoleError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 000000000..244c86da0
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+ "net/http"
+ "strings"
+)
+
+// rules houses a set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules and simply
+// checks whether or not a value adheres to that rule
+type rule interface {
+ IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and return true if any rule
+// applies to the value. Nested rules are supported.
+func (r rules) IsValid(value string) bool {
+ for _, rule := range r {
+ if rule.IsValid(value) {
+ return true
+ }
+ }
+ return false
+}
+
+// mapRule is a generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule checks whether the value exists in the map
+func (m mapRule) IsValid(value string) bool {
+ _, ok := m[value]
+ return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+ rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+ return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+ rule
+}
+
+// IsValid for blacklist checks if the value is not within the blacklist
+func (b blacklist) IsValid(value string) bool {
+ return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+ for _, pattern := range p {
+ if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
+ return true
+ }
+ }
+ return false
+}
+
+// inclusiveRules allows rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+ for _, rule := range r {
+ if !rule.IsValid(value) {
+ return false
+ }
+ }
+ return true
+}
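+
+// As an illustration of composing these rules (the header names below are
+// hypothetical examples, not part of this file's rule sets):
+//
+//	r := rules{
+//		whitelist{mapRule{"Content-Type": struct{}{}}},
+//		patterns{"X-Amz-Meta-"},
+//	}
+//	r.IsValid("X-Amz-Meta-Owner") // true, matched by the pattern rule
+//	r.IsValid("Authorization")    // false, matched by no rule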
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100644
index 000000000..6aa2ed241
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload will enable and set the UnsignedPayload field to
+// true of the signer.
+func WithUnsignedPayload(v4 *Signer) {
+ v4.UnsignedPayload = true
+}
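+
+// Usage sketch (creds is assumed to be an existing *credentials.Credentials
+// value):
+//
+//	signer := v4.NewSigner(creds, v4.WithUnsignedPayload)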
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
new file mode 100644
index 000000000..bd082e9d1
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
@@ -0,0 +1,24 @@
+// +build go1.5
+
+package v4
+
+import (
+ "net/url"
+ "strings"
+)
+
+func getURIPath(u *url.URL) string {
+ var uri string
+
+ if len(u.Opaque) > 0 {
+ uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+ } else {
+ uri = u.EscapedPath()
+ }
+
+ if len(uri) == 0 {
+ uri = "/"
+ }
+
+ return uri
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
new file mode 100644
index 000000000..523db79f8
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,796 @@
+// Package v4 implements signing for AWS V4 signer
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use the URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+//	"//<hostname>/<path>"
+//
+// // e.g.
+// "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method and use the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fall back to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query from which the signature was generated.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, it is recommended that when using the signer outside of the
+// SDK you explicitly escape the request prior to signing; this will help
+// prevent signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for an HTTP/2 server, and you're using Go 1.6.2
+// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
+// message. URL.Opaque generally will force Go to make requests with absolute URL.
+// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
+// or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
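+//
+// A minimal standalone sketch (the endpoint, service name, and region are
+// illustrative assumptions, not taken from this package):
+//
+//	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
+//	req, _ := http.NewRequest("GET", "https://dynamodb.us-west-2.amazonaws.com/", nil)
+//	_, err := signer.Sign(req, nil, "dynamodb", "us-west-2", time.Now())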
+package v4
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+ authHeaderPrefix = "AWS4-HMAC-SHA256"
+ timeFormat = "20060102T150405Z"
+ shortTimeFormat = "20060102"
+
+ // emptyStringSHA256 is a SHA256 of an empty string
+ emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+ blacklist{
+ mapRule{
+ "Authorization": struct{}{},
+ "User-Agent": struct{}{},
+ "X-Amzn-Trace-Id": struct{}{},
+ },
+ },
+}
+
+// requiredSignedHeaders is a whitelist used when building canonical headers.
+var requiredSignedHeaders = rules{
+ whitelist{
+ mapRule{
+ "Cache-Control": struct{}{},
+ "Content-Disposition": struct{}{},
+ "Content-Encoding": struct{}{},
+ "Content-Language": struct{}{},
+ "Content-Md5": struct{}{},
+ "Content-Type": struct{}{},
+ "Expires": struct{}{},
+ "If-Match": struct{}{},
+ "If-Modified-Since": struct{}{},
+ "If-None-Match": struct{}{},
+ "If-Unmodified-Since": struct{}{},
+ "Range": struct{}{},
+ "X-Amz-Acl": struct{}{},
+ "X-Amz-Copy-Source": struct{}{},
+ "X-Amz-Copy-Source-If-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+ "X-Amz-Copy-Source-If-None-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+ "X-Amz-Copy-Source-Range": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Grant-Full-control": struct{}{},
+ "X-Amz-Grant-Read": struct{}{},
+ "X-Amz-Grant-Read-Acp": struct{}{},
+ "X-Amz-Grant-Write": struct{}{},
+ "X-Amz-Grant-Write-Acp": struct{}{},
+ "X-Amz-Metadata-Directive": struct{}{},
+ "X-Amz-Mfa": struct{}{},
+ "X-Amz-Request-Payer": struct{}{},
+ "X-Amz-Server-Side-Encryption": struct{}{},
+ "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Storage-Class": struct{}{},
+ "X-Amz-Tagging": struct{}{},
+ "X-Amz-Website-Redirect-Location": struct{}{},
+ "X-Amz-Content-Sha256": struct{}{},
+ },
+ },
+ patterns{"X-Amz-Meta-"},
+}
+
+// allowedQueryHoisting is a whitelist of the headers that are allowed to be
+// hoisted into the request's query string.
+var allowedQueryHoisting = inclusiveRules{
+ blacklist{requiredSignedHeaders},
+ patterns{"X-Amz-"},
+}
+
+// Signer applies AWS v4 signing to a given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+ // The authentication credentials the request will be signed against.
+ // This value must be set to sign requests.
+ Credentials *credentials.Credentials
+
+ // Sets the log level the signer should use when reporting information to
+ // the logger. If the logger is nil nothing will be logged. See
+ // aws.LogLevelType for more information on available logging levels
+ //
+ // By default nothing will be logged.
+ Debug aws.LogLevelType
+
+	// The logger logging information will be written to. If the logger
+	// is nil, nothing will be logged.
+ Logger aws.Logger
+
+ // Disables the Signer's moving HTTP header key/value pairs from the HTTP
+ // request header to the request's query string. This is most commonly used
+ // with pre-signed requests preventing headers from being added to the
+ // request's query string.
+ DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need
+	// additional escaping, use this to disable the signer escaping the path.
+ //
+ // S3 is an example of a service that does not need additional escaping.
+ //
+ // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ DisableURIPathEscaping bool
+
+	// Disables the automatic setting of the HTTP request's Body field with the
+ // io.ReadSeeker passed in to the signer. This is useful if you're using a
+ // custom wrapper around the body for the io.ReadSeeker and want to preserve
+ // the Body value on the Request.Body.
+ //
+ // This does run the risk of signing a request with a body that will not be
+	// sent in the request. You need to ensure that the underlying data of the
+	// Body values are the same.
+ DisableRequestBodyOverwrite bool
+
+ // currentTimeFn returns the time value which represents the current time.
+ // This value should only be used for testing. If it is nil the default
+ // time.Now will be used.
+ currentTimeFn func() time.Time
+
+ // UnsignedPayload will prevent signing of the payload. This will only
+ // work for services that have support for this.
+ UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+ v4 := &Signer{
+ Credentials: credentials,
+ }
+
+ for _, option := range options {
+ option(v4)
+ }
+
+ return v4
+}
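+
+// As an illustration (a sketch; creds and logger are assumed to already
+// exist), the optional fields can be configured at construction time:
+//
+//	signer := NewSigner(creds, func(s *Signer) {
+//		s.Debug = aws.LogDebugWithSigning
+//		s.Logger = logger
+//	})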
+
+type signingCtx struct {
+ ServiceName string
+ Region string
+ Request *http.Request
+ Body io.ReadSeeker
+ Query url.Values
+ Time time.Time
+ ExpireTime time.Duration
+ SignedHeaderVals http.Header
+
+ DisableURIPathEscaping bool
+
+ credValues credentials.Value
+ isPresign bool
+ formattedTime string
+ formattedShortTime string
+ unsignedPayload bool
+
+ bodyDigest string
+ signedHeaders string
+ canonicalHeaders string
+ canonicalString string
+ credentialString string
+ stringToSign string
+ signature string
+ authorization string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will not
+// change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way the header values on the request
+// will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, 0, false, signTime)
+}
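+
+// A minimal sketch of signing with header values (the endpoint, service
+// name, and region are assumptions for illustration):
+//
+//	req, _ := http.NewRequest("GET", "https://sqs.us-east-1.amazonaws.com/", nil)
+//	signer := NewSigner(credentials.NewEnvCredentials())
+//	if _, err := signer.Sign(req, nil, "sqs", "us-east-1", time.Now()); err != nil {
+//		// handle the signing error
+//	}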
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+ return v4.signWithBody(r, body, service, region, exp, true, signTime)
+}
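+
+// A minimal presigning sketch (the bucket, key, and region are hypothetical):
+//
+//	req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/some/key", nil)
+//	signer := NewSigner(credentials.NewEnvCredentials())
+//	if _, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now()); err != nil {
+//		// handle the signing error
+//	}
+//	// req.URL now carries the X-Amz-* query parameters and can be shared.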
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
+ currentTimeFn := v4.currentTimeFn
+ if currentTimeFn == nil {
+ currentTimeFn = time.Now
+ }
+
+ ctx := &signingCtx{
+ Request: r,
+ Body: body,
+ Query: r.URL.Query(),
+ Time: signTime,
+ ExpireTime: exp,
+ isPresign: isPresign,
+ ServiceName: service,
+ Region: region,
+ DisableURIPathEscaping: v4.DisableURIPathEscaping,
+ unsignedPayload: v4.UnsignedPayload,
+ }
+
+ for key := range ctx.Query {
+ sort.Strings(ctx.Query[key])
+ }
+
+ if ctx.isRequestSigned() {
+ ctx.Time = currentTimeFn()
+ ctx.handlePresignRemoval()
+ }
+
+ var err error
+ ctx.credValues, err = v4.Credentials.Get()
+ if err != nil {
+ return http.Header{}, err
+ }
+
+ ctx.sanitizeHostForHeader()
+ ctx.assignAmzQueryValues()
+ if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
+ return nil, err
+ }
+
+ // If the request is not presigned the body should be attached to it. This
+ // prevents the confusion of wanting to send a signed request without
+ // the body the request was signed for attached.
+ if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+ var reader io.ReadCloser
+ if body != nil {
+ var ok bool
+ if reader, ok = body.(io.ReadCloser); !ok {
+ reader = ioutil.NopCloser(body)
+ }
+ }
+ r.Body = reader
+ }
+
+ if v4.Debug.Matches(aws.LogDebugWithSigning) {
+ v4.logSigningInfo(ctx)
+ }
+
+ return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) sanitizeHostForHeader() {
+ request.SanitizeHostForHeader(ctx.Request)
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+ if !ctx.isPresign {
+ return
+ }
+
+	// The credentials have expired for this request. The current signing
+	// is invalid, and the request needs to be re-signed or it will fail.
+ ctx.removePresign()
+
+	// Update the request's query string to ensure the values stay in
+	// sync in case retrieving the new credentials fails.
+ ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+ if ctx.credValues.SessionToken != "" {
+ ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ } else {
+ ctx.Query.Del("X-Amz-Security-Token")
+ }
+
+ return
+ }
+
+ if ctx.credValues.SessionToken != "" {
+ ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+ }
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests with the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+ Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler should only be used with the SDK's built-in service clients'
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+ SignSDKRequestWithCurrentTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+ return request.NamedHandler{
+ Name: name,
+ Fn: func(req *request.Request) {
+ SignSDKRequestWithCurrentTime(req, time.Now, opts...)
+ },
+ }
+}
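+
+// For example, a named handler that signs with an unsigned payload could be
+// built as follows (a sketch; svc is assumed to be an SDK service client):
+//
+//	handler := BuildNamedHandler("v4.CustomSignRequestHandler", WithUnsignedPayload)
+//	svc.Handlers.Sign.SwapNamed(handler)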
+
+// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
+// function passed in. Behaves the same as SignSDKRequest with the exception
+// the request is signed with the value returned by the current time function.
+func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+	// Skip signing the request if the AnonymousCredentials object is
+	// used; such requests do not need to be signed.
+ if req.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ region := req.ClientInfo.SigningRegion
+ if region == "" {
+ region = aws.StringValue(req.Config.Region)
+ }
+
+ name := req.ClientInfo.SigningName
+ if name == "" {
+ name = req.ClientInfo.ServiceName
+ }
+
+ v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+ v4.Debug = req.Config.LogLevel.Value()
+ v4.Logger = req.Config.Logger
+ v4.DisableHeaderHoisting = req.NotHoist
+ v4.currentTimeFn = curTimeFn
+ if name == "s3" {
+ // S3 service should not have any escaping applied
+ v4.DisableURIPathEscaping = true
+ }
+		// Prevents setting the HTTPRequest's Body. Since the Body could be
+		// wrapped in a custom io.Closer that we do not want to be stomped
+		// on by the signer.
+ v4.DisableRequestBodyOverwrite = true
+ })
+
+ for _, opt := range opts {
+ opt(v4)
+ }
+
+ curTime := curTimeFn()
+ signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
+ name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
+ )
+ if err != nil {
+ req.Error = err
+ req.SignedHeaderVals = nil
+ return
+ }
+
+ req.SignedHeaderVals = signedHeaders
+ req.LastSignedAt = curTime
+}
+
+const logSignInfoMsg = `DEBUG: Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
+ signedURLMsg := ""
+ if ctx.isPresign {
+ signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
+ }
+ msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
+ v4.Logger.Log(msg)
+}
+
+func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
+ ctx.buildTime() // no depends
+ ctx.buildCredentialString() // no depends
+
+ if err := ctx.buildBodyDigest(); err != nil {
+ return err
+ }
+
+ unsignedHeaders := ctx.Request.Header
+ if ctx.isPresign {
+ if !disableHeaderHoisting {
+ urlValues := url.Values{}
+ urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
+ for k := range urlValues {
+ ctx.Query[k] = urlValues[k]
+ }
+ }
+ }
+
+ ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+ ctx.buildCanonicalString() // depends on canon headers / signed headers
+ ctx.buildStringToSign() // depends on canon string
+ ctx.buildSignature() // depends on string to sign
+
+ if ctx.isPresign {
+ ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
+ } else {
+ parts := []string{
+ authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
+ "SignedHeaders=" + ctx.signedHeaders,
+ "Signature=" + ctx.signature,
+ }
+ ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
+ }
+
+ return nil
+}
+
+func (ctx *signingCtx) buildTime() {
+ ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
+ ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
+
+ if ctx.isPresign {
+ duration := int64(ctx.ExpireTime / time.Second)
+ ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
+ ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
+ } else {
+ ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
+ }
+}
+
+func (ctx *signingCtx) buildCredentialString() {
+ ctx.credentialString = strings.Join([]string{
+ ctx.formattedShortTime,
+ ctx.Region,
+ ctx.ServiceName,
+ "aws4_request",
+ }, "/")
+
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
+ }
+}
+
+func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
+ query := url.Values{}
+ unsignedHeaders := http.Header{}
+ for k, h := range header {
+ if r.IsValid(k) {
+ query[k] = h
+ } else {
+ unsignedHeaders[k] = h
+ }
+ }
+
+ return query, unsignedHeaders
+}
+func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
+ var headers []string
+ headers = append(headers, "host")
+ for k, v := range header {
+ canonicalKey := http.CanonicalHeaderKey(k)
+ if !r.IsValid(canonicalKey) {
+ continue // ignored header
+ }
+ if ctx.SignedHeaderVals == nil {
+ ctx.SignedHeaderVals = make(http.Header)
+ }
+
+ lowerCaseKey := strings.ToLower(k)
+ if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
+ // include additional values
+ ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
+ continue
+ }
+
+ headers = append(headers, lowerCaseKey)
+ ctx.SignedHeaderVals[lowerCaseKey] = v
+ }
+ sort.Strings(headers)
+
+ ctx.signedHeaders = strings.Join(headers, ";")
+
+ if ctx.isPresign {
+ ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
+ }
+
+ headerValues := make([]string, len(headers))
+ for i, k := range headers {
+ if k == "host" {
+ if ctx.Request.Host != "" {
+ headerValues[i] = "host:" + ctx.Request.Host
+ } else {
+ headerValues[i] = "host:" + ctx.Request.URL.Host
+ }
+ } else {
+ headerValues[i] = k + ":" +
+ strings.Join(ctx.SignedHeaderVals[k], ",")
+ }
+ }
+ stripExcessSpaces(headerValues)
+ ctx.canonicalHeaders = strings.Join(headerValues, "\n")
+}
+
+func (ctx *signingCtx) buildCanonicalString() {
+ ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
+
+ uri := getURIPath(ctx.Request.URL)
+
+ if !ctx.DisableURIPathEscaping {
+ uri = rest.EscapePath(uri, false)
+ }
+
+ ctx.canonicalString = strings.Join([]string{
+ ctx.Request.Method,
+ uri,
+ ctx.Request.URL.RawQuery,
+ ctx.canonicalHeaders + "\n",
+ ctx.signedHeaders,
+ ctx.bodyDigest,
+ }, "\n")
+}
+
+func (ctx *signingCtx) buildStringToSign() {
+ ctx.stringToSign = strings.Join([]string{
+ authHeaderPrefix,
+ ctx.formattedTime,
+ ctx.credentialString,
+ hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
+ }, "\n")
+}
+
+func (ctx *signingCtx) buildSignature() {
+ secret := ctx.credValues.SecretAccessKey
+ date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
+ region := makeHmac(date, []byte(ctx.Region))
+ service := makeHmac(region, []byte(ctx.ServiceName))
+ credentials := makeHmac(service, []byte("aws4_request"))
+ signature := makeHmac(credentials, []byte(ctx.stringToSign))
+ ctx.signature = hex.EncodeToString(signature)
+}
+
+func (ctx *signingCtx) buildBodyDigest() error {
+ hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
+ if hash == "" {
+ includeSHA256Header := ctx.unsignedPayload ||
+ ctx.ServiceName == "s3" ||
+ ctx.ServiceName == "glacier"
+
+ s3Presign := ctx.isPresign && ctx.ServiceName == "s3"
+
+ if ctx.unsignedPayload || s3Presign {
+ hash = "UNSIGNED-PAYLOAD"
+ includeSHA256Header = !s3Presign
+ } else if ctx.Body == nil {
+ hash = emptyStringSHA256
+ } else {
+ if !aws.IsReaderSeekable(ctx.Body) {
+ return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
+ }
+ hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
+ }
+
+ if includeSHA256Header {
+ ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
+ }
+ }
+ ctx.bodyDigest = hash
+
+ return nil
+}
+
+// isRequestSigned returns if the request is currently signed or presigned
+func (ctx *signingCtx) isRequestSigned() bool {
+ if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
+ return true
+ }
+ if ctx.Request.Header.Get("Authorization") != "" {
+ return true
+ }
+
+ return false
+}
+
+// removePresign removes signing flags for both signed and presigned requests.
+func (ctx *signingCtx) removePresign() {
+ ctx.Query.Del("X-Amz-Algorithm")
+ ctx.Query.Del("X-Amz-Signature")
+ ctx.Query.Del("X-Amz-Security-Token")
+ ctx.Query.Del("X-Amz-Date")
+ ctx.Query.Del("X-Amz-Expires")
+ ctx.Query.Del("X-Amz-Credential")
+ ctx.Query.Del("X-Amz-SignedHeaders")
+}
+
+func makeHmac(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func makeSha256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func makeSha256Reader(reader io.ReadSeeker) []byte {
+ hash := sha256.New()
+ start, _ := reader.Seek(0, sdkio.SeekCurrent)
+ defer reader.Seek(start, sdkio.SeekStart)
+
+ // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
+ // smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
+ size, err := aws.SeekerLen(reader)
+ if err != nil {
+ io.Copy(hash, reader)
+ } else {
+ io.CopyN(hash, reader, size)
+ }
+
+ return hash.Sum(nil)
+}
+
+const doubleSpace = " "
+
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
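+//
+// For example, "  a   b  " becomes "a b".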
+func stripExcessSpaces(vals []string) {
+ var j, k, l, m, spaces int
+ for i, str := range vals {
+ // Trim trailing spaces
+ for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+ }
+
+ // Trim leading spaces
+ for k = 0; k < j && str[k] == ' '; k++ {
+ }
+ str = str[k : j+1]
+
+ // Strip multiple spaces.
+ j = strings.Index(str, doubleSpace)
+ if j < 0 {
+ vals[i] = str
+ continue
+ }
+
+ buf := []byte(str)
+ for k, m, l = j, j, len(buf); k < l; k++ {
+ if buf[k] == ' ' {
+ if spaces == 0 {
+ // First space.
+ buf[m] = buf[k]
+ m++
+ }
+ spaces++
+ } else {
+ // End of multiple spaces.
+ spaces = 0
+ buf[m] = buf[k]
+ m++
+ }
+ }
+
+ vals[i] = string(buf[:m])
+ }
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/types.go b/src/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 000000000..455091540
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,207 @@
+package aws
+
+import (
+ "io"
+ "sync"
+
+ "github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Allows the
+// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
+// streaming payload API operations.
+//
+// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API
+// operation's input will prevent that operation being retried in the case of
+// network errors, and cause operation requests to fail if the operation
+// requires payload signing.
+//
+// Note: If using S3 PutObject to stream an object upload, the SDK's S3
+// Upload manager (s3manager.Uploader) provides support for streaming with the
+// ability to retry network errors.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+ return ReaderSeekerCloser{r}
+}
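+
+// Usage sketch (stream is assumed to be any io.Reader, e.g. an HTTP
+// response body):
+//
+//	body := aws.ReadSeekCloser(stream)
+//	// body can now be passed where the SDK expects an io.ReadSeeker.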
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+ r io.Reader
+}
+
+// IsReaderSeekable returns whether the underlying reader type can be seeked. An
+// io.Reader might not actually be seekable if it is the ReaderSeekerCloser
+// type.
+func IsReaderSeekable(r io.Reader) bool {
+ switch v := r.(type) {
+ case ReaderSeekerCloser:
+ return v.IsSeeker()
+ case *ReaderSeekerCloser:
+ return v.IsSeeker()
+ case io.ReadSeeker:
+ return true
+ default:
+ return false
+ }
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read and
+// any error that occurred will be returned.
+//
+// If the reader is not an io.Reader, zero bytes read and a nil error will be
+// returned.
+//
+// Performs the same functionality as io.Reader's Read.
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+ switch t := r.r.(type) {
+ case io.Reader:
+ return t.Read(p)
+ }
+ return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+ _, ok := r.r.(io.Seeker)
+ return ok
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+ type lenner interface {
+ Len() int
+ }
+
+ if lr, ok := r.r.(lenner); ok {
+ return lr.Len(), true
+ }
+
+ return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+ if l, ok := r.HasLen(); ok {
+ return int64(l), nil
+ }
+
+ if s, ok := r.r.(io.Seeker); ok {
+ return seekerLen(s)
+ }
+
+ return -1, nil
+}
+
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or an error.
+func SeekerLen(s io.Seeker) (int64, error) {
+	// Determine if the seeker is actually seekable. ReaderSeekerCloser
+	// hides the fact that an io.Reader might not actually be seekable.
+ switch v := s.(type) {
+ case ReaderSeekerCloser:
+ return v.GetLen()
+ case *ReaderSeekerCloser:
+ return v.GetLen()
+ }
+
+ return seekerLen(s)
+}
+
+func seekerLen(s io.Seeker) (int64, error) {
+ curOffset, err := s.Seek(0, sdkio.SeekCurrent)
+ if err != nil {
+ return 0, err
+ }
+
+ endOffset, err := s.Seek(0, sdkio.SeekEnd)
+ if err != nil {
+ return 0, err
+ }
+
+ _, err = s.Seek(curOffset, sdkio.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+
+ return endOffset - curOffset, nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+ return t.Close()
+ }
+ return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
+// interface. Can be used with the s3manager.Downloader to download content to
+// a buffer in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+ buf []byte
+ m sync.Mutex
+
+ // GrowthCoeff defines the growth rate of the internal buffer. By
+ // default, the growth rate is 1, where expanding the internal
+ // buffer will allocate only enough capacity to fit the new expected
+ // length.
+ GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+ return &WriteAtBuffer{buf: buf}
+}
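+
+// A sketch of downloading an object into memory (svc is an assumed S3
+// client; the bucket and key are hypothetical):
+//
+//	buf := aws.NewWriteAtBuffer([]byte{})
+//	downloader := s3manager.NewDownloaderWithClient(svc)
+//	_, err := downloader.Download(buf, &s3.GetObjectInput{
+//		Bucket: aws.String("examplebucket"),
+//		Key:    aws.String("example/key"),
+//	})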
+
+// WriteAt writes a slice of bytes to the buffer starting at the position
+// provided. The number of bytes written will be returned, or an error. Can
+// overwrite previously written slices if the WriteAt offsets overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+ pLen := len(p)
+ expLen := pos + int64(pLen)
+ b.m.Lock()
+ defer b.m.Unlock()
+ if int64(len(b.buf)) < expLen {
+ if int64(cap(b.buf)) < expLen {
+ if b.GrowthCoeff < 1 {
+ b.GrowthCoeff = 1
+ }
+ newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+ b.buf = b.buf[:expLen]
+ }
+ copy(b.buf[pos:], p)
+ return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.buf
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/url.go b/src/vendor/github.com/aws/aws-sdk-go/aws/url.go
new file mode 100644
index 000000000..6192b2455
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/url.go
@@ -0,0 +1,12 @@
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
+func URLHostname(url *url.URL) string {
+ return url.Hostname()
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/src/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
new file mode 100644
index 000000000..0210d2720
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
@@ -0,0 +1,29 @@
+// +build !go1.8
+
+package aws
+
+import (
+ "net/url"
+ "strings"
+)
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Copy of Go 1.8's net/url#URL.Hostname functionality.
+func URLHostname(url *url.URL) string {
+	return stripPort(url.Host)
+}
+
+// stripPort is a copy of Go 1.8's url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/aws/version.go b/src/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 000000000..eecfe6dcf
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.19.47"
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
new file mode 100644
index 000000000..e83a99886
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
@@ -0,0 +1,120 @@
+package ini
+
+// ASTKind represents different states in the parse table
+// and the type of AST that is being constructed
+type ASTKind int
+
+// ASTKind* is used in the parse table to transition between
+// the different states
+const (
+ ASTKindNone = ASTKind(iota)
+ ASTKindStart
+ ASTKindExpr
+ ASTKindEqualExpr
+ ASTKindStatement
+ ASTKindSkipStatement
+ ASTKindExprStatement
+ ASTKindSectionStatement
+ ASTKindNestedSectionStatement
+ ASTKindCompletedNestedSectionStatement
+ ASTKindCommentStatement
+ ASTKindCompletedSectionStatement
+)
+
+func (k ASTKind) String() string {
+ switch k {
+ case ASTKindNone:
+ return "none"
+ case ASTKindStart:
+ return "start"
+ case ASTKindExpr:
+ return "expr"
+ case ASTKindStatement:
+ return "stmt"
+ case ASTKindSectionStatement:
+ return "section_stmt"
+ case ASTKindExprStatement:
+ return "expr_stmt"
+ case ASTKindCommentStatement:
+ return "comment"
+ case ASTKindNestedSectionStatement:
+ return "nested_section_stmt"
+ case ASTKindCompletedSectionStatement:
+ return "completed_stmt"
+ case ASTKindSkipStatement:
+ return "skip"
+ default:
+ return ""
+ }
+}
+
+// AST allows us to determine what kind of node we
+// are on, so casting may not be necessary.
+//
+// The root is always the first node in Children
+type AST struct {
+ Kind ASTKind
+ Root Token
+ RootToken bool
+ Children []AST
+}
+
+func newAST(kind ASTKind, root AST, children ...AST) AST {
+ return AST{
+ Kind: kind,
+ Children: append([]AST{root}, children...),
+ }
+}
+
+func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
+ return AST{
+ Kind: kind,
+ Root: root,
+ RootToken: true,
+ Children: children,
+ }
+}
+
+// AppendChild will append to the list of children an AST has.
+func (a *AST) AppendChild(child AST) {
+ a.Children = append(a.Children, child)
+}
+
+// GetRoot will return the root AST which can be the first entry
+// in the children list or a token.
+func (a *AST) GetRoot() AST {
+ if a.RootToken {
+ return *a
+ }
+
+ if len(a.Children) == 0 {
+ return AST{}
+ }
+
+ return a.Children[0]
+}
+
+// GetChildren will return the current AST's list of children
+func (a *AST) GetChildren() []AST {
+ if len(a.Children) == 0 {
+ return []AST{}
+ }
+
+ if a.RootToken {
+ return a.Children
+ }
+
+ return a.Children[1:]
+}
+
+// SetChildren will set and override all children of the AST.
+func (a *AST) SetChildren(children []AST) {
+ if a.RootToken {
+ a.Children = children
+ } else {
+ a.Children = append(a.Children[:1], children...)
+ }
+}
+
+// Start is used to indicate the starting state of the parse table.
+var Start = newAST(ASTKindStart, AST{})
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
new file mode 100644
index 000000000..0895d53cb
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
@@ -0,0 +1,11 @@
+package ini
+
+var commaRunes = []rune(",")
+
+func isComma(b rune) bool {
+ return b == ','
+}
+
+func newCommaToken() Token {
+ return newToken(TokenComma, commaRunes, NoneType)
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
new file mode 100644
index 000000000..0b76999ba
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
@@ -0,0 +1,35 @@
+package ini
+
+// isComment will return whether or not the next byte(s) is a
+// comment.
+func isComment(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case ';':
+ return true
+ case '#':
+ return true
+ }
+
+ return false
+}
+
+// newCommentToken will create a comment token and
+// return how many runes were read.
+func newCommentToken(b []rune) (Token, int, error) {
+ i := 0
+ for ; i < len(b); i++ {
+ if b[i] == '\n' {
+ break
+ }
+
+ if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' {
+ break
+ }
+ }
+
+ return newToken(TokenComment, b[:i], NoneType), i, nil
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
new file mode 100644
index 000000000..25ce0fe13
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
@@ -0,0 +1,29 @@
+// Package ini is an LL(1) parser for configuration files.
+//
+// Example:
+// sections, err := ini.OpenFile("/path/to/file")
+// if err != nil {
+// panic(err)
+// }
+//
+// profile := "foo"
+// section, ok := sections.GetSection(profile)
+// if !ok {
+// fmt.Printf("section %q could not be found", profile)
+// }
+//
+// Below is the BNF that describes this parser
+// Grammar:
+// stmt -> value stmt'
+// stmt' -> epsilon | op stmt
+// value -> number | string | boolean | quoted_string
+//
+// section -> [ section'
+// section' -> value section_close
+// section_close -> ]
+//
+// SkipState will skip (NL WS)+
+//
+// comment -> # comment' | ; comment'
+// comment' -> epsilon | value
+package ini
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
new file mode 100644
index 000000000..04345a54c
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
@@ -0,0 +1,4 @@
+package ini
+
+// emptyToken is used as a placeholder when no further tokens are present
+var emptyToken = newToken(TokenNone, []rune{}, NoneType)
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
new file mode 100644
index 000000000..91ba2a59d
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
@@ -0,0 +1,24 @@
+package ini
+
+// newExpression will return an expression AST.
+// Expr represents an expression
+//
+// grammar:
+// expr -> string | number
+func newExpression(tok Token) AST {
+ return newASTWithRootToken(ASTKindExpr, tok)
+}
+
+func newEqualExpr(left AST, tok Token) AST {
+ return newASTWithRootToken(ASTKindEqualExpr, tok, left)
+}
+
+// EqualExprKey will return the LHS value in the equal expr
+func EqualExprKey(ast AST) string {
+ children := ast.GetChildren()
+ if len(children) == 0 || ast.Kind != ASTKindEqualExpr {
+ return ""
+ }
+
+ return string(children[0].Root.Raw())
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
new file mode 100644
index 000000000..8d462f77e
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
@@ -0,0 +1,17 @@
+// +build gofuzz
+
+package ini
+
+import (
+ "bytes"
+)
+
+func Fuzz(data []byte) int {
+ b := bytes.NewReader(data)
+
+ if _, err := Parse(b); err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
new file mode 100644
index 000000000..3b0ca7afe
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
@@ -0,0 +1,51 @@
+package ini
+
+import (
+ "io"
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// OpenFile takes a path to a given file, and will open and parse
+// that file.
+func OpenFile(path string) (Sections, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err)
+ }
+ defer f.Close()
+
+ return Parse(f)
+}
+
+// Parse will parse the given file using the shared config
+// visitor.
+func Parse(f io.Reader) (Sections, error) {
+ tree, err := ParseAST(f)
+ if err != nil {
+ return Sections{}, err
+ }
+
+ v := NewDefaultVisitor()
+ if err = Walk(tree, v); err != nil {
+ return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
+
+// ParseBytes will parse the given bytes and return the parsed sections.
+func ParseBytes(b []byte) (Sections, error) {
+ tree, err := ParseASTBytes(b)
+ if err != nil {
+ return Sections{}, err
+ }
+
+ v := NewDefaultVisitor()
+ if err = Walk(tree, v); err != nil {
+ return Sections{}, err
+ }
+
+ return v.Sections, nil
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
new file mode 100644
index 000000000..582c024ad
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
@@ -0,0 +1,165 @@
+package ini
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+	// ErrCodeUnableToReadFile is used when a file fails to be
+	// opened or read from.
+ ErrCodeUnableToReadFile = "FailedRead"
+)
+
+// TokenType represents the different token types
+type TokenType int
+
+func (t TokenType) String() string {
+ switch t {
+ case TokenNone:
+ return "none"
+ case TokenLit:
+ return "literal"
+ case TokenSep:
+ return "sep"
+ case TokenOp:
+ return "op"
+ case TokenWS:
+ return "ws"
+ case TokenNL:
+ return "newline"
+ case TokenComment:
+ return "comment"
+ case TokenComma:
+ return "comma"
+ default:
+ return ""
+ }
+}
+
+// TokenType enums
+const (
+ TokenNone = TokenType(iota)
+ TokenLit
+ TokenSep
+ TokenComma
+ TokenOp
+ TokenWS
+ TokenNL
+ TokenComment
+)
+
+type iniLexer struct{}
+
+// Tokenize will return a list of tokens during lexical analysis of the
+// io.Reader.
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err)
+ }
+
+ return l.tokenize(b)
+}
+
+func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
+ runes := bytes.Runes(b)
+ var err error
+ n := 0
+ tokenAmount := countTokens(runes)
+ tokens := make([]Token, tokenAmount)
+ count := 0
+
+ for len(runes) > 0 && count < tokenAmount {
+ switch {
+ case isWhitespace(runes[0]):
+ tokens[count], n, err = newWSToken(runes)
+ case isComma(runes[0]):
+ tokens[count], n = newCommaToken(), 1
+ case isComment(runes):
+ tokens[count], n, err = newCommentToken(runes)
+ case isNewline(runes):
+ tokens[count], n, err = newNewlineToken(runes)
+ case isSep(runes):
+ tokens[count], n, err = newSepToken(runes)
+ case isOp(runes):
+ tokens[count], n, err = newOpToken(runes)
+ default:
+ tokens[count], n, err = newLitToken(runes)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ count++
+
+ runes = runes[n:]
+ }
+
+ return tokens[:count], nil
+}
+
+func countTokens(runes []rune) int {
+ count, n := 0, 0
+ var err error
+
+ for len(runes) > 0 {
+ switch {
+ case isWhitespace(runes[0]):
+ _, n, err = newWSToken(runes)
+ case isComma(runes[0]):
+ _, n = newCommaToken(), 1
+ case isComment(runes):
+ _, n, err = newCommentToken(runes)
+ case isNewline(runes):
+ _, n, err = newNewlineToken(runes)
+ case isSep(runes):
+ _, n, err = newSepToken(runes)
+ case isOp(runes):
+ _, n, err = newOpToken(runes)
+ default:
+ _, n, err = newLitToken(runes)
+ }
+
+ if err != nil {
+ return 0
+ }
+
+ count++
+ runes = runes[n:]
+ }
+
+ return count + 1
+}
+
+// Token holds metadata about a given value.
+type Token struct {
+ t TokenType
+ ValueType ValueType
+ base int
+ raw []rune
+}
+
+var emptyValue = Value{}
+
+func newToken(t TokenType, raw []rune, v ValueType) Token {
+ return Token{
+ t: t,
+ raw: raw,
+ ValueType: v,
+ }
+}
+
+// Raw returns the raw runes that were consumed
+func (tok Token) Raw() []rune {
+ return tok.raw
+}
+
+// Type returns the token type
+func (tok Token) Type() TokenType {
+ return tok.t
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
new file mode 100644
index 000000000..e56dcee2f
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
@@ -0,0 +1,349 @@
+package ini
+
+import (
+ "fmt"
+ "io"
+)
+
+// State enums for the parse table
+const (
+ InvalidState = iota
+ // stmt -> value stmt'
+ StatementState
+ // stmt' -> MarkComplete | op stmt
+ StatementPrimeState
+ // value -> number | string | boolean | quoted_string
+ ValueState
+ // section -> [ section'
+ OpenScopeState
+ // section' -> value section_close
+ SectionState
+ // section_close -> ]
+ CloseScopeState
+ // SkipState will skip (NL WS)+
+ SkipState
+ // SkipTokenState will skip any token and push the previous
+ // state onto the stack.
+ SkipTokenState
+ // comment -> # comment' | ; comment'
+ // comment' -> MarkComplete | value
+ CommentState
+ // MarkComplete state will complete statements and move that
+ // to the completed AST list
+ MarkCompleteState
+ // TerminalState signifies that the tokens have been fully parsed
+ TerminalState
+)
+
+// parseTable is a state machine to dictate the grammar above.
+var parseTable = map[ASTKind]map[TokenType]int{
+ ASTKindStart: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+ ASTKindCommentStatement: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExpr: map[TokenType]int{
+ TokenOp: StatementPrimeState,
+ TokenLit: ValueState,
+ TokenSep: OpenScopeState,
+ TokenWS: ValueState,
+ TokenNL: SkipState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindEqualExpr: map[TokenType]int{
+ TokenLit: ValueState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipState,
+ },
+ ASTKindStatement: map[TokenType]int{
+ TokenLit: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindExprStatement: map[TokenType]int{
+ TokenLit: ValueState,
+ TokenSep: OpenScopeState,
+ TokenOp: ValueState,
+ TokenWS: ValueState,
+ TokenNL: MarkCompleteState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ TokenComma: SkipState,
+ },
+ ASTKindSectionStatement: map[TokenType]int{
+ TokenLit: SectionState,
+ TokenOp: SectionState,
+ TokenSep: CloseScopeState,
+ TokenWS: SectionState,
+ TokenNL: SkipTokenState,
+ },
+ ASTKindCompletedSectionStatement: map[TokenType]int{
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenComment: CommentState,
+ TokenNone: MarkCompleteState,
+ },
+ ASTKindSkipStatement: map[TokenType]int{
+ TokenLit: StatementState,
+ TokenSep: OpenScopeState,
+ TokenWS: SkipTokenState,
+ TokenNL: SkipTokenState,
+ TokenComment: CommentState,
+ TokenNone: TerminalState,
+ },
+}
+
+// ParseAST will parse input from an io.Reader using
+// an LL(1) parser.
+func ParseAST(r io.Reader) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.Tokenize(r)
+ if err != nil {
+ return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
+// ParseASTBytes will parse input from a byte slice using
+// an LL(1) parser.
+func ParseASTBytes(b []byte) ([]AST, error) {
+ lexer := iniLexer{}
+ tokens, err := lexer.tokenize(b)
+ if err != nil {
+ return []AST{}, err
+ }
+
+ return parse(tokens)
+}
+
+func parse(tokens []Token) ([]AST, error) {
+ start := Start
+ stack := newParseStack(3, len(tokens))
+
+ stack.Push(start)
+ s := newSkipper()
+
+loop:
+ for stack.Len() > 0 {
+ k := stack.Pop()
+
+ var tok Token
+ if len(tokens) == 0 {
+ // this occurs when all the tokens have been processed
+ // but reduction of what's left on the stack needs to
+ // occur.
+ tok = emptyToken
+ } else {
+ tok = tokens[0]
+ }
+
+ step := parseTable[k.Kind][tok.Type()]
+ if s.ShouldSkip(tok) {
+ // being in a skip state with no tokens will break out of
+ // the parse loop since there is nothing left to process.
+ if len(tokens) == 0 {
+ break loop
+ }
+
+ step = SkipTokenState
+ }
+
+ switch step {
+ case TerminalState:
+ // Finished parsing. Push what should be the last
+ // statement to the stack. If there is anything left
+ // on the stack, an error in parsing has occurred.
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+ break loop
+ case SkipTokenState:
+ // When skipping a token, the previous state was popped off the stack.
+ // To maintain the correct state, the previous state will be pushed
+ // onto the stack.
+ stack.Push(k)
+ case StatementState:
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+ expr := newExpression(tok)
+ stack.Push(expr)
+ case StatementPrimeState:
+ if tok.Type() != TokenOp {
+ stack.MarkComplete(k)
+ continue
+ }
+
+ if k.Kind != ASTKindExpr {
+ return nil, NewParseError(
+ fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
+ )
+ }
+
+ k = trimSpaces(k)
+ expr := newEqualExpr(k, tok)
+ stack.Push(expr)
+ case ValueState:
+ // ValueState requires the previous state to either be an equal expression
+ // or an expression statement.
+ //
+ // This grammar occurs when the RHS is a number, word, or quoted string.
+ // equal_expr -> lit op equal_expr'
+ // equal_expr' -> number | string | quoted_string
+ // quoted_string -> " quoted_string'
+ // quoted_string' -> string quoted_string_end
+ // quoted_string_end -> "
+ //
+ // otherwise
+ // expr_stmt -> equal_expr (expr_stmt')*
+ // expr_stmt' -> ws S | op S | MarkComplete
+ // S -> equal_expr' expr_stmt'
+ switch k.Kind {
+ case ASTKindEqualExpr:
+				// assigning a value to some key
+ k.AppendChild(newExpression(tok))
+ stack.Push(newExprStatement(k))
+ case ASTKindExpr:
+ k.Root.raw = append(k.Root.raw, tok.Raw()...)
+ stack.Push(k)
+ case ASTKindExprStatement:
+ root := k.GetRoot()
+ children := root.GetChildren()
+ if len(children) == 0 {
+ return nil, NewParseError(
+ fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
+ )
+ }
+
+ rhs := children[len(children)-1]
+
+ if rhs.Root.ValueType != QuotedStringType {
+ rhs.Root.ValueType = StringType
+ rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
+
+ }
+
+ children[len(children)-1] = rhs
+ k.SetChildren(children)
+
+ stack.Push(k)
+ }
+ case OpenScopeState:
+ if !runeCompare(tok.Raw(), openBrace) {
+ return nil, NewParseError("expected '['")
+ }
+
+ stmt := newStatement()
+ stack.Push(stmt)
+ case CloseScopeState:
+ if !runeCompare(tok.Raw(), closeBrace) {
+ return nil, NewParseError("expected ']'")
+ }
+
+ k = trimSpaces(k)
+ stack.Push(newCompletedSectionStatement(k))
+ case SectionState:
+ var stmt AST
+
+ switch k.Kind {
+ case ASTKindStatement:
+ // If there are multiple literals inside of a scope declaration,
+ // then the current token's raw value will be appended to the Name.
+ //
+ // This handles cases like [ profile default ]
+ //
+ // k will represent a SectionStatement with the children representing
+ // the label of the section
+ stmt = newSectionStatement(tok)
+ case ASTKindSectionStatement:
+ k.Root.raw = append(k.Root.raw, tok.Raw()...)
+ stmt = k
+ default:
+ return nil, NewParseError(
+ fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
+ )
+ }
+
+ stack.Push(stmt)
+ case MarkCompleteState:
+ if k.Kind != ASTKindStart {
+ stack.MarkComplete(k)
+ }
+
+ if stack.Len() == 0 {
+ stack.Push(start)
+ }
+ case SkipState:
+ stack.Push(newSkipStatement(k))
+ s.Skip()
+ case CommentState:
+ if k.Kind == ASTKindStart {
+ stack.Push(k)
+ } else {
+ stack.MarkComplete(k)
+ }
+
+ stmt := newCommentStatement(tok)
+ stack.Push(stmt)
+ default:
+ return nil, NewParseError(
+ fmt.Sprintf("invalid state with ASTKind %v and TokenType %v",
+ k, tok.Type()))
+ }
+
+ if len(tokens) > 0 {
+ tokens = tokens[1:]
+ }
+ }
+
+ // this occurs when a statement has not been completed
+ if stack.top > 1 {
+		return nil, NewParseError("incomplete ini expression")
+ }
+
+ // returns a sublist which excludes the start symbol
+ return stack.List(), nil
+}
+
+// trimSpaces will trim spaces on the left and right hand side of
+// the literal.
+func trimSpaces(k AST) AST {
+ // trim left hand side of spaces
+ for i := 0; i < len(k.Root.raw); i++ {
+ if !isWhitespace(k.Root.raw[i]) {
+ break
+ }
+
+ k.Root.raw = k.Root.raw[1:]
+ i--
+ }
+
+ // trim right hand side of spaces
+ for i := len(k.Root.raw) - 1; i >= 0; i-- {
+ if !isWhitespace(k.Root.raw[i]) {
+ break
+ }
+
+ k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
+ }
+
+ return k
+}
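
To make the grammar above concrete, here is a minimal sketch of feeding an INI document through `ParseAST`. Note this package lives under `internal/`, so it is only importable from within the aws-sdk-go module itself; the standalone `main` below is purely illustrative.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/internal/ini"
)

func main() {
	cfg := "[default]\nregion = us-west-2\n"

	// Each completed statement (the section header and the key/value
	// assignment) is returned as one AST in the list.
	tree, err := ini.ParseAST(strings.NewReader(cfg))
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed %d statements\n", len(tree))
}
```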
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
new file mode 100644
index 000000000..24df543d3
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
@@ -0,0 +1,324 @@
+package ini
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ runesTrue = []rune("true")
+ runesFalse = []rune("false")
+)
+
+var literalValues = [][]rune{
+ runesTrue,
+ runesFalse,
+}
+
+func isBoolValue(b []rune) bool {
+ for _, lv := range literalValues {
+ if isLitValue(lv, b) {
+ return true
+ }
+ }
+ return false
+}
+
+func isLitValue(want, have []rune) bool {
+ if len(have) < len(want) {
+ return false
+ }
+
+ for i := 0; i < len(want); i++ {
+ if want[i] != have[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+// isNumberValue will return whether or not the leading characters in
+// a rune slice are a number. A number is delimited by whitespace or
+// the newline token.
+//
+// A number is defined to be in a binary, octal, decimal (int | float), hex format,
+// or in scientific notation.
+func isNumberValue(b []rune) bool {
+ negativeIndex := 0
+ helper := numberHelper{}
+ needDigit := false
+
+ for i := 0; i < len(b); i++ {
+ negativeIndex++
+
+ switch b[i] {
+ case '-':
+ if helper.IsNegative() || negativeIndex != 1 {
+ return false
+ }
+ helper.Determine(b[i])
+ needDigit = true
+ continue
+ case 'e', 'E':
+ if err := helper.Determine(b[i]); err != nil {
+ return false
+ }
+ negativeIndex = 0
+ needDigit = true
+ continue
+ case 'b':
+ if helper.numberFormat == hex {
+ break
+ }
+ fallthrough
+ case 'o', 'x':
+ needDigit = true
+ if i == 0 {
+ return false
+ }
+
+ fallthrough
+ case '.':
+ if err := helper.Determine(b[i]); err != nil {
+ return false
+ }
+ needDigit = true
+ continue
+ }
+
+ if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
+ return !needDigit
+ }
+
+ if !helper.CorrectByte(b[i]) {
+ return false
+ }
+ needDigit = false
+ }
+
+ return !needDigit
+}
+
+func isValid(b []rune) (bool, int, error) {
+ if len(b) == 0 {
+ // TODO: should probably return an error
+ return false, 0, nil
+ }
+
+ return isValidRune(b[0]), 1, nil
+}
+
+func isValidRune(r rune) bool {
+ return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n'
+}
+
+// ValueType is an enum that will signify what type
+// the Value is
+type ValueType int
+
+func (v ValueType) String() string {
+ switch v {
+ case NoneType:
+ return "NONE"
+ case DecimalType:
+ return "FLOAT"
+ case IntegerType:
+ return "INT"
+ case StringType:
+ return "STRING"
+ case BoolType:
+ return "BOOL"
+ }
+
+ return ""
+}
+
+// ValueType enums
+const (
+ NoneType = ValueType(iota)
+ DecimalType
+ IntegerType
+ StringType
+ QuotedStringType
+ BoolType
+)
+
+// Value is a union container
+type Value struct {
+ Type ValueType
+ raw []rune
+
+ integer int64
+ decimal float64
+ boolean bool
+ str string
+}
+
+func newValue(t ValueType, base int, raw []rune) (Value, error) {
+ v := Value{
+ Type: t,
+ raw: raw,
+ }
+ var err error
+
+ switch t {
+ case DecimalType:
+ v.decimal, err = strconv.ParseFloat(string(raw), 64)
+ case IntegerType:
+ if base != 10 {
+ raw = raw[2:]
+ }
+
+ v.integer, err = strconv.ParseInt(string(raw), base, 64)
+ case StringType:
+ v.str = string(raw)
+ case QuotedStringType:
+ v.str = string(raw[1 : len(raw)-1])
+ case BoolType:
+ v.boolean = runeCompare(v.raw, runesTrue)
+ }
+
+ // issue 2253
+ //
+ // if the value trying to be parsed is too large, then we will use
+ // the 'StringType' and raw value instead.
+ if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange {
+ v.Type = StringType
+ v.str = string(raw)
+ err = nil
+ }
+
+ return v, err
+}
+
+// Append will append values and change the type to a string
+// type.
+func (v *Value) Append(tok Token) {
+ r := tok.Raw()
+ if v.Type != QuotedStringType {
+ v.Type = StringType
+ r = tok.raw[1 : len(tok.raw)-1]
+ }
+ if tok.Type() != TokenLit {
+ v.raw = append(v.raw, tok.Raw()...)
+ } else {
+ v.raw = append(v.raw, r...)
+ }
+}
+
+func (v Value) String() string {
+ switch v.Type {
+ case DecimalType:
+ return fmt.Sprintf("decimal: %f", v.decimal)
+ case IntegerType:
+ return fmt.Sprintf("integer: %d", v.integer)
+ case StringType:
+ return fmt.Sprintf("string: %s", string(v.raw))
+ case QuotedStringType:
+ return fmt.Sprintf("quoted string: %s", string(v.raw))
+ case BoolType:
+ return fmt.Sprintf("bool: %t", v.boolean)
+ default:
+ return "union not set"
+ }
+}
+
+func newLitToken(b []rune) (Token, int, error) {
+ n := 0
+ var err error
+
+ token := Token{}
+ if b[0] == '"' {
+ n, err = getStringValue(b)
+ if err != nil {
+ return token, n, err
+ }
+
+ token = newToken(TokenLit, b[:n], QuotedStringType)
+ } else if isNumberValue(b) {
+ var base int
+ base, n, err = getNumericalValue(b)
+ if err != nil {
+ return token, 0, err
+ }
+
+ value := b[:n]
+ vType := IntegerType
+ if contains(value, '.') || hasExponent(value) {
+ vType = DecimalType
+ }
+ token = newToken(TokenLit, value, vType)
+ token.base = base
+ } else if isBoolValue(b) {
+ n, err = getBoolValue(b)
+
+ token = newToken(TokenLit, b[:n], BoolType)
+ } else {
+ n, err = getValue(b)
+ token = newToken(TokenLit, b[:n], StringType)
+ }
+
+ return token, n, err
+}
+
+// IntValue returns an integer value
+func (v Value) IntValue() int64 {
+ return v.integer
+}
+
+// FloatValue returns a float value
+func (v Value) FloatValue() float64 {
+ return v.decimal
+}
+
+// BoolValue returns a bool value
+func (v Value) BoolValue() bool {
+ return v.boolean
+}
+
+func isTrimmable(r rune) bool {
+ switch r {
+ case '\n', ' ':
+ return true
+ }
+ return false
+}
+
+// StringValue returns the string value
+func (v Value) StringValue() string {
+ switch v.Type {
+ case StringType:
+ return strings.TrimFunc(string(v.raw), isTrimmable)
+ case QuotedStringType:
+ // preserve all characters in the quotes
+ return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1]))
+ default:
+ return strings.TrimFunc(string(v.raw), isTrimmable)
+ }
+}
+
+func contains(runes []rune, c rune) bool {
+ for i := 0; i < len(runes); i++ {
+ if runes[i] == c {
+ return true
+ }
+ }
+
+ return false
+}
+
+func runeCompare(v1 []rune, v2 []rune) bool {
+ if len(v1) != len(v2) {
+ return false
+ }
+
+ for i := 0; i < len(v1); i++ {
+ if v1[i] != v2[i] {
+ return false
+ }
+ }
+
+ return true
+}
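
A table-driven test is the quickest way to see how `newLitToken` classifies literals. This is a sketch written as if inside the `ini` package, since the helpers are unexported:

```go
package ini

import "testing"

// TestNewLitTokenClassification sketches how the leading runes of a value
// select between quoted string, number, boolean, and plain string tokens.
func TestNewLitTokenClassification(t *testing.T) {
	cases := []struct {
		in   string
		want ValueType
	}{
		{`"quoted"`, QuotedStringType},
		{"123", IntegerType},
		{"3.14", DecimalType},
		{"true", BoolType},
		{"plain", StringType},
	}
	for _, c := range cases {
		tok, _, err := newLitToken([]rune(c.in))
		if err != nil {
			t.Fatalf("%s: %v", c.in, err)
		}
		if tok.ValueType != c.want {
			t.Errorf("%s: got %v, want %v", c.in, tok.ValueType, c.want)
		}
	}
}
```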
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
new file mode 100644
index 000000000..e52ac399f
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
@@ -0,0 +1,30 @@
+package ini
+
+func isNewline(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ if b[0] == '\n' {
+ return true
+ }
+
+ if len(b) < 2 {
+ return false
+ }
+
+ return b[0] == '\r' && b[1] == '\n'
+}
+
+func newNewlineToken(b []rune) (Token, int, error) {
+ i := 1
+ if b[0] == '\r' && isNewline(b[1:]) {
+ i++
+ }
+
+ if !isNewline([]rune(b[:i])) {
+ return emptyToken, 0, NewParseError("invalid new line token")
+ }
+
+ return newToken(TokenNL, b[:i], NoneType), i, nil
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
new file mode 100644
index 000000000..a45c0bc56
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
@@ -0,0 +1,152 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+)
+
+const (
+ none = numberFormat(iota)
+ binary
+ octal
+ decimal
+ hex
+ exponent
+)
+
+type numberFormat int
+
+// numberHelper is used to dictate what format a number is in
+// and what to do for negative values. Since -1e-4 is a valid
+// number, we cannot simply check for duplicate negatives.
+type numberHelper struct {
+ numberFormat numberFormat
+
+ negative bool
+ negativeExponent bool
+}
+
+func (b numberHelper) Exists() bool {
+ return b.numberFormat != none
+}
+
+func (b numberHelper) IsNegative() bool {
+ return b.negative || b.negativeExponent
+}
+
+func (b *numberHelper) Determine(c rune) error {
+ if b.Exists() {
+ return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c)))
+ }
+
+ switch c {
+ case 'b':
+ b.numberFormat = binary
+ case 'o':
+ b.numberFormat = octal
+ case 'x':
+ b.numberFormat = hex
+ case 'e', 'E':
+ b.numberFormat = exponent
+ case '-':
+ if b.numberFormat != exponent {
+ b.negative = true
+ } else {
+ b.negativeExponent = true
+ }
+ case '.':
+ b.numberFormat = decimal
+ default:
+ return NewParseError(fmt.Sprintf("invalid number character: %v", string(c)))
+ }
+
+ return nil
+}
+
+func (b numberHelper) CorrectByte(c rune) bool {
+ switch {
+ case b.numberFormat == binary:
+ if !isBinaryByte(c) {
+ return false
+ }
+ case b.numberFormat == octal:
+ if !isOctalByte(c) {
+ return false
+ }
+ case b.numberFormat == hex:
+ if !isHexByte(c) {
+ return false
+ }
+ case b.numberFormat == decimal:
+ if !isDigit(c) {
+ return false
+ }
+ case b.numberFormat == exponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negativeExponent:
+ if !isDigit(c) {
+ return false
+ }
+ case b.negative:
+ if !isDigit(c) {
+ return false
+ }
+ default:
+ if !isDigit(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b numberHelper) Base() int {
+ switch b.numberFormat {
+ case binary:
+ return 2
+ case octal:
+ return 8
+ case hex:
+ return 16
+ default:
+ return 10
+ }
+}
+
+func (b numberHelper) String() string {
+ buf := bytes.Buffer{}
+ i := 0
+
+ switch b.numberFormat {
+ case binary:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": binary format\n")
+ case octal:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": octal format\n")
+ case hex:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": hex format\n")
+ case exponent:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": exponent format\n")
+ default:
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": integer format\n")
+ }
+
+ if b.negative {
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": negative format\n")
+ }
+
+ if b.negativeExponent {
+ i++
+ buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n")
+ }
+
+ return buf.String()
+}
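
As a sketch of the helper's behavior (again written as an in-package test, since `numberHelper` is unexported), tracking a hex literal such as `0x2B` looks like:

```go
package ini

import "testing"

// TestNumberHelperHex sketches how Determine switches the helper into hex
// mode, which drives both Base and per-rune validation.
func TestNumberHelperHex(t *testing.T) {
	h := numberHelper{}
	if err := h.Determine('x'); err != nil {
		t.Fatal(err)
	}
	if h.Base() != 16 {
		t.Errorf("got base %d, want 16", h.Base())
	}
	if !h.CorrectByte('B') || h.CorrectByte('G') {
		t.Error("hex byte classification is wrong")
	}
}
```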
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
new file mode 100644
index 000000000..8a84c7cbe
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
@@ -0,0 +1,39 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ equalOp = []rune("=")
+ equalColonOp = []rune(":")
+)
+
+func isOp(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case '=':
+ return true
+ case ':':
+ return true
+ default:
+ return false
+ }
+}
+
+func newOpToken(b []rune) (Token, int, error) {
+ tok := Token{}
+
+ switch b[0] {
+ case '=':
+ tok = newToken(TokenOp, equalOp, NoneType)
+ case ':':
+ tok = newToken(TokenOp, equalColonOp, NoneType)
+ default:
+ return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0]))
+ }
+ return tok, 1, nil
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
new file mode 100644
index 000000000..457287019
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
@@ -0,0 +1,43 @@
+package ini
+
+import "fmt"
+
+const (
+ // ErrCodeParseError is returned when a parsing error
+ // has occurred.
+ ErrCodeParseError = "INIParseError"
+)
+
+// ParseError is an error which is returned during any part of
+// the parsing process.
+type ParseError struct {
+ msg string
+}
+
+// NewParseError will return a new ParseError where message
+// is the description of the error.
+func NewParseError(message string) *ParseError {
+ return &ParseError{
+ msg: message,
+ }
+}
+
+// Code will return the ErrCodeParseError
+func (err *ParseError) Code() string {
+ return ErrCodeParseError
+}
+
+// Message returns the error's message
+func (err *ParseError) Message() string {
+ return err.msg
+}
+
+// OrigError returns nil since there will never be an
+// original error.
+func (err *ParseError) OrigError() error {
+ return nil
+}
+
+func (err *ParseError) Error() string {
+ return fmt.Sprintf("%s: %s", err.Code(), err.Message())
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
new file mode 100644
index 000000000..7f01cf7c7
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
@@ -0,0 +1,60 @@
+package ini
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// ParseStack is a stack that contains a container, the stack portion,
+// and the list of ASTs that have been successfully parsed.
+type ParseStack struct {
+ top int
+ container []AST
+ list []AST
+ index int
+}
+
+func newParseStack(sizeContainer, sizeList int) ParseStack {
+ return ParseStack{
+ container: make([]AST, sizeContainer),
+ list: make([]AST, sizeList),
+ }
+}
+
+// Pop will return and truncate the last container element.
+func (s *ParseStack) Pop() AST {
+ s.top--
+ return s.container[s.top]
+}
+
+// Push will add the new AST to the container
+func (s *ParseStack) Push(ast AST) {
+ s.container[s.top] = ast
+ s.top++
+}
+
+// MarkComplete will append the AST to the list of completed statements
+func (s *ParseStack) MarkComplete(ast AST) {
+ s.list[s.index] = ast
+ s.index++
+}
+
+// List will return the completed statements
+func (s ParseStack) List() []AST {
+ return s.list[:s.index]
+}
+
+// Len will return the length of the container
+func (s *ParseStack) Len() int {
+ return s.top
+}
+
+func (s ParseStack) String() string {
+ buf := bytes.Buffer{}
+ for i, node := range s.list {
+ buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node))
+ }
+
+ return buf.String()
+}
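
The stack's LIFO behavior plus the separate completed list can be sketched with a short in-package test (`newAST` comes from this package's AST definitions):

```go
package ini

import "testing"

// TestParseStack sketches the LIFO container and the completed list.
func TestParseStack(t *testing.T) {
	s := newParseStack(2, 2)
	a := newAST(ASTKindStatement, AST{})
	b := newAST(ASTKindExpr, AST{})

	s.Push(a)
	s.Push(b)
	if got := s.Pop(); got.Kind != ASTKindExpr {
		t.Errorf("expected the last pushed AST first, got %v", got.Kind)
	}

	s.MarkComplete(a)
	if len(s.List()) != 1 {
		t.Error("expected one completed statement")
	}
}
```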
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
new file mode 100644
index 000000000..f82095ba2
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
@@ -0,0 +1,41 @@
+package ini
+
+import (
+ "fmt"
+)
+
+var (
+ emptyRunes = []rune{}
+)
+
+func isSep(b []rune) bool {
+ if len(b) == 0 {
+ return false
+ }
+
+ switch b[0] {
+ case '[', ']':
+ return true
+ default:
+ return false
+ }
+}
+
+var (
+ openBrace = []rune("[")
+ closeBrace = []rune("]")
+)
+
+func newSepToken(b []rune) (Token, int, error) {
+ tok := Token{}
+
+ switch b[0] {
+ case '[':
+ tok = newToken(TokenSep, openBrace, NoneType)
+ case ']':
+ tok = newToken(TokenSep, closeBrace, NoneType)
+ default:
+ return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0]))
+ }
+ return tok, 1, nil
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
new file mode 100644
index 000000000..6bb696447
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
@@ -0,0 +1,45 @@
+package ini
+
+// skipper is used to skip certain blocks of an ini file.
+// Currently it is used to skip nested blocks of ini
+// files. See the example below.
+//
+// [ foo ]
+// nested = ; this section will be skipped
+// a=b
+// c=d
+// bar=baz ; this will be included
+type skipper struct {
+ shouldSkip bool
+ TokenSet bool
+ prevTok Token
+}
+
+func newSkipper() skipper {
+ return skipper{
+ prevTok: emptyToken,
+ }
+}
+
+func (s *skipper) ShouldSkip(tok Token) bool {
+ if s.shouldSkip &&
+ s.prevTok.Type() == TokenNL &&
+ tok.Type() != TokenWS {
+
+ s.Continue()
+ return false
+ }
+ s.prevTok = tok
+
+ return s.shouldSkip
+}
+
+func (s *skipper) Skip() {
+ s.shouldSkip = true
+ s.prevTok = emptyToken
+}
+
+func (s *skipper) Continue() {
+ s.shouldSkip = false
+ s.prevTok = emptyToken
+}
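
A short in-package sketch of the resume condition: skipping stops at the first non-whitespace token that follows a newline. This assumes `emptyToken` carries the `TokenNone` type, as the zero token elsewhere in this package suggests.

```go
package ini

import "testing"

// TestSkipperResumes sketches that a newline followed by a non-whitespace
// token ends the skip.
func TestSkipperResumes(t *testing.T) {
	s := newSkipper()
	s.Skip()

	nl := newToken(TokenNL, []rune("\n"), NoneType)
	lit := newToken(TokenLit, []rune("a"), StringType)

	if !s.ShouldSkip(nl) {
		t.Error("expected the newline itself to be skipped")
	}
	if s.ShouldSkip(lit) {
		t.Error("expected skipping to stop at the first token after a newline")
	}
}
```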
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
new file mode 100644
index 000000000..18f3fe893
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
@@ -0,0 +1,35 @@
+package ini
+
+// Statement is an empty AST mostly used for transitioning states.
+func newStatement() AST {
+ return newAST(ASTKindStatement, AST{})
+}
+
+// SectionStatement represents a section AST
+func newSectionStatement(tok Token) AST {
+ return newASTWithRootToken(ASTKindSectionStatement, tok)
+}
+
+// ExprStatement represents a completed expression AST
+func newExprStatement(ast AST) AST {
+ return newAST(ASTKindExprStatement, ast)
+}
+
+// CommentStatement represents a comment in the ini definition.
+//
+// grammar:
+// comment -> #comment' | ;comment'
+// comment' -> epsilon | value
+func newCommentStatement(tok Token) AST {
+ return newAST(ASTKindCommentStatement, newExpression(tok))
+}
+
+// CompletedSectionStatement represents a completed section
+func newCompletedSectionStatement(ast AST) AST {
+ return newAST(ASTKindCompletedSectionStatement, ast)
+}
+
+// SkipStatement is used to skip whole statements
+func newSkipStatement(ast AST) AST {
+ return newAST(ASTKindSkipStatement, ast)
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
new file mode 100644
index 000000000..305999d29
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
@@ -0,0 +1,284 @@
+package ini
+
+import (
+ "fmt"
+)
+
+// getStringValue will return the number of runes that make up a
+// quoted string, including the quotes
+//
+// an error will be returned if the string is not properly formatted
+func getStringValue(b []rune) (int, error) {
+ if b[0] != '"' {
+ return 0, NewParseError("strings must start with '\"'")
+ }
+
+ endQuote := false
+ i := 1
+
+ for ; i < len(b) && !endQuote; i++ {
+ if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped {
+ endQuote = true
+ break
+ } else if escaped {
+ /*c, err := getEscapedByte(b[i])
+ if err != nil {
+ return 0, err
+ }
+
+ b[i-1] = c
+ b = append(b[:i], b[i+1:]...)
+ i--*/
+
+ continue
+ }
+ }
+
+ if !endQuote {
+ return 0, NewParseError("missing '\"' in string value")
+ }
+
+ return i + 1, nil
+}
+
+// getBoolValue will return the number of runes that make up a
+// boolean literal
+//
+// an error will be returned if the boolean is not of a correct
+// value
+func getBoolValue(b []rune) (int, error) {
+ if len(b) < 4 {
+ return 0, NewParseError("invalid boolean value")
+ }
+
+ n := 0
+ for _, lv := range literalValues {
+ if len(lv) > len(b) {
+ continue
+ }
+
+ if isLitValue(lv, b) {
+ n = len(lv)
+ }
+ }
+
+ if n == 0 {
+ return 0, NewParseError("invalid boolean value")
+ }
+
+ return n, nil
+}
+
+// getNumericalValue will return the base of a number and the
+// number of runes read
+//
+// an error will be returned if the number is not of a correct
+// value
+func getNumericalValue(b []rune) (int, int, error) {
+ if !isDigit(b[0]) {
+ return 0, 0, NewParseError("invalid digit value")
+ }
+
+ i := 0
+ helper := numberHelper{}
+
+loop:
+ for negativeIndex := 0; i < len(b); i++ {
+ negativeIndex++
+
+ if !isDigit(b[i]) {
+ switch b[i] {
+ case '-':
+ if helper.IsNegative() || negativeIndex != 1 {
+ return 0, 0, NewParseError("parse error '-'")
+ }
+
+ n := getNegativeNumber(b[i:])
+ i += (n - 1)
+ helper.Determine(b[i])
+ continue
+ case '.':
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+ case 'e', 'E':
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+
+ negativeIndex = 0
+ case 'b':
+ if helper.numberFormat == hex {
+ break
+ }
+ fallthrough
+ case 'o', 'x':
+ if i == 0 && b[i] != '0' {
+ return 0, 0, NewParseError("incorrect base format, expected leading '0'")
+ }
+
+ if i != 1 {
+ return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
+ }
+
+ if err := helper.Determine(b[i]); err != nil {
+ return 0, 0, err
+ }
+ default:
+ if isWhitespace(b[i]) {
+ break loop
+ }
+
+ if isNewline(b[i:]) {
+ break loop
+ }
+
+ if !(helper.numberFormat == hex && isHexByte(b[i])) {
+ if i+2 < len(b) && !isNewline(b[i:i+2]) {
+ return 0, 0, NewParseError("invalid numerical character")
+ } else if !isNewline([]rune{b[i]}) {
+ return 0, 0, NewParseError("invalid numerical character")
+ }
+
+ break loop
+ }
+ }
+ }
+ }
+
+ return helper.Base(), i, nil
+}
+
+// isDigit will return whether or not the rune is a decimal digit
+func isDigit(b rune) bool {
+ return b >= '0' && b <= '9'
+}
+
+func hasExponent(v []rune) bool {
+ return contains(v, 'e') || contains(v, 'E')
+}
+
+func isBinaryByte(b rune) bool {
+ switch b {
+ case '0', '1':
+ return true
+ default:
+ return false
+ }
+}
+
+func isOctalByte(b rune) bool {
+ switch b {
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ return true
+ default:
+ return false
+ }
+}
+
+func isHexByte(b rune) bool {
+ if isDigit(b) {
+ return true
+ }
+ return (b >= 'A' && b <= 'F') ||
+ (b >= 'a' && b <= 'f')
+}
+
+func getValue(b []rune) (int, error) {
+ i := 0
+
+ for i < len(b) {
+ if isNewline(b[i:]) {
+ break
+ }
+
+ if isOp(b[i:]) {
+ break
+ }
+
+ valid, n, err := isValid(b[i:])
+ if err != nil {
+ return 0, err
+ }
+
+ if !valid {
+ break
+ }
+
+ i += n
+ }
+
+ return i, nil
+}
+
+// getNegativeNumber will return the number of runes that make up a
+// negative number in a rune slice. This will iterate through the
+// characters until a non-digit has been found.
+func getNegativeNumber(b []rune) int {
+ if b[0] != '-' {
+ return 0
+ }
+
+ i := 1
+ for ; i < len(b); i++ {
+ if !isDigit(b[i]) {
+ return i
+ }
+ }
+
+ return i
+}
+
+// isEscaped will return whether or not the character is an escaped
+// character.
+func isEscaped(value []rune, b rune) bool {
+ if len(value) == 0 {
+ return false
+ }
+
+ switch b {
+ case '\'': // single quote
+ case '"': // quote
+ case 'n': // newline
+ case 't': // tab
+ case '\\': // backslash
+ default:
+ return false
+ }
+
+ return value[len(value)-1] == '\\'
+}
+
+func getEscapedByte(b rune) (rune, error) {
+ switch b {
+ case '\'': // single quote
+ return '\'', nil
+ case '"': // quote
+ return '"', nil
+ case 'n': // newline
+ return '\n', nil
+	case 't': // tab
+ return '\t', nil
+ case '\\': // backslash
+ return '\\', nil
+ default:
+ return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
+ }
+}
+
+func removeEscapedCharacters(b []rune) []rune {
+ for i := 0; i < len(b); i++ {
+ if isEscaped(b[:i], b[i]) {
+ c, err := getEscapedByte(b[i])
+ if err != nil {
+ return b
+ }
+
+ b[i-1] = c
+ b = append(b[:i], b[i+1:]...)
+ i--
+ }
+ }
+
+ return b
+}
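
For example, `getStringValue` counts the runes of a quoted string and treats a backslash-escaped quote as part of the value rather than the terminator; a sketch as an in-package test:

```go
package ini

import "testing"

// TestGetStringValueEscapedQuote sketches that `"a\"b"` is consumed as one
// six-rune quoted string, the escaped quote included.
func TestGetStringValueEscapedQuote(t *testing.T) {
	n, err := getStringValue([]rune(`"a\"b"`))
	if err != nil {
		t.Fatal(err)
	}
	if n != 6 {
		t.Errorf("got %d runes, want 6", n)
	}
}
```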
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
new file mode 100644
index 000000000..94841c324
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
@@ -0,0 +1,166 @@
+package ini
+
+import (
+ "fmt"
+ "sort"
+)
+
+// Visitor is an interface used by walkers that will
+// traverse an array of ASTs.
+type Visitor interface {
+ VisitExpr(AST) error
+ VisitStatement(AST) error
+}
+
+// DefaultVisitor is used to visit statements and expressions
+// and ensure that they are both of the correct format.
+// In addition, upon visiting this will build sections and populate
+// the Sections field which can be used to retrieve profile
+// configuration.
+type DefaultVisitor struct {
+ scope string
+ Sections Sections
+}
+
+// NewDefaultVisitor returns a DefaultVisitor
+func NewDefaultVisitor() *DefaultVisitor {
+ return &DefaultVisitor{
+ Sections: Sections{
+ container: map[string]Section{},
+ },
+ }
+}
+
+// VisitExpr visits expression statements and records their key/value pairs in the currently scoped section.
+func (v *DefaultVisitor) VisitExpr(expr AST) error {
+ t := v.Sections.container[v.scope]
+ if t.values == nil {
+ t.values = values{}
+ }
+
+ switch expr.Kind {
+ case ASTKindExprStatement:
+ opExpr := expr.GetRoot()
+ switch opExpr.Kind {
+ case ASTKindEqualExpr:
+ children := opExpr.GetChildren()
+ if len(children) <= 1 {
+ return NewParseError("unexpected token type")
+ }
+
+ rhs := children[1]
+
+ if rhs.Root.Type() != TokenLit {
+ return NewParseError("unexpected token type")
+ }
+
+ key := EqualExprKey(opExpr)
+ v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
+ if err != nil {
+ return err
+ }
+
+ t.values[key] = v
+ default:
+ return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+ }
+ default:
+ return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+ }
+
+ v.Sections.container[v.scope] = t
+ return nil
+}
+
+// VisitStatement visits completed section statements and updates the visitor's current scope.
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+ switch stmt.Kind {
+ case ASTKindCompletedSectionStatement:
+ child := stmt.GetRoot()
+ if child.Kind != ASTKindSectionStatement {
+ return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+ }
+
+ name := string(child.Root.Raw())
+ v.Sections.container[name] = Section{}
+ v.scope = name
+ default:
+ return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+ }
+
+ return nil
+}
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+ container map[string]Section
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned as the second return value.
+func (t Sections) GetSection(p string) (Section, bool) {
+ v, ok := t.container[p]
+ return v, ok
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+ keys := make([]string, len(t.container))
+ i := 0
+ for k := range t.container {
+ keys[i] = k
+ i++
+ }
+
+ sort.Strings(keys)
+ return keys
+}
+
+// Section contains a name and values. This represents
+// a sectioned entry in a configuration file.
+type Section struct {
+ Name string
+ values values
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+ _, ok := t.values[k]
+ return ok
+}
+
+// ValueType will return what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+ v, ok := t.values[k]
+ return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) bool {
+ return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) int64 {
+ return t.values[k].IntValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) float64 {
+ return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+ _, ok := t.values[k]
+ if !ok {
+ return ""
+ }
+ return t.values[k].StringValue()
+}
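
Putting the parser, walker, and visitor together: the sketch below parses a document, walks the ASTs with a `DefaultVisitor`, and reads typed values back out of the resulting `Sections`. As before, the import only works from inside the aws-sdk-go module because the package is internal.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/internal/ini"
)

func main() {
	cfg := "[default]\nregion = us-west-2\nmax_retries = 3\n"

	tree, err := ini.ParseAST(strings.NewReader(cfg))
	if err != nil {
		panic(err)
	}

	// The visitor populates Sections as the walker hands it statements.
	v := ini.NewDefaultVisitor()
	if err := ini.Walk(tree, v); err != nil {
		panic(err)
	}

	section, ok := v.Sections.GetSection("default")
	if !ok {
		panic("section not found")
	}
	fmt.Println(section.String("region"))   // us-west-2
	fmt.Println(section.Int("max_retries")) // 3
}
```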
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
new file mode 100644
index 000000000..99915f7f7
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
+// Walk will traverse the AST using the v, the Visitor.
+func Walk(tree []AST, v Visitor) error {
+ for _, node := range tree {
+ switch node.Kind {
+ case ASTKindExpr,
+ ASTKindExprStatement:
+
+ if err := v.VisitExpr(node); err != nil {
+ return err
+ }
+ case ASTKindStatement,
+ ASTKindCompletedSectionStatement,
+ ASTKindNestedSectionStatement,
+ ASTKindCompletedNestedSectionStatement:
+
+ if err := v.VisitStatement(node); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
new file mode 100644
index 000000000..7ffb4ae06
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+ "unicode"
+)
+
+// isWhitespace will return whether or not the character is
+// a whitespace character.
+//
+// Whitespace is defined as any Unicode space other than '\n' and '\r'.
+func isWhitespace(c rune) bool {
+ return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+ i := 0
+ for ; i < len(b); i++ {
+ if !isWhitespace(b[i]) {
+ break
+ }
+ }
+
+ return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/src/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
new file mode 100644
index 000000000..5aa9137e0
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -0,0 +1,10 @@
+// +build !go1.7
+
+package sdkio
+
+// Copy of Go 1.7 io package's Seeker constants.
+const (
+ SeekStart = 0 // seek relative to the origin of the file
+ SeekCurrent = 1 // seek relative to the current offset
+ SeekEnd = 2 // seek relative to the end
+)
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/src/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
new file mode 100644
index 000000000..e5f005613
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
@@ -0,0 +1,12 @@
+// +build go1.7
+
+package sdkio
+
+import "io"
+
+// Alias for Go 1.7 io package Seeker constants
+const (
+ SeekStart = io.SeekStart // seek relative to the origin of the file
+ SeekCurrent = io.SeekCurrent // seek relative to the current offset
+ SeekEnd = io.SeekEnd // seek relative to the end
+)
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/src/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
new file mode 100644
index 000000000..0c9802d87
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
@@ -0,0 +1,29 @@
+package sdkrand
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+)
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
+
+// SeededRand is a new RNG using a thread-safe implementation of rand.Source
+var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/src/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
new file mode 100644
index 000000000..38ea61afe
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
@@ -0,0 +1,23 @@
+package sdkuri
+
+import (
+ "path"
+ "strings"
+)
+
+// PathJoin will join the elements of the path delimited by the "/"
+// character. Similar to path.Join with the exception that the trailing "/"
+// character is preserved if present.
+func PathJoin(elems ...string) string {
+ if len(elems) == 0 {
+ return ""
+ }
+
+ hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
+ str := path.Join(elems...)
+ if hasTrailing && str != "/" {
+ str += "/"
+ }
+
+ return str
+}
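
For instance, the trailing slash that `path.Join` would normally strip is kept, which matters for slash-terminated endpoints (internal package; the import is shown for illustration):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/sdkuri"
)

func main() {
	// path.Join alone would return "latest/meta-data" in both cases;
	// PathJoin keeps the trailing slash of the last element.
	fmt.Println(sdkuri.PathJoin("latest", "meta-data/")) // latest/meta-data/
	fmt.Println(sdkuri.PathJoin("latest", "meta-data"))  // latest/meta-data
}
```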
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/src/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
new file mode 100644
index 000000000..7da8a49ce
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
@@ -0,0 +1,12 @@
+package shareddefaults
+
+const (
+ // ECSCredsProviderEnvVar is an environmental variable key used to
+ // determine which path needs to be hit.
+ ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// ECSContainerCredentialsURI is the endpoint to retrieve container
+// credentials. This can be overridden in tests to ensure the credential
+// process is behaving correctly.
+var ECSContainerCredentialsURI = "http://169.254.170.2"
diff --git a/src/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/src/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
new file mode 100644
index 000000000..ebcbc2b40
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
@@ -0,0 +1,40 @@
+package shareddefaults
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "credentials")
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "config")
+}
+
+// UserHomeDir returns the home directory for the user the process is
+// running under.
+func UserHomeDir() string {
+ if runtime.GOOS == "windows" { // Windows
+ return os.Getenv("USERPROFILE")
+ }
+
+ // *nix
+ return os.Getenv("HOME")
+}
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
new file mode 100644
index 000000000..d7d42db0a
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
@@ -0,0 +1,68 @@
+package protocol
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ValidateEndpointHostHandler is a request handler that will validate the
+// request endpoint's host is a valid RFC 3986 host.
+var ValidateEndpointHostHandler = request.NamedHandler{
+ Name: "awssdk.protocol.ValidateEndpointHostHandler",
+ Fn: func(r *request.Request) {
+ err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host)
+ if err != nil {
+ r.Error = err
+ }
+ },
+}
+
+// ValidateEndpointHost validates that the host string passed in is a valid RFC
+// 3986 host. Returns error if the host is not valid.
+func ValidateEndpointHost(opName, host string) error {
+ paramErrs := request.ErrInvalidParams{Context: opName}
+ labels := strings.Split(host, ".")
+
+ for i, label := range labels {
+ if i == len(labels)-1 && len(label) == 0 {
+ // Allow trailing dot for FQDN hosts.
+ continue
+ }
+
+ if !ValidHostLabel(label) {
+ paramErrs.Add(request.NewErrParamFormat(
+ "endpoint host label", "[a-zA-Z0-9-]{1,63}", label))
+ }
+ }
+
+ if len(host) > 255 {
+ paramErrs.Add(request.NewErrParamMaxLen(
+ "endpoint host", 255, host,
+ ))
+ }
+
+ if paramErrs.Len() > 0 {
+ return paramErrs
+ }
+ return nil
+}
+
+// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
+func ValidHostLabel(label string) bool {
+ if l := len(label); l == 0 || l > 63 {
+ return false
+ }
+ for _, r := range label {
+ switch {
+ case r >= '0' && r <= '9':
+ case r >= 'A' && r <= 'Z':
+ case r >= 'a' && r <= 'z':
+ case r == '-':
+ default:
+ return false
+ }
+ }
+
+ return true
+}
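
A quick sketch of the validation rules: labels are limited to `[a-zA-Z0-9-]{1,63}`, and the trailing dot of a fully qualified name is tolerated:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	fmt.Println(protocol.ValidHostLabel("data-bucket")) // true
	fmt.Println(protocol.ValidHostLabel("under_score")) // false: '_' is not allowed

	// The trailing dot of an FQDN does not fail validation.
	err := protocol.ValidateEndpointHost("MyOperation", "service.region.amazonaws.com.")
	fmt.Println(err) // <nil>
}
```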
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
new file mode 100644
index 000000000..915b0fcaf
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
@@ -0,0 +1,54 @@
+package protocol
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// HostPrefixHandlerName is the handler name for the host prefix request
+// handler.
+const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler"
+
+// NewHostPrefixHandler constructs a build handler
+func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler {
+ builder := HostPrefixBuilder{
+ Prefix: prefix,
+ LabelsFn: labelsFn,
+ }
+
+ return request.NamedHandler{
+ Name: HostPrefixHandlerName,
+ Fn: builder.Build,
+ }
+}
+
+// HostPrefixBuilder provides the request handler to expand and prepend
+// the host prefix into the operation's request endpoint host.
+type HostPrefixBuilder struct {
+ Prefix string
+ LabelsFn func() map[string]string
+}
+
+// Build updates the passed in Request with the HostPrefix template expanded.
+func (h HostPrefixBuilder) Build(r *request.Request) {
+ if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
+ return
+ }
+
+ var labels map[string]string
+ if h.LabelsFn != nil {
+ labels = h.LabelsFn()
+ }
+
+ prefix := h.Prefix
+ for name, value := range labels {
+ prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
+ }
+
+ r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
+ if len(r.HTTPRequest.Host) > 0 {
+ r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
+ }
+}
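
The template expansion is plain substring replacement of `{label}` placeholders. The standalone helper below is hypothetical; it just mirrors the loop inside `Build` to show the substitution on its own:

```go
package main

import (
	"fmt"
	"strings"
)

// expand mirrors the label substitution HostPrefixBuilder.Build performs
// on its Prefix template (hypothetical helper, for illustration only).
func expand(prefix string, labels map[string]string) string {
	for name, value := range labels {
		prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
	}
	return prefix
}

func main() {
	host := expand("{Bucket}.", map[string]string{"Bucket": "my-bucket"}) + "s3.amazonaws.com"
	fmt.Println(host) // my-bucket.s3.amazonaws.com
}
```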
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 000000000..53831dff9
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+ "crypto/rand"
+ "fmt"
+ "reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. This is exported for testing, and should not be used.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an Idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// which are not already set can be auto filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+ switch u := v.Interface().(type) {
+ // To auto fill an Idempotency token the field must be a string,
+ // tagged for auto fill, and have a zero value.
+ case *string:
+ return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ case string:
+ return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ }
+
+ return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+ b := make([]byte, 16)
+ RandReader.Read(b)
+
+ return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with an Idempotency Token,
+// given that the value can be set. Will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() && v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ v = reflect.Indirect(v)
+
+ if !v.CanSet() {
+		panic(fmt.Sprintf("unable to set idempotency token %v", v))
+ }
+
+ b := make([]byte, 16)
+ _, err := rand.Read(b)
+ if err != nil {
+ // TODO handle error
+ return
+ }
+
+ v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+ // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+ // 13th character is "4"
+ u[6] = (u[6] | 0x40) & 0x4F
+ // 17th character is "8", "9", "a", or "b"
+ u[8] = (u[8] | 0x80) & 0xBF
+
+ return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
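
Usage is a single call; each token is a fresh version 4 UUID built from 16 random bytes:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	// Prints something like 9D9694B2-2AE8-4868-9FCF-B1D1F1B6F9CC.
	fmt.Println(protocol.GetIdempotencyToken())
}
```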
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
new file mode 100644
index 000000000..864fb6704
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
@@ -0,0 +1,296 @@
+// Package jsonutil provides JSON serialization of AWS requests and responses.
+package jsonutil
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+var timeType = reflect.ValueOf(time.Time{}).Type()
+var byteSliceType = reflect.ValueOf([]byte{}).Type()
+
+// BuildJSON builds a JSON string for a given object v.
+func BuildJSON(v interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+
+ err := buildAny(reflect.ValueOf(v), &buf, "")
+ return buf.Bytes(), err
+}
+
+func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ origVal := value
+ value = reflect.Indirect(value)
+ if !value.IsValid() {
+ return nil
+ }
+
+ vtype := value.Type()
+
+ t := tag.Get("type")
+ if t == "" {
+ switch vtype.Kind() {
+ case reflect.Struct:
+ // also it can't be a time object
+ if value.Type() != timeType {
+ t = "structure"
+ }
+ case reflect.Slice:
+ // also it can't be a byte slice
+ if _, ok := value.Interface().([]byte); !ok {
+ t = "list"
+ }
+ case reflect.Map:
+ // cannot be a JSONValue map
+ if _, ok := value.Interface().(aws.JSONValue); !ok {
+ t = "map"
+ }
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := vtype.FieldByName("_"); ok {
+ tag = field.Tag
+ }
+ return buildStruct(value, buf, tag)
+ case "list":
+ return buildList(value, buf, tag)
+ case "map":
+ return buildMap(value, buf, tag)
+ default:
+ return buildScalar(origVal, buf, tag)
+ }
+}
+
+func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ // unwrap payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := value.Type().FieldByName(payload)
+ tag = field.Tag
+ value = elemOf(value.FieldByName(payload))
+
+ if !value.IsValid() {
+ return nil
+ }
+ }
+
+ buf.WriteByte('{')
+
+ t := value.Type()
+ first := true
+ for i := 0; i < t.NumField(); i++ {
+ member := value.Field(i)
+
+ // This allocates the most memory.
+ // Additionally, we cannot skip nil fields due to
+ // idempotency auto filling.
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("json") == "-" {
+ continue
+ }
+ if field.Tag.Get("location") != "" {
+ continue // ignore non-body elements
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(member, field) {
+ token := protocol.GetIdempotencyToken()
+ member = reflect.ValueOf(&token)
+ }
+
+ if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() {
+ continue // ignore unset fields
+ }
+
+ if first {
+ first = false
+ } else {
+ buf.WriteByte(',')
+ }
+
+ // figure out what this field is called
+ name := field.Name
+ if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+
+ writeString(name, buf)
+ buf.WriteString(`:`)
+
+ err := buildAny(member, buf, field.Tag)
+ if err != nil {
+ return err
+ }
+
+ }
+
+ buf.WriteString("}")
+
+ return nil
+}
+
+func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ buf.WriteString("[")
+
+ for i := 0; i < value.Len(); i++ {
+ buildAny(value.Index(i), buf, "")
+
+ if i < value.Len()-1 {
+ buf.WriteString(",")
+ }
+ }
+
+ buf.WriteString("]")
+
+ return nil
+}
+
+type sortedValues []reflect.Value
+
+func (sv sortedValues) Len() int { return len(sv) }
+func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() }
+
+func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ buf.WriteString("{")
+
+ sv := sortedValues(value.MapKeys())
+ sort.Sort(sv)
+
+ for i, k := range sv {
+ if i > 0 {
+ buf.WriteByte(',')
+ }
+
+ writeString(k.String(), buf)
+ buf.WriteString(`:`)
+
+ buildAny(value.MapIndex(k), buf, "")
+ }
+
+ buf.WriteString("}")
+
+ return nil
+}
+
+func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+ // prevents allocation on the heap.
+ scratch := [64]byte{}
+ switch value := reflect.Indirect(v); value.Kind() {
+ case reflect.String:
+ writeString(value.String(), buf)
+ case reflect.Bool:
+ if value.Bool() {
+ buf.WriteString("true")
+ } else {
+ buf.WriteString("false")
+ }
+ case reflect.Int64:
+ buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10))
+ case reflect.Float64:
+ f := value.Float()
+ if math.IsInf(f, 0) || math.IsNaN(f) {
+ return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)}
+ }
+ buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64))
+ default:
+ switch converted := value.Interface().(type) {
+ case time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.UnixTimeFormatName
+ }
+
+ ts := protocol.FormatTime(format, converted)
+ if format != protocol.UnixTimeFormatName {
+ ts = `"` + ts + `"`
+ }
+
+ buf.WriteString(ts)
+ case []byte:
+ if !value.IsNil() {
+ buf.WriteByte('"')
+ if len(converted) < 1024 {
+ // for small buffers, using Encode directly is much faster.
+ dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted)))
+ base64.StdEncoding.Encode(dst, converted)
+ buf.Write(dst)
+ } else {
+ // for large buffers, avoid unnecessary extra temporary
+ // buffer space.
+ enc := base64.NewEncoder(base64.StdEncoding, buf)
+ enc.Write(converted)
+ enc.Close()
+ }
+ buf.WriteByte('"')
+ }
+ case aws.JSONValue:
+ str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape)
+ if err != nil {
+ return fmt.Errorf("unable to encode JSONValue, %v", err)
+ }
+ buf.WriteString(str)
+ default:
+ return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
+ }
+ }
+ return nil
+}
+
+var hex = "0123456789abcdef"
+
+func writeString(s string, buf *bytes.Buffer) {
+ buf.WriteByte('"')
+ for i := 0; i < len(s); i++ {
+ if s[i] == '"' {
+ buf.WriteString(`\"`)
+ } else if s[i] == '\\' {
+ buf.WriteString(`\\`)
+ } else if s[i] == '\b' {
+ buf.WriteString(`\b`)
+ } else if s[i] == '\f' {
+ buf.WriteString(`\f`)
+ } else if s[i] == '\r' {
+ buf.WriteString(`\r`)
+ } else if s[i] == '\t' {
+ buf.WriteString(`\t`)
+ } else if s[i] == '\n' {
+ buf.WriteString(`\n`)
+ } else if s[i] < 32 {
+ buf.WriteString("\\u00")
+ buf.WriteByte(hex[s[i]>>4])
+ buf.WriteByte(hex[s[i]&0xF])
+ } else {
+ buf.WriteByte(s[i])
+ }
+ }
+ buf.WriteByte('"')
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
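
A sketch of `BuildJSON` against a hand-written shape: the struct and its tags below are hypothetical, but they follow the same `locationName`/`type` tag convention the generated API shapes use.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
)

// Input is a hypothetical API shape using the SDK's struct tag conventions.
type Input struct {
	_    struct{} `type:"structure"`
	Name *string  `locationName:"name" type:"string"`
	Size *int64   `locationName:"size" type:"integer"`
}

func main() {
	b, err := jsonutil.BuildJSON(&Input{
		Name: aws.String("example"),
		Size: aws.Int64(42),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"name":"example","size":42}
}
```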
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
new file mode 100644
index 000000000..ea0da79a5
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
@@ -0,0 +1,250 @@
+package jsonutil
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// UnmarshalJSONError unmarshals the reader's JSON document into the passed in
+// type. The value to unmarshal the json document into must be a pointer to the
+// type.
+func UnmarshalJSONError(v interface{}, stream io.Reader) error {
+ var errBuf bytes.Buffer
+ body := io.TeeReader(stream, &errBuf)
+
+ err := json.NewDecoder(body).Decode(v)
+ if err != nil {
+ msg := "failed decoding error message"
+ if err == io.EOF {
+ msg = "error message missing"
+ err = nil
+ }
+ return awserr.NewUnmarshalError(err, msg, errBuf.Bytes())
+ }
+
+ return nil
+}
+
+// UnmarshalJSON reads a stream and unmarshals the results in object v.
+func UnmarshalJSON(v interface{}, stream io.Reader) error {
+ var out interface{}
+
+ err := json.NewDecoder(stream).Decode(&out)
+ if err == io.EOF {
+ return nil
+ } else if err != nil {
+ return err
+ }
+
+ return unmarshalAny(reflect.ValueOf(v), out, "")
+}
+
+func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+ vtype := value.Type()
+ if vtype.Kind() == reflect.Ptr {
+ vtype = vtype.Elem() // check kind of actual element type
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch vtype.Kind() {
+ case reflect.Struct:
+ // also it can't be a time object
+ if _, ok := value.Interface().(*time.Time); !ok {
+ t = "structure"
+ }
+ case reflect.Slice:
+ // also it can't be a byte slice
+ if _, ok := value.Interface().([]byte); !ok {
+ t = "list"
+ }
+ case reflect.Map:
+ // cannot be a JSONValue map
+ if _, ok := value.Interface().(aws.JSONValue); !ok {
+ t = "map"
+ }
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := vtype.FieldByName("_"); ok {
+ tag = field.Tag
+ }
+ return unmarshalStruct(value, data, tag)
+ case "list":
+ return unmarshalList(value, data, tag)
+ case "map":
+ return unmarshalMap(value, data, tag)
+ default:
+ return unmarshalScalar(value, data, tag)
+ }
+}
+
+func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+ if data == nil {
+ return nil
+ }
+ mapData, ok := data.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("JSON value is not a structure (%#v)", data)
+ }
+
+ t := value.Type()
+ if value.Kind() == reflect.Ptr {
+ if value.IsNil() { // create the structure if it's nil
+ s := reflect.New(value.Type().Elem())
+ value.Set(s)
+ value = s
+ }
+
+ value = value.Elem()
+ t = t.Elem()
+ }
+
+ // unwrap any payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := t.FieldByName(payload)
+ return unmarshalAny(value.FieldByName(payload), data, field.Tag)
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+
+ // figure out what this field is called
+ name := field.Name
+ if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+
+ member := value.FieldByIndex(field.Index)
+ err := unmarshalAny(member, mapData[name], field.Tag)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+ if data == nil {
+ return nil
+ }
+ listData, ok := data.([]interface{})
+ if !ok {
+ return fmt.Errorf("JSON value is not a list (%#v)", data)
+ }
+
+ if value.IsNil() {
+ l := len(listData)
+ value.Set(reflect.MakeSlice(value.Type(), l, l))
+ }
+
+ for i, c := range listData {
+ err := unmarshalAny(value.Index(i), c, "")
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+ if data == nil {
+ return nil
+ }
+ mapData, ok := data.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("JSON value is not a map (%#v)", data)
+ }
+
+ if value.IsNil() {
+ value.Set(reflect.MakeMap(value.Type()))
+ }
+
+ for k, v := range mapData {
+ kvalue := reflect.ValueOf(k)
+ vvalue := reflect.New(value.Type().Elem()).Elem()
+
+ unmarshalAny(vvalue, v, "")
+ value.SetMapIndex(kvalue, vvalue)
+ }
+
+ return nil
+}
+
+func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+
+ switch d := data.(type) {
+ case nil:
+ return nil // nothing to do here
+ case string:
+ switch value.Interface().(type) {
+ case *string:
+ value.Set(reflect.ValueOf(&d))
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(d)
+ if err != nil {
+ return err
+ }
+ value.Set(reflect.ValueOf(b))
+ case *time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ t, err := protocol.ParseTime(format, d)
+ if err != nil {
+ return err
+ }
+ value.Set(reflect.ValueOf(&t))
+ case aws.JSONValue:
+ // No need to use escaping as the value is a non-quoted string.
+ v, err := protocol.DecodeJSONValue(d, protocol.NoEscape)
+ if err != nil {
+ return err
+ }
+ value.Set(reflect.ValueOf(v))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+ }
+ case float64:
+ switch value.Interface().(type) {
+ case *int64:
+ di := int64(d)
+ value.Set(reflect.ValueOf(&di))
+ case *float64:
+ value.Set(reflect.ValueOf(&d))
+ case *time.Time:
+ // Time unmarshaled from a float64 can only be epoch seconds
+ t := time.Unix(int64(d), 0).UTC()
+ value.Set(reflect.ValueOf(&t))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+ }
+ case bool:
+ switch value.Interface().(type) {
+ case *bool:
+ value.Set(reflect.ValueOf(&d))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+ }
+ default:
+ return fmt.Errorf("unsupported JSON value (%v)", data)
+ }
+ return nil
+}
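
The scalar branch above deals with two quirks of decoding through interface{}: encoding/json surfaces every JSON number as float64 (hence the explicit narrowing to *int64), and blob members arrive as base64 strings. A minimal standalone sketch of both, stdlib only, with a hypothetical payload:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// Decoding into interface{} first, as UnmarshalJSON above does,
	// yields generic types: numbers are float64, blobs are base64 text.
	var data map[string]interface{}
	_ = json.Unmarshal([]byte(`{"count":3,"blob":"aGVsbG8="}`), &data)

	fmt.Printf("%T\n", data["count"]) // float64, narrowed to int64 by the caller

	b, _ := base64.StdEncoding.DecodeString(data["blob"].(string))
	fmt.Println(string(b)) // hello
}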
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
new file mode 100644
index 000000000..bfedc9fd4
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
@@ -0,0 +1,110 @@
+// Package jsonrpc provides JSON RPC utilities for serialization of AWS
+// requests and responses.
+package jsonrpc
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go
+
+import (
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+var emptyJSON = []byte("{}")
+
+// BuildHandler is a named request handler for building jsonrpc protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build}
+
+// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError}
+
+// Build builds a JSON payload for a JSON RPC request.
+func Build(req *request.Request) {
+ var buf []byte
+ var err error
+ if req.ParamsFilled() {
+ buf, err = jsonutil.BuildJSON(req.Params)
+ if err != nil {
+ req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err)
+ return
+ }
+ } else {
+ buf = emptyJSON
+ }
+
+ if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" {
+ req.SetBufferBody(buf)
+ }
+
+ if req.ClientInfo.TargetPrefix != "" {
+ target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name
+ req.HTTPRequest.Header.Add("X-Amz-Target", target)
+ }
+
+ // Only set the content type if one is not already specified and a
+ // JSONVersion is specified.
+ if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 {
+ jsonVersion := req.ClientInfo.JSONVersion
+ req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion)
+ }
+}
+
+// Unmarshal unmarshals a response for a JSON RPC service.
+func Unmarshal(req *request.Request) {
+ defer req.HTTPResponse.Body.Close()
+ if req.DataFilled() {
+ err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
+ if err != nil {
+ req.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err),
+ req.HTTPResponse.StatusCode,
+ req.RequestID,
+ )
+ }
+ }
+ return
+}
+
+// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
+func UnmarshalMeta(req *request.Request) {
+ rest.UnmarshalMeta(req)
+}
+
+// UnmarshalError unmarshals an error response for a JSON RPC service.
+func UnmarshalError(req *request.Request) {
+ defer req.HTTPResponse.Body.Close()
+
+ var jsonErr jsonErrorResponse
+ err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body)
+ if err != nil {
+ req.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to unmarshal error message", err),
+ req.HTTPResponse.StatusCode,
+ req.RequestID,
+ )
+ return
+ }
+
+ codes := strings.SplitN(jsonErr.Code, "#", 2)
+ req.Error = awserr.NewRequestFailure(
+ awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
+ req.HTTPResponse.StatusCode,
+ req.RequestID,
+ )
+}
+
+type jsonErrorResponse struct {
+ Code string `json:"__type"`
+ Message string `json:"message"`
+}
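
Build above shapes the two headers that distinguish JSON RPC services: X-Amz-Target carries "<TargetPrefix>.<OperationName>", and Content-Type advertises the JSON version. A standalone sketch of the same shaping; the prefix, operation name, and endpoint here are hypothetical:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	req, _ := http.NewRequest("POST", "https://example.us-east-1.amazonaws.com/", nil)

	// Target is "<TargetPrefix>.<OperationName>", as in Build above.
	req.Header.Add("X-Amz-Target", "ExampleService_20180101.DoThing")

	// The content type is only set when absent and a JSON version is known.
	if req.Header.Get("Content-Type") == "" {
		req.Header.Set("Content-Type", "application/x-amz-json-1.1")
	}
	fmt.Println(req.Header)
}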
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 000000000..776d11018
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be used for escaping a value
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+ NoEscape EscapeMode = iota
+ Base64Escape
+ QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+ b, err := json.Marshal(v)
+ if err != nil {
+ return "", err
+ }
+
+ switch escape {
+ case NoEscape:
+ return string(b), nil
+ case Base64Escape:
+ return base64.StdEncoding.EncodeToString(b), nil
+ case QuotedEscape:
+ return strconv.Quote(string(b)), nil
+ }
+
+ panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue.
+// Optionally base64 decodes the value first, before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+ var b []byte
+ var err error
+
+ switch escape {
+ case NoEscape:
+ b = []byte(v)
+ case Base64Escape:
+ b, err = base64.StdEncoding.DecodeString(v)
+ case QuotedEscape:
+ var u string
+ u, err = strconv.Unquote(v)
+ b = []byte(u)
+ default:
+ panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ m := aws.JSONValue{}
+ err = json.Unmarshal(b, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
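
The Base64Escape mode exists because JSONValue payloads may ride in HTTP headers, where raw JSON is unsafe. A round-trip of that mode with nothing but the standard library:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	v := map[string]interface{}{"k": "v"}

	// Encode: JSON first, then base64 for header-safe transport.
	b, _ := json.Marshal(v)
	escaped := base64.StdEncoding.EncodeToString(b)

	// Decode: reverse the two steps.
	raw, _ := base64.StdEncoding.DecodeString(escaped)
	var out map[string]interface{}
	_ = json.Unmarshal(raw, &out)

	fmt.Println(escaped, out)
}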
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100644
index 000000000..e21614a12
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into an SDK shape.
+type PayloadUnmarshaler interface {
+ UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides the support for unmarshaling a payload reader to
+// a shape without needing an SDK request first.
+type HandlerPayloadUnmarshal struct {
+ Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if unmarshaling
+// fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+ req := &request.Request{
+ HTTPRequest: &http.Request{},
+ HTTPResponse: &http.Response{
+ StatusCode: 200,
+ Header: http.Header{},
+ Body: ioutil.NopCloser(r),
+ },
+ Data: v,
+ }
+
+ h.Unmarshalers.Run(req)
+
+ return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling an SDK shape into an
+// io.Writer.
+type PayloadMarshaler interface {
+ MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling an SDK shape into an io.Writer without
+// needing an SDK request first.
+type HandlerPayloadMarshal struct {
+ Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if marshaling fails.
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
+ req := request.New(
+ aws.Config{},
+ metadata.ClientInfo{},
+ request.Handlers{},
+ nil,
+ &request.Operation{HTTPMethod: "GET"},
+ v,
+ nil,
+ )
+
+ h.Marshalers.Run(req)
+
+ if req.Error != nil {
+ return req.Error
+ }
+
+ io.Copy(w, req.GetBody())
+
+ return nil
+}
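
HandlerPayloadUnmarshal works by fabricating a minimal request.Request around a bare reader so the existing unmarshal handlers can run without a real HTTP exchange. The same trick in isolation, with a stand-in decoder instead of a HandlerList:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strings"
)

// decode stands in for any handler that expects a full *http.Response.
func decode(resp *http.Response) (string, error) {
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	return string(b), err
}

func main() {
	// Wrap a detached payload in a synthetic 200 response, as
	// HandlerPayloadUnmarshal does above.
	var r io.Reader = strings.NewReader(`{"ok":true}`)
	resp := &http.Response{
		StatusCode: 200,
		Header:     http.Header{},
		Body:       ioutil.NopCloser(r),
	}
	out, _ := decode(resp)
	fmt.Println(out)
}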
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
new file mode 100644
index 000000000..0cb99eb57
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -0,0 +1,36 @@
+// Package query provides serialization of AWS query requests, and responses.
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+
+import (
+ "net/url"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
+)
+
+// BuildHandler is a named request handler for building query protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
+
+// Build builds a request for an AWS Query service.
+func Build(r *request.Request) {
+ body := url.Values{
+ "Action": {r.Operation.Name},
+ "Version": {r.ClientInfo.APIVersion},
+ }
+ if err := queryutil.Parse(body, r.Params, false); err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err)
+ return
+ }
+
+ if !r.IsPresigned() {
+ r.HTTPRequest.Method = "POST"
+ r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+ r.SetBufferBody([]byte(body.Encode()))
+ } else { // This is a pre-signed request
+ r.HTTPRequest.Method = "GET"
+ r.HTTPRequest.URL.RawQuery = body.Encode()
+ }
+}
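
For a non-presigned call, the Action/Version pair plus the serialized parameters become a form-encoded POST body; presigned calls move the same string into the query. A sketch of the body construction, with a hypothetical operation name and parameter:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	body := url.Values{
		"Action":  {"DescribeWidgets"},
		"Version": {"2018-01-01"},
	}
	body.Set("MaxResults", "10")

	// Encode sorts keys, so the wire form is deterministic.
	fmt.Println(body.Encode()) // Action=DescribeWidgets&MaxResults=10&Version=2018-01-01
}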
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
new file mode 100644
index 000000000..75866d012
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -0,0 +1,246 @@
+package queryutil
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/url"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Parse parses an object i and fills a url.Values object. The isEC2 flag
+// indicates if this is the EC2 Query sub-protocol.
+func Parse(body url.Values, i interface{}, isEC2 bool) error {
+ q := queryParser{isEC2: isEC2}
+ return q.parseValue(body, reflect.ValueOf(i), "", "")
+}
+
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+type queryParser struct {
+ isEC2 bool
+}
+
+func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ value = elemOf(value)
+
+ // no need to handle zero values
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ return q.parseStruct(v, value, prefix)
+ case "list":
+ return q.parseList(v, value, prefix, tag)
+ case "map":
+ return q.parseMap(v, value, prefix, tag)
+ default:
+ return q.parseScalar(v, value, prefix, tag)
+ }
+}
+
+func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ elemValue := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ elemValue = reflect.ValueOf(token)
+ }
+
+ var name string
+ if q.isEC2 {
+ name = field.Tag.Get("queryName")
+ }
+ if name == "" {
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+ if name != "" && q.isEC2 {
+ name = strings.ToUpper(name[0:1]) + name[1:]
+ }
+ }
+ if name == "" {
+ name = field.Name
+ }
+
+ if prefix != "" {
+ name = prefix + "." + name
+ }
+
+ if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ if _, ok := value.Interface().([]byte); ok {
+ return q.parseScalar(v, value, prefix, tag)
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ if listName := tag.Get("locationNameList"); listName == "" {
+ prefix += ".member"
+ } else {
+ prefix += "." + listName
+ }
+ }
+
+ for i := 0; i < value.Len(); i++ {
+ slicePrefix := prefix
+ if slicePrefix == "" {
+ slicePrefix = strconv.Itoa(i + 1)
+ } else {
+ slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
+ }
+ if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ prefix += ".entry"
+ }
+
+ // sort keys for improved serialization consistency.
+ // this is not strictly necessary for protocol support.
+ mapKeyValues := value.MapKeys()
+ mapKeys := map[string]reflect.Value{}
+ mapKeyNames := make([]string, len(mapKeyValues))
+ for i, mapKey := range mapKeyValues {
+ name := mapKey.String()
+ mapKeys[name] = mapKey
+ mapKeyNames[i] = name
+ }
+ sort.Strings(mapKeyNames)
+
+ for i, mapKeyName := range mapKeyNames {
+ mapKey := mapKeys[mapKeyName]
+ mapValue := value.MapIndex(mapKey)
+
+ kname := tag.Get("locationNameKey")
+ if kname == "" {
+ kname = "key"
+ }
+ vname := tag.Get("locationNameValue")
+ if vname == "" {
+ vname = "value"
+ }
+
+ // serialize key
+ var keyName string
+ if prefix == "" {
+ keyName = strconv.Itoa(i+1) + "." + kname
+ } else {
+ keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
+ }
+
+ if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
+ return err
+ }
+
+ // serialize value
+ var valueName string
+ if prefix == "" {
+ valueName = strconv.Itoa(i+1) + "." + vname
+ } else {
+ valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
+ }
+
+ if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := r.Interface().(type) {
+ case string:
+ v.Set(name, value)
+ case []byte:
+ if !r.IsNil() {
+ v.Set(name, base64.StdEncoding.EncodeToString(value))
+ }
+ case bool:
+ v.Set(name, strconv.FormatBool(value))
+ case int64:
+ v.Set(name, strconv.FormatInt(value, 10))
+ case int:
+ v.Set(name, strconv.Itoa(value))
+ case float64:
+ v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
+ case float32:
+ v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ v.Set(name, protocol.FormatTime(format, value))
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
+ }
+ return nil
+}
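
The prefix logic above flattens nested shapes into dotted query keys: non-flattened lists get a ".member.N" segment and maps a ".entry.N" segment with key/value leaves. A hand-rolled illustration of the resulting keys, with hypothetical parameter names:

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	v := url.Values{}

	// List: Names.member.1, Names.member.2, ...
	for i, n := range []string{"a", "b"} {
		v.Set("Names.member."+strconv.Itoa(i+1), n)
	}

	// Map: Tags.entry.1.key / Tags.entry.1.value, ...
	p := "Tags.entry.1"
	v.Set(p+".key", "env")
	v.Set(p+".value", "prod")

	fmt.Println(v.Encode())
}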
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
new file mode 100644
index 000000000..f69c1efc9
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
@@ -0,0 +1,39 @@
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
+
+import (
+ "encoding/xml"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling query protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals a response for an AWS Query service.
+func Unmarshal(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ if r.DataFilled() {
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+ }
+}
+
+// UnmarshalMeta unmarshals header response values for an AWS Query service.
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
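
Query responses wrap the useful payload in an "<Operation>Result" element, which is why Unmarshal passes r.Operation.Name+"Result" as the wrapper. A standalone sketch of the same nesting with plain encoding/xml; the shape and element names are illustrative:

package main

import (
	"encoding/xml"
	"fmt"
)

type DescribeResult struct {
	Name string `xml:"Name"`
}

type DescribeResponse struct {
	Result DescribeResult `xml:"DescribeResult"`
}

func main() {
	doc := `<DescribeResponse><DescribeResult><Name>demo</Name></DescribeResult></DescribeResponse>`
	var out DescribeResponse
	if err := xml.Unmarshal([]byte(doc), &out); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(out.Result.Name) // demo
}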
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 000000000..831b0110c
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,69 @@
+package query
+
+import (
+ "encoding/xml"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+type xmlErrorResponse struct {
+ Code string `xml:"Error>Code"`
+ Message string `xml:"Error>Message"`
+ RequestID string `xml:"RequestId"`
+}
+
+type xmlResponseError struct {
+ xmlErrorResponse
+}
+
+func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+ const svcUnavailableTagName = "ServiceUnavailableException"
+ const errorResponseTagName = "ErrorResponse"
+
+ switch start.Name.Local {
+ case svcUnavailableTagName:
+ e.Code = svcUnavailableTagName
+ e.Message = "service is unavailable"
+ return d.Skip()
+
+ case errorResponseTagName:
+ return d.DecodeElement(&e.xmlErrorResponse, &start)
+
+ default:
+ return fmt.Errorf("unknown error response tag, %v", start)
+ }
+}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ var respErr xmlResponseError
+ err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(request.ErrCodeSerialization,
+ "failed to unmarshal error message", err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ reqID := respErr.RequestID
+ if len(reqID) == 0 {
+ reqID = r.RequestID
+ }
+
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(respErr.Code, respErr.Message, nil),
+ r.HTTPResponse.StatusCode,
+ reqID,
+ )
+}
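
The custom UnmarshalXML on xmlResponseError dispatches on the root tag so a bare <ServiceUnavailableException/> and a full <ErrorResponse> body decode through one type. A compact standalone version of the pattern; the tag names mirror the two cases above, and the payload is made up:

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

type errBody struct {
	Code    string `xml:"Error>Code"`
	Message string `xml:"Error>Message"`
}

type flexibleError struct{ errBody }

func (e *flexibleError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	switch start.Name.Local {
	case "ServiceUnavailableException":
		e.Code, e.Message = start.Name.Local, "service is unavailable"
		return d.Skip()
	default:
		return d.DecodeElement(&e.errBody, &start)
	}
}

func main() {
	doc := `<ErrorResponse><Error><Code>Throttling</Code><Message>slow down</Message></Error></ErrorResponse>`
	var e flexibleError
	_ = xml.NewDecoder(strings.NewReader(doc)).Decode(&e)
	fmt.Println(e.Code, e.Message) // Throttling slow down
}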
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 000000000..1301b149d
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,310 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+var byteSliceType = reflect.TypeOf([]byte{})
+
+func init() {
+ for i := 0; i < len(noEscape); i++ {
+ // AWS expects every character except these to be escaped
+ noEscape[i] = (i >= 'A' && i <= 'Z') ||
+ (i >= 'a' && i <= 'z') ||
+ (i >= '0' && i <= '9') ||
+ i == '-' ||
+ i == '.' ||
+ i == '_' ||
+ i == '~'
+ }
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, false)
+ buildBody(r, v)
+ }
+}
+
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v, true)
+ buildBody(r, v)
+ }
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
+ query := r.HTTPRequest.URL.Query()
+
+ // Set up the raw path to match the base path pattern. This is needed
+ // so that when the path is mutated a custom escaped version can be
+ // stored in RawPath that will be used by the Go client.
+ r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
+ for i := 0; i < v.NumField(); i++ {
+ m := v.Field(i)
+ if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ field := v.Type().Field(i)
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+ if kind := m.Kind(); kind == reflect.Ptr {
+ m = m.Elem()
+ } else if kind == reflect.Interface {
+ if !m.Elem().IsValid() {
+ continue
+ }
+ }
+ if !m.IsValid() {
+ continue
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ // Support the ability to customize values to be marshaled as a
+ // blob even though they were modeled as a string. Required for S3
+ // API operations like SSECustomerKey, which is modeled as a string
+ // but must be base64 encoded in the request.
+ if field.Tag.Get("marshal-as") == "blob" {
+ m = m.Convert(byteSliceType)
+ }
+
+ var err error
+ switch field.Tag.Get("location") {
+ case "headers": // header maps
+ err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag)
+ case "header":
+ err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag)
+ case "uri":
+ err = buildURI(r.HTTPRequest.URL, m, name, field.Tag)
+ case "querystring":
+ err = buildQueryString(query, m, name, field.Tag)
+ default:
+ if buildGETQuery {
+ err = buildQueryString(query, m, name, field.Tag)
+ }
+ }
+ r.Error = err
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+
+ r.HTTPRequest.URL.RawQuery = query.Encode()
+ if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) {
+ cleanPath(r.HTTPRequest.URL)
+ }
+}
+
+func buildBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := reflect.Indirect(v.FieldByName(payloadName))
+ if payload.IsValid() && payload.Interface() != nil {
+ switch reader := payload.Interface().(type) {
+ case io.ReadSeeker:
+ r.SetReaderBody(reader)
+ case []byte:
+ r.SetBufferBody(reader)
+ case string:
+ r.SetStringBody(reader)
+ default:
+ r.Error = awserr.New(request.ErrCodeSerialization,
+ "failed to encode REST request",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+}
+
+func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error {
+ str, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
+ }
+
+ name = strings.TrimSpace(name)
+ str = strings.TrimSpace(str)
+
+ header.Add(name, str)
+
+ return nil
+}
+
+func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error {
+ prefix := tag.Get("locationName")
+ for _, key := range v.MapKeys() {
+ str, err := convertType(v.MapIndex(key), tag)
+ if err == errValueNotSet {
+ continue
+ } else if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
+
+ }
+ keyStr := strings.TrimSpace(key.String())
+ str = strings.TrimSpace(str)
+
+ header.Add(prefix+keyStr, str)
+ }
+ return nil
+}
+
+func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error {
+ value, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
+ }
+
+ u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
+ u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1)
+
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1)
+ u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1)
+
+ return nil
+}
+
+func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := v.Interface().(type) {
+ case []*string:
+ for _, item := range value {
+ query.Add(name, *item)
+ }
+ case map[string]*string:
+ for key, item := range value {
+ query.Add(key, *item)
+ }
+ case map[string][]*string:
+ for key, items := range value {
+ for _, item := range items {
+ query.Add(key, *item)
+ }
+ }
+ default:
+ str, err := convertType(v, tag)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
+ }
+ query.Set(name, str)
+ }
+
+ return nil
+}
+
+func cleanPath(u *url.URL) {
+ hasSlash := strings.HasSuffix(u.Path, "/")
+
+ // clean up path, removing duplicate `/`
+ u.Path = path.Clean(u.Path)
+ u.RawPath = path.Clean(u.RawPath)
+
+ if hasSlash && !strings.HasSuffix(u.Path, "/") {
+ u.Path += "/"
+ u.RawPath += "/"
+ }
+}
+
+// EscapePath escapes part of a URL path in Amazon style
+func EscapePath(path string, encodeSep bool) string {
+ var buf bytes.Buffer
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if noEscape[c] || (c == '/' && !encodeSep) {
+ buf.WriteByte(c)
+ } else {
+ fmt.Fprintf(&buf, "%%%02X", c)
+ }
+ }
+ return buf.String()
+}
+
+func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) {
+ v = reflect.Indirect(v)
+ if !v.IsValid() {
+ return "", errValueNotSet
+ }
+
+ switch value := v.Interface().(type) {
+ case string:
+ str = value
+ case []byte:
+ str = base64.StdEncoding.EncodeToString(value)
+ case bool:
+ str = strconv.FormatBool(value)
+ case int64:
+ str = strconv.FormatInt(value, 10)
+ case float64:
+ str = strconv.FormatFloat(value, 'f', -1, 64)
+ case time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.RFC822TimeFormatName
+ if tag.Get("location") == "querystring" {
+ format = protocol.ISO8601TimeFormatName
+ }
+ }
+ str = protocol.FormatTime(format, value)
+ case aws.JSONValue:
+ if len(value) == 0 {
+ return "", errValueNotSet
+ }
+ escaping := protocol.NoEscape
+ if tag.Get("location") == "header" {
+ escaping = protocol.Base64Escape
+ }
+ str, err = protocol.EncodeJSONValue(value, escaping)
+ if err != nil {
+ return "", fmt.Errorf("unable to encode JSONValue, %v", err)
+ }
+ default:
+ err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return "", err
+ }
+ return str, nil
+}
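
EscapePath implements Amazon-style percent-encoding: unreserved characters pass through, everything else is escaped, and '/' survives only when encodeSep is false (used for greedy "{name+}" URI segments). A self-contained restatement of the same rules:

package main

import (
	"bytes"
	"fmt"
)

func escapePath(p string, encodeSep bool) string {
	var buf bytes.Buffer
	for i := 0; i < len(p); i++ {
		c := p[i]
		switch {
		case c >= 'A' && c <= 'Z', c >= 'a' && c <= 'z',
			c >= '0' && c <= '9', c == '-', c == '.', c == '_', c == '~':
			buf.WriteByte(c) // unreserved: never escaped
		case c == '/' && !encodeSep:
			buf.WriteByte(c) // path separator kept for {name+} segments
		default:
			fmt.Fprintf(&buf, "%%%02X", c)
		}
	}
	return buf.String()
}

func main() {
	fmt.Println(escapePath("a b/c", true))  // a%20b%2Fc
	fmt.Println(escapePath("a b/c", false)) // a%20b/c
}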
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
new file mode 100644
index 000000000..4366de2e1
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
@@ -0,0 +1,45 @@
+package rest
+
+import "reflect"
+
+// PayloadMember returns the payload field member of i if there is one, or nil.
+func PayloadMember(i interface{}) interface{} {
+ if i == nil {
+ return nil
+ }
+
+ v := reflect.ValueOf(i).Elem()
+ if !v.IsValid() {
+ return nil
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ field, _ := v.Type().FieldByName(payloadName)
+ if field.Tag.Get("type") != "structure" {
+ return nil
+ }
+
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
+ return payload.Interface()
+ }
+ }
+ }
+ return nil
+}
+
+// PayloadType returns the type of a payload field member of i if there is one, or "".
+func PayloadType(i interface{}) string {
+ v := reflect.Indirect(reflect.ValueOf(i))
+ if !v.IsValid() {
+ return ""
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ if member, ok := v.Type().FieldByName(payloadName); ok {
+ return member.Tag.Get("type")
+ }
+ }
+ }
+ return ""
+}
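
Both helpers key off the blank "_" field that SDK shapes use to carry struct-level metadata. A sketch of that tag lookup with a hypothetical shape; the field names imitate, but are not, a real S3 output:

package main

import (
	"fmt"
	"reflect"
)

type GetObjectOutput struct {
	_    struct{} `payload:"Body"`
	Body []byte   `type:"blob"`
}

func main() {
	t := reflect.TypeOf(GetObjectOutput{})
	// Same lookup chain as PayloadType: "_" field -> payload tag ->
	// named member -> its type tag.
	if f, ok := t.FieldByName("_"); ok {
		name := f.Tag.Get("payload")
		if member, ok := t.FieldByName(name); ok {
			fmt.Println(name, member.Tag.Get("type")) // Body blob
		}
	}
}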
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
new file mode 100644
index 000000000..de021367d
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -0,0 +1,225 @@
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals the REST component of a response in a REST service.
+func Unmarshal(r *request.Request) {
+ if r.DataFilled() {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalBody(r, v)
+ }
+}
+
+// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+ if r.RequestID == "" {
+ // Alternative version of request id in the header
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
+ }
+ if r.DataFilled() {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalLocationElements(r, v)
+ }
+}
+
+func unmarshalBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() {
+ switch payload.Interface().(type) {
+ case []byte:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+ } else {
+ payload.Set(reflect.ValueOf(b))
+ }
+ case *string:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+ } else {
+ str := string(b)
+ payload.Set(reflect.ValueOf(&str))
+ }
+ default:
+ switch payload.Type().String() {
+ case "io.ReadCloser":
+ payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+ case "io.ReadSeeker":
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization,
+ "failed to read response body", err)
+ return
+ }
+ payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
+ default:
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ defer r.HTTPResponse.Body.Close()
+ r.Error = awserr.New(request.ErrCodeSerialization,
+ "failed to decode REST response",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func unmarshalLocationElements(r *request.Request, v reflect.Value) {
+ for i := 0; i < v.NumField(); i++ {
+ m, field := v.Field(i), v.Type().Field(i)
+ if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+
+ switch field.Tag.Get("location") {
+ case "statusCode":
+ unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
+ case "header":
+ err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+ break
+ }
+ case "headers":
+ prefix := field.Tag.Get("locationName")
+ err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
+ if err != nil {
+ r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
+ break
+ }
+ }
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+ if !v.IsValid() {
+ return
+ }
+
+ switch v.Interface().(type) {
+ case *int64:
+ s := int64(statusCode)
+ v.Set(reflect.ValueOf(&s))
+ }
+}
+
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+ switch r.Interface().(type) {
+ case map[string]*string: // we only support string map value types
+ out := map[string]*string{}
+ for k, v := range headers {
+ k = http.CanonicalHeaderKey(k)
+ if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+ out[k[len(prefix):]] = &v[0]
+ }
+ }
+ r.Set(reflect.ValueOf(out))
+ }
+ return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
+ isJSONValue := tag.Get("type") == "jsonvalue"
+ if isJSONValue {
+ if len(header) == 0 {
+ return nil
+ }
+ } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+ return nil
+ }
+
+ switch v.Interface().(type) {
+ case *string:
+ v.Set(reflect.ValueOf(&header))
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *bool:
+ b, err := strconv.ParseBool(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *int64:
+ i, err := strconv.ParseInt(header, 10, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&i))
+ case *float64:
+ f, err := strconv.ParseFloat(header, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&f))
+ case *time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.RFC822TimeFormatName
+ }
+ t, err := protocol.ParseTime(format, header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&t))
+ case aws.JSONValue:
+ escaping := protocol.NoEscape
+ if tag.Get("location") == "header" {
+ escaping = protocol.Base64Escape
+ }
+ m, err := protocol.DecodeJSONValue(header, escaping)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(m))
+ default:
+ err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return err
+ }
+ return nil
+}
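
unmarshalHeader is essentially a strconv/time dispatch keyed on the member's Go type. The same conversions, shown directly and without reflection; the header names here are illustrative:

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

func main() {
	h := http.Header{}
	h.Set("X-Example-Count", "42")
	h.Set("X-Example-Expires", "Tue, 29 Apr 2014 18:30:38 GMT")

	// *int64 members go through ParseInt.
	n, _ := strconv.ParseInt(h.Get("X-Example-Count"), 10, 64)

	// *time.Time members default to the RFC822-style layout.
	t, _ := time.Parse("Mon, 2 Jan 2006 15:04:05 GMT", h.Get("X-Example-Expires"))

	fmt.Println(n, t.Year()) // 42 2014
}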
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
new file mode 100644
index 000000000..b7ed6c6f8
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
@@ -0,0 +1,72 @@
+package protocol
+
+import (
+ "strconv"
+ "time"
+)
+
+// Names of time formats supported by the SDK
+const (
+ RFC822TimeFormatName = "rfc822"
+ ISO8601TimeFormatName = "iso8601"
+ UnixTimeFormatName = "unixTimestamp"
+)
+
+// Time formats supported by the SDK
+const (
+ // RFC 7231#section-7.1.1.1 timestamp format, e.g. Tue, 29 Apr 2014 18:30:38 GMT
+ RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+ // RFC3339, a subset of the ISO8601 timestamp format, e.g. 2014-04-29T18:30:38Z
+ ISO8601TimeFormat = "2006-01-02T15:04:05Z"
+)
+
+// IsKnownTimestampFormat returns whether the timestamp format name
+// is known to the SDK's protocols.
+func IsKnownTimestampFormat(name string) bool {
+ switch name {
+ case RFC822TimeFormatName:
+ fallthrough
+ case ISO8601TimeFormatName:
+ fallthrough
+ case UnixTimeFormatName:
+ return true
+ default:
+ return false
+ }
+}
+
+// FormatTime returns a string value of the time.
+func FormatTime(name string, t time.Time) string {
+ t = t.UTC()
+
+ switch name {
+ case RFC822TimeFormatName:
+ return t.Format(RFC822TimeFormat)
+ case ISO8601TimeFormatName:
+ return t.Format(ISO8601TimeFormat)
+ case UnixTimeFormatName:
+ return strconv.FormatInt(t.Unix(), 10)
+ default:
+ panic("unknown timestamp format name, " + name)
+ }
+}
+
+// ParseTime attempts to parse the time given the format. Returns
+// the parsed time, or an error if parsing fails.
+func ParseTime(formatName, value string) (time.Time, error) {
+ switch formatName {
+ case RFC822TimeFormatName:
+ return time.Parse(RFC822TimeFormat, value)
+ case ISO8601TimeFormatName:
+ return time.Parse(ISO8601TimeFormat, value)
+ case UnixTimeFormatName:
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return time.Time{}, err
+ }
+ return time.Unix(int64(v), 0), nil
+ default:
+ panic("unknown timestamp format name, " + formatName)
+ }
+}
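
A quick round-trip of the three wire formats, using the same layout strings as above:

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	const rfc822 = "Mon, 2 Jan 2006 15:04:05 GMT"
	const iso8601 = "2006-01-02T15:04:05Z"

	t := time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC)

	fmt.Println(t.Format(rfc822))                // Tue, 29 Apr 2014 18:30:38 GMT
	fmt.Println(t.Format(iso8601))               // 2014-04-29T18:30:38Z
	fmt.Println(strconv.FormatInt(t.Unix(), 10)) // 1398796238

	back, _ := time.Parse(iso8601, t.Format(iso8601))
	fmt.Println(back.Equal(t)) // true
}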
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
new file mode 100644
index 000000000..da1a68111
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,21 @@
+package protocol
+
+import (
+ "io"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and close it.
+func UnmarshalDiscardBody(r *request.Request) {
+ if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+ return
+ }
+
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ r.HTTPResponse.Body.Close()
+}
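
Draining before closing is what lets the underlying keep-alive connection be reused; closing a body with unread bytes typically forces the transport to drop the connection. Demonstrated against a throwaway test server:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "ignored payload")
	}))
	defer srv.Close()

	resp, _ := http.Get(srv.URL)

	// Same idea as UnmarshalDiscardBody: read to EOF, then close.
	io.Copy(ioutil.Discard, resp.Body)
	resp.Body.Close()

	fmt.Println(resp.StatusCode) // 200
}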
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 000000000..cf981fe95
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,306 @@
+// Package xmlutil provides XML serialization of AWS requests and responses.
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// BuildXML will serialize params into an xml.Encoder. Error will be returned
+// if the serialization of any of the params or nested values fails.
+func BuildXML(params interface{}, e *xml.Encoder) error {
+ return buildXML(params, e, false)
+}
+
+func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
+ b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+ root := NewXMLElement(xml.Name{})
+ if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+ return err
+ }
+ for _, c := range root.Children {
+ for _, v := range c {
+ return StructToXML(e, v, sorted)
+ }
+ }
+ return nil
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+// An xmlBuilder serializes values from Go code to XML
+type xmlBuilder struct {
+ encoder *xml.Encoder
+ namespaces map[string]string
+}
+
+// buildValue is a generic XMLNode builder for any type. It builds the value
+// according to its concrete kind: struct, list, map, or scalar.
+//
+// Also takes a "type" tag value to override what type a value should be
+// converted to an XMLNode as. If type is not provided, reflection is used to
+// determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ value = elemOf(value)
+ if !value.IsValid() { // no need to handle zero values
+ return nil
+ } else if tag.Get("location") != "" { // don't handle non-body location values
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := value.Type().FieldByName("_"); ok {
+ tag = tag + reflect.StructTag(" ") + field.Tag
+ }
+ return b.buildStruct(value, current, tag)
+ case "list":
+ return b.buildList(value, current, tag)
+ case "map":
+ return b.buildMap(value, current, tag)
+ default:
+ return b.buildScalar(value, current, tag)
+ }
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
+// types are converted to XMLNodes also.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ // unwrap payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := value.Type().FieldByName(payload)
+ tag = field.Tag
+ value = elemOf(value.FieldByName(payload))
+
+ if !value.IsValid() {
+ return nil
+ }
+ }
+
+ child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+ // there is an xmlNamespace associated with this struct
+ if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+ ns := xml.Attr{
+ Name: xml.Name{Local: "xmlns"},
+ Value: uri,
+ }
+ if prefix != "" {
+ b.namespaces[prefix] = uri // register the namespace
+ ns.Name.Local = "xmlns:" + prefix
+ }
+
+ child.Attr = append(child.Attr, ns)
+ }
+
+ var payloadFields, nonPayloadFields int
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ member := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+ if field.Tag.Get("ignore") != "" {
+ continue
+ }
+
+ mTag := field.Tag
+ if mTag.Get("location") != "" { // skip non-body members
+ nonPayloadFields++
+ continue
+ }
+ payloadFields++
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ member = reflect.ValueOf(token)
+ }
+
+ memberName := mTag.Get("locationName")
+ if memberName == "" {
+ memberName = field.Name
+ mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+ }
+ if err := b.buildValue(member, child, mTag); err != nil {
+ return err
+ }
+ }
+
+ // Only case where the child shape is not added is if the shape only contains
+ // non-payload fields, e.g headers/query.
+ if !(payloadFields == 0 && nonPayloadFields > 0) {
+ current.AddChild(child)
+ }
+
+ return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as children nodes. All
+// nested values in the list are converted to XMLNodes also.
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted lists
+ return nil
+ }
+
+ // check for unflattened list member
+ flattened := tag.Get("flattened") != ""
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if flattened {
+ for i := 0; i < value.Len(); i++ {
+ child := NewXMLElement(xname)
+ current.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ } else {
+ list := NewXMLElement(xname)
+ current.AddChild(list)
+
+ for i := 0; i < value.Len(); i++ {
+ iname := tag.Get("locationNameList")
+ if iname == "" {
+ iname = "member"
+ }
+
+ child := NewXMLElement(xml.Name{Local: iname})
+ list.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
+// nested values in the map are converted to XMLNodes also.
+//
+// Error will be returned if it is unable to build the map's values into XMLNodes
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted maps
+ return nil
+ }
+
+ maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+ current.AddChild(maproot)
+ current = maproot
+
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ // sorting is not required for compliance, but it makes testing easier
+ keys := make([]string, value.Len())
+ for i, k := range value.MapKeys() {
+ keys[i] = k.String()
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := value.MapIndex(reflect.ValueOf(k))
+
+ mapcur := current
+ if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+ child := NewXMLElement(xml.Name{Local: "entry"})
+ mapcur.AddChild(child)
+ mapcur = child
+ }
+
+ kchild := NewXMLElement(xml.Name{Local: kname})
+ kchild.Text = k
+ vchild := NewXMLElement(xml.Name{Local: vname})
+ mapcur.AddChild(kchild)
+ mapcur.AddChild(vchild)
+
+ if err := b.buildValue(v, vchild, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// buildScalar will convert the value into a string and append it as an attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value.
+//
+// Error will be returned if the value type is unsupported.
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ var str string
+ switch converted := value.Interface().(type) {
+ case string:
+ str = converted
+ case []byte:
+ if !value.IsNil() {
+ str = base64.StdEncoding.EncodeToString(converted)
+ }
+ case bool:
+ str = strconv.FormatBool(converted)
+ case int64:
+ str = strconv.FormatInt(converted, 10)
+ case int:
+ str = strconv.Itoa(converted)
+ case float64:
+ str = strconv.FormatFloat(converted, 'f', -1, 64)
+ case float32:
+ str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+ case time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ str = protocol.FormatTime(format, converted)
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)",
+ tag.Get("locationName"), value.Interface(), value.Type().Name())
+ }
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
+ attr := xml.Attr{Name: xname, Value: str}
+ current.Attr = append(current.Attr, attr)
+ } else { // regular text node
+ current.AddChild(&XMLNode{Name: xname, Text: str})
+ }
+ return nil
+}
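
buildList's two layouts differ only in where the element name lands: wrapped lists nest "member" children under one named node, flattened lists repeat the named node itself. The wrapped form, reproduced with plain encoding/xml; the element names are illustrative:

package main

import (
	"encoding/xml"
	"fmt"
)

type Wrapped struct {
	XMLName xml.Name `xml:"Names"`
	Member  []string `xml:"member"`
}

func main() {
	out, _ := xml.Marshal(Wrapped{Member: []string{"a", "b"}})
	fmt.Println(string(out)) // <Names><member>a</member><member>b</member></Names>
}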
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
new file mode 100644
index 000000000..7108d3800
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -0,0 +1,291 @@
+package xmlutil
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// UnmarshalXMLError unmarshals the XML error from the stream into the value
+// type specified. The value must be a pointer. If the message fails to
+// unmarshal, the message content will be included in the returned error as an
+// awserr.UnmarshalError.
+func UnmarshalXMLError(v interface{}, stream io.Reader) error {
+ var errBuf bytes.Buffer
+ body := io.TeeReader(stream, &errBuf)
+
+ err := xml.NewDecoder(body).Decode(v)
+ if err != nil && err != io.EOF {
+ return awserr.NewUnmarshalError(err,
+ "failed to unmarshal error message", errBuf.Bytes())
+ }
+
+ return nil
+}
+
+// UnmarshalXML deserializes an xml.Decoder into the container v, which must
+// match the shape of the XML expected to be decoded. If the shape doesn't
+// match, unmarshaling will fail.
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
+ n, err := XMLToStruct(d, nil)
+ if err != nil {
+ return err
+ }
+ if n.Children != nil {
+ for _, root := range n.Children {
+ for _, c := range root {
+ if wrappedChild, ok := c.Children[wrapper]; ok {
+ c = wrappedChild[0] // pull out wrapped element
+ }
+
+ err = parse(reflect.ValueOf(v), c, "")
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ }
+ }
+ return nil
+ }
+ return nil
+}
+
+// parse deserializes any value from the XMLNode. The type tag is used to
+// infer the type, or reflection is used to determine the type from r.
+func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ rtype := r.Type()
+ if rtype.Kind() == reflect.Ptr {
+ rtype = rtype.Elem() // check kind of actual element type
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch rtype.Kind() {
+ case reflect.Struct:
+ // also it can't be a time object
+ if _, ok := r.Interface().(*time.Time); !ok {
+ t = "structure"
+ }
+ case reflect.Slice:
+ // also it can't be a byte slice
+ if _, ok := r.Interface().([]byte); !ok {
+ t = "list"
+ }
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := rtype.FieldByName("_"); ok {
+ tag = field.Tag
+ }
+ return parseStruct(r, node, tag)
+ case "list":
+ return parseList(r, node, tag)
+ case "map":
+ return parseMap(r, node, tag)
+ default:
+ return parseScalar(r, node, tag)
+ }
+}
+
+// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
+// types in the structure will also be deserialized.
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+ if r.Kind() == reflect.Ptr {
+ if r.IsNil() { // create the structure if it's nil
+ s := reflect.New(r.Type().Elem())
+ r.Set(s)
+ r = s
+ }
+
+ r = r.Elem()
+ t = t.Elem()
+ }
+
+ // unwrap any payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := t.FieldByName(payload)
+ return parseStruct(r.FieldByName(payload), node, field.Tag)
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if c := field.Name[0:1]; strings.ToLower(c) == c {
+ continue // ignore unexported fields
+ }
+
+ // figure out what this field is called
+ name := field.Name
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+
+ // try to find the field by name in elements
+ elems := node.Children[name]
+
+ if elems == nil { // try to find the field in attributes
+ if val, ok := node.findElem(name); ok {
+ elems = []*XMLNode{{Text: val}}
+ }
+ }
+
+ member := r.FieldByName(field.Name)
+ for _, elem := range elems {
+ err := parse(member, elem, field.Tag)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+
+ if tag.Get("flattened") == "" { // look at all item entries
+ mname := "member"
+ if name := tag.Get("locationNameList"); name != "" {
+ mname = name
+ }
+
+ if Children, ok := node.Children[mname]; ok {
+ if r.IsNil() {
+ r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+ }
+
+ for i, c := range Children {
+ err := parse(r.Index(i), c, "")
+ if err != nil {
+ return err
+ }
+ }
+ }
+ } else { // flattened list means this is a single element
+ if r.IsNil() {
+ r.Set(reflect.MakeSlice(t, 0, 0))
+ }
+
+ childR := reflect.Zero(t.Elem())
+ r.Set(reflect.Append(r, childR))
+ err := parse(r.Index(r.Len()-1), node, "")
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ if r.IsNil() {
+ r.Set(reflect.MakeMap(r.Type()))
+ }
+
+ if tag.Get("flattened") == "" { // look at all child entries
+ for _, entry := range node.Children["entry"] {
+ parseMapEntry(r, entry, tag)
+ }
+ } else { // this element is itself an entry
+ parseMapEntry(r, node, tag)
+ }
+
+ return nil
+}
+
+// parseMapEntry deserializes a map entry from a XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ keys, ok := node.Children[kname]
+ values := node.Children[vname]
+ if ok {
+ for i, key := range keys {
+ keyR := reflect.ValueOf(key.Text)
+ value := values[i]
+ valueR := reflect.New(r.Type().Elem()).Elem()
+
+ parse(valueR, value, "")
+ r.SetMapIndex(keyR, valueR)
+ }
+ }
+ return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ switch r.Interface().(type) {
+ case *string:
+ r.Set(reflect.ValueOf(&node.Text))
+ return nil
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(b))
+ case *bool:
+ v, err := strconv.ParseBool(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *int64:
+ v, err := strconv.ParseInt(node.Text, 10, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *float64:
+ v, err := strconv.ParseFloat(node.Text, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *time.Time:
+ format := tag.Get("timestampFormat")
+ if len(format) == 0 {
+ format = protocol.ISO8601TimeFormatName
+ }
+
+ t, err := protocol.ParseTime(format, node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&t))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+ }
+ return nil
+}
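
parseMapEntry expects the key/value children that the non-flattened map layout produces. For comparison, decoding that layout into a Go map with plain encoding/xml; the element names follow the defaults above:

package main

import (
	"encoding/xml"
	"fmt"
)

type entry struct {
	Key   string `xml:"key"`
	Value string `xml:"value"`
}

type tagMap struct {
	Entries []entry `xml:"entry"`
}

func main() {
	doc := `<Tags><entry><key>env</key><value>prod</value></entry></Tags>`
	var m tagMap
	_ = xml.Unmarshal([]byte(doc), &m)

	out := map[string]string{}
	for _, e := range m.Entries {
		out[e.Key] = e.Value
	}
	fmt.Println(out) // map[env:prod]
}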
diff --git a/src/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 000000000..515ce1521
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,148 @@
+package xmlutil
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "sort"
+)
+
+// An XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+ Name xml.Name `json:",omitempty"`
+ Children map[string][]*XMLNode `json:",omitempty"`
+ Text string `json:",omitempty"`
+ Attr []xml.Attr `json:",omitempty"`
+
+ namespaces map[string]string
+ parent *XMLNode
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+ return &XMLNode{
+ Name: name,
+ Children: map[string][]*XMLNode{},
+ Attr: []xml.Attr{},
+ }
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+ child.parent = n
+ if _, ok := n.Children[child.Name.Local]; !ok {
+ n.Children[child.Name.Local] = []*XMLNode{}
+ }
+ n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
+
+// XMLToStruct converts an xml.Decoder stream to an XMLNode with nested values.
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
+ out := &XMLNode{}
+ for {
+ tok, err := d.Token()
+ if err != nil {
+ if err == io.EOF {
+ break
+ } else {
+ return out, err
+ }
+ }
+
+ if tok == nil {
+ break
+ }
+
+ switch typed := tok.(type) {
+ case xml.CharData:
+ out.Text = string(typed.Copy())
+ case xml.StartElement:
+ el := typed.Copy()
+ out.Attr = el.Attr
+ if out.Children == nil {
+ out.Children = map[string][]*XMLNode{}
+ }
+
+ name := typed.Name.Local
+ slice := out.Children[name]
+ if slice == nil {
+ slice = []*XMLNode{}
+ }
+ node, e := XMLToStruct(d, &el)
+ out.findNamespaces()
+ if e != nil {
+ return out, e
+ }
+ node.Name = typed.Name
+ node.findNamespaces()
+ tempOut := *out
+ // Save into a temp variable, simply because out gets squashed during
+ // loop iterations
+ node.parent = &tempOut
+ slice = append(slice, node)
+ out.Children[name] = slice
+ case xml.EndElement:
+ if s != nil && s.Name.Local == typed.Name.Local { // matching end token
+ return out, nil
+ }
+ out = &XMLNode{}
+ }
+ }
+ return out, nil
+}
+
+func (n *XMLNode) findNamespaces() {
+ ns := map[string]string{}
+ for _, a := range n.Attr {
+ if a.Name.Space == "xmlns" {
+ ns[a.Value] = a.Name.Local
+ }
+ }
+
+ n.namespaces = ns
+}
+
+func (n *XMLNode) findElem(name string) (string, bool) {
+ for node := n; node != nil; node = node.parent {
+ for _, a := range node.Attr {
+ namespace := a.Name.Space
+ if v, ok := node.namespaces[namespace]; ok {
+ namespace = v
+ }
+ if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
+ return a.Value, true
+ }
+ }
+ }
+ return "", false
+}
+
+// StructToXML writes an XMLNode to a xml.Encoder as tokens.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+ e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
+
+ if node.Text != "" {
+ e.EncodeToken(xml.CharData([]byte(node.Text)))
+ } else if sorted {
+ sortedNames := []string{}
+ for k := range node.Children {
+ sortedNames = append(sortedNames, k)
+ }
+ sort.Strings(sortedNames)
+
+ for _, k := range sortedNames {
+ for _, v := range node.Children[k] {
+ StructToXML(e, v, sorted)
+ }
+ }
+ } else {
+ for _, c := range node.Children {
+ for _, v := range c {
+ StructToXML(e, v, sorted)
+ }
+ }
+ }
+
+ e.EncodeToken(xml.EndElement{Name: node.Name})
+ return e.Flush()
+}
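
XMLToStruct and StructToXML are inverses of each other. A minimal round-trip sketch, assuming the vendored package is importable under its upstream path; note that the sorted flag re-encodes children in lexical key order:

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

func main() {
	in := []byte(`<Root><b>2</b><a>1</a></Root>`)
	node, err := xmlutil.XMLToStruct(xml.NewDecoder(bytes.NewReader(in)), nil)
	if err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	// sorted == true emits children in lexical order: <a> before <b>.
	if err := xmlutil.StructToXML(xml.NewEncoder(&buf), node.Children["Root"][0], true); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // <Root><a>1</a><b>2</b></Root>
}
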
diff --git a/src/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/src/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go
new file mode 100644
index 000000000..7eee124c7
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go
@@ -0,0 +1,5833 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package ecr
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+const opBatchCheckLayerAvailability = "BatchCheckLayerAvailability"
+
+// BatchCheckLayerAvailabilityRequest generates a "aws/request.Request" representing the
+// client's request for the BatchCheckLayerAvailability operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See BatchCheckLayerAvailability for more information on using the BatchCheckLayerAvailability
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the BatchCheckLayerAvailabilityRequest method.
+// req, resp := client.BatchCheckLayerAvailabilityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailability
+func (c *ECR) BatchCheckLayerAvailabilityRequest(input *BatchCheckLayerAvailabilityInput) (req *request.Request, output *BatchCheckLayerAvailabilityOutput) {
+ op := &request.Operation{
+ Name: opBatchCheckLayerAvailability,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &BatchCheckLayerAvailabilityInput{}
+ }
+
+ output = &BatchCheckLayerAvailabilityOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// BatchCheckLayerAvailability API operation for Amazon EC2 Container Registry.
+//
+// Checks the availability of multiple image layers in a specified registry and
+// repository.
+//
+// This operation is used by the Amazon ECR proxy, and it is not intended for
+// general use by customers for pulling and pushing images. In most cases, you
+// should use the docker CLI to pull, tag, and push images.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation BatchCheckLayerAvailability for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchCheckLayerAvailability
+func (c *ECR) BatchCheckLayerAvailability(input *BatchCheckLayerAvailabilityInput) (*BatchCheckLayerAvailabilityOutput, error) {
+ req, out := c.BatchCheckLayerAvailabilityRequest(input)
+ return out, req.Send()
+}
+
+// BatchCheckLayerAvailabilityWithContext is the same as BatchCheckLayerAvailability with the addition of
+// the ability to pass a context and additional request options.
+//
+// See BatchCheckLayerAvailability for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) BatchCheckLayerAvailabilityWithContext(ctx aws.Context, input *BatchCheckLayerAvailabilityInput, opts ...request.Option) (*BatchCheckLayerAvailabilityOutput, error) {
+ req, out := c.BatchCheckLayerAvailabilityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
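
The generated code supports both the one-shot call and the two-step Request/Send pattern the comments above describe. A usage sketch, assuming the package's New constructor (defined in service.go, not shown in this diff) and default credential resolution; the region, repository name, and digest are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := ecr.New(sess)

	// Two-step form: build the request, then Send. The gap between the two
	// calls is where custom headers or retry logic can be injected.
	req, resp := svc.BatchCheckLayerAvailabilityRequest(&ecr.BatchCheckLayerAvailabilityInput{
		RepositoryName: aws.String("my-repo"),
		LayerDigests: []*string{
			aws.String("sha256:0000000000000000000000000000000000000000000000000000000000000000"),
		},
	})
	if err := req.Send(); err == nil { // resp is now filled
		fmt.Println(resp)
	}
}
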
+
+const opBatchDeleteImage = "BatchDeleteImage"
+
+// BatchDeleteImageRequest generates a "aws/request.Request" representing the
+// client's request for the BatchDeleteImage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See BatchDeleteImage for more information on using the BatchDeleteImage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the BatchDeleteImageRequest method.
+// req, resp := client.BatchDeleteImageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImage
+func (c *ECR) BatchDeleteImageRequest(input *BatchDeleteImageInput) (req *request.Request, output *BatchDeleteImageOutput) {
+ op := &request.Operation{
+ Name: opBatchDeleteImage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &BatchDeleteImageInput{}
+ }
+
+ output = &BatchDeleteImageOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// BatchDeleteImage API operation for Amazon EC2 Container Registry.
+//
+// Deletes a list of specified images within a specified repository. Images
+// are specified with either imageTag or imageDigest.
+//
+// You can remove a tag from an image by specifying the image's tag in your
+// request. When you remove the last tag from an image, the image is deleted
+// from your repository.
+//
+// You can completely delete an image (and all of its tags) by specifying the
+// image's digest in your request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation BatchDeleteImage for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchDeleteImage
+func (c *ECR) BatchDeleteImage(input *BatchDeleteImageInput) (*BatchDeleteImageOutput, error) {
+ req, out := c.BatchDeleteImageRequest(input)
+ return out, req.Send()
+}
+
+// BatchDeleteImageWithContext is the same as BatchDeleteImage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See BatchDeleteImage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) BatchDeleteImageWithContext(ctx aws.Context, input *BatchDeleteImageInput, opts ...request.Option) (*BatchDeleteImageOutput, error) {
+ req, out := c.BatchDeleteImageRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
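
As the comment above notes, the *WithContext variants take any non-nil context.Context (aws.Context is an alias for it in recent SDK versions). A sketch with a timeout, using placeholder names:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	// The call is canceled if the timeout elapses before the service responds.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.BatchDeleteImageWithContext(ctx, &ecr.BatchDeleteImageInput{
		RepositoryName: aws.String("my-repo"),
		ImageIds:       []*ecr.ImageIdentifier{{ImageTag: aws.String("v1")}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
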
+
+const opBatchGetImage = "BatchGetImage"
+
+// BatchGetImageRequest generates a "aws/request.Request" representing the
+// client's request for the BatchGetImage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See BatchGetImage for more information on using the BatchGetImage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the BatchGetImageRequest method.
+// req, resp := client.BatchGetImageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImage
+func (c *ECR) BatchGetImageRequest(input *BatchGetImageInput) (req *request.Request, output *BatchGetImageOutput) {
+ op := &request.Operation{
+ Name: opBatchGetImage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &BatchGetImageInput{}
+ }
+
+ output = &BatchGetImageOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// BatchGetImage API operation for Amazon EC2 Container Registry.
+//
+// Gets detailed information for specified images within a specified repository.
+// Images are specified with either imageTag or imageDigest.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation BatchGetImage for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/BatchGetImage
+func (c *ECR) BatchGetImage(input *BatchGetImageInput) (*BatchGetImageOutput, error) {
+ req, out := c.BatchGetImageRequest(input)
+ return out, req.Send()
+}
+
+// BatchGetImageWithContext is the same as BatchGetImage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See BatchGetImage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) BatchGetImageWithContext(ctx aws.Context, input *BatchGetImageInput, opts ...request.Option) (*BatchGetImageOutput, error) {
+ req, out := c.BatchGetImageRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCompleteLayerUpload = "CompleteLayerUpload"
+
+// CompleteLayerUploadRequest generates a "aws/request.Request" representing the
+// client's request for the CompleteLayerUpload operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CompleteLayerUpload for more information on using the CompleteLayerUpload
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the CompleteLayerUploadRequest method.
+// req, resp := client.CompleteLayerUploadRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload
+func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req *request.Request, output *CompleteLayerUploadOutput) {
+ op := &request.Operation{
+ Name: opCompleteLayerUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CompleteLayerUploadInput{}
+ }
+
+ output = &CompleteLayerUploadOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CompleteLayerUpload API operation for Amazon EC2 Container Registry.
+//
+// Informs Amazon ECR that the image layer upload has completed for a specified
+// registry, repository name, and upload ID. You can optionally provide a sha256
+// digest of the image layer for data validation purposes.
+//
+// This operation is used by the Amazon ECR proxy, and it is not intended for
+// general use by customers for pulling and pushing images. In most cases, you
+// should use the docker CLI to pull, tag, and push images.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation CompleteLayerUpload for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeUploadNotFoundException "UploadNotFoundException"
+// The upload could not be found, or the specified upload id is not valid for
+// this repository.
+//
+// * ErrCodeInvalidLayerException "InvalidLayerException"
+// The layer digest calculation performed by Amazon ECR upon receipt of the
+// image layer does not match the digest specified.
+//
+// * ErrCodeLayerPartTooSmallException "LayerPartTooSmallException"
+// Layer parts must be at least 5 MiB in size.
+//
+// * ErrCodeLayerAlreadyExistsException "LayerAlreadyExistsException"
+// The image layer already exists in the associated repository.
+//
+// * ErrCodeEmptyUploadException "EmptyUploadException"
+// The specified layer upload does not contain any layer parts.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload
+func (c *ECR) CompleteLayerUpload(input *CompleteLayerUploadInput) (*CompleteLayerUploadOutput, error) {
+ req, out := c.CompleteLayerUploadRequest(input)
+ return out, req.Send()
+}
+
+// CompleteLayerUploadWithContext is the same as CompleteLayerUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CompleteLayerUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) CompleteLayerUploadWithContext(ctx aws.Context, input *CompleteLayerUploadInput, opts ...request.Option) (*CompleteLayerUploadOutput, error) {
+ req, out := c.CompleteLayerUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opCreateRepository = "CreateRepository"
+
+// CreateRepositoryRequest generates a "aws/request.Request" representing the
+// client's request for the CreateRepository operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See CreateRepository for more information on using the CreateRepository
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the CreateRepositoryRequest method.
+// req, resp := client.CreateRepositoryRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository
+func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *request.Request, output *CreateRepositoryOutput) {
+ op := &request.Operation{
+ Name: opCreateRepository,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &CreateRepositoryInput{}
+ }
+
+ output = &CreateRepositoryOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// CreateRepository API operation for Amazon EC2 Container Registry.
+//
+// Creates an image repository.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation CreateRepository for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeInvalidTagParameterException "InvalidTagParameterException"
+// An invalid parameter has been specified. Tag keys can have a maximum length
+// of 128 characters, and tag values can have a maximum length of 256 characters.
+//
+// * ErrCodeTooManyTagsException "TooManyTagsException"
+// The list of tags on the repository is over the limit. The maximum number
+// of tags that can be applied to a repository is 50.
+//
+// * ErrCodeRepositoryAlreadyExistsException "RepositoryAlreadyExistsException"
+// The specified repository already exists in the specified registry.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// The operation did not succeed because it would have exceeded a service limit
+// for your account. For more information, see Amazon ECR Default Service Limits
+// (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html)
+// in the Amazon Elastic Container Registry User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository
+func (c *ECR) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) {
+ req, out := c.CreateRepositoryRequest(input)
+ return out, req.Send()
+}
+
+// CreateRepositoryWithContext is the same as CreateRepository with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateRepository for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) CreateRepositoryWithContext(ctx aws.Context, input *CreateRepositoryInput, opts ...request.Option) (*CreateRepositoryOutput, error) {
+ req, out := c.CreateRepositoryRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
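
Because this SDK version documents the tag-related error codes above, CreateRepositoryInput accepts resource tags at creation time. A hedged sketch with placeholder values:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	// Tags are optional; keys are limited to 128 characters and values to 256,
	// per the error codes documented above.
	out, err := svc.CreateRepository(&ecr.CreateRepositoryInput{
		RepositoryName: aws.String("my-repo"),
		Tags: []*ecr.Tag{
			{Key: aws.String("team"), Value: aws.String("infra")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Repository.RepositoryUri))
}
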
+
+const opDeleteLifecyclePolicy = "DeleteLifecyclePolicy"
+
+// DeleteLifecyclePolicyRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteLifecyclePolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteLifecyclePolicy for more information on using the DeleteLifecyclePolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteLifecyclePolicyRequest method.
+// req, resp := client.DeleteLifecyclePolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy
+func (c *ECR) DeleteLifecyclePolicyRequest(input *DeleteLifecyclePolicyInput) (req *request.Request, output *DeleteLifecyclePolicyOutput) {
+ op := &request.Operation{
+ Name: opDeleteLifecyclePolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteLifecyclePolicyInput{}
+ }
+
+ output = &DeleteLifecyclePolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteLifecyclePolicy API operation for Amazon EC2 Container Registry.
+//
+// Deletes the specified lifecycle policy.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation DeleteLifecyclePolicy for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException"
+// The lifecycle policy could not be found, and no policy is set to the repository.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteLifecyclePolicy
+func (c *ECR) DeleteLifecyclePolicy(input *DeleteLifecyclePolicyInput) (*DeleteLifecyclePolicyOutput, error) {
+ req, out := c.DeleteLifecyclePolicyRequest(input)
+ return out, req.Send()
+}
+
+// DeleteLifecyclePolicyWithContext is the same as DeleteLifecyclePolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteLifecyclePolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) DeleteLifecyclePolicyWithContext(ctx aws.Context, input *DeleteLifecyclePolicyInput, opts ...request.Option) (*DeleteLifecyclePolicyOutput, error) {
+ req, out := c.DeleteLifecyclePolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteRepository = "DeleteRepository"
+
+// DeleteRepositoryRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteRepository operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteRepository for more information on using the DeleteRepository
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteRepositoryRequest method.
+// req, resp := client.DeleteRepositoryRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository
+func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *request.Request, output *DeleteRepositoryOutput) {
+ op := &request.Operation{
+ Name: opDeleteRepository,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteRepositoryInput{}
+ }
+
+ output = &DeleteRepositoryOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteRepository API operation for Amazon EC2 Container Registry.
+//
+// Deletes an existing image repository. If a repository contains images, you
+// must use the force option to delete it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation DeleteRepository for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeRepositoryNotEmptyException "RepositoryNotEmptyException"
+// The specified repository contains images. To delete a repository that contains
+// images, you must force the deletion with the force parameter.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository
+func (c *ECR) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) {
+ req, out := c.DeleteRepositoryRequest(input)
+ return out, req.Send()
+}
+
+// DeleteRepositoryWithContext is the same as DeleteRepository with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteRepository for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) DeleteRepositoryWithContext(ctx aws.Context, input *DeleteRepositoryInput, opts ...request.Option) (*DeleteRepositoryOutput, error) {
+ req, out := c.DeleteRepositoryRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteRepositoryPolicy = "DeleteRepositoryPolicy"
+
+// DeleteRepositoryPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteRepositoryPolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteRepositoryPolicy for more information on using the DeleteRepositoryPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DeleteRepositoryPolicyRequest method.
+// req, resp := client.DeleteRepositoryPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicy
+func (c *ECR) DeleteRepositoryPolicyRequest(input *DeleteRepositoryPolicyInput) (req *request.Request, output *DeleteRepositoryPolicyOutput) {
+ op := &request.Operation{
+ Name: opDeleteRepositoryPolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DeleteRepositoryPolicyInput{}
+ }
+
+ output = &DeleteRepositoryPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DeleteRepositoryPolicy API operation for Amazon EC2 Container Registry.
+//
+// Deletes the repository policy from a specified repository.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation DeleteRepositoryPolicy for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeRepositoryPolicyNotFoundException "RepositoryPolicyNotFoundException"
+// The specified repository and registry combination does not have an associated
+// repository policy.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepositoryPolicy
+func (c *ECR) DeleteRepositoryPolicy(input *DeleteRepositoryPolicyInput) (*DeleteRepositoryPolicyOutput, error) {
+ req, out := c.DeleteRepositoryPolicyRequest(input)
+ return out, req.Send()
+}
+
+// DeleteRepositoryPolicyWithContext is the same as DeleteRepositoryPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteRepositoryPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) DeleteRepositoryPolicyWithContext(ctx aws.Context, input *DeleteRepositoryPolicyInput, opts ...request.Option) (*DeleteRepositoryPolicyOutput, error) {
+ req, out := c.DeleteRepositoryPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDescribeImages = "DescribeImages"
+
+// DescribeImagesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeImages operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeImages for more information on using the DescribeImages
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DescribeImagesRequest method.
+// req, resp := client.DescribeImagesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImages
+func (c *ECR) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Request, output *DescribeImagesOutput) {
+ op := &request.Operation{
+ Name: opDescribeImages,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"nextToken"},
+ OutputTokens: []string{"nextToken"},
+ LimitToken: "maxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeImagesInput{}
+ }
+
+ output = &DescribeImagesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeImages API operation for Amazon EC2 Container Registry.
+//
+// Returns metadata about the images in a repository, including image size,
+// image tags, and creation date.
+//
+// Beginning with Docker version 1.9, the Docker client compresses image layers
+// before pushing them to a V2 Docker registry. The output of the docker images
+// command shows the uncompressed image size, so it may return a larger image
+// size than the image sizes returned by DescribeImages.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation DescribeImages for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeImageNotFoundException "ImageNotFoundException"
+// The image requested does not exist in the specified repository.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImages
+func (c *ECR) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) {
+ req, out := c.DescribeImagesRequest(input)
+ return out, req.Send()
+}
+
+// DescribeImagesWithContext is the same as DescribeImages with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeImages for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) DescribeImagesWithContext(ctx aws.Context, input *DescribeImagesInput, opts ...request.Option) (*DescribeImagesOutput, error) {
+ req, out := c.DescribeImagesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeImagesPages iterates over the pages of a DescribeImages operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeImages method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeImages operation.
+// pageNum := 0
+// err := client.DescribeImagesPages(params,
+// func(page *ecr.DescribeImagesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *ECR) DescribeImagesPages(input *DescribeImagesInput, fn func(*DescribeImagesOutput, bool) bool) error {
+ return c.DescribeImagesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeImagesPagesWithContext same as DescribeImagesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) DescribeImagesPagesWithContext(ctx aws.Context, input *DescribeImagesInput, fn func(*DescribeImagesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeImagesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeImagesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*DescribeImagesOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
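
A fuller version of the pagination example from the comment above; fn is invoked once per page, returning false stops iteration early, and MaxResults caps the page size (the repository name is a placeholder):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	input := &ecr.DescribeImagesInput{
		RepositoryName: aws.String("my-repo"),
		MaxResults:     aws.Int64(100),
	}

	err := svc.DescribeImagesPages(input, func(page *ecr.DescribeImagesOutput, lastPage bool) bool {
		for _, d := range page.ImageDetails {
			// Sizes here are the compressed layer sizes, per the note above.
			fmt.Println(aws.StringValue(d.ImageDigest), aws.Int64Value(d.ImageSizeInBytes))
		}
		return !lastPage // keep going until the final page
	})
	if err != nil {
		panic(err)
	}
}
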
+
+const opDescribeRepositories = "DescribeRepositories"
+
+// DescribeRepositoriesRequest generates a "aws/request.Request" representing the
+// client's request for the DescribeRepositories operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DescribeRepositories for more information on using the DescribeRepositories
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the DescribeRepositoriesRequest method.
+// req, resp := client.DescribeRepositoriesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositories
+func (c *ECR) DescribeRepositoriesRequest(input *DescribeRepositoriesInput) (req *request.Request, output *DescribeRepositoriesOutput) {
+ op := &request.Operation{
+ Name: opDescribeRepositories,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"nextToken"},
+ OutputTokens: []string{"nextToken"},
+ LimitToken: "maxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &DescribeRepositoriesInput{}
+ }
+
+ output = &DescribeRepositoriesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DescribeRepositories API operation for Amazon EC2 Container Registry.
+//
+// Describes image repositories in a registry.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation DescribeRepositories for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeRepositories
+func (c *ECR) DescribeRepositories(input *DescribeRepositoriesInput) (*DescribeRepositoriesOutput, error) {
+ req, out := c.DescribeRepositoriesRequest(input)
+ return out, req.Send()
+}
+
+// DescribeRepositoriesWithContext is the same as DescribeRepositories with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DescribeRepositories for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) DescribeRepositoriesWithContext(ctx aws.Context, input *DescribeRepositoriesInput, opts ...request.Option) (*DescribeRepositoriesOutput, error) {
+ req, out := c.DescribeRepositoriesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// DescribeRepositoriesPages iterates over the pages of a DescribeRepositories operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See DescribeRepositories method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a DescribeRepositories operation.
+// pageNum := 0
+// err := client.DescribeRepositoriesPages(params,
+// func(page *ecr.DescribeRepositoriesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *ECR) DescribeRepositoriesPages(input *DescribeRepositoriesInput, fn func(*DescribeRepositoriesOutput, bool) bool) error {
+ return c.DescribeRepositoriesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// DescribeRepositoriesPagesWithContext same as DescribeRepositoriesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) DescribeRepositoriesPagesWithContext(ctx aws.Context, input *DescribeRepositoriesInput, fn func(*DescribeRepositoriesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *DescribeRepositoriesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.DescribeRepositoriesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*DescribeRepositoriesOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
+
+const opGetAuthorizationToken = "GetAuthorizationToken"
+
+// GetAuthorizationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetAuthorizationToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetAuthorizationToken for more information on using the GetAuthorizationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetAuthorizationTokenRequest method.
+// req, resp := client.GetAuthorizationTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationToken
+func (c *ECR) GetAuthorizationTokenRequest(input *GetAuthorizationTokenInput) (req *request.Request, output *GetAuthorizationTokenOutput) {
+ op := &request.Operation{
+ Name: opGetAuthorizationToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetAuthorizationTokenInput{}
+ }
+
+ output = &GetAuthorizationTokenOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetAuthorizationToken API operation for Amazon EC2 Container Registry.
+//
+// Retrieves a token that is valid for a specified registry for 12 hours. This
+// command allows you to use the docker CLI to push and pull images with Amazon
+// ECR. If you do not specify a registry, the default registry is assumed.
+//
+// The authorizationToken returned for each registry specified is a base64 encoded
+// string that can be decoded and used in a docker login command to authenticate
+// to a registry. The AWS CLI offers an aws ecr get-login command that simplifies
+// the login process.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation GetAuthorizationToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetAuthorizationToken
+func (c *ECR) GetAuthorizationToken(input *GetAuthorizationTokenInput) (*GetAuthorizationTokenOutput, error) {
+ req, out := c.GetAuthorizationTokenRequest(input)
+ return out, req.Send()
+}
+
+// GetAuthorizationTokenWithContext is the same as GetAuthorizationToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetAuthorizationToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) GetAuthorizationTokenWithContext(ctx aws.Context, input *GetAuthorizationTokenInput, opts ...request.Option) (*GetAuthorizationTokenOutput, error) {
+ req, out := c.GetAuthorizationTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
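
As described above, the returned authorizationToken is a base64-encoded user:password pair (the user is AWS) valid against the registry's ProxyEndpoint. A sketch that decodes it for a docker login:

package main

import (
	"encoding/base64"
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	// An empty input targets the default registry for the account.
	out, err := svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
	if err != nil {
		log.Fatal(err)
	}

	for _, data := range out.AuthorizationData {
		raw, err := base64.StdEncoding.DecodeString(aws.StringValue(data.AuthorizationToken))
		if err != nil {
			log.Fatal(err)
		}
		// raw is "AWS:<password>"; both halves feed a docker login command.
		parts := strings.SplitN(string(raw), ":", 2)
		fmt.Println("user:", parts[0], "endpoint:", aws.StringValue(data.ProxyEndpoint))
	}
}
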
+
+const opGetDownloadUrlForLayer = "GetDownloadUrlForLayer"
+
+// GetDownloadUrlForLayerRequest generates a "aws/request.Request" representing the
+// client's request for the GetDownloadUrlForLayer operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetDownloadUrlForLayer for more information on using the GetDownloadUrlForLayer
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetDownloadUrlForLayerRequest method.
+// req, resp := client.GetDownloadUrlForLayerRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayer
+func (c *ECR) GetDownloadUrlForLayerRequest(input *GetDownloadUrlForLayerInput) (req *request.Request, output *GetDownloadUrlForLayerOutput) {
+ op := &request.Operation{
+ Name: opGetDownloadUrlForLayer,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetDownloadUrlForLayerInput{}
+ }
+
+ output = &GetDownloadUrlForLayerOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetDownloadUrlForLayer API operation for Amazon EC2 Container Registry.
+//
+// Retrieves the pre-signed Amazon S3 download URL corresponding to an image
+// layer. You can only get URLs for image layers that are referenced in an image.
+//
+// This operation is used by the Amazon ECR proxy, and it is not intended for
+// general use by customers for pulling and pushing images. In most cases, you
+// should use the docker CLI to pull, tag, and push images.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation GetDownloadUrlForLayer for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeLayersNotFoundException "LayersNotFoundException"
+// The specified layers could not be found, or the specified layer is not valid
+// for this repository.
+//
+// * ErrCodeLayerInaccessibleException "LayerInaccessibleException"
+// The specified layer is not available because it is not associated with an
+// image. Unassociated image layers may be cleaned up at any time.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetDownloadUrlForLayer
+func (c *ECR) GetDownloadUrlForLayer(input *GetDownloadUrlForLayerInput) (*GetDownloadUrlForLayerOutput, error) {
+ req, out := c.GetDownloadUrlForLayerRequest(input)
+ return out, req.Send()
+}
+
+// GetDownloadUrlForLayerWithContext is the same as GetDownloadUrlForLayer with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetDownloadUrlForLayer for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) GetDownloadUrlForLayerWithContext(ctx aws.Context, input *GetDownloadUrlForLayerInput, opts ...request.Option) (*GetDownloadUrlForLayerOutput, error) {
+ req, out := c.GetDownloadUrlForLayerRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetLifecyclePolicy = "GetLifecyclePolicy"
+
+// GetLifecyclePolicyRequest generates a "aws/request.Request" representing the
+// client's request for the GetLifecyclePolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetLifecyclePolicy for more information on using the GetLifecyclePolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the GetLifecyclePolicyRequest method.
+// req, resp := client.GetLifecyclePolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy
+func (c *ECR) GetLifecyclePolicyRequest(input *GetLifecyclePolicyInput) (req *request.Request, output *GetLifecyclePolicyOutput) {
+ op := &request.Operation{
+ Name: opGetLifecyclePolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetLifecyclePolicyInput{}
+ }
+
+ output = &GetLifecyclePolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetLifecyclePolicy API operation for Amazon EC2 Container Registry.
+//
+// Retrieves the specified lifecycle policy.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation GetLifecyclePolicy for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException"
+// The lifecycle policy could not be found, and no policy is set on the repository.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicy
+func (c *ECR) GetLifecyclePolicy(input *GetLifecyclePolicyInput) (*GetLifecyclePolicyOutput, error) {
+ req, out := c.GetLifecyclePolicyRequest(input)
+ return out, req.Send()
+}
+
+// GetLifecyclePolicyWithContext is the same as GetLifecyclePolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetLifecyclePolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) GetLifecyclePolicyWithContext(ctx aws.Context, input *GetLifecyclePolicyInput, opts ...request.Option) (*GetLifecyclePolicyOutput, error) {
+ req, out := c.GetLifecyclePolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetLifecyclePolicyPreview = "GetLifecyclePolicyPreview"
+
+// GetLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the
+// client's request for the GetLifecyclePolicyPreview operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetLifecyclePolicyPreview for more information on using the GetLifecyclePolicyPreview
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetLifecyclePolicyPreviewRequest method.
+// req, resp := client.GetLifecyclePolicyPreviewRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview
+func (c *ECR) GetLifecyclePolicyPreviewRequest(input *GetLifecyclePolicyPreviewInput) (req *request.Request, output *GetLifecyclePolicyPreviewOutput) {
+ op := &request.Operation{
+ Name: opGetLifecyclePolicyPreview,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetLifecyclePolicyPreviewInput{}
+ }
+
+ output = &GetLifecyclePolicyPreviewOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetLifecyclePolicyPreview API operation for Amazon EC2 Container Registry.
+//
+// Retrieves the results of the specified lifecycle policy preview request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation GetLifecyclePolicyPreview for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeLifecyclePolicyPreviewNotFoundException "LifecyclePolicyPreviewNotFoundException"
+// There is no dry run for this repository.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetLifecyclePolicyPreview
+func (c *ECR) GetLifecyclePolicyPreview(input *GetLifecyclePolicyPreviewInput) (*GetLifecyclePolicyPreviewOutput, error) {
+ req, out := c.GetLifecyclePolicyPreviewRequest(input)
+ return out, req.Send()
+}
+
+// GetLifecyclePolicyPreviewWithContext is the same as GetLifecyclePolicyPreview with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetLifecyclePolicyPreview for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) GetLifecyclePolicyPreviewWithContext(ctx aws.Context, input *GetLifecyclePolicyPreviewInput, opts ...request.Option) (*GetLifecyclePolicyPreviewOutput, error) {
+ req, out := c.GetLifecyclePolicyPreviewRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetRepositoryPolicy = "GetRepositoryPolicy"
+
+// GetRepositoryPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the GetRepositoryPolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetRepositoryPolicy for more information on using the GetRepositoryPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetRepositoryPolicyRequest method.
+// req, resp := client.GetRepositoryPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicy
+func (c *ECR) GetRepositoryPolicyRequest(input *GetRepositoryPolicyInput) (req *request.Request, output *GetRepositoryPolicyOutput) {
+ op := &request.Operation{
+ Name: opGetRepositoryPolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetRepositoryPolicyInput{}
+ }
+
+ output = &GetRepositoryPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetRepositoryPolicy API operation for Amazon EC2 Container Registry.
+//
+// Retrieves the repository policy for a specified repository.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation GetRepositoryPolicy for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeRepositoryPolicyNotFoundException "RepositoryPolicyNotFoundException"
+// The specified repository and registry combination does not have an associated
+// repository policy.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/GetRepositoryPolicy
+func (c *ECR) GetRepositoryPolicy(input *GetRepositoryPolicyInput) (*GetRepositoryPolicyOutput, error) {
+ req, out := c.GetRepositoryPolicyRequest(input)
+ return out, req.Send()
+}
+
+// GetRepositoryPolicyWithContext is the same as GetRepositoryPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetRepositoryPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) GetRepositoryPolicyWithContext(ctx aws.Context, input *GetRepositoryPolicyInput, opts ...request.Option) (*GetRepositoryPolicyOutput, error) {
+ req, out := c.GetRepositoryPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opInitiateLayerUpload = "InitiateLayerUpload"
+
+// InitiateLayerUploadRequest generates a "aws/request.Request" representing the
+// client's request for the InitiateLayerUpload operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See InitiateLayerUpload for more information on using the InitiateLayerUpload
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the InitiateLayerUploadRequest method.
+// req, resp := client.InitiateLayerUploadRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload
+func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req *request.Request, output *InitiateLayerUploadOutput) {
+ op := &request.Operation{
+ Name: opInitiateLayerUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &InitiateLayerUploadInput{}
+ }
+
+ output = &InitiateLayerUploadOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// InitiateLayerUpload API operation for Amazon EC2 Container Registry.
+//
+// Notifies Amazon ECR that you intend to upload an image layer.
+//
+// This operation is used by the Amazon ECR proxy, and it is not intended for
+// general use by customers for pulling and pushing images. In most cases, you
+// should use the docker CLI to pull, tag, and push images.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation InitiateLayerUpload for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload
+func (c *ECR) InitiateLayerUpload(input *InitiateLayerUploadInput) (*InitiateLayerUploadOutput, error) {
+ req, out := c.InitiateLayerUploadRequest(input)
+ return out, req.Send()
+}
+
+// InitiateLayerUploadWithContext is the same as InitiateLayerUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See InitiateLayerUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) InitiateLayerUploadWithContext(ctx aws.Context, input *InitiateLayerUploadInput, opts ...request.Option) (*InitiateLayerUploadOutput, error) {
+ req, out := c.InitiateLayerUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opListImages = "ListImages"
+
+// ListImagesRequest generates a "aws/request.Request" representing the
+// client's request for the ListImages operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListImages for more information on using the ListImages
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListImagesRequest method.
+// req, resp := client.ListImagesRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImages
+func (c *ECR) ListImagesRequest(input *ListImagesInput) (req *request.Request, output *ListImagesOutput) {
+ op := &request.Operation{
+ Name: opListImages,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"nextToken"},
+ OutputTokens: []string{"nextToken"},
+ LimitToken: "maxResults",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ListImagesInput{}
+ }
+
+ output = &ListImagesOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListImages API operation for Amazon EC2 Container Registry.
+//
+// Lists all the image IDs for a given repository.
+//
+// You can filter images based on whether or not they are tagged by setting
+// the tagStatus parameter to TAGGED or UNTAGGED. For example, you can filter
+// your results to return only UNTAGGED images and then pipe that result to
+// a BatchDeleteImage operation to delete them. Or, you can filter your results
+// to return only TAGGED images to list all of the tags in your repository.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation ListImages for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListImages
+func (c *ECR) ListImages(input *ListImagesInput) (*ListImagesOutput, error) {
+ req, out := c.ListImagesRequest(input)
+ return out, req.Send()
+}
+
+// ListImagesWithContext is the same as ListImages with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListImages for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) ListImagesWithContext(ctx aws.Context, input *ListImagesInput, opts ...request.Option) (*ListImagesOutput, error) {
+ req, out := c.ListImagesRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// ListImagesPages iterates over the pages of a ListImages operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListImages method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over at most 3 pages of a ListImages operation.
+// pageNum := 0
+// err := client.ListImagesPages(params,
+// func(page *ecr.ListImagesOutput, lastPage bool) bool {
+// pageNum++
+// fmt.Println(page)
+// return pageNum <= 3
+// })
+//
+func (c *ECR) ListImagesPages(input *ListImagesInput, fn func(*ListImagesOutput, bool) bool) error {
+ return c.ListImagesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListImagesPagesWithContext same as ListImagesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) ListImagesPagesWithContext(ctx aws.Context, input *ListImagesInput, fn func(*ListImagesOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListImagesInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListImagesRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListImagesOutput), !p.HasNextPage())
+ }
+ return p.Err()
+}
+
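+// Example (illustrative sketch; "my-repo" and the session setup are
+// placeholder assumptions) wiring together the UNTAGGED-filter-then-delete
+// workflow described in the ListImages documentation above:
+//
+//    svc := ecr.New(session.Must(session.NewSession()))
+//    list, err := svc.ListImages(&ecr.ListImagesInput{
+//        RepositoryName: aws.String("my-repo"),
+//        Filter: &ecr.ListImagesFilter{
+//            TagStatus: aws.String(ecr.TagStatusUntagged),
+//        },
+//    })
+//    if err == nil && len(list.ImageIds) > 0 {
+//        // Pipe the untagged image IDs straight into BatchDeleteImage.
+//        _, err = svc.BatchDeleteImage(&ecr.BatchDeleteImageInput{
+//            RepositoryName: aws.String("my-repo"),
+//            ImageIds:       list.ImageIds,
+//        })
+//    }
+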
+const opListTagsForResource = "ListTagsForResource"
+
+// ListTagsForResourceRequest generates a "aws/request.Request" representing the
+// client's request for the ListTagsForResource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListTagsForResource for more information on using the ListTagsForResource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ListTagsForResourceRequest method.
+// req, resp := client.ListTagsForResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListTagsForResource
+func (c *ECR) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) {
+ op := &request.Operation{
+ Name: opListTagsForResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListTagsForResourceInput{}
+ }
+
+ output = &ListTagsForResourceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ListTagsForResource API operation for Amazon EC2 Container Registry.
+//
+// List the tags for an Amazon ECR resource.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation ListTagsForResource for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/ListTagsForResource
+func (c *ECR) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) {
+ req, out := c.ListTagsForResourceRequest(input)
+ return out, req.Send()
+}
+
+// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListTagsForResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) {
+ req, out := c.ListTagsForResourceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutImage = "PutImage"
+
+// PutImageRequest generates a "aws/request.Request" representing the
+// client's request for the PutImage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutImage for more information on using the PutImage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutImageRequest method.
+// req, resp := client.PutImageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage
+func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, output *PutImageOutput) {
+ op := &request.Operation{
+ Name: opPutImage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutImageInput{}
+ }
+
+ output = &PutImageOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutImage API operation for Amazon EC2 Container Registry.
+//
+// Creates or updates the image manifest and tags associated with an image.
+//
+// This operation is used by the Amazon ECR proxy, and it is not intended for
+// general use by customers for pulling and pushing images. In most cases, you
+// should use the docker CLI to pull, tag, and push images.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation PutImage for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeImageAlreadyExistsException "ImageAlreadyExistsException"
+// The specified image has already been pushed, and there were no changes to
+// the manifest or image tag after the last push.
+//
+// * ErrCodeLayersNotFoundException "LayersNotFoundException"
+// The specified layers could not be found, or the specified layer is not valid
+// for this repository.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// The operation did not succeed because it would have exceeded a service limit
+// for your account. For more information, see Amazon ECR Default Service Limits
+// (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html)
+// in the Amazon Elastic Container Registry User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage
+func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) {
+ req, out := c.PutImageRequest(input)
+ return out, req.Send()
+}
+
+// PutImageWithContext is the same as PutImage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutImage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) PutImageWithContext(ctx aws.Context, input *PutImageInput, opts ...request.Option) (*PutImageOutput, error) {
+ req, out := c.PutImageRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutLifecyclePolicy = "PutLifecyclePolicy"
+
+// PutLifecyclePolicyRequest generates a "aws/request.Request" representing the
+// client's request for the PutLifecyclePolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutLifecyclePolicy for more information on using the PutLifecyclePolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutLifecyclePolicyRequest method.
+// req, resp := client.PutLifecyclePolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy
+func (c *ECR) PutLifecyclePolicyRequest(input *PutLifecyclePolicyInput) (req *request.Request, output *PutLifecyclePolicyOutput) {
+ op := &request.Operation{
+ Name: opPutLifecyclePolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutLifecyclePolicyInput{}
+ }
+
+ output = &PutLifecyclePolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// PutLifecyclePolicy API operation for Amazon EC2 Container Registry.
+//
+// Creates or updates a lifecycle policy. For information about lifecycle policy
+// syntax, see Lifecycle Policy Template (http://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation PutLifecyclePolicy for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutLifecyclePolicy
+func (c *ECR) PutLifecyclePolicy(input *PutLifecyclePolicyInput) (*PutLifecyclePolicyOutput, error) {
+ req, out := c.PutLifecyclePolicyRequest(input)
+ return out, req.Send()
+}
+
+// PutLifecyclePolicyWithContext is the same as PutLifecyclePolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutLifecyclePolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) PutLifecyclePolicyWithContext(ctx aws.Context, input *PutLifecyclePolicyInput, opts ...request.Option) (*PutLifecyclePolicyOutput, error) {
+ req, out := c.PutLifecyclePolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
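+// Example (illustrative sketch; svc is assumed to be an *ecr.ECR client and
+// "my-repo" a placeholder) of a minimal expire-untagged policy document,
+// following the Lifecycle Policy Template linked above:
+//
+//    policy := `{"rules":[{"rulePriority":1,
+//        "description":"expire untagged images after 14 days",
+//        "selection":{"tagStatus":"untagged","countType":"sinceImagePushed",
+//            "countUnit":"days","countNumber":14},
+//        "action":{"type":"expire"}}]}`
+//    _, err := svc.PutLifecyclePolicy(&ecr.PutLifecyclePolicyInput{
+//        RepositoryName:      aws.String("my-repo"),
+//        LifecyclePolicyText: aws.String(policy),
+//    })
+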
+const opSetRepositoryPolicy = "SetRepositoryPolicy"
+
+// SetRepositoryPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the SetRepositoryPolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See SetRepositoryPolicy for more information on using the SetRepositoryPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the SetRepositoryPolicyRequest method.
+// req, resp := client.SetRepositoryPolicyRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy
+func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) {
+ op := &request.Operation{
+ Name: opSetRepositoryPolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &SetRepositoryPolicyInput{}
+ }
+
+ output = &SetRepositoryPolicyOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// SetRepositoryPolicy API operation for Amazon EC2 Container Registry.
+//
+// Applies a repository policy on a specified repository to control access permissions.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation SetRepositoryPolicy for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/SetRepositoryPolicy
+func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetRepositoryPolicyOutput, error) {
+ req, out := c.SetRepositoryPolicyRequest(input)
+ return out, req.Send()
+}
+
+// SetRepositoryPolicyWithContext is the same as SetRepositoryPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SetRepositoryPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) SetRepositoryPolicyWithContext(ctx aws.Context, input *SetRepositoryPolicyInput, opts ...request.Option) (*SetRepositoryPolicyOutput, error) {
+ req, out := c.SetRepositoryPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
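+// Example (illustrative sketch; svc is assumed to be an *ecr.ECR client, and
+// the account ID, repository name, and statement are placeholder assumptions,
+// not a recommended policy) granting pull access to another account:
+//
+//    policy := `{"Version":"2008-10-17","Statement":[{
+//        "Sid":"AllowPull","Effect":"Allow",
+//        "Principal":{"AWS":"arn:aws:iam::123456789012:root"},
+//        "Action":["ecr:GetDownloadUrlForLayer","ecr:BatchGetImage"]}]}`
+//    _, err := svc.SetRepositoryPolicy(&ecr.SetRepositoryPolicyInput{
+//        RepositoryName: aws.String("my-repo"),
+//        PolicyText:     aws.String(policy),
+//    })
+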
+const opStartLifecyclePolicyPreview = "StartLifecyclePolicyPreview"
+
+// StartLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the
+// client's request for the StartLifecyclePolicyPreview operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See StartLifecyclePolicyPreview for more information on using the StartLifecyclePolicyPreview
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the StartLifecyclePolicyPreviewRequest method.
+// req, resp := client.StartLifecyclePolicyPreviewRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview
+func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPreviewInput) (req *request.Request, output *StartLifecyclePolicyPreviewOutput) {
+ op := &request.Operation{
+ Name: opStartLifecyclePolicyPreview,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &StartLifecyclePolicyPreviewInput{}
+ }
+
+ output = &StartLifecyclePolicyPreviewOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// StartLifecyclePolicyPreview API operation for Amazon EC2 Container Registry.
+//
+// Starts a preview of the specified lifecycle policy. This allows you to see
+// the results before creating the lifecycle policy.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation StartLifecyclePolicyPreview for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException"
+// The lifecycle policy could not be found, and no policy is set on the repository.
+//
+// * ErrCodeLifecyclePolicyPreviewInProgressException "LifecyclePolicyPreviewInProgressException"
+// The previous lifecycle policy preview request has not completed. Please try
+// again later.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview
+func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) {
+ req, out := c.StartLifecyclePolicyPreviewRequest(input)
+ return out, req.Send()
+}
+
+// StartLifecyclePolicyPreviewWithContext is the same as StartLifecyclePolicyPreview with the addition of
+// the ability to pass a context and additional request options.
+//
+// See StartLifecyclePolicyPreview for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) StartLifecyclePolicyPreviewWithContext(ctx aws.Context, input *StartLifecyclePolicyPreviewInput, opts ...request.Option) (*StartLifecyclePolicyPreviewOutput, error) {
+ req, out := c.StartLifecyclePolicyPreviewRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
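+// The preview runs asynchronously: start it, then poll GetLifecyclePolicyPreview
+// until the status leaves IN_PROGRESS. Simplified sketch (svc is assumed to
+// be an *ecr.ECR client, "my-repo" a placeholder; real code should bound the
+// polling loop):
+//
+//    _, err := svc.StartLifecyclePolicyPreview(&ecr.StartLifecyclePolicyPreviewInput{
+//        RepositoryName: aws.String("my-repo"),
+//    })
+//    for err == nil {
+//        var out *ecr.GetLifecyclePolicyPreviewOutput
+//        out, err = svc.GetLifecyclePolicyPreview(&ecr.GetLifecyclePolicyPreviewInput{
+//            RepositoryName: aws.String("my-repo"),
+//        })
+//        if err != nil || aws.StringValue(out.Status) != ecr.LifecyclePolicyPreviewStatusInProgress {
+//            break
+//        }
+//        time.Sleep(5 * time.Second)
+//    }
+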
+const opTagResource = "TagResource"
+
+// TagResourceRequest generates a "aws/request.Request" representing the
+// client's request for the TagResource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See TagResource for more information on using the TagResource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the TagResourceRequest method.
+// req, resp := client.TagResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/TagResource
+func (c *ECR) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) {
+ op := &request.Operation{
+ Name: opTagResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &TagResourceInput{}
+ }
+
+ output = &TagResourceOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// TagResource API operation for Amazon EC2 Container Registry.
+//
+// Adds specified tags to a resource with the specified ARN. Existing tags on
+// a resource are not changed if they are not specified in the request parameters.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation TagResource for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeInvalidTagParameterException "InvalidTagParameterException"
+// An invalid parameter has been specified. Tag keys can have a maximum length
+// of 128 characters, and tag values can have a maximum length of 256 characters.
+//
+// * ErrCodeTooManyTagsException "TooManyTagsException"
+// The list of tags on the repository is over the limit. The maximum number
+// of tags that can be applied to a repository is 50.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/TagResource
+func (c *ECR) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
+ req, out := c.TagResourceRequest(input)
+ return out, req.Send()
+}
+
+// TagResourceWithContext is the same as TagResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TagResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
+ req, out := c.TagResourceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
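+// Example (illustrative sketch; svc is assumed to be an *ecr.ECR client, and
+// the ARN and tag values are placeholders):
+//
+//    _, err := svc.TagResource(&ecr.TagResourceInput{
+//        ResourceArn: aws.String("arn:aws:ecr:us-east-1:123456789012:repository/my-repo"),
+//        Tags: []*ecr.Tag{
+//            {Key: aws.String("team"), Value: aws.String("infra")},
+//        },
+//    })
+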
+const opUntagResource = "UntagResource"
+
+// UntagResourceRequest generates a "aws/request.Request" representing the
+// client's request for the UntagResource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UntagResource for more information on using the UntagResource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UntagResourceRequest method.
+// req, resp := client.UntagResourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UntagResource
+func (c *ECR) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) {
+ op := &request.Operation{
+ Name: opUntagResource,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UntagResourceInput{}
+ }
+
+ output = &UntagResourceOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// UntagResource API operation for Amazon EC2 Container Registry.
+//
+// Deletes specified tags from a resource.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation UntagResource for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeInvalidTagParameterException "InvalidTagParameterException"
+// An invalid parameter has been specified. Tag keys can have a maximum length
+// of 128 characters, and tag values can have a maximum length of 256 characters.
+//
+// * ErrCodeTooManyTagsException "TooManyTagsException"
+// The list of tags on the repository is over the limit. The maximum number
+// of tags that can be applied to a repository is 50.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UntagResource
+func (c *ECR) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) {
+ req, out := c.UntagResourceRequest(input)
+ return out, req.Send()
+}
+
+// UntagResourceWithContext is the same as UntagResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UntagResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
+ req, out := c.UntagResourceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUploadLayerPart = "UploadLayerPart"
+
+// UploadLayerPartRequest generates a "aws/request.Request" representing the
+// client's request for the UploadLayerPart operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UploadLayerPart for more information on using the UploadLayerPart
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UploadLayerPartRequest method.
+// req, resp := client.UploadLayerPartRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart
+func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.Request, output *UploadLayerPartOutput) {
+ op := &request.Operation{
+ Name: opUploadLayerPart,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UploadLayerPartInput{}
+ }
+
+ output = &UploadLayerPartOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UploadLayerPart API operation for Amazon EC2 Container Registry.
+//
+// Uploads an image layer part to Amazon ECR.
+//
+// This operation is used by the Amazon ECR proxy, and it is not intended for
+// general use by customers for pulling and pushing images. In most cases, you
+// should use the docker CLI to pull, tag, and push images.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon EC2 Container Registry's
+// API operation UploadLayerPart for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeServerException "ServerException"
+// These errors are usually caused by a server-side issue.
+//
+// * ErrCodeInvalidParameterException "InvalidParameterException"
+// The specified parameter is invalid. Review the available parameters for the
+// API request.
+//
+// * ErrCodeInvalidLayerPartException "InvalidLayerPartException"
+// The layer part size is not valid, or the first byte specified is not consecutive
+// to the last byte of a previous layer part upload.
+//
+// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException"
+// The specified repository could not be found. Check the spelling of the specified
+// repository and ensure that you are performing operations on the correct registry.
+//
+// * ErrCodeUploadNotFoundException "UploadNotFoundException"
+// The upload could not be found, or the specified upload id is not valid for
+// this repository.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// The operation did not succeed because it would have exceeded a service limit
+// for your account. For more information, see Amazon ECR Default Service Limits
+// (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html)
+// in the Amazon Elastic Container Registry User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart
+func (c *ECR) UploadLayerPart(input *UploadLayerPartInput) (*UploadLayerPartOutput, error) {
+ req, out := c.UploadLayerPartRequest(input)
+ return out, req.Send()
+}
+
+// UploadLayerPartWithContext is the same as UploadLayerPart with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadLayerPart for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *ECR) UploadLayerPartWithContext(ctx aws.Context, input *UploadLayerPartInput, opts ...request.Option) (*UploadLayerPartOutput, error) {
+ req, out := c.UploadLayerPartRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+// An object representing authorization data for an Amazon ECR registry.
+type AuthorizationData struct {
+ _ struct{} `type:"structure"`
+
+ // A base64-encoded string that contains authorization data for the specified
+ // Amazon ECR registry. When the string is decoded, it is presented in the format
+ // user:password for private registry authentication using docker login.
+ AuthorizationToken *string `locationName:"authorizationToken" type:"string"`
+
+ // The Unix time in seconds and milliseconds when the authorization token expires.
+ // Authorization tokens are valid for 12 hours.
+ ExpiresAt *time.Time `locationName:"expiresAt" type:"timestamp"`
+
+ // The registry URL to use for this authorization token in a docker login command.
+ // The Amazon ECR registry URL format is https://aws_account_id.dkr.ecr.region.amazonaws.com.
+// For example, https://012345678910.dkr.ecr.us-east-1.amazonaws.com.
+ ProxyEndpoint *string `locationName:"proxyEndpoint" type:"string"`
+}
+
+// String returns the string representation
+func (s AuthorizationData) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AuthorizationData) GoString() string {
+ return s.String()
+}
+
+// SetAuthorizationToken sets the AuthorizationToken field's value.
+func (s *AuthorizationData) SetAuthorizationToken(v string) *AuthorizationData {
+ s.AuthorizationToken = &v
+ return s
+}
+
+// SetExpiresAt sets the ExpiresAt field's value.
+func (s *AuthorizationData) SetExpiresAt(v time.Time) *AuthorizationData {
+ s.ExpiresAt = &v
+ return s
+}
+
+// SetProxyEndpoint sets the ProxyEndpoint field's value.
+func (s *AuthorizationData) SetProxyEndpoint(v string) *AuthorizationData {
+ s.ProxyEndpoint = &v
+ return s
+}
+
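+// Decoding sketch (illustrative; data is assumed to be an *AuthorizationData
+// returned by a prior GetAuthorizationToken call):
+//
+//    raw, err := base64.StdEncoding.DecodeString(aws.StringValue(data.AuthorizationToken))
+//    if err == nil {
+//        // The decoded token has the form "user:password".
+//        if parts := strings.SplitN(string(raw), ":", 2); len(parts) == 2 {
+//            user, password := parts[0], parts[1]
+//            // Suitable for: docker login -u <user> -p <password> <ProxyEndpoint>
+//            _, _ = user, password
+//        }
+//    }
+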
+type BatchCheckLayerAvailabilityInput struct {
+ _ struct{} `type:"structure"`
+
+ // The digests of the image layers to check.
+ //
+ // LayerDigests is a required field
+ LayerDigests []*string `locationName:"layerDigests" min:"1" type:"list" required:"true"`
+
+ // The AWS account ID associated with the registry that contains the image layers
+ // to check. If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository that is associated with the image layers to check.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchCheckLayerAvailabilityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchCheckLayerAvailabilityInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchCheckLayerAvailabilityInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BatchCheckLayerAvailabilityInput"}
+ if s.LayerDigests == nil {
+ invalidParams.Add(request.NewErrParamRequired("LayerDigests"))
+ }
+ if s.LayerDigests != nil && len(s.LayerDigests) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("LayerDigests", 1))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLayerDigests sets the LayerDigests field's value.
+func (s *BatchCheckLayerAvailabilityInput) SetLayerDigests(v []*string) *BatchCheckLayerAvailabilityInput {
+ s.LayerDigests = v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *BatchCheckLayerAvailabilityInput) SetRegistryId(v string) *BatchCheckLayerAvailabilityInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *BatchCheckLayerAvailabilityInput) SetRepositoryName(v string) *BatchCheckLayerAvailabilityInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type BatchCheckLayerAvailabilityOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Any failures associated with the call.
+ Failures []*LayerFailure `locationName:"failures" type:"list"`
+
+ // A list of image layer objects corresponding to the image layer references
+ // in the request.
+ Layers []*Layer `locationName:"layers" type:"list"`
+}
+
+// String returns the string representation
+func (s BatchCheckLayerAvailabilityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchCheckLayerAvailabilityOutput) GoString() string {
+ return s.String()
+}
+
+// SetFailures sets the Failures field's value.
+func (s *BatchCheckLayerAvailabilityOutput) SetFailures(v []*LayerFailure) *BatchCheckLayerAvailabilityOutput {
+ s.Failures = v
+ return s
+}
+
+// SetLayers sets the Layers field's value.
+func (s *BatchCheckLayerAvailabilityOutput) SetLayers(v []*Layer) *BatchCheckLayerAvailabilityOutput {
+ s.Layers = v
+ return s
+}
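+
+// Illustrative usage sketch, not part of the generated API: checking which
+// layers already exist before re-uploading them. It assumes an *ECR client
+// named svc built with ecr.New(session.Must(session.NewSession())) plus the
+// aws helper package for pointer conversions; the repository name and digest
+// are placeholders.
+//
+//	out, err := svc.BatchCheckLayerAvailability(&ecr.BatchCheckLayerAvailabilityInput{
+//		RepositoryName: aws.String("my-repo"),
+//		LayerDigests:   []*string{aws.String("sha256:example")},
+//	})
+//	// out.Layers reports each digest's LayerAvailability; out.Failures lists
+//	// digests that could not be checked.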
+
+// Deletes specified images within a specified repository. Images are specified
+// with either the imageTag or imageDigest.
+type BatchDeleteImageInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of image ID references that correspond to images to delete. The format
+ // of the imageIds reference is imageTag=tag or imageDigest=digest.
+ //
+ // ImageIds is a required field
+ ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list" required:"true"`
+
+ // The AWS account ID associated with the registry that contains the image to
+ // delete. If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The repository that contains the image to delete.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchDeleteImageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchDeleteImageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchDeleteImageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BatchDeleteImageInput"}
+ if s.ImageIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("ImageIds"))
+ }
+ if s.ImageIds != nil && len(s.ImageIds) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetImageIds sets the ImageIds field's value.
+func (s *BatchDeleteImageInput) SetImageIds(v []*ImageIdentifier) *BatchDeleteImageInput {
+ s.ImageIds = v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *BatchDeleteImageInput) SetRegistryId(v string) *BatchDeleteImageInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *BatchDeleteImageInput) SetRepositoryName(v string) *BatchDeleteImageInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type BatchDeleteImageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Any failures associated with the call.
+ Failures []*ImageFailure `locationName:"failures" type:"list"`
+
+ // The image IDs of the deleted images.
+ ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s BatchDeleteImageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchDeleteImageOutput) GoString() string {
+ return s.String()
+}
+
+// SetFailures sets the Failures field's value.
+func (s *BatchDeleteImageOutput) SetFailures(v []*ImageFailure) *BatchDeleteImageOutput {
+ s.Failures = v
+ return s
+}
+
+// SetImageIds sets the ImageIds field's value.
+func (s *BatchDeleteImageOutput) SetImageIds(v []*ImageIdentifier) *BatchDeleteImageOutput {
+ s.ImageIds = v
+ return s
+}
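+
+// Illustrative usage sketch (same assumed svc client as the sketch above;
+// the repository and tag are placeholders): deleting one image by tag. A
+// digest works the same way via the ImageDigest field.
+//
+//	out, err := svc.BatchDeleteImage(&ecr.BatchDeleteImageInput{
+//		RepositoryName: aws.String("my-repo"),
+//		ImageIds:       []*ecr.ImageIdentifier{{ImageTag: aws.String("v1.0.0")}},
+//	})
+//	// Successfully deleted IDs are echoed in out.ImageIds; per-image errors
+//	// appear in out.Failures.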
+
+type BatchGetImageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The accepted media types for the request.
+ //
+ // Valid values: application/vnd.docker.distribution.manifest.v1+json | application/vnd.docker.distribution.manifest.v2+json
+ // | application/vnd.oci.image.manifest.v1+json
+ AcceptedMediaTypes []*string `locationName:"acceptedMediaTypes" min:"1" type:"list"`
+
+ // A list of image ID references that correspond to images to describe. The
+ // format of the imageIds reference is imageTag=tag or imageDigest=digest.
+ //
+ // ImageIds is a required field
+ ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list" required:"true"`
+
+ // The AWS account ID associated with the registry that contains the images
+ // to describe. If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The repository that contains the images to describe.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchGetImageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetImageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchGetImageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BatchGetImageInput"}
+ if s.AcceptedMediaTypes != nil && len(s.AcceptedMediaTypes) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AcceptedMediaTypes", 1))
+ }
+ if s.ImageIds == nil {
+ invalidParams.Add(request.NewErrParamRequired("ImageIds"))
+ }
+ if s.ImageIds != nil && len(s.ImageIds) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAcceptedMediaTypes sets the AcceptedMediaTypes field's value.
+func (s *BatchGetImageInput) SetAcceptedMediaTypes(v []*string) *BatchGetImageInput {
+ s.AcceptedMediaTypes = v
+ return s
+}
+
+// SetImageIds sets the ImageIds field's value.
+func (s *BatchGetImageInput) SetImageIds(v []*ImageIdentifier) *BatchGetImageInput {
+ s.ImageIds = v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *BatchGetImageInput) SetRegistryId(v string) *BatchGetImageInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *BatchGetImageInput) SetRepositoryName(v string) *BatchGetImageInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type BatchGetImageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Any failures associated with the call.
+ Failures []*ImageFailure `locationName:"failures" type:"list"`
+
+ // A list of image objects corresponding to the image references in the request.
+ Images []*Image `locationName:"images" type:"list"`
+}
+
+// String returns the string representation
+func (s BatchGetImageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetImageOutput) GoString() string {
+ return s.String()
+}
+
+// SetFailures sets the Failures field's value.
+func (s *BatchGetImageOutput) SetFailures(v []*ImageFailure) *BatchGetImageOutput {
+ s.Failures = v
+ return s
+}
+
+// SetImages sets the Images field's value.
+func (s *BatchGetImageOutput) SetImages(v []*Image) *BatchGetImageOutput {
+ s.Images = v
+ return s
+}
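+
+// Illustrative usage sketch (assumed svc client and placeholder names):
+// fetching an image manifest while restricting the accepted media types to
+// one of the values listed in the AcceptedMediaTypes doc above.
+//
+//	out, err := svc.BatchGetImage(&ecr.BatchGetImageInput{
+//		RepositoryName:     aws.String("my-repo"),
+//		ImageIds:           []*ecr.ImageIdentifier{{ImageTag: aws.String("latest")}},
+//		AcceptedMediaTypes: []*string{aws.String("application/vnd.docker.distribution.manifest.v2+json")},
+//	})
+//	// Each out.Images[i].ImageManifest holds the raw manifest JSON.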
+
+type CompleteLayerUploadInput struct {
+ _ struct{} `type:"structure"`
+
+ // The sha256 digest of the image layer.
+ //
+ // LayerDigests is a required field
+ LayerDigests []*string `locationName:"layerDigests" min:"1" type:"list" required:"true"`
+
+ // The AWS account ID associated with the registry to which to upload layers.
+ // If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository to associate with the image layer.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+
+ // The upload ID from a previous InitiateLayerUpload operation to associate
+ // with the image layer.
+ //
+ // UploadId is a required field
+ UploadId *string `locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CompleteLayerUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteLayerUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CompleteLayerUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CompleteLayerUploadInput"}
+ if s.LayerDigests == nil {
+ invalidParams.Add(request.NewErrParamRequired("LayerDigests"))
+ }
+ if s.LayerDigests != nil && len(s.LayerDigests) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("LayerDigests", 1))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLayerDigests sets the LayerDigests field's value.
+func (s *CompleteLayerUploadInput) SetLayerDigests(v []*string) *CompleteLayerUploadInput {
+ s.LayerDigests = v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *CompleteLayerUploadInput) SetRegistryId(v string) *CompleteLayerUploadInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *CompleteLayerUploadInput) SetRepositoryName(v string) *CompleteLayerUploadInput {
+ s.RepositoryName = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *CompleteLayerUploadInput) SetUploadId(v string) *CompleteLayerUploadInput {
+ s.UploadId = &v
+ return s
+}
+
+type CompleteLayerUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The sha256 digest of the image layer.
+ LayerDigest *string `locationName:"layerDigest" type:"string"`
+
+ // The registry ID associated with the request.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The repository name associated with the request.
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
+
+ // The upload ID associated with the layer.
+ UploadId *string `locationName:"uploadId" type:"string"`
+}
+
+// String returns the string representation
+func (s CompleteLayerUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteLayerUploadOutput) GoString() string {
+ return s.String()
+}
+
+// SetLayerDigest sets the LayerDigest field's value.
+func (s *CompleteLayerUploadOutput) SetLayerDigest(v string) *CompleteLayerUploadOutput {
+ s.LayerDigest = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *CompleteLayerUploadOutput) SetRegistryId(v string) *CompleteLayerUploadOutput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *CompleteLayerUploadOutput) SetRepositoryName(v string) *CompleteLayerUploadOutput {
+ s.RepositoryName = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *CompleteLayerUploadOutput) SetUploadId(v string) *CompleteLayerUploadOutput {
+ s.UploadId = &v
+ return s
+}
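+
+// Illustrative sketch of where CompleteLayerUpload sits in the layer push
+// flow (assumed svc client, placeholder repository and digest):
+// InitiateLayerUpload issues the uploadId, UploadLayerPart streams the layer
+// bytes, and CompleteLayerUpload seals the layer under its digest.
+//
+//	initOut, err := svc.InitiateLayerUpload(&ecr.InitiateLayerUploadInput{
+//		RepositoryName: aws.String("my-repo"),
+//	})
+//	// ... UploadLayerPart calls using initOut.UploadId ...
+//	_, err = svc.CompleteLayerUpload(&ecr.CompleteLayerUploadInput{
+//		RepositoryName: aws.String("my-repo"),
+//		UploadId:       initOut.UploadId,
+//		LayerDigests:   []*string{aws.String("sha256:example")},
+//	})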
+
+type CreateRepositoryInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name to use for the repository. The repository name may be specified
+ // on its own (such as nginx-web-app) or it can be prepended with a namespace
+ // to group the repository into a category (such as project-a/nginx-web-app).
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+
+	// The metadata to apply to the repository to help categorize and organize
+	// it. Each tag consists of a key and an optional value.
+	Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s CreateRepositoryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRepositoryInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateRepositoryInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateRepositoryInput"}
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *CreateRepositoryInput) SetRepositoryName(v string) *CreateRepositoryInput {
+ s.RepositoryName = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *CreateRepositoryInput) SetTags(v []*Tag) *CreateRepositoryInput {
+ s.Tags = v
+ return s
+}
+
+type CreateRepositoryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The repository that was created.
+ Repository *Repository `locationName:"repository" type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateRepositoryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateRepositoryOutput) GoString() string {
+ return s.String()
+}
+
+// SetRepository sets the Repository field's value.
+func (s *CreateRepositoryOutput) SetRepository(v *Repository) *CreateRepositoryOutput {
+ s.Repository = v
+ return s
+}
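+
+// Illustrative usage sketch (assumed svc client; the name and tag values are
+// placeholders): creating a namespaced repository as the RepositoryName doc
+// above describes.
+//
+//	out, err := svc.CreateRepository(&ecr.CreateRepositoryInput{
+//		RepositoryName: aws.String("project-a/nginx-web-app"),
+//		Tags:           []*ecr.Tag{{Key: aws.String("team"), Value: aws.String("platform")}},
+//	})
+//	// out.Repository.RepositoryUri is the endpoint that docker push targets.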
+
+type DeleteLifecyclePolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS account ID associated with the registry that contains the repository.
+ // If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteLifecyclePolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLifecyclePolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteLifecyclePolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteLifecyclePolicyInput"}
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *DeleteLifecyclePolicyInput) SetRegistryId(v string) *DeleteLifecyclePolicyInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *DeleteLifecyclePolicyInput) SetRepositoryName(v string) *DeleteLifecyclePolicyInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type DeleteLifecyclePolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The time stamp of the last time that the lifecycle policy was run.
+ LastEvaluatedAt *time.Time `locationName:"lastEvaluatedAt" type:"timestamp"`
+
+ // The JSON lifecycle policy text.
+ LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
+
+ // The registry ID associated with the request.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The repository name associated with the request.
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteLifecyclePolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteLifecyclePolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetLastEvaluatedAt sets the LastEvaluatedAt field's value.
+func (s *DeleteLifecyclePolicyOutput) SetLastEvaluatedAt(v time.Time) *DeleteLifecyclePolicyOutput {
+ s.LastEvaluatedAt = &v
+ return s
+}
+
+// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
+func (s *DeleteLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *DeleteLifecyclePolicyOutput {
+ s.LifecyclePolicyText = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *DeleteLifecyclePolicyOutput) SetRegistryId(v string) *DeleteLifecyclePolicyOutput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *DeleteLifecyclePolicyOutput) SetRepositoryName(v string) *DeleteLifecyclePolicyOutput {
+ s.RepositoryName = &v
+ return s
+}
+
+type DeleteRepositoryInput struct {
+ _ struct{} `type:"structure"`
+
+ // If a repository contains images, forces the deletion.
+ Force *bool `locationName:"force" type:"boolean"`
+
+ // The AWS account ID associated with the registry that contains the repository
+ // to delete. If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository to delete.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteRepositoryInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRepositoryInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteRepositoryInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteRepositoryInput"}
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetForce sets the Force field's value.
+func (s *DeleteRepositoryInput) SetForce(v bool) *DeleteRepositoryInput {
+ s.Force = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *DeleteRepositoryInput) SetRegistryId(v string) *DeleteRepositoryInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *DeleteRepositoryInput) SetRepositoryName(v string) *DeleteRepositoryInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type DeleteRepositoryOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The repository that was deleted.
+ Repository *Repository `locationName:"repository" type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteRepositoryOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRepositoryOutput) GoString() string {
+ return s.String()
+}
+
+// SetRepository sets the Repository field's value.
+func (s *DeleteRepositoryOutput) SetRepository(v *Repository) *DeleteRepositoryOutput {
+ s.Repository = v
+ return s
+}
+
+type DeleteRepositoryPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS account ID associated with the registry that contains the repository
+ // policy to delete. If you do not specify a registry, the default registry
+ // is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository that is associated with the repository policy
+ // to delete.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteRepositoryPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRepositoryPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteRepositoryPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteRepositoryPolicyInput"}
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *DeleteRepositoryPolicyInput) SetRegistryId(v string) *DeleteRepositoryPolicyInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *DeleteRepositoryPolicyInput) SetRepositoryName(v string) *DeleteRepositoryPolicyInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type DeleteRepositoryPolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The JSON repository policy that was deleted from the repository.
+ PolicyText *string `locationName:"policyText" type:"string"`
+
+ // The registry ID associated with the request.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The repository name associated with the request.
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteRepositoryPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteRepositoryPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetPolicyText sets the PolicyText field's value.
+func (s *DeleteRepositoryPolicyOutput) SetPolicyText(v string) *DeleteRepositoryPolicyOutput {
+ s.PolicyText = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *DeleteRepositoryPolicyOutput) SetRegistryId(v string) *DeleteRepositoryPolicyOutput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *DeleteRepositoryPolicyOutput) SetRepositoryName(v string) *DeleteRepositoryPolicyOutput {
+ s.RepositoryName = &v
+ return s
+}
+
+// An object representing a filter on a DescribeImages operation.
+type DescribeImagesFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The tag status with which to filter your DescribeImages results. You can
+ // filter results based on whether they are TAGGED or UNTAGGED.
+ TagStatus *string `locationName:"tagStatus" type:"string" enum:"TagStatus"`
+}
+
+// String returns the string representation
+func (s DescribeImagesFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeImagesFilter) GoString() string {
+ return s.String()
+}
+
+// SetTagStatus sets the TagStatus field's value.
+func (s *DescribeImagesFilter) SetTagStatus(v string) *DescribeImagesFilter {
+ s.TagStatus = &v
+ return s
+}
+
+type DescribeImagesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The filter key and value with which to filter your DescribeImages results.
+ Filter *DescribeImagesFilter `locationName:"filter" type:"structure"`
+
+ // The list of image IDs for the requested repository.
+ ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"`
+
+ // The maximum number of repository results returned by DescribeImages in paginated
+ // output. When this parameter is used, DescribeImages only returns maxResults
+ // results in a single page along with a nextToken response element. The remaining
+ // results of the initial request can be seen by sending another DescribeImages
+ // request with the returned nextToken value. This value can be between 1 and
+ // 1000. If this parameter is not used, then DescribeImages returns up to 100
+ // results and a nextToken value, if applicable. This option cannot be used
+ // when you specify images with imageIds.
+ MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
+
+ // The nextToken value returned from a previous paginated DescribeImages request
+ // where maxResults was used and the results exceeded the value of that parameter.
+ // Pagination continues from the end of the previous results that returned the
+ // nextToken value. This value is null when there are no more results to return.
+ // This option cannot be used when you specify images with imageIds.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The AWS account ID associated with the registry that contains the repository
+ // in which to describe images. If you do not specify a registry, the default
+ // registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+	// The repository that contains the images to describe.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeImagesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeImagesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeImagesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeImagesInput"}
+ if s.ImageIds != nil && len(s.ImageIds) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFilter sets the Filter field's value.
+func (s *DescribeImagesInput) SetFilter(v *DescribeImagesFilter) *DescribeImagesInput {
+ s.Filter = v
+ return s
+}
+
+// SetImageIds sets the ImageIds field's value.
+func (s *DescribeImagesInput) SetImageIds(v []*ImageIdentifier) *DescribeImagesInput {
+ s.ImageIds = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeImagesInput) SetMaxResults(v int64) *DescribeImagesInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeImagesInput) SetNextToken(v string) *DescribeImagesInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *DescribeImagesInput) SetRegistryId(v string) *DescribeImagesInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *DescribeImagesInput) SetRepositoryName(v string) *DescribeImagesInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type DescribeImagesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of ImageDetail objects that contain data about the image.
+ ImageDetails []*ImageDetail `locationName:"imageDetails" type:"list"`
+
+ // The nextToken value to include in a future DescribeImages request. When the
+ // results of a DescribeImages request exceed maxResults, this value can be
+ // used to retrieve the next page of results. This value is null when there
+ // are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s DescribeImagesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeImagesOutput) GoString() string {
+ return s.String()
+}
+
+// SetImageDetails sets the ImageDetails field's value.
+func (s *DescribeImagesOutput) SetImageDetails(v []*ImageDetail) *DescribeImagesOutput {
+ s.ImageDetails = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeImagesOutput) SetNextToken(v string) *DescribeImagesOutput {
+ s.NextToken = &v
+ return s
+}
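+
+// Illustrative sketch of the nextToken/maxResults pagination contract
+// described above, written as a manual loop (assumed svc client and
+// placeholder repository):
+//
+//	input := &ecr.DescribeImagesInput{RepositoryName: aws.String("my-repo")}
+//	for {
+//		page, err := svc.DescribeImages(input)
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		// ... consume page.ImageDetails ...
+//		if page.NextToken == nil {
+//			break // no more results
+//		}
+//		input.NextToken = page.NextToken
+//	}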
+
+type DescribeRepositoriesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum number of repository results returned by DescribeRepositories
+ // in paginated output. When this parameter is used, DescribeRepositories only
+ // returns maxResults results in a single page along with a nextToken response
+ // element. The remaining results of the initial request can be seen by sending
+ // another DescribeRepositories request with the returned nextToken value. This
+ // value can be between 1 and 1000. If this parameter is not used, then DescribeRepositories
+ // returns up to 100 results and a nextToken value, if applicable. This option
+ // cannot be used when you specify repositories with repositoryNames.
+ MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
+
+ // The nextToken value returned from a previous paginated DescribeRepositories
+ // request where maxResults was used and the results exceeded the value of that
+ // parameter. Pagination continues from the end of the previous results that
+ // returned the nextToken value. This value is null when there are no more results
+ // to return. This option cannot be used when you specify repositories with
+ // repositoryNames.
+ //
+ // This token should be treated as an opaque identifier that is only used to
+ // retrieve the next items in a list and not for other programmatic purposes.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The AWS account ID associated with the registry that contains the repositories
+ // to be described. If you do not specify a registry, the default registry is
+ // assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // A list of repositories to describe. If this parameter is omitted, then all
+ // repositories in a registry are described.
+ RepositoryNames []*string `locationName:"repositoryNames" min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeRepositoriesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRepositoriesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeRepositoriesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeRepositoriesInput"}
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+ if s.RepositoryNames != nil && len(s.RepositoryNames) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryNames", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *DescribeRepositoriesInput) SetMaxResults(v int64) *DescribeRepositoriesInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeRepositoriesInput) SetNextToken(v string) *DescribeRepositoriesInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *DescribeRepositoriesInput) SetRegistryId(v string) *DescribeRepositoriesInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryNames sets the RepositoryNames field's value.
+func (s *DescribeRepositoriesInput) SetRepositoryNames(v []*string) *DescribeRepositoriesInput {
+ s.RepositoryNames = v
+ return s
+}
+
+type DescribeRepositoriesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The nextToken value to include in a future DescribeRepositories request.
+ // When the results of a DescribeRepositories request exceed maxResults, this
+ // value can be used to retrieve the next page of results. This value is null
+ // when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // A list of repository objects corresponding to valid repositories.
+ Repositories []*Repository `locationName:"repositories" type:"list"`
+}
+
+// String returns the string representation
+func (s DescribeRepositoriesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeRepositoriesOutput) GoString() string {
+ return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *DescribeRepositoriesOutput) SetNextToken(v string) *DescribeRepositoriesOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetRepositories sets the Repositories field's value.
+func (s *DescribeRepositoriesOutput) SetRepositories(v []*Repository) *DescribeRepositoriesOutput {
+ s.Repositories = v
+ return s
+}
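+
+// Illustrative sketch (assumed svc client): the same pagination contract,
+// driven by the generated DescribeRepositoriesPages helper instead of a
+// manual loop.
+//
+//	err := svc.DescribeRepositoriesPages(&ecr.DescribeRepositoriesInput{},
+//		func(page *ecr.DescribeRepositoriesOutput, lastPage bool) bool {
+//			// ... consume page.Repositories ...
+//			return true // returning false stops pagination early
+//		})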
+
+type GetAuthorizationTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of AWS account IDs that are associated with the registries for which
+ // to get authorization tokens. If you do not specify a registry, the default
+ // registry is assumed.
+ RegistryIds []*string `locationName:"registryIds" min:"1" type:"list"`
+}
+
+// String returns the string representation
+func (s GetAuthorizationTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAuthorizationTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetAuthorizationTokenInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetAuthorizationTokenInput"}
+ if s.RegistryIds != nil && len(s.RegistryIds) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RegistryIds", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRegistryIds sets the RegistryIds field's value.
+func (s *GetAuthorizationTokenInput) SetRegistryIds(v []*string) *GetAuthorizationTokenInput {
+ s.RegistryIds = v
+ return s
+}
+
+type GetAuthorizationTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of authorization token data objects that correspond to the registryIds
+ // values in the request.
+ AuthorizationData []*AuthorizationData `locationName:"authorizationData" type:"list"`
+}
+
+// String returns the string representation
+func (s GetAuthorizationTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAuthorizationTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SetAuthorizationData sets the AuthorizationData field's value.
+func (s *GetAuthorizationTokenOutput) SetAuthorizationData(v []*AuthorizationData) *GetAuthorizationTokenOutput {
+ s.AuthorizationData = v
+ return s
+}
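+
+// Illustrative sketch (assumed svc client): each AuthorizationData entry
+// carries a base64-encoded "user:password" token for docker login against
+// its ProxyEndpoint.
+//
+//	out, err := svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
+//	// for each d in out.AuthorizationData:
+//	//	raw, _ := base64.StdEncoding.DecodeString(*d.AuthorizationToken)
+//	//	creds := strings.SplitN(string(raw), ":", 2) // [user, password]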
+
+type GetDownloadUrlForLayerInput struct {
+ _ struct{} `type:"structure"`
+
+ // The digest of the image layer to download.
+ //
+ // LayerDigest is a required field
+ LayerDigest *string `locationName:"layerDigest" type:"string" required:"true"`
+
+ // The AWS account ID associated with the registry that contains the image layer
+ // to download. If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository that is associated with the image layer to download.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetDownloadUrlForLayerInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDownloadUrlForLayerInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetDownloadUrlForLayerInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetDownloadUrlForLayerInput"}
+ if s.LayerDigest == nil {
+ invalidParams.Add(request.NewErrParamRequired("LayerDigest"))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLayerDigest sets the LayerDigest field's value.
+func (s *GetDownloadUrlForLayerInput) SetLayerDigest(v string) *GetDownloadUrlForLayerInput {
+ s.LayerDigest = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *GetDownloadUrlForLayerInput) SetRegistryId(v string) *GetDownloadUrlForLayerInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *GetDownloadUrlForLayerInput) SetRepositoryName(v string) *GetDownloadUrlForLayerInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type GetDownloadUrlForLayerOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The pre-signed Amazon S3 download URL for the requested layer.
+ DownloadUrl *string `locationName:"downloadUrl" type:"string"`
+
+ // The digest of the image layer to download.
+ LayerDigest *string `locationName:"layerDigest" type:"string"`
+}
+
+// String returns the string representation
+func (s GetDownloadUrlForLayerOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetDownloadUrlForLayerOutput) GoString() string {
+ return s.String()
+}
+
+// SetDownloadUrl sets the DownloadUrl field's value.
+func (s *GetDownloadUrlForLayerOutput) SetDownloadUrl(v string) *GetDownloadUrlForLayerOutput {
+ s.DownloadUrl = &v
+ return s
+}
+
+// SetLayerDigest sets the LayerDigest field's value.
+func (s *GetDownloadUrlForLayerOutput) SetLayerDigest(v string) *GetDownloadUrlForLayerOutput {
+ s.LayerDigest = &v
+ return s
+}
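+
+// Illustrative usage sketch (assumed svc client, placeholder repository and
+// digest): resolving a pre-signed S3 URL for a layer blob.
+//
+//	out, err := svc.GetDownloadUrlForLayer(&ecr.GetDownloadUrlForLayerInput{
+//		RepositoryName: aws.String("my-repo"),
+//		LayerDigest:    aws.String("sha256:example"),
+//	})
+//	// out.DownloadUrl can be fetched with a plain HTTP GET while the
+//	// pre-signed URL remains valid.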
+
+type GetLifecyclePolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS account ID associated with the registry that contains the repository.
+ // If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetLifecyclePolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLifecyclePolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetLifecyclePolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetLifecyclePolicyInput"}
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *GetLifecyclePolicyInput) SetRegistryId(v string) *GetLifecyclePolicyInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *GetLifecyclePolicyInput) SetRepositoryName(v string) *GetLifecyclePolicyInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type GetLifecyclePolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The time stamp of the last time that the lifecycle policy was run.
+ LastEvaluatedAt *time.Time `locationName:"lastEvaluatedAt" type:"timestamp"`
+
+ // The JSON lifecycle policy text.
+ LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
+
+ // The registry ID associated with the request.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The repository name associated with the request.
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
+}
+
+// String returns the string representation
+func (s GetLifecyclePolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLifecyclePolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetLastEvaluatedAt sets the LastEvaluatedAt field's value.
+func (s *GetLifecyclePolicyOutput) SetLastEvaluatedAt(v time.Time) *GetLifecyclePolicyOutput {
+ s.LastEvaluatedAt = &v
+ return s
+}
+
+// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
+func (s *GetLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *GetLifecyclePolicyOutput {
+ s.LifecyclePolicyText = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *GetLifecyclePolicyOutput) SetRegistryId(v string) *GetLifecyclePolicyOutput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *GetLifecyclePolicyOutput) SetRepositoryName(v string) *GetLifecyclePolicyOutput {
+ s.RepositoryName = &v
+ return s
+}
+
+type GetLifecyclePolicyPreviewInput struct {
+ _ struct{} `type:"structure"`
+
+	// An optional parameter that filters results based on whether images are
+	// TAGGED or UNTAGGED.
+ Filter *LifecyclePolicyPreviewFilter `locationName:"filter" type:"structure"`
+
+ // The list of imageIDs to be included.
+ ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"`
+
+ // The maximum number of repository results returned by GetLifecyclePolicyPreviewRequest
+ // in paginated output. When this parameter is used, GetLifecyclePolicyPreviewRequest
+ // only returns maxResults results in a single page along with a nextToken response
+ // element. The remaining results of the initial request can be seen by sending
+ // another GetLifecyclePolicyPreviewRequest request with the returned nextToken
+ // value. This value can be between 1 and 1000. If this parameter is not used,
+ // then GetLifecyclePolicyPreviewRequest returns up to 100 results and a nextToken
+ // value, if applicable. This option cannot be used when you specify images
+ // with imageIds.
+ MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
+
+ // The nextToken value returned from a previous paginated GetLifecyclePolicyPreviewRequest
+ // request where maxResults was used and the results exceeded the value of that
+ // parameter. Pagination continues from the end of the previous results that
+ // returned the nextToken value. This value is null when there are no more results
+ // to return. This option cannot be used when you specify images with imageIds.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The AWS account ID associated with the registry that contains the repository.
+ // If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetLifecyclePolicyPreviewInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLifecyclePolicyPreviewInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetLifecyclePolicyPreviewInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetLifecyclePolicyPreviewInput"}
+ if s.ImageIds != nil && len(s.ImageIds) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("ImageIds", 1))
+ }
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFilter sets the Filter field's value.
+func (s *GetLifecyclePolicyPreviewInput) SetFilter(v *LifecyclePolicyPreviewFilter) *GetLifecyclePolicyPreviewInput {
+ s.Filter = v
+ return s
+}
+
+// SetImageIds sets the ImageIds field's value.
+func (s *GetLifecyclePolicyPreviewInput) SetImageIds(v []*ImageIdentifier) *GetLifecyclePolicyPreviewInput {
+ s.ImageIds = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *GetLifecyclePolicyPreviewInput) SetMaxResults(v int64) *GetLifecyclePolicyPreviewInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *GetLifecyclePolicyPreviewInput) SetNextToken(v string) *GetLifecyclePolicyPreviewInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *GetLifecyclePolicyPreviewInput) SetRegistryId(v string) *GetLifecyclePolicyPreviewInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *GetLifecyclePolicyPreviewInput) SetRepositoryName(v string) *GetLifecyclePolicyPreviewInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type GetLifecyclePolicyPreviewOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The JSON lifecycle policy text.
+ LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
+
+ // The nextToken value to include in a future GetLifecyclePolicyPreview request.
+ // When the results of a GetLifecyclePolicyPreview request exceed maxResults,
+ // this value can be used to retrieve the next page of results. This value is
+ // null when there are no more results to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The results of the lifecycle policy preview request.
+ PreviewResults []*LifecyclePolicyPreviewResult `locationName:"previewResults" type:"list"`
+
+ // The registry ID associated with the request.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The repository name associated with the request.
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
+
+ // The status of the lifecycle policy preview request.
+ Status *string `locationName:"status" type:"string" enum:"LifecyclePolicyPreviewStatus"`
+
+	// A summary of the lifecycle policy preview request, including the number
+	// of expiring images.
+ Summary *LifecyclePolicyPreviewSummary `locationName:"summary" type:"structure"`
+}
+
+// String returns the string representation
+func (s GetLifecyclePolicyPreviewOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetLifecyclePolicyPreviewOutput) GoString() string {
+ return s.String()
+}
+
+// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
+func (s *GetLifecyclePolicyPreviewOutput) SetLifecyclePolicyText(v string) *GetLifecyclePolicyPreviewOutput {
+ s.LifecyclePolicyText = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *GetLifecyclePolicyPreviewOutput) SetNextToken(v string) *GetLifecyclePolicyPreviewOutput {
+ s.NextToken = &v
+ return s
+}
+
+// SetPreviewResults sets the PreviewResults field's value.
+func (s *GetLifecyclePolicyPreviewOutput) SetPreviewResults(v []*LifecyclePolicyPreviewResult) *GetLifecyclePolicyPreviewOutput {
+ s.PreviewResults = v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *GetLifecyclePolicyPreviewOutput) SetRegistryId(v string) *GetLifecyclePolicyPreviewOutput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *GetLifecyclePolicyPreviewOutput) SetRepositoryName(v string) *GetLifecyclePolicyPreviewOutput {
+ s.RepositoryName = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *GetLifecyclePolicyPreviewOutput) SetStatus(v string) *GetLifecyclePolicyPreviewOutput {
+ s.Status = &v
+ return s
+}
+
+// SetSummary sets the Summary field's value.
+func (s *GetLifecyclePolicyPreviewOutput) SetSummary(v *LifecyclePolicyPreviewSummary) *GetLifecyclePolicyPreviewOutput {
+ s.Summary = v
+ return s
+}
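+
+// Illustrative sketch (assumed svc client, placeholder repository): lifecycle
+// policy previews run asynchronously, so callers poll until Status leaves
+// IN_PROGRESS before reading the results.
+//
+//	in := &ecr.GetLifecyclePolicyPreviewInput{RepositoryName: aws.String("my-repo")}
+//	out, err := svc.GetLifecyclePolicyPreview(in)
+//	// retry while err == nil && *out.Status == "IN_PROGRESS",
+//	// then inspect out.PreviewResults and out.Summary.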
+
+type GetRepositoryPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS account ID associated with the registry that contains the repository.
+ // If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository with the policy to retrieve.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetRepositoryPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRepositoryPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetRepositoryPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetRepositoryPolicyInput"}
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *GetRepositoryPolicyInput) SetRegistryId(v string) *GetRepositoryPolicyInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *GetRepositoryPolicyInput) SetRepositoryName(v string) *GetRepositoryPolicyInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type GetRepositoryPolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The JSON repository policy text associated with the repository.
+ PolicyText *string `locationName:"policyText" type:"string"`
+
+ // The registry ID associated with the request.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The repository name associated with the request.
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
+}
+
+// String returns the string representation
+func (s GetRepositoryPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetRepositoryPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetPolicyText sets the PolicyText field's value.
+func (s *GetRepositoryPolicyOutput) SetPolicyText(v string) *GetRepositoryPolicyOutput {
+ s.PolicyText = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *GetRepositoryPolicyOutput) SetRegistryId(v string) *GetRepositoryPolicyOutput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *GetRepositoryPolicyOutput) SetRepositoryName(v string) *GetRepositoryPolicyOutput {
+ s.RepositoryName = &v
+ return s
+}
+
+// An object representing an Amazon ECR image.
+type Image struct {
+ _ struct{} `type:"structure"`
+
+ // An object containing the image tag and image digest associated with an image.
+ ImageId *ImageIdentifier `locationName:"imageId" type:"structure"`
+
+ // The image manifest associated with the image.
+ ImageManifest *string `locationName:"imageManifest" type:"string"`
+
+ // The AWS account ID associated with the registry containing the image.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository associated with the image.
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
+}
+
+// String returns the string representation
+func (s Image) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Image) GoString() string {
+ return s.String()
+}
+
+// SetImageId sets the ImageId field's value.
+func (s *Image) SetImageId(v *ImageIdentifier) *Image {
+ s.ImageId = v
+ return s
+}
+
+// SetImageManifest sets the ImageManifest field's value.
+func (s *Image) SetImageManifest(v string) *Image {
+ s.ImageManifest = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *Image) SetRegistryId(v string) *Image {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *Image) SetRepositoryName(v string) *Image {
+ s.RepositoryName = &v
+ return s
+}
+
+// An object that describes an image returned by a DescribeImages operation.
+type ImageDetail struct {
+ _ struct{} `type:"structure"`
+
+ // The sha256 digest of the image manifest.
+ ImageDigest *string `locationName:"imageDigest" type:"string"`
+
+ // The date and time, expressed in standard JavaScript date format, at which
+ // the current image was pushed to the repository.
+ ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp"`
+
+ // The size, in bytes, of the image in the repository.
+ //
+ // Beginning with Docker version 1.9, the Docker client compresses image layers
+ // before pushing them to a V2 Docker registry. The output of the docker images
+ // command shows the uncompressed image size, so it may return a larger image
+ // size than the image sizes returned by DescribeImages.
+ ImageSizeInBytes *int64 `locationName:"imageSizeInBytes" type:"long"`
+
+ // The list of tags associated with this image.
+ ImageTags []*string `locationName:"imageTags" type:"list"`
+
+ // The AWS account ID associated with the registry to which this image belongs.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository to which this image belongs.
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
+}
+
+// String returns the string representation
+func (s ImageDetail) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ImageDetail) GoString() string {
+ return s.String()
+}
+
+// SetImageDigest sets the ImageDigest field's value.
+func (s *ImageDetail) SetImageDigest(v string) *ImageDetail {
+ s.ImageDigest = &v
+ return s
+}
+
+// SetImagePushedAt sets the ImagePushedAt field's value.
+func (s *ImageDetail) SetImagePushedAt(v time.Time) *ImageDetail {
+ s.ImagePushedAt = &v
+ return s
+}
+
+// SetImageSizeInBytes sets the ImageSizeInBytes field's value.
+func (s *ImageDetail) SetImageSizeInBytes(v int64) *ImageDetail {
+ s.ImageSizeInBytes = &v
+ return s
+}
+
+// SetImageTags sets the ImageTags field's value.
+func (s *ImageDetail) SetImageTags(v []*string) *ImageDetail {
+ s.ImageTags = v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *ImageDetail) SetRegistryId(v string) *ImageDetail {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *ImageDetail) SetRepositoryName(v string) *ImageDetail {
+ s.RepositoryName = &v
+ return s
+}
+
+// An object representing an Amazon ECR image failure.
+type ImageFailure struct {
+ _ struct{} `type:"structure"`
+
+ // The code associated with the failure.
+ FailureCode *string `locationName:"failureCode" type:"string" enum:"ImageFailureCode"`
+
+ // The reason for the failure.
+ FailureReason *string `locationName:"failureReason" type:"string"`
+
+ // The image ID associated with the failure.
+ ImageId *ImageIdentifier `locationName:"imageId" type:"structure"`
+}
+
+// String returns the string representation
+func (s ImageFailure) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ImageFailure) GoString() string {
+ return s.String()
+}
+
+// SetFailureCode sets the FailureCode field's value.
+func (s *ImageFailure) SetFailureCode(v string) *ImageFailure {
+ s.FailureCode = &v
+ return s
+}
+
+// SetFailureReason sets the FailureReason field's value.
+func (s *ImageFailure) SetFailureReason(v string) *ImageFailure {
+ s.FailureReason = &v
+ return s
+}
+
+// SetImageId sets the ImageId field's value.
+func (s *ImageFailure) SetImageId(v *ImageIdentifier) *ImageFailure {
+ s.ImageId = v
+ return s
+}
+
+// An object with identifying information for an Amazon ECR image.
+type ImageIdentifier struct {
+ _ struct{} `type:"structure"`
+
+ // The sha256 digest of the image manifest.
+ ImageDigest *string `locationName:"imageDigest" type:"string"`
+
+ // The tag used for the image.
+ ImageTag *string `locationName:"imageTag" type:"string"`
+}
+
+// String returns the string representation
+func (s ImageIdentifier) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ImageIdentifier) GoString() string {
+ return s.String()
+}
+
+// SetImageDigest sets the ImageDigest field's value.
+func (s *ImageIdentifier) SetImageDigest(v string) *ImageIdentifier {
+ s.ImageDigest = &v
+ return s
+}
+
+// SetImageTag sets the ImageTag field's value.
+func (s *ImageIdentifier) SetImageTag(v string) *ImageIdentifier {
+ s.ImageTag = &v
+ return s
+}
+
+type InitiateLayerUploadInput struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS account ID associated with the registry to which you intend to upload
+ // layers. If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository to which you intend to upload layers.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s InitiateLayerUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InitiateLayerUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InitiateLayerUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "InitiateLayerUploadInput"}
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *InitiateLayerUploadInput) SetRegistryId(v string) *InitiateLayerUploadInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *InitiateLayerUploadInput) SetRepositoryName(v string) *InitiateLayerUploadInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type InitiateLayerUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The size, in bytes, that Amazon ECR expects future layer part uploads to
+ // be.
+ PartSize *int64 `locationName:"partSize" type:"long"`
+
+ // The upload ID for the layer upload. This parameter is passed to further UploadLayerPart
+ // and CompleteLayerUpload operations.
+ UploadId *string `locationName:"uploadId" type:"string"`
+}
+
+// String returns the string representation
+func (s InitiateLayerUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InitiateLayerUploadOutput) GoString() string {
+ return s.String()
+}
+
+// SetPartSize sets the PartSize field's value.
+func (s *InitiateLayerUploadOutput) SetPartSize(v int64) *InitiateLayerUploadOutput {
+ s.PartSize = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *InitiateLayerUploadOutput) SetUploadId(v string) *InitiateLayerUploadOutput {
+ s.UploadId = &v
+ return s
+}
+
+// An object representing an Amazon ECR image layer.
+type Layer struct {
+ _ struct{} `type:"structure"`
+
+ // The availability status of the image layer.
+ LayerAvailability *string `locationName:"layerAvailability" type:"string" enum:"LayerAvailability"`
+
+ // The sha256 digest of the image layer.
+ LayerDigest *string `locationName:"layerDigest" type:"string"`
+
+ // The size, in bytes, of the image layer.
+ LayerSize *int64 `locationName:"layerSize" type:"long"`
+
+ // The media type of the layer, such as application/vnd.docker.image.rootfs.diff.tar.gzip
+ // or application/vnd.oci.image.layer.v1.tar+gzip.
+ MediaType *string `locationName:"mediaType" type:"string"`
+}
+
+// String returns the string representation
+func (s Layer) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Layer) GoString() string {
+ return s.String()
+}
+
+// SetLayerAvailability sets the LayerAvailability field's value.
+func (s *Layer) SetLayerAvailability(v string) *Layer {
+ s.LayerAvailability = &v
+ return s
+}
+
+// SetLayerDigest sets the LayerDigest field's value.
+func (s *Layer) SetLayerDigest(v string) *Layer {
+ s.LayerDigest = &v
+ return s
+}
+
+// SetLayerSize sets the LayerSize field's value.
+func (s *Layer) SetLayerSize(v int64) *Layer {
+ s.LayerSize = &v
+ return s
+}
+
+// SetMediaType sets the MediaType field's value.
+func (s *Layer) SetMediaType(v string) *Layer {
+ s.MediaType = &v
+ return s
+}
+
+// An object representing an Amazon ECR image layer failure.
+type LayerFailure struct {
+ _ struct{} `type:"structure"`
+
+ // The failure code associated with the failure.
+ FailureCode *string `locationName:"failureCode" type:"string" enum:"LayerFailureCode"`
+
+ // The reason for the failure.
+ FailureReason *string `locationName:"failureReason" type:"string"`
+
+ // The layer digest associated with the failure.
+ LayerDigest *string `locationName:"layerDigest" type:"string"`
+}
+
+// String returns the string representation
+func (s LayerFailure) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LayerFailure) GoString() string {
+ return s.String()
+}
+
+// SetFailureCode sets the FailureCode field's value.
+func (s *LayerFailure) SetFailureCode(v string) *LayerFailure {
+ s.FailureCode = &v
+ return s
+}
+
+// SetFailureReason sets the FailureReason field's value.
+func (s *LayerFailure) SetFailureReason(v string) *LayerFailure {
+ s.FailureReason = &v
+ return s
+}
+
+// SetLayerDigest sets the LayerDigest field's value.
+func (s *LayerFailure) SetLayerDigest(v string) *LayerFailure {
+ s.LayerDigest = &v
+ return s
+}
+
+// The filter for the lifecycle policy preview.
+type LifecyclePolicyPreviewFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The tag status of the image.
+ TagStatus *string `locationName:"tagStatus" type:"string" enum:"TagStatus"`
+}
+
+// String returns the string representation
+func (s LifecyclePolicyPreviewFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecyclePolicyPreviewFilter) GoString() string {
+ return s.String()
+}
+
+// SetTagStatus sets the TagStatus field's value.
+func (s *LifecyclePolicyPreviewFilter) SetTagStatus(v string) *LifecyclePolicyPreviewFilter {
+ s.TagStatus = &v
+ return s
+}
+
+// The result of the lifecycle policy preview.
+type LifecyclePolicyPreviewResult struct {
+ _ struct{} `type:"structure"`
+
+ // The type of action to be taken.
+ Action *LifecyclePolicyRuleAction `locationName:"action" type:"structure"`
+
+ // The priority of the applied rule.
+ AppliedRulePriority *int64 `locationName:"appliedRulePriority" min:"1" type:"integer"`
+
+ // The sha256 digest of the image manifest.
+ ImageDigest *string `locationName:"imageDigest" type:"string"`
+
+ // The date and time, expressed in standard JavaScript date format, at which
+ // the current image was pushed to the repository.
+ ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp"`
+
+ // The list of tags associated with this image.
+ ImageTags []*string `locationName:"imageTags" type:"list"`
+}
+
+// String returns the string representation
+func (s LifecyclePolicyPreviewResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecyclePolicyPreviewResult) GoString() string {
+ return s.String()
+}
+
+// SetAction sets the Action field's value.
+func (s *LifecyclePolicyPreviewResult) SetAction(v *LifecyclePolicyRuleAction) *LifecyclePolicyPreviewResult {
+ s.Action = v
+ return s
+}
+
+// SetAppliedRulePriority sets the AppliedRulePriority field's value.
+func (s *LifecyclePolicyPreviewResult) SetAppliedRulePriority(v int64) *LifecyclePolicyPreviewResult {
+ s.AppliedRulePriority = &v
+ return s
+}
+
+// SetImageDigest sets the ImageDigest field's value.
+func (s *LifecyclePolicyPreviewResult) SetImageDigest(v string) *LifecyclePolicyPreviewResult {
+ s.ImageDigest = &v
+ return s
+}
+
+// SetImagePushedAt sets the ImagePushedAt field's value.
+func (s *LifecyclePolicyPreviewResult) SetImagePushedAt(v time.Time) *LifecyclePolicyPreviewResult {
+ s.ImagePushedAt = &v
+ return s
+}
+
+// SetImageTags sets the ImageTags field's value.
+func (s *LifecyclePolicyPreviewResult) SetImageTags(v []*string) *LifecyclePolicyPreviewResult {
+ s.ImageTags = v
+ return s
+}
+
+// The summary of the lifecycle policy preview request.
+type LifecyclePolicyPreviewSummary struct {
+ _ struct{} `type:"structure"`
+
+ // The number of expiring images.
+ ExpiringImageTotalCount *int64 `locationName:"expiringImageTotalCount" type:"integer"`
+}
+
+// String returns the string representation
+func (s LifecyclePolicyPreviewSummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecyclePolicyPreviewSummary) GoString() string {
+ return s.String()
+}
+
+// SetExpiringImageTotalCount sets the ExpiringImageTotalCount field's value.
+func (s *LifecyclePolicyPreviewSummary) SetExpiringImageTotalCount(v int64) *LifecyclePolicyPreviewSummary {
+ s.ExpiringImageTotalCount = &v
+ return s
+}
+
+// The type of action to be taken.
+type LifecyclePolicyRuleAction struct {
+ _ struct{} `type:"structure"`
+
+ // The type of action to be taken.
+ Type *string `locationName:"type" type:"string" enum:"ImageActionType"`
+}
+
+// String returns the string representation
+func (s LifecyclePolicyRuleAction) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecyclePolicyRuleAction) GoString() string {
+ return s.String()
+}
+
+// SetType sets the Type field's value.
+func (s *LifecyclePolicyRuleAction) SetType(v string) *LifecyclePolicyRuleAction {
+ s.Type = &v
+ return s
+}
+
+// An object representing a filter on a ListImages operation.
+type ListImagesFilter struct {
+ _ struct{} `type:"structure"`
+
+ // The tag status with which to filter your ListImages results. You can filter
+ // results based on whether they are TAGGED or UNTAGGED.
+ TagStatus *string `locationName:"tagStatus" type:"string" enum:"TagStatus"`
+}
+
+// String returns the string representation
+func (s ListImagesFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListImagesFilter) GoString() string {
+ return s.String()
+}
+
+// SetTagStatus sets the TagStatus field's value.
+func (s *ListImagesFilter) SetTagStatus(v string) *ListImagesFilter {
+ s.TagStatus = &v
+ return s
+}
+
+type ListImagesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The filter key and value with which to filter your ListImages results.
+ Filter *ListImagesFilter `locationName:"filter" type:"structure"`
+
+ // The maximum number of image results returned by ListImages in paginated output.
+ // When this parameter is used, ListImages only returns maxResults results in
+ // a single page along with a nextToken response element. The remaining results
+ // of the initial request can be seen by sending another ListImages request
+ // with the returned nextToken value. This value can be between 1 and 1000.
+ // If this parameter is not used, then ListImages returns up to 100 results
+ // and a nextToken value, if applicable.
+ MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"`
+
+ // The nextToken value returned from a previous paginated ListImages request
+ // where maxResults was used and the results exceeded the value of that parameter.
+ // Pagination continues from the end of the previous results that returned the
+ // nextToken value. This value is null when there are no more results to return.
+ //
+ // This token should be treated as an opaque identifier that is only used to
+ // retrieve the next items in a list and not for other programmatic purposes.
+ NextToken *string `locationName:"nextToken" type:"string"`
+
+ // The AWS account ID associated with the registry that contains the repository
+ // in which to list images. If you do not specify a registry, the default registry
+ // is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The repository with image IDs to be listed.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListImagesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListImagesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListImagesInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListImagesInput"}
+ if s.MaxResults != nil && *s.MaxResults < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFilter sets the Filter field's value.
+func (s *ListImagesInput) SetFilter(v *ListImagesFilter) *ListImagesInput {
+ s.Filter = v
+ return s
+}
+
+// SetMaxResults sets the MaxResults field's value.
+func (s *ListImagesInput) SetMaxResults(v int64) *ListImagesInput {
+ s.MaxResults = &v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListImagesInput) SetNextToken(v string) *ListImagesInput {
+ s.NextToken = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *ListImagesInput) SetRegistryId(v string) *ListImagesInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *ListImagesInput) SetRepositoryName(v string) *ListImagesInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type ListImagesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of image IDs for the requested repository.
+ ImageIds []*ImageIdentifier `locationName:"imageIds" min:"1" type:"list"`
+
+ // The nextToken value to include in a future ListImages request. When the results
+ // of a ListImages request exceed maxResults, this value can be used to retrieve
+ // the next page of results. This value is null when there are no more results
+ // to return.
+ NextToken *string `locationName:"nextToken" type:"string"`
+}
+
+// String returns the string representation
+func (s ListImagesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListImagesOutput) GoString() string {
+ return s.String()
+}
+
+// SetImageIds sets the ImageIds field's value.
+func (s *ListImagesOutput) SetImageIds(v []*ImageIdentifier) *ListImagesOutput {
+ s.ImageIds = v
+ return s
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListImagesOutput) SetNextToken(v string) *ListImagesOutput {
+ s.NextToken = &v
+ return s
+}
+
+type ListTagsForResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) that identifies the resource for which to
+ // list the tags. Currently, the only supported resource is an Amazon ECR repository.
+ //
+ // ResourceArn is a required field
+ ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListTagsForResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListTagsForResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"}
+ if s.ResourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput {
+ s.ResourceArn = &v
+ return s
+}
+
+type ListTagsForResourceOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The tags for the resource.
+ Tags []*Tag `locationName:"tags" type:"list"`
+}
+
+// String returns the string representation
+func (s ListTagsForResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsForResourceOutput) GoString() string {
+ return s.String()
+}
+
+// SetTags sets the Tags field's value.
+func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput {
+ s.Tags = v
+ return s
+}
+
+type PutImageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The image manifest corresponding to the image to be uploaded.
+ //
+ // ImageManifest is a required field
+ ImageManifest *string `locationName:"imageManifest" type:"string" required:"true"`
+
+ // The tag to associate with the image. This parameter is required for images
+ // that use the Docker Image Manifest V2 Schema 2 or OCI formats.
+ ImageTag *string `locationName:"imageTag" type:"string"`
+
+ // The AWS account ID associated with the registry that contains the repository
+ // in which to put the image. If you do not specify a registry, the default
+ // registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository in which to put the image.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutImageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutImageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutImageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutImageInput"}
+ if s.ImageManifest == nil {
+ invalidParams.Add(request.NewErrParamRequired("ImageManifest"))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetImageManifest sets the ImageManifest field's value.
+func (s *PutImageInput) SetImageManifest(v string) *PutImageInput {
+ s.ImageManifest = &v
+ return s
+}
+
+// SetImageTag sets the ImageTag field's value.
+func (s *PutImageInput) SetImageTag(v string) *PutImageInput {
+ s.ImageTag = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *PutImageInput) SetRegistryId(v string) *PutImageInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *PutImageInput) SetRepositoryName(v string) *PutImageInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type PutImageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Details of the image uploaded.
+ Image *Image `locationName:"image" type:"structure"`
+}
+
+// String returns the string representation
+func (s PutImageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutImageOutput) GoString() string {
+ return s.String()
+}
+
+// SetImage sets the Image field's value.
+func (s *PutImageOutput) SetImage(v *Image) *PutImageOutput {
+ s.Image = v
+ return s
+}
+
+type PutLifecyclePolicyInput struct {
+ _ struct{} `type:"structure"`
+
+	// The JSON lifecycle policy text to apply to the repository.
+ //
+ // LifecyclePolicyText is a required field
+ LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string" required:"true"`
+
+ // The AWS account ID associated with the registry that contains the repository.
+ // If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository to receive the policy.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutLifecyclePolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutLifecyclePolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutLifecyclePolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutLifecyclePolicyInput"}
+ if s.LifecyclePolicyText == nil {
+ invalidParams.Add(request.NewErrParamRequired("LifecyclePolicyText"))
+ }
+ if s.LifecyclePolicyText != nil && len(*s.LifecyclePolicyText) < 100 {
+ invalidParams.Add(request.NewErrParamMinLen("LifecyclePolicyText", 100))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
+func (s *PutLifecyclePolicyInput) SetLifecyclePolicyText(v string) *PutLifecyclePolicyInput {
+ s.LifecyclePolicyText = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *PutLifecyclePolicyInput) SetRegistryId(v string) *PutLifecyclePolicyInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *PutLifecyclePolicyInput) SetRepositoryName(v string) *PutLifecyclePolicyInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type PutLifecyclePolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+	// The JSON lifecycle policy text.
+ LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
+
+ // The registry ID associated with the request.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The repository name associated with the request.
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
+}
+
+// String returns the string representation
+func (s PutLifecyclePolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutLifecyclePolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
+func (s *PutLifecyclePolicyOutput) SetLifecyclePolicyText(v string) *PutLifecyclePolicyOutput {
+ s.LifecyclePolicyText = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *PutLifecyclePolicyOutput) SetRegistryId(v string) *PutLifecyclePolicyOutput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *PutLifecyclePolicyOutput) SetRepositoryName(v string) *PutLifecyclePolicyOutput {
+ s.RepositoryName = &v
+ return s
+}
+
+// An object representing a repository.
+type Repository struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time, in JavaScript date format, when the repository was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
+
+ // The AWS account ID associated with the registry that contains the repository.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The Amazon Resource Name (ARN) that identifies the repository. The ARN contains
+ // the arn:aws:ecr namespace, followed by the region of the repository, AWS
+ // account ID of the repository owner, repository namespace, and repository
+ // name. For example, arn:aws:ecr:region:012345678910:repository/test.
+ RepositoryArn *string `locationName:"repositoryArn" type:"string"`
+
+ // The name of the repository.
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
+
+ // The URI for the repository. You can use this URI for Docker push or pull
+ // operations.
+ RepositoryUri *string `locationName:"repositoryUri" type:"string"`
+}
+
+// String returns the string representation
+func (s Repository) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Repository) GoString() string {
+ return s.String()
+}
+
+// SetCreatedAt sets the CreatedAt field's value.
+func (s *Repository) SetCreatedAt(v time.Time) *Repository {
+ s.CreatedAt = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *Repository) SetRegistryId(v string) *Repository {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryArn sets the RepositoryArn field's value.
+func (s *Repository) SetRepositoryArn(v string) *Repository {
+ s.RepositoryArn = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *Repository) SetRepositoryName(v string) *Repository {
+ s.RepositoryName = &v
+ return s
+}
+
+// SetRepositoryUri sets the RepositoryUri field's value.
+func (s *Repository) SetRepositoryUri(v string) *Repository {
+ s.RepositoryUri = &v
+ return s
+}
+
+type SetRepositoryPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+	// If the policy you are attempting to set on a repository would prevent you
+	// from setting another policy in the future, you must force the SetRepositoryPolicy
+	// operation. This is intended to prevent accidental repository lockouts.
+ Force *bool `locationName:"force" type:"boolean"`
+
+ // The JSON repository policy text to apply to the repository.
+ //
+ // PolicyText is a required field
+ PolicyText *string `locationName:"policyText" type:"string" required:"true"`
+
+ // The AWS account ID associated with the registry that contains the repository.
+ // If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository to receive the policy.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SetRepositoryPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetRepositoryPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SetRepositoryPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "SetRepositoryPolicyInput"}
+ if s.PolicyText == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicyText"))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetForce sets the Force field's value.
+func (s *SetRepositoryPolicyInput) SetForce(v bool) *SetRepositoryPolicyInput {
+ s.Force = &v
+ return s
+}
+
+// SetPolicyText sets the PolicyText field's value.
+func (s *SetRepositoryPolicyInput) SetPolicyText(v string) *SetRepositoryPolicyInput {
+ s.PolicyText = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *SetRepositoryPolicyInput) SetRegistryId(v string) *SetRepositoryPolicyInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *SetRepositoryPolicyInput) SetRepositoryName(v string) *SetRepositoryPolicyInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type SetRepositoryPolicyOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The JSON repository policy text applied to the repository.
+ PolicyText *string `locationName:"policyText" type:"string"`
+
+ // The registry ID associated with the request.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The repository name associated with the request.
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
+}
+
+// String returns the string representation
+func (s SetRepositoryPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SetRepositoryPolicyOutput) GoString() string {
+ return s.String()
+}
+
+// SetPolicyText sets the PolicyText field's value.
+func (s *SetRepositoryPolicyOutput) SetPolicyText(v string) *SetRepositoryPolicyOutput {
+ s.PolicyText = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *SetRepositoryPolicyOutput) SetRegistryId(v string) *SetRepositoryPolicyOutput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *SetRepositoryPolicyOutput) SetRepositoryName(v string) *SetRepositoryPolicyOutput {
+ s.RepositoryName = &v
+ return s
+}
+
+type StartLifecyclePolicyPreviewInput struct {
+ _ struct{} `type:"structure"`
+
+ // The policy to be evaluated against. If you do not specify a policy, the current
+ // policy for the repository is used.
+ LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
+
+ // The AWS account ID associated with the registry that contains the repository.
+ // If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository to be evaluated.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s StartLifecyclePolicyPreviewInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartLifecyclePolicyPreviewInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StartLifecyclePolicyPreviewInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "StartLifecyclePolicyPreviewInput"}
+ if s.LifecyclePolicyText != nil && len(*s.LifecyclePolicyText) < 100 {
+ invalidParams.Add(request.NewErrParamMinLen("LifecyclePolicyText", 100))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
+func (s *StartLifecyclePolicyPreviewInput) SetLifecyclePolicyText(v string) *StartLifecyclePolicyPreviewInput {
+ s.LifecyclePolicyText = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *StartLifecyclePolicyPreviewInput) SetRegistryId(v string) *StartLifecyclePolicyPreviewInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *StartLifecyclePolicyPreviewInput) SetRepositoryName(v string) *StartLifecyclePolicyPreviewInput {
+ s.RepositoryName = &v
+ return s
+}
+
+type StartLifecyclePolicyPreviewOutput struct {
+ _ struct{} `type:"structure"`
+
+	// The JSON lifecycle policy text.
+ LifecyclePolicyText *string `locationName:"lifecyclePolicyText" min:"100" type:"string"`
+
+ // The registry ID associated with the request.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The repository name associated with the request.
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
+
+ // The status of the lifecycle policy preview request.
+ Status *string `locationName:"status" type:"string" enum:"LifecyclePolicyPreviewStatus"`
+}
+
+// String returns the string representation
+func (s StartLifecyclePolicyPreviewOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StartLifecyclePolicyPreviewOutput) GoString() string {
+ return s.String()
+}
+
+// SetLifecyclePolicyText sets the LifecyclePolicyText field's value.
+func (s *StartLifecyclePolicyPreviewOutput) SetLifecyclePolicyText(v string) *StartLifecyclePolicyPreviewOutput {
+ s.LifecyclePolicyText = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *StartLifecyclePolicyPreviewOutput) SetRegistryId(v string) *StartLifecyclePolicyPreviewOutput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *StartLifecyclePolicyPreviewOutput) SetRepositoryName(v string) *StartLifecyclePolicyPreviewOutput {
+ s.RepositoryName = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *StartLifecyclePolicyPreviewOutput) SetStatus(v string) *StartLifecyclePolicyPreviewOutput {
+ s.Status = &v
+ return s
+}
+
+// The metadata that you apply to a resource to help you categorize and organize
+// your resources. Each tag consists of a key and an optional value, both of
+// which you define. Tag keys can have a maximum length of 128 characters, and
+// tag values can have a maximum length of 256 characters.
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+	// One part of a key-value pair that makes up a tag. A key is a general label
+	// that acts like a category for more specific tag values.
+	Key *string `type:"string"`
+
+	// The optional part of a key-value pair that makes up a tag. A value acts as
+	// a descriptor within a tag category (key).
+	Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// SetKey sets the Key field's value.
+func (s *Tag) SetKey(v string) *Tag {
+ s.Key = &v
+ return s
+}
+
+// SetValue sets the Value field's value.
+func (s *Tag) SetValue(v string) *Tag {
+ s.Value = &v
+ return s
+}
+
+type TagResourceInput struct {
+ _ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the resource to which to add tags.
+ // Currently, the only supported resource is an Amazon ECR repository.
+ //
+ // ResourceArn is a required field
+ ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"`
+
+	// The tags to add to the resource. Each tag is a key-value pair. Tag keys
+	// can have a maximum length of 128 characters, and tag values can have a
+	// maximum length of 256 characters.
+ //
+ // Tags is a required field
+ Tags []*Tag `locationName:"tags" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s TagResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TagResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TagResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"}
+ if s.ResourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+ }
+ if s.Tags == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tags"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput {
+ s.ResourceArn = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput {
+ s.Tags = v
+ return s
+}
+
+type TagResourceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s TagResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TagResourceOutput) GoString() string {
+ return s.String()
+}
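+
+// Illustrative sketch, not part of the generated API: tagging a repository by
+// ARN. The svc client and ARN value are assumptions for the example.
+//
+//	_, err := svc.TagResource(&ecr.TagResourceInput{
+//		ResourceArn: aws.String("arn:aws:ecr:us-west-2:123456789012:repository/my-repo"),
+//		Tags: []*ecr.Tag{
+//			{Key: aws.String("team"), Value: aws.String("platform")},
+//		},
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}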
+
+type UntagResourceInput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the resource from which to remove tags.
+ // Currently, the only supported resource is an Amazon ECR repository.
+ //
+ // ResourceArn is a required field
+ ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"`
+
+ // The keys of the tags to be removed.
+ //
+ // TagKeys is a required field
+ TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s UntagResourceInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UntagResourceInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UntagResourceInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
+ if s.ResourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+ }
+ if s.TagKeys == nil {
+ invalidParams.Add(request.NewErrParamRequired("TagKeys"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput {
+ s.ResourceArn = &v
+ return s
+}
+
+// SetTagKeys sets the TagKeys field's value.
+func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput {
+ s.TagKeys = v
+ return s
+}
+
+type UntagResourceOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UntagResourceOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UntagResourceOutput) GoString() string {
+ return s.String()
+}
+
+type UploadLayerPartInput struct {
+ _ struct{} `type:"structure"`
+
+ // The base64-encoded layer part payload.
+ //
+ // LayerPartBlob is automatically base64 encoded/decoded by the SDK.
+ //
+ // LayerPartBlob is a required field
+ LayerPartBlob []byte `locationName:"layerPartBlob" type:"blob" required:"true"`
+
+ // The integer value of the first byte of the layer part.
+ //
+ // PartFirstByte is a required field
+ PartFirstByte *int64 `locationName:"partFirstByte" type:"long" required:"true"`
+
+ // The integer value of the last byte of the layer part.
+ //
+ // PartLastByte is a required field
+ PartLastByte *int64 `locationName:"partLastByte" type:"long" required:"true"`
+
+ // The AWS account ID associated with the registry to which you are uploading
+ // layer parts. If you do not specify a registry, the default registry is assumed.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The name of the repository to which you are uploading layer parts.
+ //
+ // RepositoryName is a required field
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"`
+
+ // The upload ID from a previous InitiateLayerUpload operation to associate
+ // with the layer part upload.
+ //
+ // UploadId is a required field
+ UploadId *string `locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadLayerPartInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadLayerPartInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadLayerPartInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UploadLayerPartInput"}
+ if s.LayerPartBlob == nil {
+ invalidParams.Add(request.NewErrParamRequired("LayerPartBlob"))
+ }
+ if s.PartFirstByte == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartFirstByte"))
+ }
+ if s.PartLastByte == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartLastByte"))
+ }
+ if s.RepositoryName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RepositoryName"))
+ }
+ if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetLayerPartBlob sets the LayerPartBlob field's value.
+func (s *UploadLayerPartInput) SetLayerPartBlob(v []byte) *UploadLayerPartInput {
+ s.LayerPartBlob = v
+ return s
+}
+
+// SetPartFirstByte sets the PartFirstByte field's value.
+func (s *UploadLayerPartInput) SetPartFirstByte(v int64) *UploadLayerPartInput {
+ s.PartFirstByte = &v
+ return s
+}
+
+// SetPartLastByte sets the PartLastByte field's value.
+func (s *UploadLayerPartInput) SetPartLastByte(v int64) *UploadLayerPartInput {
+ s.PartLastByte = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *UploadLayerPartInput) SetRegistryId(v string) *UploadLayerPartInput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *UploadLayerPartInput) SetRepositoryName(v string) *UploadLayerPartInput {
+ s.RepositoryName = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *UploadLayerPartInput) SetUploadId(v string) *UploadLayerPartInput {
+ s.UploadId = &v
+ return s
+}
+
+type UploadLayerPartOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The integer value of the last byte received in the request.
+ LastByteReceived *int64 `locationName:"lastByteReceived" type:"long"`
+
+ // The registry ID associated with the request.
+ RegistryId *string `locationName:"registryId" type:"string"`
+
+ // The repository name associated with the request.
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
+
+ // The upload ID associated with the request.
+ UploadId *string `locationName:"uploadId" type:"string"`
+}
+
+// String returns the string representation
+func (s UploadLayerPartOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadLayerPartOutput) GoString() string {
+ return s.String()
+}
+
+// SetLastByteReceived sets the LastByteReceived field's value.
+func (s *UploadLayerPartOutput) SetLastByteReceived(v int64) *UploadLayerPartOutput {
+ s.LastByteReceived = &v
+ return s
+}
+
+// SetRegistryId sets the RegistryId field's value.
+func (s *UploadLayerPartOutput) SetRegistryId(v string) *UploadLayerPartOutput {
+ s.RegistryId = &v
+ return s
+}
+
+// SetRepositoryName sets the RepositoryName field's value.
+func (s *UploadLayerPartOutput) SetRepositoryName(v string) *UploadLayerPartOutput {
+ s.RepositoryName = &v
+ return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *UploadLayerPartOutput) SetUploadId(v string) *UploadLayerPartOutput {
+ s.UploadId = &v
+ return s
+}
+
+const (
+ // ImageActionTypeExpire is a ImageActionType enum value
+ ImageActionTypeExpire = "EXPIRE"
+)
+
+const (
+ // ImageFailureCodeInvalidImageDigest is a ImageFailureCode enum value
+ ImageFailureCodeInvalidImageDigest = "InvalidImageDigest"
+
+ // ImageFailureCodeInvalidImageTag is a ImageFailureCode enum value
+ ImageFailureCodeInvalidImageTag = "InvalidImageTag"
+
+ // ImageFailureCodeImageTagDoesNotMatchDigest is a ImageFailureCode enum value
+ ImageFailureCodeImageTagDoesNotMatchDigest = "ImageTagDoesNotMatchDigest"
+
+ // ImageFailureCodeImageNotFound is a ImageFailureCode enum value
+ ImageFailureCodeImageNotFound = "ImageNotFound"
+
+ // ImageFailureCodeMissingDigestAndTag is a ImageFailureCode enum value
+ ImageFailureCodeMissingDigestAndTag = "MissingDigestAndTag"
+)
+
+const (
+ // LayerAvailabilityAvailable is a LayerAvailability enum value
+ LayerAvailabilityAvailable = "AVAILABLE"
+
+ // LayerAvailabilityUnavailable is a LayerAvailability enum value
+ LayerAvailabilityUnavailable = "UNAVAILABLE"
+)
+
+const (
+ // LayerFailureCodeInvalidLayerDigest is a LayerFailureCode enum value
+ LayerFailureCodeInvalidLayerDigest = "InvalidLayerDigest"
+
+ // LayerFailureCodeMissingLayerDigest is a LayerFailureCode enum value
+ LayerFailureCodeMissingLayerDigest = "MissingLayerDigest"
+)
+
+const (
+ // LifecyclePolicyPreviewStatusInProgress is a LifecyclePolicyPreviewStatus enum value
+ LifecyclePolicyPreviewStatusInProgress = "IN_PROGRESS"
+
+ // LifecyclePolicyPreviewStatusComplete is a LifecyclePolicyPreviewStatus enum value
+ LifecyclePolicyPreviewStatusComplete = "COMPLETE"
+
+ // LifecyclePolicyPreviewStatusExpired is a LifecyclePolicyPreviewStatus enum value
+ LifecyclePolicyPreviewStatusExpired = "EXPIRED"
+
+ // LifecyclePolicyPreviewStatusFailed is a LifecyclePolicyPreviewStatus enum value
+ LifecyclePolicyPreviewStatusFailed = "FAILED"
+)
+
+const (
+ // TagStatusTagged is a TagStatus enum value
+ TagStatusTagged = "TAGGED"
+
+ // TagStatusUntagged is a TagStatus enum value
+ TagStatusUntagged = "UNTAGGED"
+
+ // TagStatusAny is a TagStatus enum value
+ TagStatusAny = "ANY"
+)
diff --git a/src/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go b/src/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go
new file mode 100644
index 000000000..d970974bc
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go
@@ -0,0 +1,33 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package ecr provides the client and types for making API
+// requests to Amazon EC2 Container Registry.
+//
+// Amazon Elastic Container Registry (Amazon ECR) is a managed Docker registry
+// service. Customers can use the familiar Docker CLI to push, pull, and manage
+// images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon
+// ECR supports private Docker repositories with resource-based permissions
+// using IAM so that specific users or Amazon EC2 instances can access repositories
+// and images. Developers can use the Docker CLI to author and manage images.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21 for more information on this service.
+//
+// See ecr package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/ecr/
+//
+// Using the Client
+//
+// To contact Amazon EC2 Container Registry with the SDK, use the New function to
+// create a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon EC2 Container Registry client ECR for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/ecr/#New
+package ecr
diff --git a/src/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go b/src/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go
new file mode 100644
index 000000000..834905106
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go
@@ -0,0 +1,155 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package ecr
+
+const (
+
+ // ErrCodeEmptyUploadException for service response error code
+ // "EmptyUploadException".
+ //
+ // The specified layer upload does not contain any layer parts.
+ ErrCodeEmptyUploadException = "EmptyUploadException"
+
+ // ErrCodeImageAlreadyExistsException for service response error code
+ // "ImageAlreadyExistsException".
+ //
+ // The specified image has already been pushed, and there were no changes to
+ // the manifest or image tag after the last push.
+ ErrCodeImageAlreadyExistsException = "ImageAlreadyExistsException"
+
+ // ErrCodeImageNotFoundException for service response error code
+ // "ImageNotFoundException".
+ //
+ // The image requested does not exist in the specified repository.
+ ErrCodeImageNotFoundException = "ImageNotFoundException"
+
+ // ErrCodeInvalidLayerException for service response error code
+ // "InvalidLayerException".
+ //
+ // The layer digest calculation performed by Amazon ECR upon receipt of the
+ // image layer does not match the digest specified.
+ ErrCodeInvalidLayerException = "InvalidLayerException"
+
+ // ErrCodeInvalidLayerPartException for service response error code
+ // "InvalidLayerPartException".
+ //
+	// The layer part size is not valid, or the first byte specified does not
+	// immediately follow the last byte of the previous layer part upload.
+ ErrCodeInvalidLayerPartException = "InvalidLayerPartException"
+
+ // ErrCodeInvalidParameterException for service response error code
+ // "InvalidParameterException".
+ //
+ // The specified parameter is invalid. Review the available parameters for the
+ // API request.
+ ErrCodeInvalidParameterException = "InvalidParameterException"
+
+ // ErrCodeInvalidTagParameterException for service response error code
+ // "InvalidTagParameterException".
+ //
+	// An invalid parameter has been specified. Tag keys can have a maximum length
+	// of 128 characters, and tag values can have a maximum length of 256 characters.
+ ErrCodeInvalidTagParameterException = "InvalidTagParameterException"
+
+ // ErrCodeLayerAlreadyExistsException for service response error code
+ // "LayerAlreadyExistsException".
+ //
+ // The image layer already exists in the associated repository.
+ ErrCodeLayerAlreadyExistsException = "LayerAlreadyExistsException"
+
+ // ErrCodeLayerInaccessibleException for service response error code
+ // "LayerInaccessibleException".
+ //
+ // The specified layer is not available because it is not associated with an
+ // image. Unassociated image layers may be cleaned up at any time.
+ ErrCodeLayerInaccessibleException = "LayerInaccessibleException"
+
+ // ErrCodeLayerPartTooSmallException for service response error code
+ // "LayerPartTooSmallException".
+ //
+ // Layer parts must be at least 5 MiB in size.
+ ErrCodeLayerPartTooSmallException = "LayerPartTooSmallException"
+
+ // ErrCodeLayersNotFoundException for service response error code
+ // "LayersNotFoundException".
+ //
+ // The specified layers could not be found, or the specified layer is not valid
+ // for this repository.
+ ErrCodeLayersNotFoundException = "LayersNotFoundException"
+
+ // ErrCodeLifecyclePolicyNotFoundException for service response error code
+ // "LifecyclePolicyNotFoundException".
+ //
+	// The lifecycle policy could not be found, and no policy is set on the repository.
+ ErrCodeLifecyclePolicyNotFoundException = "LifecyclePolicyNotFoundException"
+
+ // ErrCodeLifecyclePolicyPreviewInProgressException for service response error code
+ // "LifecyclePolicyPreviewInProgressException".
+ //
+ // The previous lifecycle policy preview request has not completed. Please try
+ // again later.
+ ErrCodeLifecyclePolicyPreviewInProgressException = "LifecyclePolicyPreviewInProgressException"
+
+ // ErrCodeLifecyclePolicyPreviewNotFoundException for service response error code
+ // "LifecyclePolicyPreviewNotFoundException".
+ //
+ // There is no dry run for this repository.
+ ErrCodeLifecyclePolicyPreviewNotFoundException = "LifecyclePolicyPreviewNotFoundException"
+
+ // ErrCodeLimitExceededException for service response error code
+ // "LimitExceededException".
+ //
+ // The operation did not succeed because it would have exceeded a service limit
+ // for your account. For more information, see Amazon ECR Default Service Limits
+ // (http://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html)
+ // in the Amazon Elastic Container Registry User Guide.
+ ErrCodeLimitExceededException = "LimitExceededException"
+
+ // ErrCodeRepositoryAlreadyExistsException for service response error code
+ // "RepositoryAlreadyExistsException".
+ //
+ // The specified repository already exists in the specified registry.
+ ErrCodeRepositoryAlreadyExistsException = "RepositoryAlreadyExistsException"
+
+ // ErrCodeRepositoryNotEmptyException for service response error code
+ // "RepositoryNotEmptyException".
+ //
+ // The specified repository contains images. To delete a repository that contains
+ // images, you must force the deletion with the force parameter.
+ ErrCodeRepositoryNotEmptyException = "RepositoryNotEmptyException"
+
+ // ErrCodeRepositoryNotFoundException for service response error code
+ // "RepositoryNotFoundException".
+ //
+ // The specified repository could not be found. Check the spelling of the specified
+ // repository and ensure that you are performing operations on the correct registry.
+ ErrCodeRepositoryNotFoundException = "RepositoryNotFoundException"
+
+ // ErrCodeRepositoryPolicyNotFoundException for service response error code
+ // "RepositoryPolicyNotFoundException".
+ //
+ // The specified repository and registry combination does not have an associated
+ // repository policy.
+ ErrCodeRepositoryPolicyNotFoundException = "RepositoryPolicyNotFoundException"
+
+ // ErrCodeServerException for service response error code
+ // "ServerException".
+ //
+ // These errors are usually caused by a server-side issue.
+ ErrCodeServerException = "ServerException"
+
+ // ErrCodeTooManyTagsException for service response error code
+ // "TooManyTagsException".
+ //
+ // The list of tags on the repository is over the limit. The maximum number
+ // of tags that can be applied to a repository is 50.
+ ErrCodeTooManyTagsException = "TooManyTagsException"
+
+ // ErrCodeUploadNotFoundException for service response error code
+ // "UploadNotFoundException".
+ //
+	// The upload could not be found, or the specified upload ID is not valid for
+	// this repository.
+ ErrCodeUploadNotFoundException = "UploadNotFoundException"
+)
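+
+// Illustrative sketch, not part of the generated API: matching these error
+// codes with the SDK's awserr package. The svc client and input value are
+// assumptions for the example.
+//
+//	out, err := svc.DescribeRepositories(input)
+//	if err != nil {
+//		if aerr, ok := err.(awserr.Error); ok {
+//			switch aerr.Code() {
+//			case ecr.ErrCodeRepositoryNotFoundException:
+//				// the repository does not exist in this registry
+//			case ecr.ErrCodeServerException:
+//				// server-side failure; usually safe to retry
+//			default:
+//				log.Println(aerr.Error())
+//			}
+//		}
+//	}
+//	_ = out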
diff --git a/src/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go b/src/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go
new file mode 100644
index 000000000..3eba7f696
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go
@@ -0,0 +1,100 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package ecr
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
+)
+
+// ECR provides the API operation methods for making requests to
+// Amazon EC2 Container Registry. See this package's package overview docs
+// for details on the service.
+//
+// ECR methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type ECR struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "ecr" // Name of service.
+ EndpointsID = "api.ecr" // ID to lookup a service endpoint with.
+	ServiceID = "ECR" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the ECR client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an ECR client from just a session.
+//     svc := ecr.New(mySession)
+//
+//     // Create an ECR client with additional configuration
+//     svc := ecr.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECR {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ if c.SigningNameDerived || len(c.SigningName) == 0 {
+ c.SigningName = "ecr"
+ }
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ECR {
+ svc := &ECR{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2015-09-21",
+ JSONVersion: "1.1",
+ TargetPrefix: "AmazonEC2ContainerRegistry_V20150921",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for an ECR operation and runs any
+// custom request initialization.
+func (c *ECR) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
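+
+// Example (an illustrative sketch, not part of the generated API): constructing
+// an ECR client from a shared session and requesting a registry authorization
+// token. The region value is an assumption for the example.
+//
+//    sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
+//    svc := ecr.New(sess)
+//    out, err := svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
+//    if err == nil {
+//        fmt.Println(out.AuthorizationData)
+//    }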
diff --git a/src/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/src/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
new file mode 100644
index 000000000..9e610591a
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -0,0 +1,2580 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAssumeRole = "AssumeRole"
+
+// AssumeRoleRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRole operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRole for more information on using the AssumeRole
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleRequest method.
+// req, resp := client.AssumeRoleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
+ op := &request.Operation{
+ Name: opAssumeRole,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleInput{}
+ }
+
+ output = &AssumeRoleOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// AssumeRole API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials that you can use to access
+// AWS resources that you might not normally have access to. These temporary
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use AssumeRole within your account or for cross-account
+// access. For a comparison of AssumeRole with other API operations that produce
+// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// You cannot use AWS account root user credentials to call AssumeRole. You
+// must use credentials for an IAM user or an IAM role to call AssumeRole.
+//
+// For cross-account access, imagine that you own multiple accounts and need
+// to access resources in each account. You could create long-term credentials
+// in each account to access those resources. However, managing all those credentials
+// and remembering which one can access which account can be time consuming.
+// Instead, you can create one set of long-term credentials in one account.
+// Then use temporary security credentials to access all the other accounts
+// by assuming roles in those accounts. For more information about roles, see
+// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)
+// in the IAM User Guide.
+//
+// By default, the temporary security credentials created by AssumeRole last
+// for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any AWS service with the following exception: You cannot call
+// the AWS STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// AWS API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed
+// by the identity-based policy of the role that is being assumed. For more
+// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide.
+//
+// To assume a role from a different account, your AWS account must be trusted
+// by the role. The trust relationship is defined in the role's trust policy
+// when the role is created. That trust policy states which accounts are allowed
+// to delegate that access to users in the account.
+//
+// A user who wants to access a role in a different account must also have permissions
+// that are delegated from the user account administrator. The administrator
+// must attach a policy that allows the user to call AssumeRole for the ARN
+// of the role in the other account. If the user is in the same account as the
+// role, then you can do either of the following:
+//
+// * Attach a policy to the user (identical to the previous user in a different
+// account).
+//
+// * Add the user as a principal directly in the role's trust policy.
+//
+// In this case, the trust policy acts as an IAM resource-based policy. Users
+// in the same account as the role do not need explicit permission to assume
+// the role. For more information about trust policies and resource-based policies,
+// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// in the IAM User Guide.
+//
+// Using MFA with AssumeRole
+//
+// (Optional) You can include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios to ensure
+// that the user that assumes the role has been authenticated with an AWS MFA
+// device. In that scenario, the trust policy of the role being assumed includes
+// a condition that tests for MFA authentication. If the caller does not include
+// valid MFA information, the request to assume the role is denied. The condition
+// in a trust policy that tests for MFA authentication might look like the following
+// example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRole for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+ req, out := c.AssumeRoleRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
+ req, out := c.AssumeRoleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
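+
+// A minimal sketch (illustrative only) of calling AssumeRole with the MFA
+// parameters described above; the role ARN, MFA serial number, and token code
+// are placeholder values.
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
+//        RoleSessionName: aws.String("example-session"),
+//        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/example-user"),
+//        TokenCode:       aws.String("123456"),
+//    })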
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithSAML operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleWithSAMLRequest method.
+// req, resp := client.AssumeRoleWithSAMLRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
+ op := &request.Operation{
+ Name: opAssumeRoleWithSAML,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleWithSAMLInput{}
+ }
+
+ output = &AssumeRoleWithSAMLOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// AssumeRoleWithSAML API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based AWS access
+// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
+// with the other API operations that produce temporary credentials, see Requesting
+// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token. Applications
+// can use these temporary security credentials to sign calls to AWS services.
+//
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
+// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
+// duration setting for the role. This setting can have a value from 1 hour
+// to 12 hours. To learn how to view the maximum value for your role, see View
+// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used
+// to make API calls to any AWS service with the following exception: you cannot
+// call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// AWS API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed
+// by the identity-based policy of the role that is being assumed. For more
+// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithSAML, you must configure your
+// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
+// you must use AWS Identity and Access Management (IAM) to create a SAML provider
+// entity in your AWS account that represents your identity provider. You must
+// also create an IAM role that specifies this SAML provider in its trust policy.
+//
+// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
+// The identity of the caller is validated by using keys in the metadata document
+// that is uploaded for the SAML provider entity for your identity provider.
+//
+// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
+// logs. The entry includes the value in the NameID element of the SAML assertion.
+// We recommend that you use a NameIDType that is not associated with any personally
+// identifiable information (PII). For example, you could instead use the Persistent
+// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
+//
+// For more information, see the following resources:
+//
+// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+// in the IAM User Guide.
+//
+// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
+// in the IAM User Guide.
+//
+// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
+// in the IAM User Guide.
+//
+// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithSAML for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+// The identity provider (IdP) reported that authentication failed. This might
+// be because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+// The web identity token that was passed could not be validated by AWS. Get
+// a new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
+ req, out := c.AssumeRoleWithSAMLRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithSAML for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
+ req, out := c.AssumeRoleWithSAMLRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
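+
+// A minimal sketch (illustrative only) of exchanging a SAML assertion for
+// temporary credentials; the ARNs are placeholders and samlAssertion is a
+// hypothetical base64-encoded assertion obtained from the IdP.
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
+//        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"),
+//        RoleArn:       aws.String("arn:aws:iam::123456789012:role/example"),
+//        SAMLAssertion: aws.String(samlAssertion),
+//    })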
+
+const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
+
+// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
+// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+ op := &request.Operation{
+ Name: opAssumeRoleWithWebIdentity,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &AssumeRoleWithWebIdentityInput{}
+ }
+
+ output = &AssumeRoleWithWebIdentityOutput{}
+ req = c.newRequest(op, input, output)
+ req.Config.Credentials = credentials.AnonymousCredentials
+ return
+}
+
+// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider. Example providers
+// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID
+// Connect-compatible identity provider.
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/)
+// to uniquely identify a user. You can also supply the user with a consistent
+// identity throughout the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// credentials. Therefore, you can distribute an application (for example, on
+// mobile devices) that requests temporary security credentials without including
+// long-term AWS credentials in the application. You also don't need to deploy
+// server-based proxy services that use long-term AWS credentials. Instead,
+// the identity of the caller is validated by using a token from the web identity
+// provider. For a comparison of AssumeRoleWithWebIdentity with the other API
+// operations that produce temporary credentials, see Requesting Temporary Security
+// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to AWS service API operations.
+//
+// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can
+// be used to make API calls to any AWS service with the following exception:
+// you cannot call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// AWS API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed
+// by the identity-based policy of the role that is being assumed. For more
+// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithWebIdentity, you must have
+// an identity token from a supported identity provider and create a role that
+// the application can assume. The role that your application assumes must trust
+// the identity provider that is associated with the identity token. In other
+// words, the identity provider must be specified in the role's trust policy.
+//
+// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
+// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
+// of the provided Web Identity Token. We recommend that you avoid using any
+// personally identifiable information (PII) in this field. For example, you
+// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
+// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
+//
+// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
+// API, see the following resources:
+//
+// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
+// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+// Walk through the process of authenticating through Login with Amazon,
+// Facebook, or Google, getting temporary security credentials, and then
+// using those credentials to make a request to AWS.
+//
+// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and
+// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/).
+// These toolkits contain sample apps that show how to invoke the identity
+// providers, and then how to use the information from these providers to
+// get and use temporary security credentials.
+//
+// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
+// This article discusses web identity federation and shows an example of
+// how to use web identity federation to get access to content in Amazon
+// S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithWebIdentity for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+// The identity provider (IdP) reported that authentication failed. This might
+// be because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
+// The request could not be fulfilled because the non-AWS identity provider
+// (IdP) that was asked to verify the incoming identity token could not be reached.
+// This is often a transient error caused by network conditions. Retry the request
+// a limited number of times so that you don't exceed the request rate. If the
+// error persists, the non-AWS identity provider might be down or not responding.
+//
+// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+// The web identity token that was passed could not be validated by AWS. Get
+// a new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
+ req, out := c.AssumeRoleWithWebIdentityRequest(input)
+ return out, req.Send()
+}
+
+// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithWebIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
+ req, out := c.AssumeRoleWithWebIdentityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
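+
+// A minimal sketch (illustrative only) of assuming a role with a web identity
+// token; the role ARN is a placeholder and oidcToken is a hypothetical token
+// obtained from the identity provider.
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"),
+//        RoleSessionName:  aws.String("web-identity-session"),
+//        WebIdentityToken: aws.String(oidcToken),
+//    })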
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
+// client's request for the DecodeAuthorizationMessage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DecodeAuthorizationMessageRequest method.
+// req, resp := client.DecodeAuthorizationMessageRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
+ op := &request.Operation{
+ Name: opDecodeAuthorizationMessage,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &DecodeAuthorizationMessageInput{}
+ }
+
+ output = &DecodeAuthorizationMessageOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// DecodeAuthorizationMessage API operation for AWS Security Token Service.
+//
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an AWS request.
+//
+// For example, if a user is not authorized to perform an operation that he
+// or she has requested, the request returns a Client.UnauthorizedOperation
+// response (an HTTP 403 response). Some AWS operations additionally return
+// an encoded message that can provide details about this authorization failure.
+//
+// Only certain AWS operations return an encoded authorization message. The
+// documentation for an individual operation indicates whether that operation
+// returns an encoded message in addition to returning an HTTP code.
+//
+// The message is encoded because the details of the authorization status can
+// constitute privileged information that the user who requested the operation
+// should not see. To decode an authorization status message, a user must be
+// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
+// (sts:DecodeAuthorizationMessage) action.
+//
+// The decoded message includes the following types of information:
+//
+// * Whether the request was denied due to an explicit deny or due to the
+// absence of an explicit allow. For more information, see Determining Whether
+// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+// in the IAM User Guide.
+//
+// * The principal who made the request.
+//
+// * The requested action.
+//
+// * The requested resource.
+//
+// * The values of condition keys in the context of the user's request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
+// The error returned if the message passed to DecodeAuthorizationMessage was
+// invalid. This can happen if the token contains invalid characters, such as
+// linebreaks.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+ req, out := c.DecodeAuthorizationMessageRequest(input)
+ return out, req.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+ req, out := c.DecodeAuthorizationMessageRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
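+
+// A minimal sketch (illustrative only) of decoding an authorization failure
+// message; encodedMessage is a hypothetical value returned by another AWS
+// operation alongside a Client.UnauthorizedOperation response.
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//        EncodedMessage: aws.String(encodedMessage),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.DecodedMessage))
+//    }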
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetCallerIdentityRequest method.
+// req, resp := client.GetCallerIdentityRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+ op := &request.Operation{
+ Name: opGetCallerIdentity,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetCallerIdentityInput{}
+ }
+
+ output = &GetCallerIdentityOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM identity whose credentials are used to call
+// the API.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+ req, out := c.GetCallerIdentityRequest(input)
+ return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+ req, out := c.GetCallerIdentityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
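+
+// A minimal sketch (illustrative only): GetCallerIdentity takes no parameters
+// and reports the account, ARN, and user ID behind the calling credentials.
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Account), aws.StringValue(out.Arn))
+//    }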
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetFederationTokenRequest method.
+// req, resp := client.GetFederationTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+ op := &request.Operation{
+ Name: opGetFederationToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetFederationTokenInput{}
+ }
+
+ output = &GetFederationTokenOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. You must
+// call the GetFederationToken operation using the long-term security credentials
+// of an IAM user. As a result, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other API operations that
+// produce temporary credentials, see Requesting Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider. In this case, we recommend
+// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// You can also call GetFederationToken using the security credentials of an
+// AWS account root user, but we do not recommend it. Instead, we recommend
+// that you create an IAM user for the purpose of the proxy application. Then
+// attach a policy to the IAM user that limits federated users to only the actions
+// and resources that they need to access. For more information, see IAM Best
+// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// The temporary credentials are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
+// is 43,200 seconds (12 hours). Temporary credentials that are obtained by
+// using AWS account root user credentials have a maximum duration of 3,600
+// seconds (1 hour).
+//
+// The temporary security credentials created by GetFederationToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot use these credentials to call any IAM API operations.
+//
+// * You cannot call any STS API operations except GetCallerIdentity.
+//
+// Permissions
+//
+// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters.
+//
+// Though the session policy parameters are optional, if you do not pass a policy,
+// then the resulting federated user session has no permissions. The only exception
+// is when the credentials are used to access a resource that has a resource-based
+// policy that specifically references the federated user session in the Principal
+// element of the policy. When you pass session policies, the session permissions
+// are the intersection of the IAM user policies and the session policies that
+// you pass. This gives you a way to further restrict the permissions for a
+// federated user. You cannot use session policies to grant more permissions
+// than those that are defined in the permissions policy of the IAM user. For
+// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide. For information about using GetFederationToken to
+// create temporary security credentials, see GetFederationToken—Federation
+// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetFederationToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the policy document was too large. The error
+// message describes how big the policy document is, in packed form, as a percentage
+// of what the API allows.
+//
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
+ req, out := c.GetFederationTokenRequest(input)
+ return out, req.Send()
+}
+
+// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetFederationToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+ req, out := c.GetFederationTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
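+
+// A minimal sketch (illustrative only) of federating a user with an inline
+// session policy; the user name is a placeholder and sessionPolicyJSON is a
+// hypothetical JSON policy document.
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
+//        Name:            aws.String("federated-user"),
+//        Policy:          aws.String(sessionPolicyJSON),
+//        DurationSeconds: aws.Int64(3600),
+//    })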
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetSessionToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetSessionToken for more information on using the GetSessionToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the GetSessionTokenRequest method.
+// req, resp := client.GetSessionTokenRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+ op := &request.Operation{
+ Name: opGetSessionToken,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &GetSessionTokenInput{}
+ }
+
+ output = &GetSessionTokenOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances.
+// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA
+// code that is associated with their MFA device. Using the temporary security
+// credentials that are returned from the call, IAM users can then make programmatic
+// calls to API operations that require MFA authentication. If you do not supply
+// a correct MFA code, then the API returns an access denied error. For a comparison
+// of GetSessionToken with the other API operations that produce temporary credentials,
+// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The GetSessionToken operation must be called by using the long-term AWS security
+// credentials of the AWS account root user or an IAM user. Credentials that
+// are created by IAM users are valid for the duration that you specify. This
+// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600
+// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials
+// based on account credentials can range from 900 seconds (15 minutes) up to
+// 3,600 seconds (1 hour), with a default of 1 hour.
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+// * You cannot call any IAM API operations unless MFA authentication information
+// is included in the request.
+//
+// * You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with AWS account root user
+// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The credentials that are returned by GetSessionToken are based on permissions
+// associated with the user whose credentials were used to call the operation.
+// If GetSessionToken is called using AWS account root user credentials, the
+// temporary credentials have root user permissions. Similarly, if GetSessionToken
+// is called using the credentials of an IAM user, the temporary credentials
+// have the same permissions as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+ req, out := c.GetSessionTokenRequest(input)
+ return out, req.Send()
+}
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+ req, out := c.GetSessionTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
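+
+// A minimal usage sketch (not part of the generated documentation above):
+// calling GetSessionToken with MFA might look like the following, where the
+// session setup, serial number, and token code are hypothetical placeholders.
+// The aws, session, and sts packages come from github.com/aws/aws-sdk-go.
+//
+//    sess := session.Must(session.NewSession())
+//    svc := sts.New(sess)
+//    out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
+//        DurationSeconds: aws.Int64(3600),
+//        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // hypothetical device
+//        TokenCode:       aws.String("123456"),                             // hypothetical MFA code
+//    })
+//    if err == nil {
+//        fmt.Println(*out.Credentials.Expiration)
+//    }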
+
+type AssumeRoleInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) up to the maximum session duration setting for the role.
+ // This setting can have a value from 1 hour to 12 hours. If you specify a value
+ // higher than this setting, the operation fails. For example, if you specify
+ // a session duration of 12 hours, but your administrator set the maximum session
+ // duration to 6 hours, your operation fails. To learn how to view the maximum
+ // value for your role, see View the Maximum Session Duration Setting for a
+ // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request
+ // to the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the
+ // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // A unique identifier that might be required when you assume a role in another
+ // account. If the administrator of the account to which the role belongs provided
+ // you with an external ID, then provide that value in the ExternalId parameter.
+ // This value can be any string, such as a passphrase or account number. A cross-account
+ // role is usually set up to trust everyone in an account. Therefore, the administrator
+ // of the trusting account might send an external ID to the administrator of
+ // the trusted account. That way, only someone with the ID can assume the role,
+ // rather than everyone in the account. For more information about the external
+ // ID, see How to Use an External ID When Granting Access to Your AWS Resources
+ // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+ // in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@:/-
+ ExternalId *string `min:"2" type:"string"`
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ //
+ // This parameter is optional. Passing policies to this operation returns new
+ // temporary credentials. The resulting session's permissions are the intersection
+ // of the role's identity-based policy and the session policies. You can use
+ // the role's temporary credentials in subsequent AWS API calls to access resources
+ // in the account that owns the role. You cannot use session policies to grant
+ // more permissions than those allowed by the identity-based policy of the role
+ // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ //
+ // The plain text that you use for both inline and managed session policies
+ // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+ // character from the space character to the end of the valid character list
+ // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+ // to use as managed session policies. The policies must exist in the same account
+ // as the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plain text that you use for both inline and managed session
+ // policies shouldn't exceed 2048 characters. For more information about ARNs,
+ // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the AWS General Reference.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's identity-based
+ // policy and the session policies. You can use the role's temporary credentials
+ // in subsequent AWS API calls to access resources in the account that owns
+ // the role. You cannot use session policies to grant more permissions than
+ // those allowed by the identity-based policy of the role that is being assumed.
+ // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ PolicyArns []*PolicyDescriptorType `type:"list"`
+
+ // The Amazon Resource Name (ARN) of the role to assume.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // An identifier for the assumed role session.
+ //
+ // Use the role session name to uniquely identify a session when the same role
+ // is assumed by different principals or for different reasons. In cross-account
+ // scenarios, the role session name is visible to, and can be logged by the
+ // account that owns the role. The role session name is also used in the ARN
+ // of the assumed role principal. This means that subsequent cross-account API
+ // requests that use the temporary security credentials will expose the role
+ // session name to the external account in their AWS CloudTrail logs.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // RoleSessionName is a required field
+ RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy
+ // of the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as GAHT12345678)
+ // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ SerialNumber *string `min:"9" type:"string"`
+
+ // The value provided by the MFA device, if the trust policy of the role being
+ // assumed requires MFA (that is, if the policy includes a condition that tests
+ // for MFA). If the role being assumed requires MFA and if the TokenCode value
+ // is missing or expired, the AssumeRole call returns an "access denied" error.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.ExternalId != nil && len(*s.ExternalId) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.RoleSessionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
+ }
+ if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
+ }
+ if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+ invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+ }
+ if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+ }
+ if s.PolicyArns != nil {
+ for i, v := range s.PolicyArns {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetExternalId sets the ExternalId field's value.
+func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput {
+ s.ExternalId = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
+ s.Policy = &v
+ return s
+}
+
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput {
+ s.PolicyArns = v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value.
+func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput {
+ s.RoleSessionName = &v
+ return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
+ s.SerialNumber = &v
+ return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
+ s.TokenCode = &v
+ return s
+}
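+
+// A minimal sketch of populating AssumeRoleInput with the builder-style
+// setters above; the role ARN and session name are hypothetical placeholders.
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    input := (&sts.AssumeRoleInput{}).
+//        SetRoleArn("arn:aws:iam::123456789012:role/demo"). // hypothetical role
+//        SetRoleSessionName("demo-session").
+//        SetDurationSeconds(3600)
+//    out, err := svc.AssumeRole(input)
+//    if err == nil {
+//        fmt.Println(*out.Credentials.AccessKeyId)
+//    }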
+
+// Contains the response to a successful AssumeRole request, including temporary
+// AWS credentials that can be used to make AWS requests.
+type AssumeRoleOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials.
+ // For example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+ // that you specified when you called AssumeRole.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed.
+ // We strongly recommend that you make no assumptions about the maximum size.
+ Credentials *Credentials `type:"structure"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AssumeRoleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleOutput) GoString() string {
+ return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+type AssumeRoleWithSAMLInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. Your role session lasts for
+ // the duration that you specify for the DurationSeconds parameter, or until
+ // the time specified in the SAML authentication response's SessionNotOnOrAfter
+ // value, whichever is shorter. You can provide a DurationSeconds value from
+ // 900 seconds (15 minutes) up to the maximum session duration setting for the
+ // role. This setting can have a value from 1 hour to 12 hours. If you specify
+ // a value higher than this setting, the operation fails. For example, if you
+ // specify a session duration of 12 hours, but your administrator set the maximum
+ // session duration to 6 hours, your operation fails. To learn how to view the
+ // maximum value for your role, see View the Maximum Session Duration Setting
+ // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request
+ // to the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the
+ // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ //
+ // This parameter is optional. Passing policies to this operation returns new
+ // temporary credentials. The resulting session's permissions are the intersection
+ // of the role's identity-based policy and the session policies. You can use
+ // the role's temporary credentials in subsequent AWS API calls to access resources
+ // in the account that owns the role. You cannot use session policies to grant
+ // more permissions than those allowed by the identity-based policy of the role
+ // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ //
+ // The plain text that you use for both inline and managed session policies
+ // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+ // character from the space character to the end of the valid character list
+ // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+ // to use as managed session policies. The policies must exist in the same account
+ // as the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plain text that you use for both inline and managed session
+ // policies shouldn't exceed 2048 characters. For more information about ARNs,
+ // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the AWS General Reference.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's identity-based
+ // policy and the session policies. You can use the role's temporary credentials
+ // in subsequent AWS API calls to access resources in the account that owns
+ // the role. You cannot use session policies to grant more permissions than
+ // those allowed by the identity-based policy of the role that is being assumed.
+ // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ PolicyArns []*PolicyDescriptorType `type:"list"`
+
+ // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+ // the IdP.
+ //
+ // PrincipalArn is a required field
+ PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // The base-64 encoded SAML authentication response provided by the IdP.
+ //
+ // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+ // in the IAM User Guide.
+ //
+ // SAMLAssertion is a required field
+ SAMLAssertion *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.PrincipalArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
+ }
+ if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.SAMLAssertion == nil {
+ invalidParams.Add(request.NewErrParamRequired("SAMLAssertion"))
+ }
+ if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
+ }
+ if s.PolicyArns != nil {
+ for i, v := range s.PolicyArns {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
+ s.Policy = &v
+ return s
+}
+
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput {
+ s.PolicyArns = v
+ return s
+}
+
+// SetPrincipalArn sets the PrincipalArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput {
+ s.PrincipalArn = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetSAMLAssertion sets the SAMLAssertion field's value.
+func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput {
+ s.SAMLAssertion = &v
+ return s
+}
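+
+// A minimal sketch, assuming the base64-encoded SAML response has already
+// been obtained from the IdP; the ARNs are hypothetical placeholders. Note
+// that this call is not signed with long-term AWS credentials.
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
+//        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/MyIdP"), // hypothetical
+//        RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-role"),      // hypothetical
+//        SAMLAssertion: aws.String(samlResponse), // base64 response from the IdP
+//    })
+//    if err == nil {
+//        fmt.Println(*out.Subject)
+//    }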
+
+// Contains the response to a successful AssumeRoleWithSAML request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type AssumeRoleWithSAMLOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The identifiers for the temporary security credentials that the operation
+ // returns.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The value of the Recipient attribute of the SubjectConfirmationData element
+ // of the SAML assertion.
+ Audience *string `type:"string"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed.
+ // We strongly recommend that you make no assumptions about the maximum size.
+ Credentials *Credentials `type:"structure"`
+
+ // The value of the Issuer element of the SAML assertion.
+ Issuer *string `type:"string"`
+
+ // A hash value based on the concatenation of the Issuer response value, the
+ // AWS account ID, and the friendly name (the last part of the ARN) of the SAML
+ // provider in IAM. The combination of NameQualifier and Subject can be used
+ // to uniquely identify a federated user.
+ //
+ // The following pseudocode shows how the hash value is calculated:
+ //
+ // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
+ // ) )
+ NameQualifier *string `type:"string"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+
+ // The value of the NameID element in the Subject element of the SAML assertion.
+ Subject *string `type:"string"`
+
+ // The format of the name ID, as defined by the Format attribute in the NameID
+ // element of the SAML assertion. Typical examples of the format are transient
+ // or persistent.
+ //
+ // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
+ // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient
+ // is returned as transient. If the format includes any other prefix, the format
+ // is returned with no modifications.
+ SubjectType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLOutput) GoString() string {
+ return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetAudience sets the Audience field's value.
+func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput {
+ s.Audience = &v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetIssuer sets the Issuer field's value.
+func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput {
+ s.Issuer = &v
+ return s
+}
+
+// SetNameQualifier sets the NameQualifier field's value.
+func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput {
+ s.NameQualifier = &v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// SetSubject sets the Subject field's value.
+func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput {
+ s.Subject = &v
+ return s
+}
+
+// SetSubjectType sets the SubjectType field's value.
+func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput {
+ s.SubjectType = &v
+ return s
+}
+
+type AssumeRoleWithWebIdentityInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, of the role session. The value can range from 900
+ // seconds (15 minutes) up to the maximum session duration setting for the role.
+ // This setting can have a value from 1 hour to 12 hours. If you specify a value
+ // higher than this setting, the operation fails. For example, if you specify
+ // a session duration of 12 hours, but your administrator set the maximum session
+ // duration to 6 hours, your operation fails. To learn how to view the maximum
+ // value for your role, see View the Maximum Session Duration Setting for a
+ // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+ // in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request
+ // to the federation endpoint for a console sign-in token takes a SessionDuration
+ // parameter that specifies the maximum length of the console session. For more
+ // information, see Creating a URL that Enables Federated Users to Access the
+ // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+ // in the IAM User Guide.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ //
+ // This parameter is optional. Passing policies to this operation returns new
+ // temporary credentials. The resulting session's permissions are the intersection
+ // of the role's identity-based policy and the session policies. You can use
+ // the role's temporary credentials in subsequent AWS API calls to access resources
+ // in the account that owns the role. You cannot use session policies to grant
+ // more permissions than those allowed by the identity-based policy of the role
+ // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ //
+ // The plain text that you use for both inline and managed session policies
+ // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+ // character from the space character to the end of the valid character list
+ // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+ // to use as managed session policies. The policies must exist in the same account
+ // as the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plain text that you use for both inline and managed session
+ // policies shouldn't exceed 2048 characters. For more information about ARNs,
+ // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the AWS General Reference.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's identity-based
+ // policy and the session policies. You can use the role's temporary credentials
+ // in subsequent AWS API calls to access resources in the account that owns
+ // the role. You cannot use session policies to grant more permissions than
+ // those allowed by the identity-based policy of the role that is being assumed.
+ // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ PolicyArns []*PolicyDescriptorType `type:"list"`
+
+ // The fully qualified host component of the domain name of the identity provider.
+ //
+ // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
+ // and graph.facebook.com are the only supported identity providers for OAuth
+ // 2.0 access tokens. Do not include URL schemes and port numbers.
+ //
+ // Do not specify this value for OpenID Connect ID tokens.
+ ProviderId *string `min:"4" type:"string"`
+
+ // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+ //
+ // RoleArn is a required field
+ RoleArn *string `min:"20" type:"string" required:"true"`
+
+ // An identifier for the assumed role session. Typically, you pass the name
+ // or identifier that is associated with the user who is using your application.
+ // That way, the temporary security credentials that your application will use
+ // are associated with that user. This session name is included as part of the
+ // ARN and assumed role ID in the AssumedRoleUser response element.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // RoleSessionName is a required field
+ RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+ // The OAuth 2.0 access token or OpenID Connect ID token that is provided by
+ // the identity provider. Your application must get this token by authenticating
+ // the user who is using your application with a web identity provider before
+ // the application makes an AssumeRoleWithWebIdentity call.
+ //
+ // WebIdentityToken is a required field
+ WebIdentityToken *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithWebIdentityInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.ProviderId != nil && len(*s.ProviderId) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4))
+ }
+ if s.RoleArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+ }
+ if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+ }
+ if s.RoleSessionName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
+ }
+ if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
+ }
+ if s.WebIdentityToken == nil {
+ invalidParams.Add(request.NewErrParamRequired("WebIdentityToken"))
+ }
+ if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 {
+ invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4))
+ }
+ if s.PolicyArns != nil {
+ for i, v := range s.PolicyArns {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput {
+ s.Policy = &v
+ return s
+}
+
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput {
+ s.PolicyArns = v
+ return s
+}
+
+// SetProviderId sets the ProviderId field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput {
+ s.ProviderId = &v
+ return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput {
+ s.RoleArn = &v
+ return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput {
+ s.RoleSessionName = &v
+ return s
+}
+
+// SetWebIdentityToken sets the WebIdentityToken field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput {
+ s.WebIdentityToken = &v
+ return s
+}
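+
+// A minimal sketch with a hypothetical role ARN and an OIDC ID token obtained
+// elsewhere; like AssumeRoleWithSAML, this call does not require existing AWS
+// credentials.
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-role"), // hypothetical
+//        RoleSessionName:  aws.String("app-user-123"),
+//        WebIdentityToken: aws.String(idToken), // token from your identity provider
+//    })
+//    if err == nil {
+//        fmt.Println(*out.SubjectFromWebIdentityToken)
+//    }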
+
+// Contains the response to a successful AssumeRoleWithWebIdentity request,
+// including temporary AWS credentials that can be used to make AWS requests.
+type AssumeRoleWithWebIdentityOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+ // that you can use to refer to the resulting temporary security credentials.
+ // For example, you can reference these credentials as a principal in a resource-based
+ // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+ // that you specified when you called AssumeRole.
+ AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+ // The intended audience (also known as client ID) of the web identity token.
+ // This is traditionally the client identifier issued to the application that
+ // requested the web identity token.
+ Audience *string `type:"string"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security token.
+ //
+ // The size of the security token that STS API operations return is not fixed.
+ // We strongly recommend that you make no assumptions about the maximum size.
+ Credentials *Credentials `type:"structure"`
+
+ // A percentage value that indicates the size of the policy in packed form.
+ // The service rejects any policy with a packed size greater than 100 percent,
+ // which means the policy exceeded the allowed space.
+ PackedPolicySize *int64 `type:"integer"`
+
+ // The issuing authority of the web identity token presented. For OpenID Connect
+ // ID tokens, this contains the value of the iss field. For OAuth 2.0 access
+ // tokens, this contains the value of the ProviderId parameter that was passed
+ // in the AssumeRoleWithWebIdentity request.
+ Provider *string `type:"string"`
+
+ // The unique user identifier that is returned by the identity provider. This
+ // identifier is associated with the WebIdentityToken that was submitted with
+ // the AssumeRoleWithWebIdentity call. The identifier is typically unique to
+ // the user and the application that acquired the WebIdentityToken (pairwise
+ // identifier). For OpenID Connect ID tokens, this field contains the value
+ // returned by the identity provider as the token's sub (Subject) claim.
+ SubjectFromWebIdentityToken *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) GoString() string {
+ return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput {
+ s.AssumedRoleUser = v
+ return s
+}
+
+// SetAudience sets the Audience field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput {
+ s.Audience = &v
+ return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+// SetProvider sets the Provider field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput {
+ s.Provider = &v
+ return s
+}
+
+// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput {
+ s.SubjectFromWebIdentityToken = &v
+ return s
+}
+
+// The identifiers for the temporary security credentials that the operation
+// returns.
+type AssumedRoleUser struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the temporary security credentials that are returned from the
+ // AssumeRole action. For more information about ARNs and how to use them in
+ // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+ // in Using IAM.
+ //
+ // Arn is a required field
+ Arn *string `min:"20" type:"string" required:"true"`
+
+ // A unique identifier that contains the role ID and the role session name of
+ // the role that is being assumed. The role ID is generated by AWS when the
+ // role is created.
+ //
+ // AssumedRoleId is a required field
+ AssumedRoleId *string `min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumedRoleUser) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumedRoleUser) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser {
+ s.Arn = &v
+ return s
+}
+
+// SetAssumedRoleId sets the AssumedRoleId field's value.
+func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser {
+ s.AssumedRoleId = &v
+ return s
+}
+
+// AWS credentials for API authentication.
+type Credentials struct {
+ _ struct{} `type:"structure"`
+
+ // The access key ID that identifies the temporary security credentials.
+ //
+ // AccessKeyId is a required field
+ AccessKeyId *string `min:"16" type:"string" required:"true"`
+
+ // The date on which the current credentials expire.
+ //
+ // Expiration is a required field
+ Expiration *time.Time `type:"timestamp" required:"true"`
+
+ // The secret access key that can be used to sign requests.
+ //
+ // SecretAccessKey is a required field
+ SecretAccessKey *string `type:"string" required:"true"`
+
+ // The token that users must pass to the service API to use the temporary credentials.
+ //
+ // SessionToken is a required field
+ SessionToken *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Credentials) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Credentials) GoString() string {
+ return s.String()
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *Credentials) SetAccessKeyId(v string) *Credentials {
+ s.AccessKeyId = &v
+ return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *Credentials) SetExpiration(v time.Time) *Credentials {
+ s.Expiration = &v
+ return s
+}
+
+// SetSecretAccessKey sets the SecretAccessKey field's value.
+func (s *Credentials) SetSecretAccessKey(v string) *Credentials {
+ s.SecretAccessKey = &v
+ return s
+}
+
+// SetSessionToken sets the SessionToken field's value.
+func (s *Credentials) SetSessionToken(v string) *Credentials {
+ s.SessionToken = &v
+ return s
+}
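+
+// A sketch of feeding the returned temporary credentials into another client,
+// assuming creds is a *sts.Credentials from one of the calls above and that
+// the credentials package (github.com/aws/aws-sdk-go/aws/credentials) is
+// imported.
+//
+//    static := credentials.NewStaticCredentials(
+//        *creds.AccessKeyId, *creds.SecretAccessKey, *creds.SessionToken)
+//    cfg := aws.NewConfig().WithCredentials(static)
+//    // cfg can now be passed to any service client constructor.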
+
+type DecodeAuthorizationMessageInput struct {
+ _ struct{} `type:"structure"`
+
+ // The encoded message that was returned with the response.
+ //
+ // EncodedMessage is a required field
+ EncodedMessage *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DecodeAuthorizationMessageInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"}
+ if s.EncodedMessage == nil {
+ invalidParams.Add(request.NewErrParamRequired("EncodedMessage"))
+ }
+ if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetEncodedMessage sets the EncodedMessage field's value.
+func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput {
+ s.EncodedMessage = &v
+ return s
+}
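+
+// A minimal sketch, assuming encodedMsg holds the encoded message copied from
+// an "access denied" error response.
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//        EncodedMessage: aws.String(encodedMsg),
+//    })
+//    if err == nil {
+//        fmt.Println(*out.DecodedMessage) // XML document describing the denial
+//    }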
+
+// A document that contains additional information about the authorization status
+// of a request from an encoded message that is returned in response to an AWS
+// request.
+type DecodeAuthorizationMessageOutput struct {
+ _ struct{} `type:"structure"`
+
+ // An XML document that contains the decoded message.
+ DecodedMessage *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageOutput) GoString() string {
+ return s.String()
+}
+
+// SetDecodedMessage sets the DecodedMessage field's value.
+func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput {
+ s.DecodedMessage = &v
+ return s
+}
+
+// Identifiers for the federated user that is associated with the credentials.
+type FederatedUser struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN that specifies the federated user that is associated with the credentials.
+ // For more information about ARNs and how to use them in policies, see IAM
+ // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+ // in Using IAM.
+ //
+ // Arn is a required field
+ Arn *string `min:"20" type:"string" required:"true"`
+
+ // The string that identifies the federated user associated with the credentials,
+ // similar to the unique ID of an IAM user.
+ //
+ // FederatedUserId is a required field
+ FederatedUserId *string `min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s FederatedUser) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FederatedUser) GoString() string {
+ return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *FederatedUser) SetArn(v string) *FederatedUser {
+ s.Arn = &v
+ return s
+}
+
+// SetFederatedUserId sets the FederatedUserId field's value.
+func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
+ s.FederatedUserId = &v
+ return s
+}
+
+type GetCallerIdentityInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetCallerIdentityInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCallerIdentityInput) GoString() string {
+ return s.String()
+}
+
+// Contains the response to a successful GetCallerIdentity request, including
+// information about the entity making the request.
+type GetCallerIdentityOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The AWS account ID number of the account that owns or contains the calling
+ // entity.
+ Account *string `type:"string"`
+
+ // The AWS ARN associated with the calling entity.
+ Arn *string `min:"20" type:"string"`
+
+ // The unique identifier of the calling entity. The exact value depends on the
+ // type of entity that is making the call. The values returned are those listed
+ // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
+ // found on the Policy Variables reference page in the IAM User Guide.
+ UserId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetCallerIdentityOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCallerIdentityOutput) GoString() string {
+ return s.String()
+}
+
+// SetAccount sets the Account field's value.
+func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput {
+ s.Account = &v
+ return s
+}
+
+// SetArn sets the Arn field's value.
+func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput {
+ s.Arn = &v
+ return s
+}
+
+// SetUserId sets the UserId field's value.
+func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput {
+ s.UserId = &v
+ return s
+}
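+
+// A minimal sketch; the empty input struct makes GetCallerIdentity a handy
+// way to check which principal a session resolves to.
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
+//    if err == nil {
+//        fmt.Printf("account=%s arn=%s\n", *out.Account, *out.Arn)
+//    }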
+
+type GetFederationTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, that the session should last. Acceptable durations
+ // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds
+ // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained
+ // using AWS account root user credentials are restricted to a maximum of 3,600
+ // seconds (one hour). If the specified duration is longer than one hour, the
+ // session obtained by using root user credentials defaults to one hour.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // The name of the federated user. The name is used as an identifier for the
+ // temporary security credentials (such as Bob). For example, you can reference
+ // the federated user name in a resource-based policy, such as in an Amazon
+ // S3 bucket policy.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@-
+ //
+ // Name is a required field
+ Name *string `min:"2" type:"string" required:"true"`
+
+ // An IAM policy in JSON format that you want to use as an inline session policy.
+ //
+ // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // to this operation. You can pass a single JSON policy document to use as an
+ // inline session policy. You can also specify up to 10 managed policies to
+ // use as managed session policies.
+ //
+ // This parameter is optional. However, if you do not pass any session policies,
+ // then the resulting federated user session has no permissions. The only exception
+ // is when the credentials are used to access a resource that has a resource-based
+ // policy that specifically references the federated user session in the Principal
+ // element of the policy.
+ //
+ // When you pass session policies, the session permissions are the intersection
+ // of the IAM user policies and the session policies that you pass. This gives
+ // you a way to further restrict the permissions for a federated user. You cannot
+ // use session policies to grant more permissions than those that are defined
+ // in the permissions policy of the IAM user. For more information, see Session
+ // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ //
+ // The plain text that you use for both inline and managed session policies
+ // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+ // character from the space character to the end of the valid character list
+ // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+ // and carriage return (\u000D) characters.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ Policy *string `min:"1" type:"string"`
+
+ // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+ // to use as a managed session policy. The policies must exist in the same account
+ // as the IAM user that is requesting federated access.
+ //
+ // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // to this operation. You can pass a single JSON policy document to use as an
+ // inline session policy. You can also specify up to 10 managed policies to
+ // use as managed session policies. The plain text that you use for both inline
+ // and managed session policies shouldn't exceed 2048 characters. You can provide
+ // up to 10 managed policy ARNs. For more information about ARNs, see Amazon
+ // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the AWS General Reference.
+ //
+ // This parameter is optional. However, if you do not pass any session policies,
+ // then the resulting federated user session has no permissions. The only exception
+ // is when the credentials are used to access a resource that has a resource-based
+ // policy that specifically references the federated user session in the Principal
+ // element of the policy.
+ //
+ // When you pass session policies, the session permissions are the intersection
+ // of the IAM user policies and the session policies that you pass. This gives
+ // you a way to further restrict the permissions for a federated user. You cannot
+ // use session policies to grant more permissions than those that are defined
+ // in the permissions policy of the IAM user. For more information, see Session
+ // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+ // in the IAM User Guide.
+ //
+ // The characters in this parameter count towards the 2048 character session
+ // policy guideline. However, an AWS conversion compresses the session policies
+ // into a packed binary format that has a separate limit. This is the enforced
+ // limit. The PackedPolicySize response element indicates by percentage how
+ // close the policy is to the upper size limit.
+ PolicyArns []*PolicyDescriptorType `type:"list"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetFederationTokenInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.Name == nil {
+ invalidParams.Add(request.NewErrParamRequired("Name"))
+ }
+ if s.Name != nil && len(*s.Name) < 2 {
+ invalidParams.Add(request.NewErrParamMinLen("Name", 2))
+ }
+ if s.Policy != nil && len(*s.Policy) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+ }
+ if s.PolicyArns != nil {
+ for i, v := range s.PolicyArns {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput {
+ s.Name = &v
+ return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
+ s.Policy = &v
+ return s
+}
+
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput {
+ s.PolicyArns = v
+ return s
+}
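+
+// A minimal sketch with a hypothetical federated user name and inline session
+// policy; as noted above, omitting all session policies would leave the
+// federated session with no permissions.
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
+//        Name:   aws.String("Bob"), // hypothetical federated user
+//        Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListBucket","Resource":"*"}]}`),
+//    })
+//    if err == nil {
+//        fmt.Println(*out.FederatedUser.Arn)
+//    }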
+
+// Contains the response to a successful GetFederationToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetFederationTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed.
+ // We strongly recommend that you make no assumptions about the maximum size.
+ Credentials *Credentials `type:"structure"`
+
+ // Identifiers for the federated user associated with the credentials (such
+ // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You
+ // can use the federated user's ARN in your resource-based policies, such as
+ // an Amazon S3 bucket policy.
+ FederatedUser *FederatedUser `type:"structure"`
+
+ // A percentage value indicating the size of the policy in packed form. The
+ // service rejects policies for which the packed size is greater than 100 percent
+ // of the allowed value.
+ PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput {
+ s.Credentials = v
+ return s
+}
+
+// SetFederatedUser sets the FederatedUser field's value.
+func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput {
+ s.FederatedUser = v
+ return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput {
+ s.PackedPolicySize = &v
+ return s
+}
+
+type GetSessionTokenInput struct {
+ _ struct{} `type:"structure"`
+
+ // The duration, in seconds, that the credentials should remain valid. Acceptable
+ // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600
+ // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions
+ // for AWS account owners are restricted to a maximum of 3,600 seconds (one
+ // hour). If the duration is longer than one hour, the session for AWS account
+ // owners defaults to one hour.
+ DurationSeconds *int64 `min:"900" type:"integer"`
+
+ // The identification number of the MFA device that is associated with the IAM
+ // user who is making the GetSessionToken call. Specify this value if the IAM
+ // user has a policy that requires MFA authentication. The value is either the
+ // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
+ // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+ // You can find the device for an IAM user by going to the AWS Management Console
+ // and viewing the user's security credentials.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can
+ // also include underscores or any of the following characters: =,.@:/-
+ SerialNumber *string `min:"9" type:"string"`
+
+ // The value provided by the MFA device, if MFA is required. If any policy requires
+ // the IAM user to submit an MFA code, specify this value. If MFA authentication
+ // is required, the user must provide a code when requesting a set of temporary
+ // security credentials. A user who fails to provide the code receives an "access
+ // denied" response when requesting resources that require MFA authentication.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
+ TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetSessionTokenInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"}
+ if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+ invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+ }
+ if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+ invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+ }
+ if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+ invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {
+ s.DurationSeconds = &v
+ return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput {
+ s.SerialNumber = &v
+ return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
+ s.TokenCode = &v
+ return s
+}
+
+// Contains the response to a successful GetSessionToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetSessionTokenOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed.
+ // We strongly recommend that you make no assumptions about the maximum size.
+ Credentials *Credentials `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenOutput) GoString() string {
+ return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput {
+ s.Credentials = v
+ return s
+}
+
+// A reference to the IAM managed policy that is passed as a session policy
+// for a role session or a federated user session.
+type PolicyDescriptorType struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session
+ // policy for the role. For more information about ARNs, see Amazon Resource
+	// Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the AWS General Reference.
+ Arn *string `locationName:"arn" min:"20" type:"string"`
+}
+
+// String returns the string representation
+func (s PolicyDescriptorType) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PolicyDescriptorType) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PolicyDescriptorType) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"}
+ if s.Arn != nil && len(*s.Arn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("Arn", 20))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetArn sets the Arn field's value.
+func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType {
+ s.Arn = &v
+ return s
+}
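The generated input types above follow the SDK's builder pattern: each Set* method returns its receiver, and Validate runs client-side checks (minimum lengths, nested PolicyArns validation) before any request is sent. A minimal sketch of calling GetFederationToken with a managed session policy follows; the session setup, user name, and policy ARN are illustrative placeholders, not part of this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Credentials and region come from the usual SDK sources (env, shared config).
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	input := &sts.GetFederationTokenInput{
		Name:            aws.String("example-user"), // 2+ chars, checked by Validate
		DurationSeconds: aws.Int64(3600),            // >= 900, checked by Validate
		PolicyArns: []*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")},
		},
	}
	out, err := svc.GetFederationToken(input)
	if err != nil {
		log.Fatal(err)
	}
	// PackedPolicySize reports how close the compressed policy is to the limit.
	fmt.Println("packed policy size (%):", aws.Int64Value(out.PackedPolicySize))
}
```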
diff --git a/src/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/src/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
new file mode 100644
index 000000000..fcb720dca
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
@@ -0,0 +1,108 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sts provides the client and types for making API
+// requests to AWS Security Token Service.
+//
+// The AWS Security Token Service (STS) is a web service that enables you to
+// request temporary, limited-privilege credentials for AWS Identity and Access
+// Management (IAM) users or for users that you authenticate (federated users).
+// This guide provides descriptions of the STS API. For more detailed information
+// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+//
+// For information about setting up signatures and authorization through the
+// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// in the AWS General Reference. For general information about the Query API,
+// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in Using IAM. For information about using security tokens with other AWS
+// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// in the IAM User Guide.
+//
+// If you're new to AWS and need additional technical information about a specific
+// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
+// (http://aws.amazon.com/documentation/).
+//
+// Endpoints
+//
+// By default, AWS Security Token Service (STS) is available as a global service,
+// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com.
+// Global requests map to the US East (N. Virginia) region. AWS recommends using
+// Regional AWS STS endpoints instead of the global endpoint to reduce latency,
+// build in redundancy, and increase session token validity. For more information,
+// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// Most AWS Regions are enabled for operations in all AWS services by default.
+// Those Regions are automatically activated for use with AWS STS. Some Regions,
+// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more
+// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html)
+// in the AWS General Reference. When you enable these AWS Regions, they are
+// automatically activated for use with AWS STS. You cannot activate the STS
+// endpoint for a Region that is disabled. Tokens that are valid in all AWS
+// Regions are longer than tokens that are valid in Regions that are enabled
+// by default. Changing this setting might affect existing systems where you
+// temporarily store tokens. For more information, see Managing Global Endpoint
+// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens)
+// in the IAM User Guide.
+//
+// After you activate a Region for use with AWS STS, you can direct AWS STS
+// API calls to that Region. AWS STS recommends that you provide both the Region
+// and endpoint when you make calls to a Regional endpoint. You can provide
+// the Region alone for manually enabled Regions, such as Asia Pacific (Hong
+// Kong). In this case, the calls are directed to the STS Regional endpoint.
+// However, if you provide the Region alone for Regions enabled by default,
+// the calls are directed to the global endpoint of https://sts.amazonaws.com.
+//
+// To view the list of AWS STS endpoints and whether they are active by default,
+// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code)
+// in the IAM User Guide.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on.
+//
+// If you activate AWS STS endpoints in Regions other than the default global
+// endpoint, then you must also turn on CloudTrail logging in those Regions.
+// This is necessary to record any AWS STS API calls that are made in those
+// Regions. For more information, see Turning On CloudTrail in Additional Regions
+// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html)
+// in the AWS CloudTrail User Guide.
+//
+// AWS Security Token Service (STS) is a global service with a single endpoint
+// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls
+// to a global service. However, because this endpoint is physically located
+// in the US East (N. Virginia) Region, your logs list us-east-1 as the event
+// Region. CloudTrail does not write these logs to the US East (Ohio) Region
+// unless you choose to include global service logs in that Region. CloudTrail
+// writes calls to all Regional endpoints to their respective Regions. For example,
+// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio)
+// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU
+// (Frankfurt) Region.
+//
+// To learn more about CloudTrail, including how to turn it on and find your
+// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
+//
+// See sts package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
+//
+// Using the Client
+//
+// To contact AWS Security Token Service with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Security Token Service client STS for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
+package sts
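As a usage sketch of the endpoint guidance above (not part of the vendored file): pinning the client to a Regional endpoint only takes an extra aws.Config, and the same client can then issue any STS call, such as GetSessionToken with MFA. The region, MFA serial number, and token code below are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	// Pin the client to a Regional endpoint, per the guidance above.
	svc := sts.New(sess, aws.NewConfig().WithRegion("eu-central-1"))

	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(900),
		SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"), // placeholder
		TokenCode:       aws.String("123456"),                             // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Credentials)
}
```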
diff --git a/src/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/src/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
new file mode 100644
index 000000000..41ea09c35
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
@@ -0,0 +1,73 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+const (
+
+ // ErrCodeExpiredTokenException for service response error code
+ // "ExpiredTokenException".
+ //
+ // The web identity token that was passed is expired or is not valid. Get a
+ // new identity token from the identity provider and then retry the request.
+ ErrCodeExpiredTokenException = "ExpiredTokenException"
+
+ // ErrCodeIDPCommunicationErrorException for service response error code
+ // "IDPCommunicationError".
+ //
+ // The request could not be fulfilled because the non-AWS identity provider
+ // (IDP) that was asked to verify the incoming identity token could not be reached.
+ // This is often a transient error caused by network conditions. Retry the request
+ // a limited number of times so that you don't exceed the request rate. If the
+ // error persists, the non-AWS identity provider might be down or not responding.
+ ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"
+
+ // ErrCodeIDPRejectedClaimException for service response error code
+ // "IDPRejectedClaim".
+ //
+ // The identity provider (IdP) reported that authentication failed. This might
+ // be because the claim is invalid.
+ //
+ // If this error is returned for the AssumeRoleWithWebIdentity operation, it
+ // can also mean that the claim has expired or has been explicitly revoked.
+ ErrCodeIDPRejectedClaimException = "IDPRejectedClaim"
+
+ // ErrCodeInvalidAuthorizationMessageException for service response error code
+ // "InvalidAuthorizationMessageException".
+ //
+ // The error returned if the message passed to DecodeAuthorizationMessage was
+ // invalid. This can happen if the token contains invalid characters, such as
+ // linebreaks.
+ ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"
+
+ // ErrCodeInvalidIdentityTokenException for service response error code
+ // "InvalidIdentityToken".
+ //
+ // The web identity token that was passed could not be validated by AWS. Get
+ // a new identity token from the identity provider and then retry the request.
+ ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"
+
+ // ErrCodeMalformedPolicyDocumentException for service response error code
+ // "MalformedPolicyDocument".
+ //
+ // The request was rejected because the policy document was malformed. The error
+ // message describes the specific error.
+ ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument"
+
+ // ErrCodePackedPolicyTooLargeException for service response error code
+ // "PackedPolicyTooLarge".
+ //
+ // The request was rejected because the policy document was too large. The error
+ // message describes how big the policy document is, in packed form, as a percentage
+ // of what the API allows.
+ ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
+
+ // ErrCodeRegionDisabledException for service response error code
+ // "RegionDisabledException".
+ //
+ // STS is not activated in the requested region for the account that is being
+ // asked to generate credentials. The account administrator must use the IAM
+ // console to activate STS in that region. For more information, see Activating
+ // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+ // in the IAM User Guide.
+ ErrCodeRegionDisabledException = "RegionDisabledException"
+)
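These constants pair with the SDK's awserr.Error interface; a typical pattern for branching on them might look like the following sketch (the helper name is hypothetical):

```go
import (
	"log"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/sts"
)

// handleSTSError shows how the error codes above pair with awserr.Error.
func handleSTSError(err error) {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case sts.ErrCodeRegionDisabledException:
			// STS is not activated in this Region; retry against the global endpoint.
		case sts.ErrCodeExpiredTokenException:
			// Fetch a fresh identity token from the provider and retry.
		default:
			log.Println(aerr.Code(), aerr.Message())
		}
	}
}
```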
diff --git a/src/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/src/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 000000000..185c914d1
--- /dev/null
+++ b/src/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,95 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/signer/v4"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// modify or mutate any of the struct's properties though.
+type STS struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+ ServiceName = "sts" // Name of service.
+ EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID = "STS" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a STS client from just a session.
+// svc := sts.New(mySession)
+//
+// // Create a STS client with additional configuration
+// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+ c := p.ClientConfig(EndpointsID, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
+ svc := &STS{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ ServiceID: ServiceID,
+ SigningName: signingName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2011-06-15",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+ svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
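New simply threads a ClientConfig through newClient, which wires the V4 signer and the query-protocol handlers onto the handler chain, so per-client overrides are just extra aws.Config values. For example (a sketch; the region and endpoint values are illustrative):

```go
// sess as in the earlier sketches; region/endpoint values are illustrative.
svc := sts.New(sess, aws.NewConfig().
	WithRegion("us-east-2").
	WithEndpoint("https://sts.us-east-2.amazonaws.com"))
_ = svc // the client is safe for concurrent use across goroutines
```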
diff --git a/src/vendor/github.com/bmatcuk/doublestar/test/b/symlink-dir b/src/vendor/github.com/bmatcuk/doublestar/test/b/symlink-dir
deleted file mode 120000
index 4a075011f..000000000
--- a/src/vendor/github.com/bmatcuk/doublestar/test/b/symlink-dir
+++ /dev/null
@@ -1 +0,0 @@
-../axbxcxdxe/
\ No newline at end of file
diff --git a/src/vendor/github.com/bmatcuk/doublestar/test/broken-symlink b/src/vendor/github.com/bmatcuk/doublestar/test/broken-symlink
deleted file mode 120000
index 0b8ae1dd3..000000000
--- a/src/vendor/github.com/bmatcuk/doublestar/test/broken-symlink
+++ /dev/null
@@ -1 +0,0 @@
-/tmp/nonexistant-file-20160902155705
\ No newline at end of file
diff --git a/src/vendor/github.com/bmatcuk/doublestar/test/working-symlink b/src/vendor/github.com/bmatcuk/doublestar/test/working-symlink
deleted file mode 120000
index db89c972f..000000000
--- a/src/vendor/github.com/bmatcuk/doublestar/test/working-symlink
+++ /dev/null
@@ -1 +0,0 @@
-a/b
\ No newline at end of file
diff --git a/src/vendor/github.com/coreos/go-oidc/test b/src/vendor/github.com/coreos/go-oidc/test
old mode 100755
new mode 100644
diff --git a/src/vendor/github.com/davecgh/go-spew/LICENSE b/src/vendor/github.com/davecgh/go-spew/LICENSE
index c83641619..bc52e96f2 100644
--- a/src/vendor/github.com/davecgh/go-spew/LICENSE
+++ b/src/vendor/github.com/davecgh/go-spew/LICENSE
@@ -2,7 +2,7 @@ ISC License
Copyright (c) 2012-2016 Dave Collins
-Permission to use, copy, modify, and distribute this software for any
+Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
diff --git a/src/vendor/github.com/davecgh/go-spew/spew/bypass.go b/src/vendor/github.com/davecgh/go-spew/spew/bypass.go
index 8a4a6589a..792994785 100644
--- a/src/vendor/github.com/davecgh/go-spew/spew/bypass.go
+++ b/src/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -16,7 +16,9 @@
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
-// +build !js,!appengine,!safe,!disableunsafe
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which makes the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew
@@ -34,80 +36,49 @@ const (
ptrSize = unsafe.Sizeof((*byte)(nil))
)
-var (
- // offsetPtr, offsetScalar, and offsetFlag are the offsets for the
- // internal reflect.Value fields. These values are valid before golang
- // commit ecccf07e7f9d which changed the format. The are also valid
- // after commit 82f48826c6c7 which changed the format again to mirror
- // the original format. Code in the init function updates these offsets
- // as necessary.
- offsetPtr = uintptr(ptrSize)
- offsetScalar = uintptr(0)
- offsetFlag = uintptr(ptrSize * 2)
+type flag uintptr
- // flagKindWidth and flagKindShift indicate various bits that the
- // reflect package uses internally to track kind information.
- //
- // flagRO indicates whether or not the value field of a reflect.Value is
- // read-only.
- //
- // flagIndir indicates whether the value field of a reflect.Value is
- // the actual data or a pointer to the data.
- //
- // These values are valid before golang commit 90a7c3c86944 which
- // changed their positions. Code in the init function updates these
- // flags as necessary.
- flagKindWidth = uintptr(5)
- flagKindShift = uintptr(flagKindWidth - 1)
- flagRO = uintptr(1 << 0)
- flagIndir = uintptr(1 << 1)
+var (
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
)
-func init() {
- // Older versions of reflect.Value stored small integers directly in the
- // ptr field (which is named val in the older versions). Versions
- // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
- // scalar for this purpose which unfortunately came before the flag
- // field, so the offset of the flag field is different for those
- // versions.
- //
- // This code constructs a new reflect.Value from a known small integer
- // and checks if the size of the reflect.Value struct indicates it has
- // the scalar field. When it does, the offsets are updated accordingly.
- vv := reflect.ValueOf(0xf00)
- if unsafe.Sizeof(vv) == (ptrSize * 4) {
- offsetScalar = ptrSize * 2
- offsetFlag = ptrSize * 3
- }
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
- // Commit 90a7c3c86944 changed the flag positions such that the low
- // order bits are the kind. This code extracts the kind from the flags
- // field and ensures it's the correct type. When it's not, the flag
- // order has been changed to the newer format, so the flags are updated
- // accordingly.
- upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
- upfv := *(*uintptr)(upf)
-	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
-	if upfv&flagKindMask>>flagKindShift != uintptr(reflect.Int) {
- flagKindShift = 0
- flagRO = 1 << 5
- flagIndir = 1 << 6
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
- // Commit adf9b30e5594 modified the flags to separate the
- // flagRO flag into two bits which specifies whether or not the
- // field is embedded. This causes flagIndir to move over a bit
- // and means that flagRO is the combination of either of the
- // original flagRO bit and the new bit.
- //
- // This code detects the change by extracting what used to be
- // the indirect bit to ensure it's set. When it's not, the flag
- // order has been changed to the newer format, so the flags are
- // updated accordingly.
- if upfv&flagIndir == 0 {
- flagRO = 3 << 5
- flagIndir = 1 << 7
- }
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
}
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
@@ -119,34 +90,56 @@ func init() {
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
-func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
- indirects := 1
- vt := v.Type()
- upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
- rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
- if rvf&flagIndir != 0 {
- vt = reflect.PtrTo(v.Type())
- indirects++
- } else if offsetScalar != 0 {
- // The value is in the scalar field when it's not one of the
- // reference types.
- switch vt.Kind() {
- case reflect.Uintptr:
- case reflect.Chan:
- case reflect.Func:
- case reflect.Map:
- case reflect.Ptr:
- case reflect.UnsafePointer:
- default:
- upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
- offsetScalar)
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
+ }
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
}
}
-
- pv := reflect.NewAt(vt, upv)
- rv = pv
- for i := 0; i < indirects; i++ {
- rv = rv.Elem()
- }
- return rv
+ panic("reflect.Value read-only flag has changed semantics")
}
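The rewritten bypass no longer hard-codes historical reflect.Value layouts: it finds the offset of the flag field via reflect itself, infers the read-only and addressable bits by diffing otherwise-identical values, and panics at init time if the inferred bits match no known Go version. The payoff is unchanged for callers, e.g. dumping unexported fields; here is a small usage sketch, not taken from this diff:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type config struct {
	Public  string
	private map[string]int // unexported: plain reflection can't Interface() this
}

func main() {
	c := config{Public: "x", private: map[string]int{"a": 1}}
	// spew clears the read-only flag on the reflect.Value so it can fully
	// render (and call Stringer/error methods on) unexported fields.
	fmt.Print(spew.Sdump(c))
}
```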
diff --git a/src/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/src/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
index 1fe3cf3d5..205c28d68 100644
--- a/src/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
+++ b/src/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -16,7 +16,7 @@
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
-// +build js appengine safe disableunsafe
+// +build js appengine safe disableunsafe !go1.4
package spew
diff --git a/src/vendor/github.com/davecgh/go-spew/spew/common.go b/src/vendor/github.com/davecgh/go-spew/spew/common.go
index 7c519ff47..1be8ce945 100644
--- a/src/vendor/github.com/davecgh/go-spew/spew/common.go
+++ b/src/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
w.Write(closeParenBytes)
}
-// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
diff --git a/src/vendor/github.com/davecgh/go-spew/spew/dump.go b/src/vendor/github.com/davecgh/go-spew/spew/dump.go
index df1d582a7..f78d89fc1 100644
--- a/src/vendor/github.com/davecgh/go-spew/spew/dump.go
+++ b/src/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -35,16 +35,16 @@ var (
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
- cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
- cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
- cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)
// dumpState contains information about the state of a dump operation.
@@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
- case nilFound == true:
+ case nilFound:
d.w.Write(nilAngleBytes)
- case cycleFound == true:
+ case cycleFound:
d.w.Write(circularBytes)
default:
diff --git a/src/vendor/github.com/davecgh/go-spew/spew/format.go b/src/vendor/github.com/davecgh/go-spew/spew/format.go
index c49875bac..b04edb7d7 100644
--- a/src/vendor/github.com/davecgh/go-spew/spew/format.go
+++ b/src/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
// Display dereferenced value.
switch {
- case nilFound == true:
+ case nilFound:
f.fs.Write(nilAngleBytes)
- case cycleFound == true:
+ case cycleFound:
f.fs.Write(circularShortBytes)
default:
diff --git a/src/vendor/github.com/dghubble/sling/test b/src/vendor/github.com/dghubble/sling/test
old mode 100755
new mode 100644
diff --git a/src/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/src/vendor/github.com/dgrijalva/jwt-go/.travis.yml
index bde823d8a..1027f56cd 100644
--- a/src/vendor/github.com/dgrijalva/jwt-go/.travis.yml
+++ b/src/vendor/github.com/dgrijalva/jwt-go/.travis.yml
@@ -1,8 +1,13 @@
language: go
+script:
+ - go vet ./...
+ - go test -v ./...
+
go:
- 1.3
- 1.4
- 1.5
- 1.6
+ - 1.7
- tip
diff --git a/src/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/src/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
index fd62e9490..7fc1f793c 100644
--- a/src/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
+++ b/src/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
@@ -56,8 +56,9 @@ This simple parsing example:
is directly mapped to:
```go
- if token, err := request.ParseFromRequest(tokenString, request.OAuth2Extractor, req, keyLookupFunc); err == nil {
- fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+ if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
+ claims := token.Claims.(jwt.MapClaims)
+ fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
}
```
diff --git a/src/vendor/github.com/dgrijalva/jwt-go/README.md b/src/vendor/github.com/dgrijalva/jwt-go/README.md
index 00f613672..d358d881b 100644
--- a/src/vendor/github.com/dgrijalva/jwt-go/README.md
+++ b/src/vendor/github.com/dgrijalva/jwt-go/README.md
@@ -1,11 +1,15 @@
-A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
+# jwt-go
[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
+[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go)
-**BREAKING CHANGES:*** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
-**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect.
+**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3.
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
## What the heck is a JWT?
@@ -25,8 +29,8 @@ This library supports the parsing and verification as well as the generation and
See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
-* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example_Parse_hmac)
-* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example_New_hmac)
+* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
+* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
## Extensions
@@ -37,7 +41,7 @@ Here's an example of an extension that integrates with the Google App Engine sig
## Compliance
-This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
+This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
@@ -47,7 +51,10 @@ This library is considered production ready. Feedback and feature requests are
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
-While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning.
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning.
+
+**BREAKING CHANGES:**
+* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
## Usage Tips
@@ -68,18 +75,26 @@ Symmetric signing methods, such as HMAC, use only a single secret. This is probab
Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
+
+* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
+* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+
### JWT and OAuth
It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
-* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
-
+
## More
Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
-The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in to documentation.
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
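As a quick illustration of the key-type table added above (a sketch, not from the README): HMAC methods take the same []byte for signing and validation, and the parse callback is where you verify the alg before handing back the key.

```go
package main

import (
	"fmt"
	"log"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key := []byte("secret") // HMAC methods expect []byte keys

	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"user": "alice"})
	signed, err := token.SignedString(key)
	if err != nil {
		log.Fatal(err)
	}

	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		// Verify the alg is what we expect before returning the key.
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return key, nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.Claims.(jwt.MapClaims)["user"])
}
```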
diff --git a/src/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/src/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
index b605b4509..637029831 100644
--- a/src/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
+++ b/src/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
@@ -1,5 +1,18 @@
## `jwt-go` Version History
+#### 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
+
+#### 3.1.0
+
+* Improvements to `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
#### 3.0.0
* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
diff --git a/src/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/src/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
index 2f59a2223..f97738124 100644
--- a/src/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
+++ b/src/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
@@ -14,6 +14,7 @@ var (
)
// Implements the ECDSA family of signing methods
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
type SigningMethodECDSA struct {
Name string
Hash crypto.Hash
diff --git a/src/vendor/github.com/dgrijalva/jwt-go/errors.go b/src/vendor/github.com/dgrijalva/jwt-go/errors.go
index 662df19d4..1c93024aa 100644
--- a/src/vendor/github.com/dgrijalva/jwt-go/errors.go
+++ b/src/vendor/github.com/dgrijalva/jwt-go/errors.go
@@ -51,13 +51,9 @@ func (e ValidationError) Error() string {
} else {
return "token is invalid"
}
- return e.Inner.Error()
}
// No errors
func (e *ValidationError) valid() bool {
- if e.Errors > 0 {
- return false
- }
- return true
+ return e.Errors == 0
}
diff --git a/src/vendor/github.com/dgrijalva/jwt-go/hmac.go b/src/vendor/github.com/dgrijalva/jwt-go/hmac.go
index c22991925..addbe5d40 100644
--- a/src/vendor/github.com/dgrijalva/jwt-go/hmac.go
+++ b/src/vendor/github.com/dgrijalva/jwt-go/hmac.go
@@ -7,6 +7,7 @@ import (
)
// Implements the HMAC-SHA family of signing methods
+// Expects key type of []byte for both signing and validation
type SigningMethodHMAC struct {
Name string
Hash crypto.Hash
@@ -90,5 +91,5 @@ func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string,
return EncodeSegment(hasher.Sum(nil)), nil
}
- return "", ErrInvalidKey
+ return "", ErrInvalidKeyType
}
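The effect of this change, sketched (not from the diff): a key of the wrong Go type now fails with the more specific sentinel error.

```go
// HMAC expects a []byte key; any other Go type is rejected.
_, err := jwt.SigningMethodHS256.Sign("header.payload", "not-a-byte-slice")
fmt.Println(err == jwt.ErrInvalidKeyType) // true
```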
diff --git a/src/vendor/github.com/dgrijalva/jwt-go/parser.go b/src/vendor/github.com/dgrijalva/jwt-go/parser.go
index 7020c52a1..d6901d9ad 100644
--- a/src/vendor/github.com/dgrijalva/jwt-go/parser.go
+++ b/src/vendor/github.com/dgrijalva/jwt-go/parser.go
@@ -8,8 +8,9 @@ import (
)
type Parser struct {
- ValidMethods []string // If populated, only these methods will be considered valid
- UseJSONNumber bool // Use JSON Number format in JSON decoder
+ ValidMethods []string // If populated, only these methods will be considered valid
+ UseJSONNumber bool // Use JSON Number format in JSON decoder
+ SkipClaimsValidation bool // Skip claims validation during token parsing
}
// Parse, validate, and return a token.
@@ -20,55 +21,9 @@ func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
}
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
- parts := strings.Split(tokenString, ".")
- if len(parts) != 3 {
- return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
- }
-
- var err error
- token := &Token{Raw: tokenString}
-
- // parse Header
- var headerBytes []byte
- if headerBytes, err = DecodeSegment(parts[0]); err != nil {
- if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
- return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
- }
- return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
- if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
- return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
-
- // parse Claims
- var claimBytes []byte
- token.Claims = claims
-
- if claimBytes, err = DecodeSegment(parts[1]); err != nil {
- return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
- dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
- if p.UseJSONNumber {
- dec.UseNumber()
- }
- // JSON Decode. Special case for map type to avoid weird pointer behavior
- if c, ok := token.Claims.(MapClaims); ok {
- err = dec.Decode(&c)
- } else {
- err = dec.Decode(&claims)
- }
- // Handle decode error
+ token, parts, err := p.ParseUnverified(tokenString, claims)
if err != nil {
- return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
-
- // Lookup signature method
- if method, ok := token.Header["alg"].(string); ok {
- if token.Method = GetSigningMethod(method); token.Method == nil {
- return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
- }
- } else {
- return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+ return token, err
}
// Verify signing method is in the required set
@@ -95,20 +50,25 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
}
if key, err = keyFunc(token); err != nil {
// keyFunc returned an error
+ if ve, ok := err.(*ValidationError); ok {
+ return token, ve
+ }
return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
}
vErr := &ValidationError{}
// Validate Claims
- if err := token.Claims.Valid(); err != nil {
+ if !p.SkipClaimsValidation {
+ if err := token.Claims.Valid(); err != nil {
- // If the Claims Valid returned an error, check if it is a validation error,
- // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
- if e, ok := err.(*ValidationError); !ok {
- vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
- } else {
- vErr = e
+ // If the Claims Valid returned an error, check if it is a validation error,
+ // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
+ if e, ok := err.(*ValidationError); !ok {
+ vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
+ } else {
+ vErr = e
+ }
}
}
@@ -126,3 +86,63 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
return token, vErr
}
+
+// WARNING: Don't use this method unless you know what you're doing
+//
+// This method parses the token but doesn't validate the signature. It's only
+// ever useful in cases where you know the signature is valid (because it has
+// been checked previously in the stack) and you want to extract values from
+// it.
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+ parts = strings.Split(tokenString, ".")
+ if len(parts) != 3 {
+ return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+ }
+
+ token = &Token{Raw: tokenString}
+
+ // parse Header
+ var headerBytes []byte
+ if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+ if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+ return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
+ }
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+ if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+
+ // parse Claims
+ var claimBytes []byte
+ token.Claims = claims
+
+ if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+ dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+ if p.UseJSONNumber {
+ dec.UseNumber()
+ }
+ // JSON Decode. Special case for map type to avoid weird pointer behavior
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = dec.Decode(&c)
+ } else {
+ err = dec.Decode(&claims)
+ }
+ // Handle decode error
+ if err != nil {
+ return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+ }
+
+ // Lookup signature method
+ if method, ok := token.Header["alg"].(string); ok {
+ if token.Method = GetSigningMethod(method); token.Method == nil {
+ return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
+ }
+ } else {
+ return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+ }
+
+ return token, parts, nil
+}
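A short sketch of the new method (the token string is a placeholder): ParseUnverified decodes the header and claims but skips both signature and claims validation, which is handy for picking a verification key by issuer before calling Parse properly.

```go
package main

import (
	"fmt"
	"log"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	tokenString := "..." // placeholder: a JWT received from elsewhere

	// Decodes header and claims only; no signature or claims validation.
	token, _, err := new(jwt.Parser).ParseUnverified(tokenString, jwt.MapClaims{})
	if err != nil {
		log.Fatal(err)
	}
	claims := token.Claims.(jwt.MapClaims)
	fmt.Println("issuer hint:", claims["iss"]) // use only to pick a key, then Parse properly
}
```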
diff --git a/src/vendor/github.com/dgrijalva/jwt-go/rsa.go b/src/vendor/github.com/dgrijalva/jwt-go/rsa.go
index 0ae0b1984..e4caf1ca4 100644
--- a/src/vendor/github.com/dgrijalva/jwt-go/rsa.go
+++ b/src/vendor/github.com/dgrijalva/jwt-go/rsa.go
@@ -7,6 +7,7 @@ import (
)
// Implements the RSA family of signing methods
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
type SigningMethodRSA struct {
Name string
Hash crypto.Hash
@@ -44,7 +45,7 @@ func (m *SigningMethodRSA) Alg() string {
}
// Implements the Verify method from SigningMethod
-// For this signing method, must be an rsa.PublicKey structure.
+// For this signing method, must be an *rsa.PublicKey structure.
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
var err error
@@ -73,7 +74,7 @@ func (m *SigningMethodRSA) Verify(signingString, signature string, key interface
}
// Implements the Sign method from SigningMethod
-// For this signing method, must be an rsa.PrivateKey structure.
+// For this signing method, must be an *rsa.PrivateKey structure.
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
var rsaKey *rsa.PrivateKey
var ok bool
diff --git a/src/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/src/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
index 213a90dbb..a5ababf95 100644
--- a/src/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
+++ b/src/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
@@ -39,6 +39,38 @@ func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
return pkey, nil
}
+// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+
+ var blockDecrypted []byte
+ if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+ return nil, err
+ }
+
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
// Parse PEM encoded PKCS1 or PKCS8 public key
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
var err error
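Usage of the new helper might look like the following sketch (the file path and passphrase are placeholders): the PEM block is decrypted with x509.DecryptPEMBlock, then tried as PKCS1 and, failing that, PKCS8.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	keyPEM, err := ioutil.ReadFile("rsa-key.pem") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	priv, err := jwt.ParseRSAPrivateKeyFromPEMWithPassword(keyPEM, "passphrase")
	if err != nil {
		log.Fatal(err)
	}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodRS256,
		jwt.MapClaims{"sub": "1234567890"}).SignedString(priv)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(signed)
}
```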
diff --git a/src/vendor/github.com/go-sql-driver/mysql/.gitignore b/src/vendor/github.com/go-sql-driver/mysql/.gitignore
index ba8e0cb3a..2de28da16 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/.gitignore
+++ b/src/vendor/github.com/go-sql-driver/mysql/.gitignore
@@ -6,3 +6,4 @@
Icon?
ehthumbs.db
Thumbs.db
+.idea
diff --git a/src/vendor/github.com/go-sql-driver/mysql/.travis.yml b/src/vendor/github.com/go-sql-driver/mysql/.travis.yml
index c1cc10aaf..cc1268c36 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/.travis.yml
+++ b/src/vendor/github.com/go-sql-driver/mysql/.travis.yml
@@ -1,13 +1,107 @@
sudo: false
language: go
go:
- - 1.2
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- - 1.7
- - tip
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - 1.10.x
+ - master
+
+before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
before_script:
+ - echo -e "[server]\ninnodb_log_file_size=256MB\ninnodb_buffer_pool_size=512MB\nmax_allowed_packet=16MB" | sudo tee -a /etc/mysql/my.cnf
+ - sudo service mysql restart
+ - .travis/wait_mysql.sh
- mysql -e 'create database gotest;'
+
+matrix:
+ include:
+ - env: DB=MYSQL8
+ sudo: required
+ dist: trusty
+ go: 1.10.x
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mysql:8.0
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mysql:8.0 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
+ - cp .travis/docker.cnf ~/.my.cnf
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+ - env: DB=MYSQL57
+ sudo: required
+ dist: trusty
+ go: 1.10.x
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mysql:5.7
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mysql:5.7 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
+ - cp .travis/docker.cnf ~/.my.cnf
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+ - env: DB=MARIA55
+ sudo: required
+ dist: trusty
+ go: 1.10.x
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mariadb:5.5
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mariadb:5.5 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
+ - cp .travis/docker.cnf ~/.my.cnf
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+ - env: DB=MARIA10_1
+ sudo: required
+ dist: trusty
+ go: 1.10.x
+ services:
+ - docker
+ before_install:
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/mattn/goveralls
+ - docker pull mariadb:10.1
+ - docker run -d -p 127.0.0.1:3307:3306 --name mysqld -e MYSQL_DATABASE=gotest -e MYSQL_USER=gotest -e MYSQL_PASSWORD=secret -e MYSQL_ROOT_PASSWORD=verysecret
+ mariadb:10.1 --innodb_log_file_size=256MB --innodb_buffer_pool_size=512MB --max_allowed_packet=16MB --local-infile=1
+ - cp .travis/docker.cnf ~/.my.cnf
+ - .travis/wait_mysql.sh
+ before_script:
+ - export MYSQL_TEST_USER=gotest
+ - export MYSQL_TEST_PASS=secret
+ - export MYSQL_TEST_ADDR=127.0.0.1:3307
+ - export MYSQL_TEST_CONCURRENT=1
+
+script:
+ - go test -v -covermode=count -coverprofile=coverage.out
+ - go vet ./...
+ - .travis/gofmt.sh
+after_script:
+ - $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci
diff --git a/src/vendor/github.com/go-sql-driver/mysql/AUTHORS b/src/vendor/github.com/go-sql-driver/mysql/AUTHORS
index 692c186fd..73ff68fbc 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ b/src/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -12,35 +12,63 @@
# Individual Persons
Aaron Hopkins
+Achille Roussel
+Alexey Palazhchenko
+Andrew Reid
Arne Hormann
+Asta Xie
+Bulat Gaifullin
Carlos Nieto
Chris Moos
+Craig Wilson
+Daniel Montoya
Daniel Nichter
Daniël van Eeden
+Dave Protasowski
DisposaBoy
+Egor Smolyakov
+Evan Shaw
Frederick Mayle
Gustavo Kristic
+Hajime Nakagami
Hanno Braun
Henri Yandell
Hirotaka Yamamoto
+ICHINOSE Shogo
INADA Naoki
+Jacek Szwec
James Harr
+Jeff Hodges
+Jeffrey Charles
Jian Zhen
Joshua Prunier
Julien Lefevre
Julien Schmidt
+Justin Li
+Justin Nuß
Kamil Dziedzic
Kevin Malachowski
+Kieron Woodhouse
Lennart Rudolph
Leonardo YongUk Kim
+Linh Tran Tuan
+Lion Yang
Luca Looz
Lucas Liu
Luke Scott
+Maciej Zimnoch
Michael Woolnough
Nicola Peduzzi
Olivier Mengué
+oscarzhao
Paul Bonser
+Peter Schultz
+Rebecca Chin
+Reed Allman
+Richard Wilkes
+Robert Russell
Runrioter Wung
+Shuode Li
Soroush Pour
Stan Putrya
Stanley Gunawan
@@ -52,5 +80,10 @@ Zhenye Xie
# Organizations
Barracuda Networks, Inc.
+Counting Ltd.
Google Inc.
+InfoSum Ltd.
+Keybase Inc.
+Percona LLC
+Pivotal Inc.
Stripe Inc.
diff --git a/src/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/src/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
index 6bcad7eaa..ce1b5330a 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
+++ b/src/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -1,3 +1,62 @@
+## Version 1.4.1 (2018-11-14)
+
+Bugfixes:
+
+ - Fix TIME format for binary columns (#818)
+ - Fix handling of empty auth plugin names (#835)
+ - Fix caching_sha2_password with empty password (#826)
+ - Fix canceled context broke mysqlConn (#862)
+ - Fix OldAuthSwitchRequest support (#870)
+ - Fix Auth Response packet for cleartext password (#887)
+
+## Version 1.4 (2018-06-03)
+
+Changes:
+
+ - Documentation fixes (#530, #535, #567)
+ - Refactoring (#575, #579, #580, #581, #603, #615, #704)
+ - Cache column names (#444)
+ - Sort the DSN parameters in DSNs generated from a config (#637)
+ - Allow native password authentication by default (#644)
+ - Use the default port if it is missing in the DSN (#668)
+ - Removed the `strict` mode (#676)
+ - Do not query `max_allowed_packet` by default (#680)
+ - Dropped support for Go 1.6 and lower (#696)
+ - Updated `ConvertValue()` to match the database/sql/driver implementation (#760)
+ - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783)
+ - Improved the compatibility of the authentication system (#807)
+
+New Features:
+
+ - Multi-Results support (#537)
+ - `rejectReadOnly` DSN option (#604)
+ - `context.Context` support (#608, #612, #627, #761)
+ - Transaction isolation level support (#619, #744)
+ - Read-Only transactions support (#618, #634)
+ - `NewConfig` function which initializes a config with default values (#679)
+ - Implemented the `ColumnType` interfaces (#667, #724)
+ - Support for custom string types in `ConvertValue` (#623)
+ - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710)
+ - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802)
+ - Implemented `driver.SessionResetter` (#779)
+ - `sha256_password` authentication plugin support (#808)
+
+Bugfixes:
+
+ - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718)
+ - Fixed LOAD LOCAL DATA INFILE for empty files (#590)
+ - Removed columns definition cache since it sometimes cached invalid data (#592)
+ - Don't mutate registered TLS configs (#600)
+ - Make RegisterTLSConfig concurrency-safe (#613)
+ - Handle missing auth data in the handshake packet correctly (#646)
+ - Do not retry queries when data was written to avoid data corruption (#302, #736)
+ - Cache the connection pointer for error handling before invalidating it (#678)
+ - Fixed imports for appengine/cloudsql (#700)
+ - Fix sending STMT_LONG_DATA for 0 byte data (#734)
+ - Set correct capacity for []bytes read from length-encoded strings (#766)
+ - Make RegisterDial concurrency-safe (#773)
+
+
## Version 1.3 (2016-12-01)
Changes:
diff --git a/src/vendor/github.com/go-sql-driver/mysql/README.md b/src/vendor/github.com/go-sql-driver/mysql/README.md
index a16012f81..2e9b07eeb 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/README.md
+++ b/src/vendor/github.com/go-sql-driver/mysql/README.md
@@ -1,6 +1,6 @@
# Go-MySQL-Driver
-A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package
+A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
@@ -15,6 +15,9 @@ A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) packa
* [Address](#address)
* [Parameters](#parameters)
* [Examples](#examples)
+ * [Connection pool and timeouts](#connection-pool-and-timeouts)
+ * [context.Context Support](#contextcontext-support)
+ * [ColumnType Support](#columntype-support)
* [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
* [time.Time support](#timetime-support)
* [Unicode support](#unicode-support)
@@ -26,31 +29,31 @@ A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) packa
## Features
* Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
* Native Go implementation. No C-bindings, just pure Go
- * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
+ * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
* Automatic handling of broken connections
* Automatic Connection Pooling *(by database/sql package)*
* Supports queries larger than 16MB
- * Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support.
+ * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
* Intelligent `LONG DATA` handling in prepared statements
* Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support
* Optional `time.Time` parsing
* Optional placeholder interpolation
## Requirements
- * Go 1.2 or higher
+ * Go 1.7 or higher. We aim to support the 3 latest versions of Go.
* MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
---------------------------------------
## Installation
-Simple install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell:
+Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from a shell:
```bash
-$ go get github.com/go-sql-driver/mysql
+$ go get -u github.com/go-sql-driver/mysql
```
-Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`.
+Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
## Usage
-_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](http://golang.org/pkg/database/sql) API then.
+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](https://golang.org/pkg/database/sql/) API.
Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
```go
@@ -95,13 +98,14 @@ Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mys
Passwords can consist of any character. Escaping is **not** necessary.
#### Protocol
-See [net.Dial](http://golang.org/pkg/net/#Dial) for more information which networks are available.
+See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
In general you should use a Unix domain socket if available and TCP otherwise for best performance.
#### Address
-For TCP and UDP networks, addresses have the form `host:port`.
+For TCP and UDP networks, addresses have the form `host[:port]`.
+If `port` is omitted, the default port will be used.
If `host` is a literal IPv6 address, it must be enclosed in square brackets.
-The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
+The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
@@ -136,9 +140,9 @@ Default: false
```
Type: bool
Valid Values: true, false
-Default: false
+Default: true
```
-`allowNativePasswords=true` allows the usage of the mysql native password method.
+`allowNativePasswords=false` disallows the usage of the MySQL native password method.
##### `allowOldPasswords`
@@ -220,19 +224,19 @@ Valid Values:
Default: UTC
```
-Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details.
+Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
-Please keep in mind, that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
##### `maxAllowedPacket`
```
Type: decimal number
-Default: 0
+Default: 4194304
```
-Max packet size allowed in bytes. Use `maxAllowedPacket=0` to automatically fetch the `max_allowed_packet` variable from server.
+Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from the server *on every connection*.
##### `multiStatements`
@@ -255,18 +259,19 @@ Default: false
```
`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`
+A `DATE` or `DATETIME` value like `0000-00-00 00:00:00` is converted into the zero value of `time.Time`.
##### `readTimeout`
```
-Type: decimal number
+Type: duration
Default: 0
```
-I/O read timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-##### `strict`
+##### `rejectReadOnly`
```
Type: bool
@@ -274,20 +279,50 @@ Valid Values: true, false
Default: false
```
-`strict=true` enables a driver-side strict mode in which MySQL warnings are treated as errors. This mode should not be used in production as it may lead to data corruption in certain situations.
-A server-side strict mode, which is safe for production use, can be set via the [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html) system variable.
+`rejectReadOnly=true` causes the driver to reject read-only connections. This
+is for a possible race condition during an automatic failover, where the mysql
+client gets connected to a read-only replica after the failover.
+
+Note that this should be a fairly rare case, as an automatic failover normally
+happens when the primary is down, and the race condition shouldn't happen
+unless it comes back up online as soon as the failover is kicked off. On the
+other hand, when this happens, a MySQL application can get stuck on a
+read-only connection until restarted. It is however fairly easy to reproduce,
+for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
+
+If you are not relying on read-only transactions to reject writes that aren't
+supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
+is safer for failovers.
+
+Note that ERROR 1290 can be returned for a `read-only` server, and this option
+causes a retry for that error. However, the same error number is used for some
+other cases as well. Before enabling this option, make sure your application
+never triggers ERROR 1290 except for `read-only` mode.
+
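For example, a DSN enabling this behavior against a hypothetical Aurora endpoint might look like (hostname and credentials are illustrative):

```
user:password@tcp(mycluster.cluster-abc123.us-east-1.rds.amazonaws.com:3306)/mydb?rejectReadOnly=true
```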
+
+##### `serverPubKey`
+
+```
+Type: string
+Valid Values:
+Default: none
+```
+
+Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
+Public keys are used to transmit encrypted data, e.g. for authentication.
+If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
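A condensed sketch of the register-then-reference flow (the doc comment on `RegisterServerPubKey` in the new `auth.go` below carries a fuller variant); the key file path, registry name, and DSN are placeholders:

```go
package main

import (
	"crypto/rsa"
	"crypto/x509"
	"database/sql"
	"encoding/pem"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	pemBytes, err := ioutil.ReadFile("server_pub.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	pub, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// Register under "mykey", then reference it by name in the DSN.
	mysql.RegisterServerPubKey("mykey", pub.(*rsa.PublicKey))

	db, err := sql.Open("mysql", "user:password@/dbname?serverPubKey=mykey")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```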
-By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes.
##### `timeout`
```
-Type: decimal number
+Type: duration
Default: OS default
```
-*Driver* side connection timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
+Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
##### `tls`
@@ -297,16 +332,17 @@ Valid Values: true, false, skip-verify, <name>
Default: false
```
-`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
+`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
+
##### `writeTimeout`
```
-Type: decimal number
+Type: duration
Default: 0
```
-I/O write timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
##### System Variables
@@ -317,9 +353,9 @@ Any other parameters are interpreted as system variables:
* `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
Rules:
-* The values for string variables must be quoted with '
+* The values for string variables must be quoted with `'`.
* The values must also be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed!
- (which implies values of string variables must be wrapped with `%27`)
+ (which implies values of string variables must be wrapped with `%27`).
Examples:
* `autocommit=1`: `SET autocommit=1`
@@ -380,6 +416,18 @@ No Database preselected:
user:password@/
```
+
+### Connection pool and timeouts
+The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
+
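A minimal sketch of the split described above, with illustrative values: pool sizing goes through `database/sql`, while per-connection I/O limits go through the DSN.

```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Per-connection dial/read/write limits come from the DSN parameters.
	db, err := sql.Open("mysql",
		"user:password@tcp(127.0.0.1:3306)/dbname?timeout=5s&readTimeout=30s&writeTimeout=30s")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pool sizing is managed by database/sql.
	db.SetMaxOpenConns(20)                 // cap concurrent connections
	db.SetMaxIdleConns(10)                 // keep warm connections around
	db.SetConnMaxLifetime(5 * time.Minute) // recycle before server-side timeouts
}
```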
+## `ColumnType` Support
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
+
+## `context.Context` Support
+Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
+See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
+
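A minimal sketch of query cancellation via context, assuming a reachable server; the one-second deadline is illustrative:

```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The query is canceled if it does not finish within one second.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	var now string
	if err := db.QueryRowContext(ctx, "SELECT NOW()").Scan(&now); err != nil {
		log.Fatal(err)
	}
	log.Println(now)
}
```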
+
### `LOAD DATA LOCAL INFILE` support
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
```go
@@ -390,17 +438,17 @@ Files must be whitelisted by registering them with `mysql.RegisterLocalFile(file
To use an `io.Reader`, a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you don't need it anymore.
-See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
+See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
### `time.Time` support
-The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your programm.
+The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
-However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go of `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
-Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
+Alternatively you can use the [`NullTime`](https://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`.
### Unicode support
@@ -412,7 +460,6 @@ Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAM
See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
-
## Testing / Development
To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
@@ -431,13 +478,13 @@ Mozilla summarizes the license scope as follows:
That means:
- * You can **use** the **unchanged** source code both in private and commercially
- * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
- * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**
+ * You can **use** the **unchanged** source code both in private and commercially.
+ * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
+ * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
-Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license.
+Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
-You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
+You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
diff --git a/src/vendor/github.com/go-sql-driver/mysql/appengine.go b/src/vendor/github.com/go-sql-driver/mysql/appengine.go
index 565614eef..be41f2ee6 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/appengine.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/appengine.go
@@ -11,7 +11,7 @@
package mysql
import (
- "appengine/cloudsql"
+ "google.golang.org/appengine/cloudsql"
)
func init() {
diff --git a/src/vendor/github.com/go-sql-driver/mysql/auth.go b/src/vendor/github.com/go-sql-driver/mysql/auth.go
new file mode 100644
index 000000000..14f678a87
--- /dev/null
+++ b/src/vendor/github.com/go-sql-driver/mysql/auth.go
@@ -0,0 +1,420 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/pem"
+ "sync"
+)
+
+// server pub keys registry
+var (
+ serverPubKeyLock sync.RWMutex
+ serverPubKeyRegistry map[string]*rsa.PublicKey
+)
+
+// RegisterServerPubKey registers a server RSA public key which can be used to
+// send data in a secure manner to the server without receiving the public key
+// in a potentially insecure way from the server first.
+// Registered keys can afterwards be used adding serverPubKey=<name> to the DSN.
+//
+// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
+// after registering it and may not be modified.
+//
+// data, err := ioutil.ReadFile("mykey.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// block, _ := pem.Decode(data)
+// if block == nil || block.Type != "PUBLIC KEY" {
+// log.Fatal("failed to decode PEM block containing public key")
+// }
+//
+// pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
+// mysql.RegisterServerPubKey("mykey", rsaPubKey)
+// } else {
+// log.Fatal("not a RSA public key")
+// }
+//
+func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry == nil {
+ serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
+ }
+
+ serverPubKeyRegistry[name] = pubKey
+ serverPubKeyLock.Unlock()
+}
+
+// DeregisterServerPubKey removes the public key registered with the given name.
+func DeregisterServerPubKey(name string) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry != nil {
+ delete(serverPubKeyRegistry, name)
+ }
+ serverPubKeyLock.Unlock()
+}
+
+func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
+ serverPubKeyLock.RLock()
+ if v, ok := serverPubKeyRegistry[name]; ok {
+ pubKey = v
+ }
+ serverPubKeyLock.RUnlock()
+ return
+}
+
+// Hash password using pre 4.1 (old password) method
+// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
+type myRnd struct {
+ seed1, seed2 uint32
+}
+
+const myRndMaxVal = 0x3FFFFFFF
+
+// Pseudo random number generator
+func newMyRnd(seed1, seed2 uint32) *myRnd {
+ return &myRnd{
+ seed1: seed1 % myRndMaxVal,
+ seed2: seed2 % myRndMaxVal,
+ }
+}
+
+// Tested to be equivalent to MariaDB's floating point variant
+// http://play.golang.org/p/QHvhd4qved
+// http://play.golang.org/p/RG0q4ElWDx
+func (r *myRnd) NextByte() byte {
+ r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
+ r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
+
+ return byte(uint64(r.seed1) * 31 / myRndMaxVal)
+}
+
+// Generate binary hash from byte string using insecure pre 4.1 method
+func pwHash(password []byte) (result [2]uint32) {
+ var add uint32 = 7
+ var tmp uint32
+
+ result[0] = 1345345333
+ result[1] = 0x12345671
+
+ for _, c := range password {
+ // skip spaces and tabs in password
+ if c == ' ' || c == '\t' {
+ continue
+ }
+
+ tmp = uint32(c)
+ result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
+ result[1] += (result[1] << 8) ^ result[0]
+ add += tmp
+ }
+
+ // Remove the sign bit by masking with (1<<31)-1
+ result[0] &= 0x7FFFFFFF
+ result[1] &= 0x7FFFFFFF
+
+ return
+}
+
+// Hash password using insecure pre 4.1 method
+func scrambleOldPassword(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ scramble = scramble[:8]
+
+ hashPw := pwHash([]byte(password))
+ hashSc := pwHash(scramble)
+
+ r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
+
+ var out [8]byte
+ for i := range out {
+ out[i] = r.NextByte() + 64
+ }
+
+ mask := r.NextByte()
+ for i := range out {
+ out[i] ^= mask
+ }
+
+ return out[:]
+}
+
+// Hash password using 4.1+ method (SHA1)
+func scramblePassword(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // stage1Hash = SHA1(password)
+ crypt := sha1.New()
+ crypt.Write([]byte(password))
+ stage1 := crypt.Sum(nil)
+
+ // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
+ // inner Hash
+ crypt.Reset()
+ crypt.Write(stage1)
+ hash := crypt.Sum(nil)
+
+ // outer Hash
+ crypt.Reset()
+ crypt.Write(scramble)
+ crypt.Write(hash)
+ scramble = crypt.Sum(nil)
+
+ // token = scrambleHash XOR stage1Hash
+ for i := range scramble {
+ scramble[i] ^= stage1[i]
+ }
+ return scramble
+}
+
+// Hash password using MySQL 8+ method (SHA256)
+func scrambleSHA256Password(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
+
+ crypt := sha256.New()
+ crypt.Write([]byte(password))
+ message1 := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1)
+ message1Hash := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1Hash)
+ crypt.Write(scramble)
+ message2 := crypt.Sum(nil)
+
+ for i := range message1 {
+ message1[i] ^= message2[i]
+ }
+
+ return message1
+}
+
+func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
+ plain := make([]byte, len(password)+1)
+ copy(plain, password)
+ for i := range plain {
+ j := i % len(seed)
+ plain[i] ^= seed[j]
+ }
+ sha1 := sha1.New()
+ return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
+}
+
+func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
+ enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
+ if err != nil {
+ return err
+ }
+ return mc.writeAuthSwitchPacket(enc)
+}
+
+func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
+ switch plugin {
+ case "caching_sha2_password":
+ authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
+ return authResp, nil
+
+ case "mysql_old_password":
+ if !mc.cfg.AllowOldPasswords {
+ return nil, ErrOldPassword
+ }
+ // Note: there are edge cases where this should work but doesn't;
+ // this is currently "wontfix":
+ // https://github.com/go-sql-driver/mysql/issues/184
+ authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0)
+ return authResp, nil
+
+ case "mysql_clear_password":
+ if !mc.cfg.AllowCleartextPasswords {
+ return nil, ErrCleartextPassword
+ }
+ // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
+ // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
+ return append([]byte(mc.cfg.Passwd), 0), nil
+
+ case "mysql_native_password":
+ if !mc.cfg.AllowNativePasswords {
+ return nil, ErrNativePassword
+ }
+ // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
+ // Native password authentication only needs, and will only ever need, a 20-byte challenge.
+ authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
+ return authResp, nil
+
+ case "sha256_password":
+ if len(mc.cfg.Passwd) == 0 {
+ return []byte{0}, nil
+ }
+ if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
+ // write cleartext auth packet
+ return append([]byte(mc.cfg.Passwd), 0), nil
+ }
+
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ return []byte{1}, nil
+ }
+
+ // encrypted password
+ enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
+ return enc, err
+
+ default:
+ errLog.Print("unknown auth plugin:", plugin)
+ return nil, ErrUnknownPlugin
+ }
+}
+
+func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
+ // Read Result Packet
+ authData, newPlugin, err := mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // handle auth plugin switch, if requested
+ if newPlugin != "" {
+ // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
+ // sent and we have to keep using the cipher sent in the init packet.
+ if authData == nil {
+ authData = oldAuthData
+ } else {
+ // copy data from read buffer to owned slice
+ copy(oldAuthData, authData)
+ }
+
+ plugin = newPlugin
+
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ return err
+ }
+ if err = mc.writeAuthSwitchPacket(authResp); err != nil {
+ return err
+ }
+
+ // Read Result Packet
+ authData, newPlugin, err = mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // Do not allow to change the auth plugin more than once
+ if newPlugin != "" {
+ return ErrMalformPkt
+ }
+ }
+
+ switch plugin {
+
+ // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
+ case "caching_sha2_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ case 1:
+ switch authData[0] {
+ case cachingSha2PasswordFastAuthSuccess:
+ if err = mc.readResultOK(); err == nil {
+ return nil // auth successful
+ }
+
+ case cachingSha2PasswordPerformFullAuthentication:
+ if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
+ // write cleartext auth packet
+ err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0))
+ if err != nil {
+ return err
+ }
+ } else {
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ data := mc.buf.takeSmallBuffer(4 + 1)
+ data[4] = cachingSha2PasswordRequestPublicKey
+ mc.writePacket(data)
+
+ // parse public key
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ block, _ := pem.Decode(data[1:])
+ pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+ pubKey = pkix.(*rsa.PublicKey)
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pubKey)
+ if err != nil {
+ return err
+ }
+ }
+ return mc.readResultOK()
+
+ default:
+ return ErrMalformPkt
+ }
+ default:
+ return ErrMalformPkt
+ }
+
+ case "sha256_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ default:
+ block, _ := pem.Decode(authData)
+ pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
+ if err != nil {
+ return err
+ }
+ return mc.readResultOK()
+ }
+
+ default:
+ return nil // auth successful
+ }
+
+ return err
+}
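The `mysql_native_password` scrambling in `scramblePassword` above amounts to `token = SHA1(scramble + SHA1(SHA1(password))) XOR SHA1(password)`. A self-contained sketch of the same computation using only the standard library; the 20-byte challenge value is illustrative:

```go
package main

import (
	"crypto/sha1"
	"fmt"
)

// nativeToken mirrors scramblePassword: the 4.1+ challenge-response token
// is SHA1(scramble + SHA1(SHA1(password))) XOR SHA1(password).
func nativeToken(scramble []byte, password string) []byte {
	stage1 := sha1.Sum([]byte(password)) // SHA1(password)
	stage2 := sha1.Sum(stage1[:])        // SHA1(SHA1(password))

	h := sha1.New()
	h.Write(scramble)
	h.Write(stage2[:])
	token := h.Sum(nil) // SHA1(scramble + stage2)

	for i := range token {
		token[i] ^= stage1[i]
	}
	return token
}

func main() {
	scramble := []byte("12345678901234567890") // 20-byte server challenge (illustrative)
	fmt.Printf("%x\n", nativeToken(scramble, "secret"))
}
```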
diff --git a/src/vendor/github.com/go-sql-driver/mysql/buffer.go b/src/vendor/github.com/go-sql-driver/mysql/buffer.go
index 2001feacd..eb4748bf4 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/buffer.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/buffer.go
@@ -130,18 +130,18 @@ func (b *buffer) takeBuffer(length int) []byte {
// smaller than defaultBufSize
// Only one buffer (total) can be used at a time.
func (b *buffer) takeSmallBuffer(length int) []byte {
- if b.length == 0 {
- return b.buf[:length]
+ if b.length > 0 {
+ return nil
}
- return nil
+ return b.buf[:length]
}
// takeCompleteBuffer returns the complete existing buffer.
// This can be used if the necessary buffer size is unknown.
// Only one buffer (total) can be used at a time.
func (b *buffer) takeCompleteBuffer() []byte {
- if b.length == 0 {
- return b.buf
+ if b.length > 0 {
+ return nil
}
- return nil
+ return b.buf
}
diff --git a/src/vendor/github.com/go-sql-driver/mysql/collations.go b/src/vendor/github.com/go-sql-driver/mysql/collations.go
index 82079cfb9..136c9e4d1 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/collations.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -9,6 +9,7 @@
package mysql
const defaultCollation = "utf8_general_ci"
+const binaryCollation = "binary"
// A list of available collations mapped to the internal ID.
// To update this map use the following MySQL query:
diff --git a/src/vendor/github.com/go-sql-driver/mysql/connection.go b/src/vendor/github.com/go-sql-driver/mysql/connection.go
index d82c728f3..e57061412 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/connection.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -10,12 +10,23 @@ package mysql
import (
"database/sql/driver"
+ "io"
"net"
"strconv"
"strings"
"time"
)
+// a copy of context.Context for Go 1.7 and earlier
+type mysqlContext interface {
+ Done() <-chan struct{}
+ Err() error
+
+ // defined in context.Context, but not used in this driver:
+ // Deadline() (deadline time.Time, ok bool)
+ // Value(key interface{}) interface{}
+}
+
type mysqlConn struct {
buf buffer
netConn net.Conn
@@ -29,7 +40,14 @@ type mysqlConn struct {
status statusFlag
sequence uint8
parseTime bool
- strict bool
+
+ // for context support (Go 1.8+)
+ watching bool
+ watcher chan<- mysqlContext
+ closech chan struct{}
+ finished chan<- struct{}
+ canceled atomicError // set non-nil if conn is canceled
+ closed atomicBool // set when conn is closed, before closech is closed
}
// Handles parameters set in DSN after the connection is established
@@ -62,22 +80,41 @@ func (mc *mysqlConn) handleParams() (err error) {
return
}
+func (mc *mysqlConn) markBadConn(err error) error {
+ if mc == nil {
+ return err
+ }
+ if err != errBadConnNoWrite {
+ return err
+ }
+ return driver.ErrBadConn
+}
+
func (mc *mysqlConn) Begin() (driver.Tx, error) {
- if mc.netConn == nil {
+ return mc.begin(false)
+}
+
+func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
- err := mc.exec("START TRANSACTION")
+ var q string
+ if readOnly {
+ q = "START TRANSACTION READ ONLY"
+ } else {
+ q = "START TRANSACTION"
+ }
+ err := mc.exec(q)
if err == nil {
return &mysqlTx{mc}, err
}
-
- return nil, err
+ return nil, mc.markBadConn(err)
}
func (mc *mysqlConn) Close() (err error) {
// Makes Close idempotent
- if mc.netConn != nil {
+ if !mc.closed.IsSet() {
err = mc.writeCommandPacket(comQuit)
}
@@ -91,26 +128,39 @@ func (mc *mysqlConn) Close() (err error) {
// is called before auth or on auth failure because MySQL will have already
// closed the network connection.
func (mc *mysqlConn) cleanup() {
- // Makes cleanup idempotent
- if mc.netConn != nil {
- if err := mc.netConn.Close(); err != nil {
- errLog.Print(err)
- }
- mc.netConn = nil
+ if !mc.closed.TrySet(true) {
+ return
}
- mc.cfg = nil
- mc.buf.nc = nil
+
+ // Makes cleanup idempotent
+ close(mc.closech)
+ if mc.netConn == nil {
+ return
+ }
+ if err := mc.netConn.Close(); err != nil {
+ errLog.Print(err)
+ }
+}
+
+func (mc *mysqlConn) error() error {
+ if mc.closed.IsSet() {
+ if err := mc.canceled.Value(); err != nil {
+ return err
+ }
+ return ErrInvalidConn
+ }
+ return nil
}
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
- if mc.netConn == nil {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := mc.writeCommandPacketStr(comStmtPrepare, query)
if err != nil {
- return nil, err
+ return nil, mc.markBadConn(err)
}
stmt := &mysqlStmt{
@@ -144,7 +194,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
if buf == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return "", driver.ErrBadConn
+ return "", ErrInvalidConn
}
buf = buf[:0]
argPos := 0
@@ -257,7 +307,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
}
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
- if mc.netConn == nil {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
@@ -271,7 +321,6 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
return nil, err
}
query = prepared
- args = nil
}
mc.affectedRows = 0
mc.insertId = 0
@@ -283,32 +332,43 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
insertId: int64(mc.insertId),
}, err
}
- return nil, err
+ return nil, mc.markBadConn(err)
}
// Internal function to execute commands
func (mc *mysqlConn) exec(query string) error {
// Send command
- err := mc.writeCommandPacketStr(comQuery, query)
- if err != nil {
- return err
+ if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
+ return mc.markBadConn(err)
}
// Read Result
resLen, err := mc.readResultSetHeaderPacket()
- if err == nil && resLen > 0 {
- if err = mc.readUntilEOF(); err != nil {
+ if err != nil {
+ return err
+ }
+
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
return err
}
- err = mc.readUntilEOF()
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
}
- return err
+ return mc.discardResults()
}
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
- if mc.netConn == nil {
+ return mc.query(query, args)
+}
+
+func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
@@ -322,7 +382,6 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
return nil, err
}
query = prepared
- args = nil
}
// Send command
err := mc.writeCommandPacketStr(comQuery, query)
@@ -335,15 +394,22 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
rows.mc = mc
if resLen == 0 {
- // no columns, no more data
- return emptyRows{}, nil
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
}
+
// Columns
- rows.columns, err = mc.readColumns(resLen)
+ rows.rs.columns, err = mc.readColumns(resLen)
return rows, err
}
}
- return nil, err
+ return nil, mc.markBadConn(err)
}
// Gets the value of the given MySQL System Variable
@@ -359,7 +425,7 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
if err == nil {
rows := new(textRows)
rows.mc = mc
- rows.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
+ rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
if resLen > 0 {
// Columns
@@ -375,3 +441,21 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
}
return nil, err
}
+
+// cancel is called when the query is canceled.
+func (mc *mysqlConn) cancel(err error) {
+ mc.canceled.Set(err)
+ mc.cleanup()
+}
+
+// finish is called when the query has succeeded.
+func (mc *mysqlConn) finish() {
+ if !mc.watching || mc.finished == nil {
+ return
+ }
+ select {
+ case mc.finished <- struct{}{}:
+ mc.watching = false
+ case <-mc.closech:
+ }
+}
diff --git a/src/vendor/github.com/go-sql-driver/mysql/connection_go18.go b/src/vendor/github.com/go-sql-driver/mysql/connection_go18.go
new file mode 100644
index 000000000..ce52c7d16
--- /dev/null
+++ b/src/vendor/github.com/go-sql-driver/mysql/connection_go18.go
@@ -0,0 +1,207 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+)
+
+// Ping implements driver.Pinger interface
+func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ if err = mc.watchCancel(ctx); err != nil {
+ return
+ }
+ defer mc.finish()
+
+ if err = mc.writeCommandPacket(comPing); err != nil {
+ return
+ }
+
+ return mc.readResultOK()
+}
+
+// BeginTx implements driver.ConnBeginTx interface
+func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
+ level, err := mapIsolationLevel(opts.Isolation)
+ if err != nil {
+ return nil, err
+ }
+ err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return mc.begin(opts.ReadOnly)
+}
+
+func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := mc.query(query, dargs)
+ if err != nil {
+ mc.finish()
+ return nil, err
+ }
+ rows.finish = mc.finish
+ return rows, err
+}
+
+func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ return mc.Exec(query, dargs)
+}
+
+func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ stmt, err := mc.Prepare(query)
+ mc.finish()
+ if err != nil {
+ return nil, err
+ }
+
+ select {
+ default:
+ case <-ctx.Done():
+ stmt.Close()
+ return nil, ctx.Err()
+ }
+ return stmt, nil
+}
+
+func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := stmt.query(dargs)
+ if err != nil {
+ stmt.mc.finish()
+ return nil, err
+ }
+ rows.finish = stmt.mc.finish
+ return rows, err
+}
+
+func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer stmt.mc.finish()
+
+ return stmt.Exec(dargs)
+}
+
+func (mc *mysqlConn) watchCancel(ctx context.Context) error {
+ if mc.watching {
+ // Reach here if canceled,
+ // so the connection is already invalid
+ mc.cleanup()
+ return nil
+ }
+ // When ctx is already cancelled, don't watch it.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ // When ctx is not cancellable, don't watch it.
+ if ctx.Done() == nil {
+ return nil
+ }
+ // When watcher is not alive, can't watch it.
+ if mc.watcher == nil {
+ return nil
+ }
+
+ mc.watching = true
+ mc.watcher <- ctx
+ return nil
+}
+
+func (mc *mysqlConn) startWatcher() {
+ watcher := make(chan mysqlContext, 1)
+ mc.watcher = watcher
+ finished := make(chan struct{})
+ mc.finished = finished
+ go func() {
+ for {
+ var ctx mysqlContext
+ select {
+ case ctx = <-watcher:
+ case <-mc.closech:
+ return
+ }
+
+ select {
+ case <-ctx.Done():
+ mc.cancel(ctx.Err())
+ case <-finished:
+ case <-mc.closech:
+ return
+ }
+ }
+ }()
+}
+
+func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
+ nv.Value, err = converter{}.ConvertValue(nv.Value)
+ return
+}
+
+// ResetSession implements driver.SessionResetter.
+// (From Go 1.10)
+func (mc *mysqlConn) ResetSession(ctx context.Context) error {
+ if mc.closed.IsSet() {
+ return driver.ErrBadConn
+ }
+ return nil
+}
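With `BeginTx` in place, isolation level and read-only options flow from `database/sql` down to the `SET TRANSACTION ISOLATION LEVEL` / `START TRANSACTION READ ONLY` statements shown earlier. A minimal sketch, assuming the table `t` exists and the requested level is one the driver maps:

```go
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Maps to "SET TRANSACTION ISOLATION LEVEL REPEATABLE READ" followed
	// by "START TRANSACTION READ ONLY" on the underlying connection.
	tx, err := db.BeginTx(context.Background(), &sql.TxOptions{
		Isolation: sql.LevelRepeatableRead,
		ReadOnly:  true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback()

	var n int
	if err := tx.QueryRow("SELECT COUNT(*) FROM t").Scan(&n); err != nil {
		log.Fatal(err)
	}
	log.Println(n)
}
```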
diff --git a/src/vendor/github.com/go-sql-driver/mysql/const.go b/src/vendor/github.com/go-sql-driver/mysql/const.go
index 88cfff3fd..b1e6b85ef 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/const.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/const.go
@@ -9,7 +9,9 @@
package mysql
const (
- minProtocolVersion byte = 10
+ defaultAuthPlugin = "mysql_native_password"
+ defaultMaxAllowedPacket = 4 << 20 // 4 MiB
+ minProtocolVersion = 10
maxPacketSize = 1<<24 - 1
timeFormat = "2006-01-02 15:04:05.999999"
)
@@ -18,10 +20,11 @@ const (
// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
const (
- iOK byte = 0x00
- iLocalInFile byte = 0xfb
- iEOF byte = 0xfe
- iERR byte = 0xff
+ iOK byte = 0x00
+ iAuthMoreData byte = 0x01
+ iLocalInFile byte = 0xfb
+ iEOF byte = 0xfe
+ iERR byte = 0xff
)
// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
@@ -87,8 +90,10 @@ const (
)
// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
+type fieldType byte
+
const (
- fieldTypeDecimal byte = iota
+ fieldTypeDecimal fieldType = iota
fieldTypeTiny
fieldTypeShort
fieldTypeLong
@@ -107,7 +112,7 @@ const (
fieldTypeBit
)
const (
- fieldTypeJSON byte = iota + 0xf5
+ fieldTypeJSON fieldType = iota + 0xf5
fieldTypeNewDecimal
fieldTypeEnum
fieldTypeSet
@@ -161,3 +166,9 @@ const (
statusInTransReadonly
statusSessionStateChanged
)
+
+const (
+ cachingSha2PasswordRequestPublicKey = 2
+ cachingSha2PasswordFastAuthSuccess = 3
+ cachingSha2PasswordPerformFullAuthentication = 4
+)
diff --git a/src/vendor/github.com/go-sql-driver/mysql/driver.go b/src/vendor/github.com/go-sql-driver/mysql/driver.go
index 0022d1f1e..e9ede2c8d 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/driver.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
-// Package mysql provides a MySQL driver for Go's database/sql package
+// Package mysql provides a MySQL driver for Go's database/sql package.
//
// The driver should be used via the database/sql package:
//
@@ -20,8 +20,14 @@ import (
"database/sql"
"database/sql/driver"
"net"
+ "sync"
)
+// watcher interface is used for context support (From Go 1.8)
+type watcher interface {
+ startWatcher()
+}
+
// MySQLDriver is exported to make the driver directly accessible.
// In general the driver is used via the database/sql package.
type MySQLDriver struct{}
@@ -30,12 +36,17 @@ type MySQLDriver struct{}
// Custom dial functions must be registered with RegisterDial
type DialFunc func(addr string) (net.Conn, error)
-var dials map[string]DialFunc
+var (
+ dialsLock sync.RWMutex
+ dials map[string]DialFunc
+)
// RegisterDial registers a custom dial function. It can then be used by the
// network address mynet(addr), where mynet is the registered new network.
// addr is passed as a parameter to the dial function.
func RegisterDial(net string, dial DialFunc) {
+ dialsLock.Lock()
+ defer dialsLock.Unlock()
if dials == nil {
dials = make(map[string]DialFunc)
}
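A short sketch of what the now concurrency-safe registry is used for; the network name `mynet` and the dial timeout are illustrative:

```go
package main

import (
	"database/sql"
	"log"
	"net"
	"time"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Register a custom network named "mynet"; the addr inside the DSN's
	// mynet(...) is handed to this dial function.
	mysql.RegisterDial("mynet", func(addr string) (net.Conn, error) {
		return net.DialTimeout("tcp", addr, 3*time.Second)
	})

	db, err := sql.Open("mysql", "user:password@mynet(127.0.0.1:3306)/dbname")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```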
@@ -52,16 +63,19 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
mc := &mysqlConn{
maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
+ closech: make(chan struct{}),
}
mc.cfg, err = ParseDSN(dsn)
if err != nil {
return nil, err
}
mc.parseTime = mc.cfg.ParseTime
- mc.strict = mc.cfg.Strict
// Connect to Server
- if dial, ok := dials[mc.cfg.Net]; ok {
+ dialsLock.RLock()
+ dial, ok := dials[mc.cfg.Net]
+ dialsLock.RUnlock()
+ if ok {
mc.netConn, err = dial(mc.cfg.Addr)
} else {
nd := net.Dialer{Timeout: mc.cfg.Timeout}
@@ -81,6 +95,11 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
}
}
+ // Call startWatcher for context support (From Go 1.8)
+ if s, ok := interface{}(mc).(watcher); ok {
+ s.startWatcher()
+ }
+
mc.buf = newBuffer(mc.netConn)
// Set I/O timeouts
@@ -88,20 +107,34 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
mc.writeTimeout = mc.cfg.WriteTimeout
// Reading Handshake Initialization Packet
- cipher, err := mc.readInitPacket()
+ authData, plugin, err := mc.readHandshakePacket()
if err != nil {
mc.cleanup()
return nil, err
}
+ if plugin == "" {
+ plugin = defaultAuthPlugin
+ }
// Send Client Authentication Packet
- if err = mc.writeAuthPacket(cipher); err != nil {
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ // try the default auth plugin, if using the requested plugin failed
+ errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
+ plugin = defaultAuthPlugin
+ authResp, err = mc.auth(authData, plugin)
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+ }
+ if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil {
mc.cleanup()
return nil, err
}
// Handle response to auth packet, switch methods if possible
- if err = handleAuthResult(mc, cipher); err != nil {
+ if err = mc.handleAuthResult(authData, plugin); err != nil {
// Authentication failed and MySQL has already closed the connection
// (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
// Do not send COM_QUIT, just cleanup and return the error.
@@ -134,50 +167,6 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
return mc, nil
}
-func handleAuthResult(mc *mysqlConn, oldCipher []byte) error {
- // Read Result Packet
- cipher, err := mc.readResultOK()
- if err == nil {
- return nil // auth successful
- }
-
- if mc.cfg == nil {
- return err // auth failed and retry not possible
- }
-
- // Retry auth if configured to do so.
- if mc.cfg.AllowOldPasswords && err == ErrOldPassword {
- // Retry with old authentication method. Note: there are edge cases
- // where this should work but doesn't; this is currently "wontfix":
- // https://github.com/go-sql-driver/mysql/issues/184
-
- // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
- // sent and we have to keep using the cipher sent in the init packet.
- if cipher == nil {
- cipher = oldCipher
- }
-
- if err = mc.writeOldAuthPacket(cipher); err != nil {
- return err
- }
- _, err = mc.readResultOK()
- } else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword {
- // Retry with clear text password for
- // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
- // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
- if err = mc.writeClearAuthPacket(); err != nil {
- return err
- }
- _, err = mc.readResultOK()
- } else if mc.cfg.AllowNativePasswords && err == ErrNativePassword {
- if err = mc.writeNativeAuthPacket(cipher); err != nil {
- return err
- }
- _, err = mc.readResultOK()
- }
- return err
-}
-
func init() {
sql.Register("mysql", &MySQLDriver{})
}
diff --git a/src/vendor/github.com/go-sql-driver/mysql/dsn.go b/src/vendor/github.com/go-sql-driver/mysql/dsn.go
index ac00dcedd..be014babe 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/dsn.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -10,11 +10,13 @@ package mysql
import (
"bytes"
+ "crypto/rsa"
"crypto/tls"
"errors"
"fmt"
"net"
"net/url"
+ "sort"
"strconv"
"strings"
"time"
@@ -27,7 +29,9 @@ var (
errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
)
-// Config is a configuration parsed from a DSN string
+// Config is a configuration parsed from a DSN string.
+// If a new Config is created instead of being parsed from a DSN string,
+// the NewConfig function should be used, which sets default values.
type Config struct {
User string // Username
Passwd string // Password (requires User)
@@ -38,6 +42,8 @@ type Config struct {
Collation string // Connection collation
Loc *time.Location // Location for time.Time values
MaxAllowedPacket int // Max packet size allowed
+ ServerPubKey string // Server public key name
+ pubKey *rsa.PublicKey // Server public key
TLSConfig string // TLS configuration name
tls *tls.Config // TLS configuration
Timeout time.Duration // Dial timeout
@@ -53,7 +59,54 @@ type Config struct {
InterpolateParams bool // Interpolate placeholders into query string
MultiStatements bool // Allow multiple statements in one query
ParseTime bool // Parse time values to time.Time
- Strict bool // Return warnings as errors
+ RejectReadOnly bool // Reject read-only connections
+}
+
+// NewConfig creates a new Config and sets default values.
+func NewConfig() *Config {
+ return &Config{
+ Collation: defaultCollation,
+ Loc: time.UTC,
+ MaxAllowedPacket: defaultMaxAllowedPacket,
+ AllowNativePasswords: true,
+ }
+}
+
+func (cfg *Config) normalize() error {
+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ return errInvalidDSNUnsafeCollation
+ }
+
+ // Set default network if empty
+ if cfg.Net == "" {
+ cfg.Net = "tcp"
+ }
+
+ // Set default address if empty
+ if cfg.Addr == "" {
+ switch cfg.Net {
+ case "tcp":
+ cfg.Addr = "127.0.0.1:3306"
+ case "unix":
+ cfg.Addr = "/tmp/mysql.sock"
+ default:
+ return errors.New("default addr for network '" + cfg.Net + "' unknown")
+ }
+
+ } else if cfg.Net == "tcp" {
+ cfg.Addr = ensureHavePort(cfg.Addr)
+ }
+
+ if cfg.tls != nil {
+ if cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify {
+ host, _, err := net.SplitHostPort(cfg.Addr)
+ if err == nil {
+ cfg.tls.ServerName = host
+ }
+ }
+ }
+
+ return nil
}
// FormatDSN formats the given Config into a DSN string which can be passed to
@@ -102,12 +155,12 @@ func (cfg *Config) FormatDSN() string {
}
}
- if cfg.AllowNativePasswords {
+ if !cfg.AllowNativePasswords {
if hasParam {
- buf.WriteString("&allowNativePasswords=true")
+ buf.WriteString("&allowNativePasswords=false")
} else {
hasParam = true
- buf.WriteString("?allowNativePasswords=true")
+ buf.WriteString("?allowNativePasswords=false")
}
}
@@ -195,15 +248,25 @@ func (cfg *Config) FormatDSN() string {
buf.WriteString(cfg.ReadTimeout.String())
}
- if cfg.Strict {
+ if cfg.RejectReadOnly {
if hasParam {
- buf.WriteString("&strict=true")
+ buf.WriteString("&rejectReadOnly=true")
} else {
hasParam = true
- buf.WriteString("?strict=true")
+ buf.WriteString("?rejectReadOnly=true")
}
}
+ if len(cfg.ServerPubKey) > 0 {
+ if hasParam {
+ buf.WriteString("&serverPubKey=")
+ } else {
+ hasParam = true
+ buf.WriteString("?serverPubKey=")
+ }
+ buf.WriteString(url.QueryEscape(cfg.ServerPubKey))
+ }
+
if cfg.Timeout > 0 {
if hasParam {
buf.WriteString("&timeout=")
@@ -234,7 +297,7 @@ func (cfg *Config) FormatDSN() string {
buf.WriteString(cfg.WriteTimeout.String())
}
- if cfg.MaxAllowedPacket > 0 {
+ if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
if hasParam {
buf.WriteString("&maxAllowedPacket=")
} else {
@@ -247,7 +310,12 @@ func (cfg *Config) FormatDSN() string {
// other params
if cfg.Params != nil {
- for param, value := range cfg.Params {
+ var params []string
+ for param := range cfg.Params {
+ params = append(params, param)
+ }
+ sort.Strings(params)
+ for _, param := range params {
if hasParam {
buf.WriteByte('&')
} else {
@@ -257,7 +325,7 @@ func (cfg *Config) FormatDSN() string {
buf.WriteString(param)
buf.WriteByte('=')
- buf.WriteString(url.QueryEscape(value))
+ buf.WriteString(url.QueryEscape(cfg.Params[param]))
}
}
@@ -267,10 +335,7 @@ func (cfg *Config) FormatDSN() string {
// ParseDSN parses the DSN string to a Config
func ParseDSN(dsn string) (cfg *Config, err error) {
// New config with some default values
- cfg = &Config{
- Loc: time.UTC,
- Collation: defaultCollation,
- }
+ cfg = NewConfig()
// [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
// Find the last '/' (since the password or the net addr might contain a '/')
@@ -338,28 +403,9 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
return nil, errInvalidDSNNoSlash
}
- if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
- return nil, errInvalidDSNUnsafeCollation
+ if err = cfg.normalize(); err != nil {
+ return nil, err
}
-
- // Set default network if empty
- if cfg.Net == "" {
- cfg.Net = "tcp"
- }
-
- // Set default address if empty
- if cfg.Addr == "" {
- switch cfg.Net {
- case "tcp":
- cfg.Addr = "127.0.0.1:3306"
- case "unix":
- cfg.Addr = "/tmp/mysql.sock"
- default:
- return nil, errors.New("default addr for network '" + cfg.Net + "' unknown")
- }
-
- }
-
return
}
@@ -374,7 +420,6 @@ func parseDSNParams(cfg *Config, params string) (err error) {
// cfg params
switch value := param[1]; param[0] {
-
// Disable INFILE whitelist / enable all files
case "allowAllFiles":
var isBool bool
@@ -472,14 +517,32 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return
}
- // Strict mode
- case "strict":
+ // Reject read-only connections
+ case "rejectReadOnly":
var isBool bool
- cfg.Strict, isBool = readBool(value)
+ cfg.RejectReadOnly, isBool = readBool(value)
if !isBool {
return errors.New("invalid bool value: " + value)
}
+ // Server public key
+ case "serverPubKey":
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for server pub key name: %v", err)
+ }
+
+ if pubKey := getServerPubKey(name); pubKey != nil {
+ cfg.ServerPubKey = name
+ cfg.pubKey = pubKey
+ } else {
+ return errors.New("invalid value / unknown server pub key name: " + name)
+ }
+
+ // Strict mode
+ case "strict":
+ panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
+
// Dial Timeout
case "timeout":
cfg.Timeout, err = time.ParseDuration(value)
@@ -506,14 +569,7 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return fmt.Errorf("invalid value for TLS config name: %v", err)
}
- if tlsConfig, ok := tlsConfigRegister[name]; ok {
- if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
- host, _, err := net.SplitHostPort(cfg.Addr)
- if err == nil {
- tlsConfig.ServerName = host
- }
- }
-
+ if tlsConfig := getTLSConfigClone(name); tlsConfig != nil {
cfg.TLSConfig = name
cfg.tls = tlsConfig
} else {
@@ -546,3 +602,10 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return
}
+
+func ensureHavePort(addr string) string {
+ if _, _, err := net.SplitHostPort(addr); err != nil {
+ return net.JoinHostPort(addr, "3306")
+ }
+ return addr
+}
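
With NewConfig and normalize in place, a Config round-trips cleanly: defaults are set once, and FormatDSN emits only non-default parameters (the free-form Params map is now serialized in sorted, deterministic order). A sketch with placeholder credentials:

    package main

    import (
        "fmt"

        "github.com/go-sql-driver/mysql"
    )

    func main() {
        cfg := mysql.NewConfig() // defaults: utf8 collation, UTC, AllowNativePasswords=true
        cfg.User = "harbor"
        cfg.Passwd = "secret"
        cfg.Net = "tcp"
        cfg.Addr = "127.0.0.1:3306" // ParseDSN/normalize appends :3306 when omitted
        cfg.DBName = "registry"
        // Prints harbor:secret@tcp(127.0.0.1:3306)/registry; defaults are omitted.
        fmt.Println(cfg.FormatDSN())
    }
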
diff --git a/src/vendor/github.com/go-sql-driver/mysql/errors.go b/src/vendor/github.com/go-sql-driver/mysql/errors.go
index 857854e14..760782ff2 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/errors.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -9,10 +9,8 @@
package mysql
import (
- "database/sql/driver"
"errors"
"fmt"
- "io"
"log"
"os"
)
@@ -31,6 +29,12 @@ var (
ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
ErrBusyBuffer = errors.New("busy buffer")
+
+ // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
+ // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
+ // to trigger a resend.
+ // See https://github.com/go-sql-driver/mysql/pull/302
+ errBadConnNoWrite = errors.New("bad connection")
)
var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
@@ -59,74 +63,3 @@ type MySQLError struct {
func (me *MySQLError) Error() string {
return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
}
-
-// MySQLWarnings is an error type which represents a group of one or more MySQL
-// warnings
-type MySQLWarnings []MySQLWarning
-
-func (mws MySQLWarnings) Error() string {
- var msg string
- for i, warning := range mws {
- if i > 0 {
- msg += "\r\n"
- }
- msg += fmt.Sprintf(
- "%s %s: %s",
- warning.Level,
- warning.Code,
- warning.Message,
- )
- }
- return msg
-}
-
-// MySQLWarning is an error type which represents a single MySQL warning.
-// Warnings are returned in groups only. See MySQLWarnings
-type MySQLWarning struct {
- Level string
- Code string
- Message string
-}
-
-func (mc *mysqlConn) getWarnings() (err error) {
- rows, err := mc.Query("SHOW WARNINGS", nil)
- if err != nil {
- return
- }
-
- var warnings = MySQLWarnings{}
- var values = make([]driver.Value, 3)
-
- for {
- err = rows.Next(values)
- switch err {
- case nil:
- warning := MySQLWarning{}
-
- if raw, ok := values[0].([]byte); ok {
- warning.Level = string(raw)
- } else {
- warning.Level = fmt.Sprintf("%s", values[0])
- }
- if raw, ok := values[1].([]byte); ok {
- warning.Code = string(raw)
- } else {
- warning.Code = fmt.Sprintf("%s", values[1])
- }
- if raw, ok := values[2].([]byte); ok {
- warning.Message = string(raw)
- } else {
- warning.Message = fmt.Sprintf("%s", values[0])
- }
-
- warnings = append(warnings, warning)
-
- case io.EOF:
- return warnings
-
- default:
- rows.Close()
- return
- }
- }
-}
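
With the MySQLWarnings machinery removed, server-side failures surface only as *mysql.MySQLError, so callers branch on the error number instead. A sketch using error 1062 (ER_DUP_ENTRY) against a placeholder table:

    package example

    import (
        "database/sql"

        "github.com/go-sql-driver/mysql"
    )

    // isDuplicateKey reports whether err is MySQL error 1062 (ER_DUP_ENTRY).
    func isDuplicateKey(err error) bool {
        me, ok := err.(*mysql.MySQLError)
        return ok && me.Number == 1062
    }

    func insertProject(db *sql.DB, name string) error {
        _, err := db.Exec("INSERT INTO projects (name) VALUES (?)", name)
        if isDuplicateKey(err) {
            return nil // already present; treat as success
        }
        return err
    }
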
diff --git a/src/vendor/github.com/go-sql-driver/mysql/fields.go b/src/vendor/github.com/go-sql-driver/mysql/fields.go
new file mode 100644
index 000000000..e1e2ece4b
--- /dev/null
+++ b/src/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -0,0 +1,194 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql"
+ "reflect"
+)
+
+func (mf *mysqlField) typeDatabaseName() string {
+ switch mf.fieldType {
+ case fieldTypeBit:
+ return "BIT"
+ case fieldTypeBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TEXT"
+ }
+ return "BLOB"
+ case fieldTypeDate:
+ return "DATE"
+ case fieldTypeDateTime:
+ return "DATETIME"
+ case fieldTypeDecimal:
+ return "DECIMAL"
+ case fieldTypeDouble:
+ return "DOUBLE"
+ case fieldTypeEnum:
+ return "ENUM"
+ case fieldTypeFloat:
+ return "FLOAT"
+ case fieldTypeGeometry:
+ return "GEOMETRY"
+ case fieldTypeInt24:
+ return "MEDIUMINT"
+ case fieldTypeJSON:
+ return "JSON"
+ case fieldTypeLong:
+ return "INT"
+ case fieldTypeLongBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "LONGTEXT"
+ }
+ return "LONGBLOB"
+ case fieldTypeLongLong:
+ return "BIGINT"
+ case fieldTypeMediumBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "MEDIUMTEXT"
+ }
+ return "MEDIUMBLOB"
+ case fieldTypeNewDate:
+ return "DATE"
+ case fieldTypeNewDecimal:
+ return "DECIMAL"
+ case fieldTypeNULL:
+ return "NULL"
+ case fieldTypeSet:
+ return "SET"
+ case fieldTypeShort:
+ return "SMALLINT"
+ case fieldTypeString:
+ if mf.charSet == collations[binaryCollation] {
+ return "BINARY"
+ }
+ return "CHAR"
+ case fieldTypeTime:
+ return "TIME"
+ case fieldTypeTimestamp:
+ return "TIMESTAMP"
+ case fieldTypeTiny:
+ return "TINYINT"
+ case fieldTypeTinyBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TINYTEXT"
+ }
+ return "TINYBLOB"
+ case fieldTypeVarChar:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeVarString:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeYear:
+ return "YEAR"
+ default:
+ return ""
+ }
+}
+
+var (
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(NullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
+ scanTypeUnknown = reflect.TypeOf(new(interface{}))
+)
+
+type mysqlField struct {
+ tableName string
+ name string
+ length uint32
+ flags fieldFlag
+ fieldType fieldType
+ decimals byte
+ charSet uint8
+}
+
+func (mf *mysqlField) scanType() reflect.Type {
+ switch mf.fieldType {
+ case fieldTypeTiny:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint8
+ }
+ return scanTypeInt8
+ }
+ return scanTypeNullInt
+
+ case fieldTypeShort, fieldTypeYear:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint16
+ }
+ return scanTypeInt16
+ }
+ return scanTypeNullInt
+
+ case fieldTypeInt24, fieldTypeLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint32
+ }
+ return scanTypeInt32
+ }
+ return scanTypeNullInt
+
+ case fieldTypeLongLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint64
+ }
+ return scanTypeInt64
+ }
+ return scanTypeNullInt
+
+ case fieldTypeFloat:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat32
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDouble:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat64
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
+ fieldTypeTime:
+ return scanTypeRawBytes
+
+ case fieldTypeDate, fieldTypeNewDate,
+ fieldTypeTimestamp, fieldTypeDateTime:
+ // NullTime is always returned for more consistent behavior as it can
+ // handle both cases of parseTime regardless of whether the field is nullable.
+ return scanTypeNullTime
+
+ default:
+ return scanTypeUnknown
+ }
+}
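
This field metadata is what database/sql (Go 1.8+) exposes through Rows.ColumnTypes; a sketch that dumps it for an illustrative query:

    package example

    import (
        "database/sql"
        "fmt"
    )

    func dumpColumnTypes(db *sql.DB) error {
        rows, err := db.Query("SELECT id, name, created FROM projects") // illustrative
        if err != nil {
            return err
        }
        defer rows.Close()

        types, err := rows.ColumnTypes()
        if err != nil {
            return err
        }
        for _, ct := range types {
            nullable, _ := ct.Nullable()
            // e.g. "created DATETIME mysql.NullTime true"
            fmt.Println(ct.Name(), ct.DatabaseTypeName(), ct.ScanType(), nullable)
        }
        return rows.Err()
    }
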
diff --git a/src/vendor/github.com/go-sql-driver/mysql/infile.go b/src/vendor/github.com/go-sql-driver/mysql/infile.go
index 547357cfa..273cb0ba5 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/infile.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -147,7 +147,8 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
}
// send content packets
- if err == nil {
+ // if packetSize == 0, the Reader contains no data
+ if err == nil && packetSize > 0 {
data := make([]byte, 4+packetSize)
var n int
for err == nil {
@@ -173,8 +174,7 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
// read OK packet
if err == nil {
- _, err = mc.readResultOK()
- return err
+ return mc.readResultOK()
}
mc.readPacket()
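
The packetSize guard matters for streamed LOAD DATA LOCAL INFILE sources, which reach this code through a registered reader handler. A sketch with placeholder names:

    package example

    import (
        "database/sql"
        "io"
        "strings"

        "github.com/go-sql-driver/mysql"
    )

    func loadItems(db *sql.DB) error {
        // Registered readers are whitelisted under the Reader:: prefix.
        mysql.RegisterReaderHandler("data", func() io.Reader {
            return strings.NewReader("1\talpha\n2\tbeta\n")
        })
        defer mysql.DeregisterReaderHandler("data")

        _, err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE items")
        return err
    }
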
diff --git a/src/vendor/github.com/go-sql-driver/mysql/packets.go b/src/vendor/github.com/go-sql-driver/mysql/packets.go
index aafe9793e..9ed640850 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/packets.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -30,9 +30,12 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// read packet header
data, err := mc.buf.readNext(4)
if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
errLog.Print(err)
mc.Close()
- return nil, driver.ErrBadConn
+ return nil, ErrInvalidConn
}
// packet length [24 bit]
@@ -54,7 +57,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
if prevData == nil {
errLog.Print(ErrMalformPkt)
mc.Close()
- return nil, driver.ErrBadConn
+ return nil, ErrInvalidConn
}
return prevData, nil
@@ -63,9 +66,12 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// read packet body [pktLen bytes]
data, err = mc.buf.readNext(pktLen)
if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
errLog.Print(err)
mc.Close()
- return nil, driver.ErrBadConn
+ return nil, ErrInvalidConn
}
// return data if this was the last packet
@@ -125,33 +131,47 @@ func (mc *mysqlConn) writePacket(data []byte) error {
// Handle error
if err == nil { // n != len(data)
+ mc.cleanup()
errLog.Print(ErrMalformPkt)
} else {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return cerr
+ }
+ if n == 0 && pktLen == len(data)-4 {
+ // only for the first loop iteration when nothing was written yet
+ return errBadConnNoWrite
+ }
+ mc.cleanup()
errLog.Print(err)
}
- return driver.ErrBadConn
+ return ErrInvalidConn
}
}
/******************************************************************************
-* Initialisation Process *
+* Initialization Process *
******************************************************************************/
// Handshake Initialization Packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
-func (mc *mysqlConn) readInitPacket() ([]byte, error) {
- data, err := mc.readPacket()
+func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
+ data, err = mc.readPacket()
if err != nil {
- return nil, err
+ // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
+ // in connection initialization we don't risk retrying non-idempotent actions.
+ if err == ErrInvalidConn {
+ return nil, "", driver.ErrBadConn
+ }
+ return
}
if data[0] == iERR {
- return nil, mc.handleErrorPacket(data)
+ return nil, "", mc.handleErrorPacket(data)
}
// protocol version [1 byte]
if data[0] < minProtocolVersion {
- return nil, fmt.Errorf(
+ return nil, "", fmt.Errorf(
"unsupported protocol version %d. Version %d or higher is required",
data[0],
minProtocolVersion,
@@ -163,7 +183,7 @@ func (mc *mysqlConn) readInitPacket() ([]byte, error) {
pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
// first part of the password cipher [8 bytes]
- cipher := data[pos : pos+8]
+ authData := data[pos : pos+8]
// (filler) always 0x00 [1 byte]
pos += 8 + 1
@@ -171,10 +191,10 @@ func (mc *mysqlConn) readInitPacket() ([]byte, error) {
// capability flags (lower 2 bytes) [2 bytes]
mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
if mc.flags&clientProtocol41 == 0 {
- return nil, ErrOldProtocol
+ return nil, "", ErrOldProtocol
}
if mc.flags&clientSSL == 0 && mc.cfg.tls != nil {
- return nil, ErrNoTLS
+ return nil, "", ErrNoTLS
}
pos += 2
@@ -198,32 +218,32 @@ func (mc *mysqlConn) readInitPacket() ([]byte, error) {
//
// The official Python library uses the fixed length 12
// which seems to work but technically could have a hidden bug.
- cipher = append(cipher, data[pos:pos+12]...)
+ authData = append(authData, data[pos:pos+12]...)
+ pos += 13
- // TODO: Verify string termination
// EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
// \NUL otherwise
- //
- //if data[len(data)-1] == 0 {
- // return
- //}
- //return ErrMalformPkt
+ if end := bytes.IndexByte(data[pos:], 0x00); end != -1 {
+ plugin = string(data[pos : pos+end])
+ } else {
+ plugin = string(data[pos:])
+ }
// make a memory safe copy of the cipher slice
var b [20]byte
- copy(b[:], cipher)
- return b[:], nil
+ copy(b[:], authData)
+ return b[:], plugin, nil
}
// make a memory safe copy of the cipher slice
var b [8]byte
- copy(b[:], cipher)
- return b[:], nil
+ copy(b[:], authData)
+ return b[:], plugin, nil
}
// Client Authentication Packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
-func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
+func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error {
// Adjust client flags based on server support
clientFlags := clientProtocol41 |
clientSecureConn |
@@ -247,10 +267,17 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
clientFlags |= clientMultiStatements
}
- // User Password
- scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd))
+ // encode length of the auth plugin data
+ var authRespLEIBuf [9]byte
+ authRespLen := len(authResp)
+ authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen))
+ if len(authRespLEI) > 1 {
+ // if the length can not be written in 1 byte, it must be written as a
+ // length encoded integer
+ clientFlags |= clientPluginAuthLenEncClientData
+ }
- pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + 1 + len(scrambleBuff) + 21 + 1
+ pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
// To specify a db name
if n := len(mc.cfg.DBName); n > 0 {
@@ -261,9 +288,9 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
// Calculate packet length and get buffer with that size
data := mc.buf.takeSmallBuffer(pktLen + 4)
if data == nil {
- // can not take the buffer. Something must be wrong with the connection
+ // cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// ClientFlags [32 bit]
@@ -318,9 +345,9 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
data[pos] = 0x00
pos++
- // ScrambleBuffer [length encoded integer]
- data[pos] = byte(len(scrambleBuff))
- pos += 1 + copy(data[pos+1:], scrambleBuff)
+ // Auth Data [length encoded integer]
+ pos += copy(data[pos:], authRespLEI)
+ pos += copy(data[pos:], authResp)
// Databasename [null terminated string]
if len(mc.cfg.DBName) > 0 {
@@ -329,72 +356,26 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
pos++
}
- // Assume native client during response
- pos += copy(data[pos:], "mysql_native_password")
+ pos += copy(data[pos:], plugin)
data[pos] = 0x00
+ pos++
// Send Auth packet
- return mc.writePacket(data)
+ return mc.writePacket(data[:pos])
}
-// Client old authentication packet
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
-func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error {
- // User password
- scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.Passwd))
-
- // Calculate the packet length and add a tailing 0
- pktLen := len(scrambleBuff) + 1
- data := mc.buf.takeSmallBuffer(4 + pktLen)
+func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
+ pktLen := 4 + len(authData)
+ data := mc.buf.takeSmallBuffer(pktLen)
if data == nil {
- // can not take the buffer. Something must be wrong with the connection
+ // cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
- // Add the scrambled password [null terminated string]
- copy(data[4:], scrambleBuff)
- data[4+pktLen-1] = 0x00
-
- return mc.writePacket(data)
-}
-
-// Client clear text authentication packet
-// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
-func (mc *mysqlConn) writeClearAuthPacket() error {
- // Calculate the packet length and add a tailing 0
- pktLen := len(mc.cfg.Passwd) + 1
- data := mc.buf.takeSmallBuffer(4 + pktLen)
- if data == nil {
- // can not take the buffer. Something must be wrong with the connection
- errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
- }
-
- // Add the clear password [null terminated string]
- copy(data[4:], mc.cfg.Passwd)
- data[4+pktLen-1] = 0x00
-
- return mc.writePacket(data)
-}
-
-// Native password authentication method
-// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
-func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error {
- scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd))
-
- // Calculate the packet length and add a tailing 0
- pktLen := len(scrambleBuff)
- data := mc.buf.takeSmallBuffer(4 + pktLen)
- if data == nil {
- // can not take the buffer. Something must be wrong with the connection
- errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
- }
-
- // Add the scramble
- copy(data[4:], scrambleBuff)
-
+ // Add the auth data [EOF]
+ copy(data[4:], authData)
return mc.writePacket(data)
}
@@ -408,9 +389,9 @@ func (mc *mysqlConn) writeCommandPacket(command byte) error {
data := mc.buf.takeSmallBuffer(4 + 1)
if data == nil {
- // can not take the buffer. Something must be wrong with the connection
+ // cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add command byte
@@ -427,9 +408,9 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
pktLen := 1 + len(arg)
data := mc.buf.takeBuffer(pktLen + 4)
if data == nil {
- // can not take the buffer. Something must be wrong with the connection
+ // cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add command byte
@@ -448,9 +429,9 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
data := mc.buf.takeSmallBuffer(4 + 1 + 4)
if data == nil {
- // can not take the buffer. Something must be wrong with the connection
+ // cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add command byte
@@ -470,44 +451,50 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
* Result Packets *
******************************************************************************/
-// Returns error if Packet is not an 'Result OK'-Packet
-func (mc *mysqlConn) readResultOK() ([]byte, error) {
+func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
data, err := mc.readPacket()
- if err == nil {
- // packet indicator
- switch data[0] {
-
- case iOK:
- return nil, mc.handleOkPacket(data)
-
- case iEOF:
- if len(data) > 1 {
- pluginEndIndex := bytes.IndexByte(data, 0x00)
- plugin := string(data[1:pluginEndIndex])
- cipher := data[pluginEndIndex+1 : len(data)-1]
-
- if plugin == "mysql_old_password" {
- // using old_passwords
- return cipher, ErrOldPassword
- } else if plugin == "mysql_clear_password" {
- // using clear text password
- return cipher, ErrCleartextPassword
- } else if plugin == "mysql_native_password" {
- // using mysql default authentication method
- return cipher, ErrNativePassword
- } else {
- return cipher, ErrUnknownPlugin
- }
- } else {
- // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
- return nil, ErrOldPassword
- }
-
- default: // Error otherwise
- return nil, mc.handleErrorPacket(data)
- }
+ if err != nil {
+ return nil, "", err
}
- return nil, err
+
+ // packet indicator
+ switch data[0] {
+
+ case iOK:
+ return nil, "", mc.handleOkPacket(data)
+
+ case iAuthMoreData:
+ return data[1:], "", err
+
+ case iEOF:
+ if len(data) == 1 {
+ // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+ return nil, "mysql_old_password", nil
+ }
+ pluginEndIndex := bytes.IndexByte(data, 0x00)
+ if pluginEndIndex < 0 {
+ return nil, "", ErrMalformPkt
+ }
+ plugin := string(data[1:pluginEndIndex])
+ authData := data[pluginEndIndex+1:]
+ return authData, plugin, nil
+
+ default: // Error otherwise
+ return nil, "", mc.handleErrorPacket(data)
+ }
+}
+
+// Returns error if Packet is not an 'Result OK'-Packet
+func (mc *mysqlConn) readResultOK() error {
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ if data[0] == iOK {
+ return mc.handleOkPacket(data)
+ }
+ return mc.handleErrorPacket(data)
}
// Result Set Header Packet
@@ -550,6 +537,22 @@ func (mc *mysqlConn) handleErrorPacket(data []byte) error {
// Error Number [16 bit uint]
errno := binary.LittleEndian.Uint16(data[1:3])
+ // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
+ // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
+ if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+ // Oops; we are connected to a read-only connection, and won't be able
+ // to issue any write statements. Since RejectReadOnly is configured,
+ // we throw away this connection in the hope that the next one has
+ // write permission. This is specifically for a possible race condition
+ // during failover (e.g. on AWS Aurora). See README.md for more.
+ //
+ // We explicitly close the connection before returning
+ // driver.ErrBadConn to ensure that `database/sql` purges this
+ // connection and initiates a new one for next statement next time.
+ mc.Close()
+ return driver.ErrBadConn
+ }
+
pos := 3
// SQL State [optional: # + 5bytes string]
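
Enabling the behavior above is a single DSN flag; because the connection is closed before driver.ErrBadConn is returned, database/sql transparently retries the statement on a fresh connection. The endpoint below is a placeholder:

    package example

    import (
        "database/sql"

        _ "github.com/go-sql-driver/mysql"
    )

    func openForFailover() (*sql.DB, error) {
        // rejectReadOnly=true: errors 1290/1792 discard the connection so the
        // retry can land on the new writer after an Aurora-style failover.
        return sql.Open("mysql",
            "user:pass@tcp(cluster.example.com:3306)/app?rejectReadOnly=true")
    }
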
@@ -584,19 +587,12 @@ func (mc *mysqlConn) handleOkPacket(data []byte) error {
// server_status [2 bytes]
mc.status = readStatus(data[1+n+m : 1+n+m+2])
- if err := mc.discardResults(); err != nil {
- return err
- }
-
- // warning count [2 bytes]
- if !mc.strict {
+ if mc.status&statusMoreResultsExists != 0 {
return nil
}
- pos := 1 + n + m + 2
- if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 {
- return mc.getWarnings()
- }
+ // warning count [2 bytes]
+
return nil
}
@@ -668,14 +664,21 @@ func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
if err != nil {
return nil, err
}
+ pos += n
// Filler [uint8]
+ pos++
+
// Charset [charset, collation uint8]
+ columns[i].charSet = data[pos]
+ pos += 2
+
// Length [uint32]
- pos += n + 1 + 2 + 4
+ columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
+ pos += 4
// Field type [uint8]
- columns[i].fieldType = data[pos]
+ columns[i].fieldType = fieldType(data[pos])
pos++
// Flags [uint16]
@@ -698,6 +701,10 @@ func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
func (rows *textRows) readRow(dest []driver.Value) error {
mc := rows.mc
+ if rows.rs.done {
+ return io.EOF
+ }
+
data, err := mc.readPacket()
if err != nil {
return err
@@ -707,15 +714,11 @@ func (rows *textRows) readRow(dest []driver.Value) error {
if data[0] == iEOF && len(data) == 5 {
// server_status [2 bytes]
rows.mc.status = readStatus(data[3:])
- err = rows.mc.discardResults()
- if err == nil {
- err = io.EOF
- } else {
- // connection unusable
- rows.mc.Close()
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
}
- rows.mc = nil
- return err
+ return io.EOF
}
if data[0] == iERR {
rows.mc = nil
@@ -736,7 +739,7 @@ func (rows *textRows) readRow(dest []driver.Value) error {
if !mc.parseTime {
continue
} else {
- switch rows.columns[i].fieldType {
+ switch rows.rs.columns[i].fieldType {
case fieldTypeTimestamp, fieldTypeDateTime,
fieldTypeDate, fieldTypeNewDate:
dest[i], err = parseDateTime(
@@ -808,14 +811,7 @@ func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
// Reserved [8 bit]
// Warning count [16 bit uint]
- if !stmt.mc.strict {
- return columnCount, nil
- }
- // Check for warnings count > 0, only available in MySQL > 4.1
- if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 {
- return columnCount, stmt.mc.getWarnings()
- }
return columnCount, nil
}
return 0, err
@@ -832,7 +828,7 @@ func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
// 2 bytes paramID
const dataOffset = 1 + 4 + 2
- // Can not use the write buffer since
+ // Cannot use the write buffer since
// a) the buffer is too small
// b) it is in use
data := make([]byte, 4+1+4+2+len(arg))
@@ -887,6 +883,12 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
const minPktLen = 4 + 1 + 4 + 1 + 4
mc := stmt.mc
+ // Determine the threshold dynamically to avoid exceeding the max packet size.
+ longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
+ if longDataSize < 64 {
+ longDataSize = 64
+ }
+
// Reset packet-sequence
mc.sequence = 0
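
longDataSize now derives from maxAllowedPacket, which is itself a DSN parameter in this version; 0 asks the server for its value instead of assuming the 4 MiB default. A sketch with placeholder credentials:

    package example

    import (
        "database/sql"

        _ "github.com/go-sql-driver/mysql"
    )

    func openWithServerPacketSize() (*sql.DB, error) {
        // maxAllowedPacket=0 queries the server's max_allowed_packet on connect.
        return sql.Open("mysql",
            "user:pass@tcp(127.0.0.1:3306)/db?maxAllowedPacket=0")
    }
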
@@ -898,9 +900,9 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
data = mc.buf.takeCompleteBuffer()
}
if data == nil {
- // can not take the buffer. Something must be wrong with the connection
+ // cannot take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// command [1 byte]
@@ -959,7 +961,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// build NULL-bitmap
if arg == nil {
nullMask[i/8] |= 1 << (uint(i) & 7)
- paramTypes[i+i] = fieldTypeNULL
+ paramTypes[i+i] = byte(fieldTypeNULL)
paramTypes[i+i+1] = 0x00
continue
}
@@ -967,7 +969,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// cache types and values
switch v := arg.(type) {
case int64:
- paramTypes[i+i] = fieldTypeLongLong
+ paramTypes[i+i] = byte(fieldTypeLongLong)
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
@@ -983,7 +985,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case float64:
- paramTypes[i+i] = fieldTypeDouble
+ paramTypes[i+i] = byte(fieldTypeDouble)
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
@@ -999,7 +1001,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case bool:
- paramTypes[i+i] = fieldTypeTiny
+ paramTypes[i+i] = byte(fieldTypeTiny)
paramTypes[i+i+1] = 0x00
if v {
@@ -1011,10 +1013,10 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
case []byte:
// Common case (non-nil value) first
if v != nil {
- paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
- if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
+ if len(v) < longDataSize {
paramValues = appendLengthEncodedInteger(paramValues,
uint64(len(v)),
)
@@ -1029,14 +1031,14 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// Handle []byte(nil) as a NULL value
nullMask[i/8] |= 1 << (uint(i) & 7)
- paramTypes[i+i] = fieldTypeNULL
+ paramTypes[i+i] = byte(fieldTypeNULL)
paramTypes[i+i+1] = 0x00
case string:
- paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
- if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
+ if len(v) < longDataSize {
paramValues = appendLengthEncodedInteger(paramValues,
uint64(len(v)),
)
@@ -1048,23 +1050,25 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case time.Time:
- paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
- var val []byte
+ var a [64]byte
+ var b = a[:0]
+
if v.IsZero() {
- val = []byte("0000-00-00")
+ b = append(b, "0000-00-00"...)
} else {
- val = []byte(v.In(mc.cfg.Loc).Format(timeFormat))
+ b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat)
}
paramValues = appendLengthEncodedInteger(paramValues,
- uint64(len(val)),
+ uint64(len(b)),
)
- paramValues = append(paramValues, val...)
+ paramValues = append(paramValues, b...)
default:
- return fmt.Errorf("can not convert type: %T", arg)
+ return fmt.Errorf("cannot convert type: %T", arg)
}
}
@@ -1097,8 +1101,6 @@ func (mc *mysqlConn) discardResults() error {
if err := mc.readUntilEOF(); err != nil {
return err
}
- } else {
- mc.status &^= statusMoreResultsExists
}
}
return nil
@@ -1116,20 +1118,17 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
// EOF Packet
if data[0] == iEOF && len(data) == 5 {
rows.mc.status = readStatus(data[3:])
- err = rows.mc.discardResults()
- if err == nil {
- err = io.EOF
- } else {
- // connection unusable
- rows.mc.Close()
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
}
- rows.mc = nil
- return err
+ return io.EOF
}
+ mc := rows.mc
rows.mc = nil
// Error otherwise
- return rows.mc.handleErrorPacket(data)
+ return mc.handleErrorPacket(data)
}
// NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
@@ -1145,14 +1144,14 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
}
// Convert to byte-coded string
- switch rows.columns[i].fieldType {
+ switch rows.rs.columns[i].fieldType {
case fieldTypeNULL:
dest[i] = nil
continue
// Numeric Types
case fieldTypeTiny:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
dest[i] = int64(data[pos])
} else {
dest[i] = int64(int8(data[pos]))
@@ -1161,7 +1160,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeShort, fieldTypeYear:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
} else {
dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
@@ -1170,7 +1169,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeInt24, fieldTypeLong:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
} else {
dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
@@ -1179,7 +1178,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeLongLong:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
val := binary.LittleEndian.Uint64(data[pos : pos+8])
if val > math.MaxInt64 {
dest[i] = uint64ToString(val)
@@ -1193,7 +1192,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeFloat:
- dest[i] = float32(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4])))
+ dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
pos += 4
continue
@@ -1233,10 +1232,10 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
case isNull:
dest[i] = nil
continue
- case rows.columns[i].fieldType == fieldTypeTime:
+ case rows.rs.columns[i].fieldType == fieldTypeTime:
// database/sql does not support an equivalent to TIME, return a string
var dstlen uint8
- switch decimals := rows.columns[i].decimals; decimals {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
case 0x00, 0x1f:
dstlen = 8
case 1, 2, 3, 4, 5, 6:
@@ -1244,18 +1243,18 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
default:
return fmt.Errorf(
"protocol error, illegal decimals value %d",
- rows.columns[i].decimals,
+ rows.rs.columns[i].decimals,
)
}
- dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true)
+ dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen)
case rows.mc.parseTime:
dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
default:
var dstlen uint8
- if rows.columns[i].fieldType == fieldTypeDate {
+ if rows.rs.columns[i].fieldType == fieldTypeDate {
dstlen = 10
} else {
- switch decimals := rows.columns[i].decimals; decimals {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
case 0x00, 0x1f:
dstlen = 19
case 1, 2, 3, 4, 5, 6:
@@ -1263,11 +1262,11 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
default:
return fmt.Errorf(
"protocol error, illegal decimals value %d",
- rows.columns[i].decimals,
+ rows.rs.columns[i].decimals,
)
}
}
- dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false)
+ dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen)
}
if err == nil {
@@ -1279,7 +1278,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
// Please report if this happens!
default:
- return fmt.Errorf("unknown field type %d", rows.columns[i].fieldType)
+ return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
}
}
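
On the consuming side, the date/time paths above surface through parseTime and the NullTime scan type; a sketch of reading a DATETIME column from a placeholder schema:

    package example

    import (
        "database/sql"
        "fmt"

        "github.com/go-sql-driver/mysql"
    )

    func printCreated(db *sql.DB, id int) error {
        // With parseTime=true in the DSN the driver returns time.Time values;
        // mysql.NullTime.Scan also accepts the textual form when it is off.
        var created mysql.NullTime
        err := db.QueryRow("SELECT created FROM projects WHERE id = ?", id).Scan(&created)
        if err != nil {
            return err
        }
        if created.Valid {
            fmt.Println(created.Time.UTC())
        }
        return nil
    }
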
diff --git a/src/vendor/github.com/go-sql-driver/mysql/rows.go b/src/vendor/github.com/go-sql-driver/mysql/rows.go
index c08255eee..d3b1e2822 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/rows.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -11,19 +11,20 @@ package mysql
import (
"database/sql/driver"
"io"
+ "math"
+ "reflect"
)
-type mysqlField struct {
- tableName string
- name string
- flags fieldFlag
- fieldType byte
- decimals byte
+type resultSet struct {
+ columns []mysqlField
+ columnNames []string
+ done bool
}
type mysqlRows struct {
- mc *mysqlConn
- columns []mysqlField
+ mc *mysqlConn
+ rs resultSet
+ finish func()
}
type binaryRows struct {
@@ -34,37 +35,86 @@ type textRows struct {
mysqlRows
}
-type emptyRows struct{}
-
func (rows *mysqlRows) Columns() []string {
- columns := make([]string, len(rows.columns))
+ if rows.rs.columnNames != nil {
+ return rows.rs.columnNames
+ }
+
+ columns := make([]string, len(rows.rs.columns))
if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
for i := range columns {
- if tableName := rows.columns[i].tableName; len(tableName) > 0 {
- columns[i] = tableName + "." + rows.columns[i].name
+ if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
+ columns[i] = tableName + "." + rows.rs.columns[i].name
} else {
- columns[i] = rows.columns[i].name
+ columns[i] = rows.rs.columns[i].name
}
}
} else {
for i := range columns {
- columns[i] = rows.columns[i].name
+ columns[i] = rows.rs.columns[i].name
}
}
+
+ rows.rs.columnNames = columns
return columns
}
-func (rows *mysqlRows) Close() error {
+func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
+ return rows.rs.columns[i].typeDatabaseName()
+}
+
+// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
+// return int64(rows.rs.columns[i].length), true
+// }
+
+func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
+ return rows.rs.columns[i].flags&flagNotNULL == 0, true
+}
+
+func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
+ column := rows.rs.columns[i]
+ decimals := int64(column.decimals)
+
+ switch column.fieldType {
+ case fieldTypeDecimal, fieldTypeNewDecimal:
+ if decimals > 0 {
+ return int64(column.length) - 2, decimals, true
+ }
+ return int64(column.length) - 1, decimals, true
+ case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
+ return decimals, decimals, true
+ case fieldTypeFloat, fieldTypeDouble:
+ if decimals == 0x1f {
+ return math.MaxInt64, math.MaxInt64, true
+ }
+ return math.MaxInt64, decimals, true
+ }
+
+ return 0, 0, false
+}
+
+func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
+ return rows.rs.columns[i].scanType()
+}
+
+func (rows *mysqlRows) Close() (err error) {
+ if f := rows.finish; f != nil {
+ f()
+ rows.finish = nil
+ }
+
mc := rows.mc
if mc == nil {
return nil
}
- if mc.netConn == nil {
- return ErrInvalidConn
+ if err := mc.error(); err != nil {
+ return err
}
// Remove unread packets from stream
- err := mc.readUntilEOF()
+ if !rows.rs.done {
+ err = mc.readUntilEOF()
+ }
if err == nil {
if err = mc.discardResults(); err != nil {
return err
@@ -75,10 +125,66 @@ func (rows *mysqlRows) Close() error {
return err
}
+func (rows *mysqlRows) HasNextResultSet() (b bool) {
+ if rows.mc == nil {
+ return false
+ }
+ return rows.mc.status&statusMoreResultsExists != 0
+}
+
+func (rows *mysqlRows) nextResultSet() (int, error) {
+ if rows.mc == nil {
+ return 0, io.EOF
+ }
+ if err := rows.mc.error(); err != nil {
+ return 0, err
+ }
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ if err := rows.mc.readUntilEOF(); err != nil {
+ return 0, err
+ }
+ rows.rs.done = true
+ }
+
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ return 0, io.EOF
+ }
+ rows.rs = resultSet{}
+ return rows.mc.readResultSetHeaderPacket()
+}
+
+func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
+ for {
+ resLen, err := rows.nextResultSet()
+ if err != nil {
+ return 0, err
+ }
+
+ if resLen > 0 {
+ return resLen, nil
+ }
+
+ rows.rs.done = true
+ }
+}
+
+func (rows *binaryRows) NextResultSet() error {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
func (rows *binaryRows) Next(dest []driver.Value) error {
if mc := rows.mc; mc != nil {
- if mc.netConn == nil {
- return ErrInvalidConn
+ if err := mc.error(); err != nil {
+ return err
}
// Fetch next row from stream
@@ -87,10 +193,20 @@ func (rows *binaryRows) Next(dest []driver.Value) error {
return io.EOF
}
+func (rows *textRows) NextResultSet() (err error) {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
func (rows *textRows) Next(dest []driver.Value) error {
if mc := rows.mc; mc != nil {
- if mc.netConn == nil {
- return ErrInvalidConn
+ if err := mc.error(); err != nil {
+ return err
}
// Fetch next row from stream
@@ -98,15 +214,3 @@ func (rows *textRows) Next(dest []driver.Value) error {
}
return io.EOF
}
-
-func (rows emptyRows) Columns() []string {
- return nil
-}
-
-func (rows emptyRows) Close() error {
- return nil
-}
-
-func (rows emptyRows) Next(dest []driver.Value) error {
- return io.EOF
-}
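
The resultSet/done bookkeeping above backs the multiple-result-set API added in Go 1.8's database/sql; it requires multiStatements=true in the DSN. A sketch with illustrative queries:

    package example

    import (
        "database/sql"
        "fmt"
    )

    func readBoth(db *sql.DB) error {
        // Requires a DSN with multiStatements=true.
        rows, err := db.Query("SELECT id FROM projects; SELECT id FROM repositories")
        if err != nil {
            return err
        }
        defer rows.Close()

        for {
            for rows.Next() {
                var id int64
                if err := rows.Scan(&id); err != nil {
                    return err
                }
                fmt.Println(id)
            }
            if !rows.NextResultSet() { // advances via nextResultSet above
                break
            }
        }
        return rows.Err()
    }
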
diff --git a/src/vendor/github.com/go-sql-driver/mysql/statement.go b/src/vendor/github.com/go-sql-driver/mysql/statement.go
index 7f9b04585..ce7fe4cd0 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/statement.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -11,6 +11,7 @@ package mysql
import (
"database/sql/driver"
"fmt"
+ "io"
"reflect"
"strconv"
)
@@ -19,11 +20,10 @@ type mysqlStmt struct {
mc *mysqlConn
id uint32
paramCount int
- columns []mysqlField // cached from the first query
}
func (stmt *mysqlStmt) Close() error {
- if stmt.mc == nil || stmt.mc.netConn == nil {
+ if stmt.mc == nil || stmt.mc.closed.IsSet() {
// driver.Stmt.Close can be called more than once, thus this function
// has to be idempotent.
// See also Issue #450 and golang/go#16019.
@@ -45,14 +45,14 @@ func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
}
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
- if stmt.mc.netConn == nil {
+ if stmt.mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := stmt.writeExecutePacket(args)
if err != nil {
- return nil, err
+ return nil, stmt.mc.markBadConn(err)
}
mc := stmt.mc
@@ -62,37 +62,45 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
// Read Result
resLen, err := mc.readResultSetHeaderPacket()
- if err == nil {
- if resLen > 0 {
- // Columns
- err = mc.readUntilEOF()
- if err != nil {
- return nil, err
- }
+ if err != nil {
+ return nil, err
+ }
- // Rows
- err = mc.readUntilEOF()
+ if resLen > 0 {
+ // Columns
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
}
- if err == nil {
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, nil
+
+ // Rows
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
}
}
- return nil, err
+ if err := mc.discardResults(); err != nil {
+ return nil, err
+ }
+
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, nil
}
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
- if stmt.mc.netConn == nil {
+ return stmt.query(args)
+}
+
+func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
+ if stmt.mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := stmt.writeExecutePacket(args)
if err != nil {
- return nil, err
+ return nil, stmt.mc.markBadConn(err)
}
mc := stmt.mc
@@ -107,14 +115,15 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
if resLen > 0 {
rows.mc = mc
- // Columns
- // If not cached, read them and cache them
- if stmt.columns == nil {
- rows.columns, err = mc.readColumns(resLen)
- stmt.columns = rows.columns
- } else {
- rows.columns = stmt.columns
- err = mc.readUntilEOF()
+ rows.rs.columns, err = mc.readColumns(resLen)
+ } else {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
}
}
@@ -123,19 +132,36 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
type converter struct{}
+// ConvertValue mirrors the reference/default converter in database/sql/driver
+// with _one_ exception. We support uint64 values with the high bit set, which
+// the default implementation does not. This function should be kept in sync with
+// database/sql/driver defaultConverter.ConvertValue() except for that
+// deliberate difference.
func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
if driver.IsValue(v) {
return v, nil
}
+ if vr, ok := v.(driver.Valuer); ok {
+ sv, err := callValuerValue(vr)
+ if err != nil {
+ return nil, err
+ }
+ if !driver.IsValue(sv) {
+ return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
+ }
+ return sv, nil
+ }
+
rv := reflect.ValueOf(v)
switch rv.Kind() {
case reflect.Ptr:
// indirect pointers
if rv.IsNil() {
return nil, nil
+ } else {
+ return c.ConvertValue(rv.Elem().Interface())
}
- return c.ConvertValue(rv.Elem().Interface())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return rv.Int(), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32:
@@ -148,6 +174,38 @@ func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
return int64(u64), nil
case reflect.Float32, reflect.Float64:
return rv.Float(), nil
+ case reflect.Bool:
+ return rv.Bool(), nil
+ case reflect.Slice:
+ ek := rv.Type().Elem().Kind()
+ if ek == reflect.Uint8 {
+ return rv.Bytes(), nil
+ }
+ return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, ek)
+ case reflect.String:
+ return rv.String(), nil
}
return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
}
+
+var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// callValuerValue returns vr.Value(), with one exception:
+// If vr.Value is an auto-generated method on a pointer type and the
+// pointer is nil, it would panic at runtime in the panicwrap
+// method. Treat it like nil instead.
+//
+// This is so people can implement driver.Value on value types and
+// still use nil pointers to those types to mean nil/NULL, just like
+// string/*string.
+//
+// This is an exact copy of the same-named unexported function from the
+// database/sql package.
+func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
+ if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
+ rv.IsNil() &&
+ rv.Type().Elem().Implements(valuerReflectType) {
+ return nil, nil
+ }
+ return vr.Value()
+}
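
The converter rewrite means value types may implement driver.Valuer while nil pointers to them still map to NULL; a sketch with a made-up Severity type:

    package example

    import (
        "database/sql"
        "database/sql/driver"
    )

    type Severity int

    // Value implements driver.Valuer on the value type.
    func (s Severity) Value() (driver.Value, error) {
        return int64(s), nil
    }

    func saveSeverity(db *sql.DB, id int64, s *Severity) error {
        // A nil *Severity is caught by callValuerValue and stored as NULL
        // instead of panicking inside the auto-generated Value method.
        _, err := db.Exec("UPDATE scans SET severity = ? WHERE id = ?", s, id)
        return err
    }
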
diff --git a/src/vendor/github.com/go-sql-driver/mysql/transaction.go b/src/vendor/github.com/go-sql-driver/mysql/transaction.go
index 33c749b35..417d72793 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/transaction.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/transaction.go
@@ -13,7 +13,7 @@ type mysqlTx struct {
}
func (tx *mysqlTx) Commit() (err error) {
- if tx.mc == nil || tx.mc.netConn == nil {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
return ErrInvalidConn
}
err = tx.mc.exec("COMMIT")
@@ -22,7 +22,7 @@ func (tx *mysqlTx) Commit() (err error) {
}
func (tx *mysqlTx) Rollback() (err error) {
- if tx.mc == nil || tx.mc.netConn == nil {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
return ErrInvalidConn
}
err = tx.mc.exec("ROLLBACK")
diff --git a/src/vendor/github.com/go-sql-driver/mysql/utils.go b/src/vendor/github.com/go-sql-driver/mysql/utils.go
index d523b7ffd..ca5d47d82 100644
--- a/src/vendor/github.com/go-sql-driver/mysql/utils.go
+++ b/src/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -9,23 +9,30 @@
package mysql
import (
- "crypto/sha1"
"crypto/tls"
"database/sql/driver"
"encoding/binary"
"fmt"
"io"
+ "strconv"
"strings"
+ "sync"
+ "sync/atomic"
"time"
)
+// Registry for custom tls.Configs
var (
- tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
+ tlsConfigLock sync.RWMutex
+ tlsConfigRegistry map[string]*tls.Config
)
// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
// Use the key as a value in the DSN where tls=value.
//
+// Note: The provided tls.Config is exclusively owned by the driver after
+// registering it.
+//
// rootCertPool := x509.NewCertPool()
// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
// if err != nil {
@@ -51,19 +58,32 @@ func RegisterTLSConfig(key string, config *tls.Config) error {
return fmt.Errorf("key '%s' is reserved", key)
}
- if tlsConfigRegister == nil {
- tlsConfigRegister = make(map[string]*tls.Config)
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry == nil {
+ tlsConfigRegistry = make(map[string]*tls.Config)
}
- tlsConfigRegister[key] = config
+ tlsConfigRegistry[key] = config
+ tlsConfigLock.Unlock()
return nil
}
// DeregisterTLSConfig removes the tls.Config associated with key.
func DeregisterTLSConfig(key string) {
- if tlsConfigRegister != nil {
- delete(tlsConfigRegister, key)
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry != nil {
+ delete(tlsConfigRegistry, key)
}
+ tlsConfigLock.Unlock()
+}
+
+func getTLSConfigClone(key string) (config *tls.Config) {
+ tlsConfigLock.RLock()
+ if v, ok := tlsConfigRegistry[key]; ok {
+ config = cloneTLSConfig(v)
+ }
+ tlsConfigLock.RUnlock()
+ return
}
// Returns the bool value of the input.
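
Caller-facing registration is unchanged; only the storage behind it gained a lock, and lookups now hand out clones. Abbreviating the doc comment's own example, with a placeholder path:

    package example

    import (
        "crypto/tls"
        "crypto/x509"
        "errors"
        "io/ioutil"

        "github.com/go-sql-driver/mysql"
    )

    func registerCustomTLS() error {
        pem, err := ioutil.ReadFile("/path/ca-cert.pem") // placeholder path
        if err != nil {
            return err
        }
        pool := x509.NewCertPool()
        if !pool.AppendCertsFromPEM(pem) {
            return errors.New("failed to append PEM")
        }
        // Selected per connection with ...?tls=custom in the DSN.
        return mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: pool})
    }
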
@@ -80,119 +100,6 @@ func readBool(input string) (value bool, valid bool) {
return
}
-/******************************************************************************
-* Authentication *
-******************************************************************************/
-
-// Encrypt password using 4.1+ method
-func scramblePassword(scramble, password []byte) []byte {
- if len(password) == 0 {
- return nil
- }
-
- // stage1Hash = SHA1(password)
- crypt := sha1.New()
- crypt.Write(password)
- stage1 := crypt.Sum(nil)
-
- // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
- // inner Hash
- crypt.Reset()
- crypt.Write(stage1)
- hash := crypt.Sum(nil)
-
- // outer Hash
- crypt.Reset()
- crypt.Write(scramble)
- crypt.Write(hash)
- scramble = crypt.Sum(nil)
-
- // token = scrambleHash XOR stage1Hash
- for i := range scramble {
- scramble[i] ^= stage1[i]
- }
- return scramble
-}
-
-// Encrypt password using pre 4.1 (old password) method
-// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
-type myRnd struct {
- seed1, seed2 uint32
-}
-
-const myRndMaxVal = 0x3FFFFFFF
-
-// Pseudo random number generator
-func newMyRnd(seed1, seed2 uint32) *myRnd {
- return &myRnd{
- seed1: seed1 % myRndMaxVal,
- seed2: seed2 % myRndMaxVal,
- }
-}
-
-// Tested to be equivalent to MariaDB's floating point variant
-// http://play.golang.org/p/QHvhd4qved
-// http://play.golang.org/p/RG0q4ElWDx
-func (r *myRnd) NextByte() byte {
- r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
- r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
-
- return byte(uint64(r.seed1) * 31 / myRndMaxVal)
-}
-
-// Generate binary hash from byte string using insecure pre 4.1 method
-func pwHash(password []byte) (result [2]uint32) {
- var add uint32 = 7
- var tmp uint32
-
- result[0] = 1345345333
- result[1] = 0x12345671
-
- for _, c := range password {
- // skip spaces and tabs in password
- if c == ' ' || c == '\t' {
- continue
- }
-
- tmp = uint32(c)
- result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
- result[1] += (result[1] << 8) ^ result[0]
- add += tmp
- }
-
- // Remove sign bit (1<<31)-1)
- result[0] &= 0x7FFFFFFF
- result[1] &= 0x7FFFFFFF
-
- return
-}
-
-// Encrypt password using insecure pre 4.1 method
-func scrambleOldPassword(scramble, password []byte) []byte {
- if len(password) == 0 {
- return nil
- }
-
- scramble = scramble[:8]
-
- hashPw := pwHash(password)
- hashSc := pwHash(scramble)
-
- r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
-
- var out [8]byte
- for i := range out {
- out[i] = r.NextByte() + 64
- }
-
- mask := r.NextByte()
- for i := range out {
- out[i] ^= mask
- }
-
- return out[:]
-}
-
/******************************************************************************
* Time related utils *
******************************************************************************/
@@ -321,87 +228,104 @@ var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
-func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) {
+func appendMicrosecs(dst, src []byte, decimals int) []byte {
+ if decimals <= 0 {
+ return dst
+ }
+ if len(src) == 0 {
+ return append(dst, ".000000"[:decimals+1]...)
+ }
+
+ microsecs := binary.LittleEndian.Uint32(src[:4])
+ p1 := byte(microsecs / 10000)
+ microsecs -= 10000 * uint32(p1)
+ p2 := byte(microsecs / 100)
+ microsecs -= 100 * uint32(p2)
+ p3 := byte(microsecs)
+
+ switch decimals {
+ default:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3], digits01[p3],
+ )
+ case 1:
+ return append(dst, '.',
+ digits10[p1],
+ )
+ case 2:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ )
+ case 3:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2],
+ )
+ case 4:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ )
+ case 5:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3],
+ )
+ }
+}
+
+func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) {
// length expects the deterministic length of the zero value,
// negative time and 100+ hours are automatically added if needed
if len(src) == 0 {
- if justTime {
- return zeroDateTime[11 : 11+length], nil
- }
return zeroDateTime[:length], nil
}
- var dst []byte // return value
- var pt, p1, p2, p3 byte // current digit pair
- var zOffs byte // offset of value in zeroDateTime
- if justTime {
- switch length {
- case
- 8, // time (can be up to 10 when negative and 100+ hours)
- 10, 11, 12, 13, 14, 15: // time with fractional seconds
- default:
- return nil, fmt.Errorf("illegal TIME length %d", length)
+ var dst []byte // return value
+ var p1, p2, p3 byte // current digit pair
+
+ switch length {
+ case 10, 19, 21, 22, 23, 24, 25, 26:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
}
- switch len(src) {
- case 8, 12:
- default:
- return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
- }
- // +2 to enable negative time and 100+ hours
- dst = make([]byte, 0, length+2)
- if src[0] == 1 {
- dst = append(dst, '-')
- }
- if src[1] != 0 {
- hour := uint16(src[1])*24 + uint16(src[5])
- pt = byte(hour / 100)
- p1 = byte(hour - 100*uint16(pt))
- dst = append(dst, digits01[pt])
- } else {
- p1 = src[5]
- }
- zOffs = 11
- src = src[6:]
- } else {
- switch length {
- case 10, 19, 21, 22, 23, 24, 25, 26:
- default:
- t := "DATE"
- if length > 10 {
- t += "TIME"
- }
- return nil, fmt.Errorf("illegal %s length %d", t, length)
- }
- switch len(src) {
- case 4, 7, 11:
- default:
- t := "DATE"
- if length > 10 {
- t += "TIME"
- }
- return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
- }
- dst = make([]byte, 0, length)
- // start with the date
- year := binary.LittleEndian.Uint16(src[:2])
- pt = byte(year / 100)
- p1 = byte(year - 100*uint16(pt))
- p2, p3 = src[2], src[3]
- dst = append(dst,
- digits10[pt], digits01[pt],
- digits10[p1], digits01[p1], '-',
- digits10[p2], digits01[p2], '-',
- digits10[p3], digits01[p3],
- )
- if length == 10 {
- return dst, nil
- }
- if len(src) == 4 {
- return append(dst, zeroDateTime[10:length]...), nil
- }
- dst = append(dst, ' ')
- p1 = src[4] // hour
- src = src[5:]
+ return nil, fmt.Errorf("illegal %s length %d", t, length)
}
+ switch len(src) {
+ case 4, 7, 11:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
+ }
+ dst = make([]byte, 0, length)
+ // start with the date
+ year := binary.LittleEndian.Uint16(src[:2])
+ pt := year / 100
+ p1 = byte(year - 100*uint16(pt))
+ p2, p3 = src[2], src[3]
+ dst = append(dst,
+ digits10[pt], digits01[pt],
+ digits10[p1], digits01[p1], '-',
+ digits10[p2], digits01[p2], '-',
+ digits10[p3], digits01[p3],
+ )
+ if length == 10 {
+ return dst, nil
+ }
+ if len(src) == 4 {
+ return append(dst, zeroDateTime[10:length]...), nil
+ }
+ dst = append(dst, ' ')
+ p1 = src[4] // hour
+ src = src[5:]
+
// p1 is 2-digit hour, src is after hour
p2, p3 = src[0], src[1]
dst = append(dst,
@@ -409,51 +333,49 @@ func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value
digits10[p2], digits01[p2], ':',
digits10[p3], digits01[p3],
)
- if length <= byte(len(dst)) {
- return dst, nil
- }
- src = src[2:]
+ return appendMicrosecs(dst, src[2:], int(length)-20), nil
+}
+
+func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
+ // length expects the deterministic length of the zero value,
+ // negative time and 100+ hours are automatically added if needed
if len(src) == 0 {
- return append(dst, zeroDateTime[19:zOffs+length]...), nil
+ return zeroDateTime[11 : 11+length], nil
}
- microsecs := binary.LittleEndian.Uint32(src[:4])
- p1 = byte(microsecs / 10000)
- microsecs -= 10000 * uint32(p1)
- p2 = byte(microsecs / 100)
- microsecs -= 100 * uint32(p2)
- p3 = byte(microsecs)
- switch decimals := zOffs + length - 20; decimals {
+ var dst []byte // return value
+
+ switch length {
+ case
+ 8, // time (can be up to 10 when negative and 100+ hours)
+ 10, 11, 12, 13, 14, 15: // time with fractional seconds
default:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- digits10[p2], digits01[p2],
- digits10[p3], digits01[p3],
- ), nil
- case 1:
- return append(dst, '.',
- digits10[p1],
- ), nil
- case 2:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- ), nil
- case 3:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- digits10[p2],
- ), nil
- case 4:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- digits10[p2], digits01[p2],
- ), nil
- case 5:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- digits10[p2], digits01[p2],
- digits10[p3],
- ), nil
+ return nil, fmt.Errorf("illegal TIME length %d", length)
}
+ switch len(src) {
+ case 8, 12:
+ default:
+ return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
+ }
+ // +2 to enable negative time and 100+ hours
+ dst = make([]byte, 0, length+2)
+ if src[0] == 1 {
+ dst = append(dst, '-')
+ }
+ days := binary.LittleEndian.Uint32(src[1:5])
+ hours := int64(days)*24 + int64(src[5])
+
+ if hours >= 100 {
+ dst = strconv.AppendInt(dst, hours, 10)
+ } else {
+ dst = append(dst, digits10[hours], digits01[hours])
+ }
+
+ min, sec := src[6], src[7]
+ dst = append(dst, ':',
+ digits10[min], digits01[min], ':',
+ digits10[sec], digits01[sec],
+ )
+ return appendMicrosecs(dst, src[8:], int(length)-9), nil
}
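
The split above separates TIME from DATE/DATETIME handling: formatBinaryTime reads the binary TIME packet as a sign byte, a little-endian 4-byte day count, hour/minute/second bytes, and an optional 4-byte microsecond tail. A minimal sketch of the same decomposition (not part of the patch; the packet values are made up):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Hypothetical 12-byte TIME packet: negative, 2 days, 03:04:05.000006.
        src := make([]byte, 12)
        src[0] = 1                                  // sign byte: 1 means negative
        binary.LittleEndian.PutUint32(src[1:5], 2)  // day count
        src[5], src[6], src[7] = 3, 4, 5            // hours, minutes, seconds
        binary.LittleEndian.PutUint32(src[8:12], 6) // microseconds

        // Same math as the hunk: hours can exceed two digits once days fold in.
        hours := int64(binary.LittleEndian.Uint32(src[1:5]))*24 + int64(src[5])
        fmt.Printf("-%02d:%02d:%02d.%06d\n", hours, src[6], src[7],
            binary.LittleEndian.Uint32(src[8:12])) // -51:04:05.000006
    }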
/******************************************************************************
@@ -519,7 +441,7 @@ func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
// Check data length
if len(b) >= n {
- return b[n-int(num) : n], false, n, nil
+ return b[n-int(num) : n : n], false, n, nil
}
return nil, false, n, io.EOF
}
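
The small-looking change above is a three-index slice: b[n-int(num) : n : n] caps the result's capacity at its length, so a caller that appends to the returned bytes forces a reallocation instead of overwriting the rest of the shared packet buffer. A self-contained illustration of the difference:

    package main

    import "fmt"

    func main() {
        buf := []byte("hello world")
        plain := buf[0:5]    // len 5, cap 11: shares the tail of buf
        capped := buf[0:5:5] // len 5, cap 5: append must reallocate

        _ = append(capped, '!')
        fmt.Println(string(buf)) // "hello world" (untouched)

        _ = append(plain, '!')
        fmt.Println(string(buf)) // "hello!world" (tail clobbered)
    }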
@@ -548,8 +470,8 @@ func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
if len(b) == 0 {
return 0, true, 1
}
- switch b[0] {
+ switch b[0] {
// 251: NULL
case 0xfb:
return 0, true, 1
@@ -738,3 +660,67 @@ func escapeStringQuotes(buf []byte, v string) []byte {
return buf[:pos]
}
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://github.com/golang/go/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+
+// atomicBool is a wrapper around uint32 for usage as a boolean value with
+// atomic access.
+type atomicBool struct {
+ _noCopy noCopy
+ value uint32
+}
+
+// IsSet returns whether the current boolean value is true
+func (ab *atomicBool) IsSet() bool {
+ return atomic.LoadUint32(&ab.value) > 0
+}
+
+// Set sets the value of the bool regardless of the previous value
+func (ab *atomicBool) Set(value bool) {
+ if value {
+ atomic.StoreUint32(&ab.value, 1)
+ } else {
+ atomic.StoreUint32(&ab.value, 0)
+ }
+}
+
+// TrySet sets the value of the bool and returns whether the value changed
+func (ab *atomicBool) TrySet(value bool) bool {
+ if value {
+ return atomic.SwapUint32(&ab.value, 1) == 0
+ }
+ return atomic.SwapUint32(&ab.value, 0) > 0
+}
+
+// atomicError is a wrapper for atomically accessed error values
+type atomicError struct {
+ _noCopy noCopy
+ value atomic.Value
+}
+
+// Set sets the error value regardless of the previous value.
+// The value must not be nil
+func (ae *atomicError) Set(value error) {
+ ae.value.Store(value)
+}
+
+// Value returns the current error value
+func (ae *atomicError) Value() error {
+ if v := ae.value.Load(); v != nil {
+ // this will panic if the value doesn't implement the error interface
+ return v.(error)
+ }
+ return nil
+}
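
A short usage sketch for the new sync helpers; atomicBool is unexported, so the type is reproduced (minus noCopy) to keep the example compilable on its own:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // Copy of the driver's unexported atomicBool so this sketch is
    // self-contained.
    type atomicBool struct{ value uint32 }

    func (ab *atomicBool) IsSet() bool { return atomic.LoadUint32(&ab.value) > 0 }
    func (ab *atomicBool) TrySet(v bool) bool {
        if v {
            return atomic.SwapUint32(&ab.value, 1) == 0
        }
        return atomic.SwapUint32(&ab.value, 0) > 0
    }

    func main() {
        var closed atomicBool
        fmt.Println(closed.TrySet(true)) // true: value changed 0 -> 1
        fmt.Println(closed.TrySet(true)) // false: already set
        fmt.Println(closed.IsSet())      // true
    }

TrySet reporting whether the value actually changed is what makes it useful for "run cleanup exactly once" paths: only the first concurrent caller observes true.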
diff --git a/src/vendor/github.com/go-sql-driver/mysql/utils_go17.go b/src/vendor/github.com/go-sql-driver/mysql/utils_go17.go
new file mode 100644
index 000000000..f59563456
--- /dev/null
+++ b/src/vendor/github.com/go-sql-driver/mysql/utils_go17.go
@@ -0,0 +1,40 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.7
+// +build !go1.8
+
+package mysql
+
+import "crypto/tls"
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+ Renegotiation: c.Renegotiation,
+ }
+}
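
This file only builds on Go 1.7 (note the paired build tags), where tls.Config.Clone was not yet exported, so every field is copied by hand; the go1.8 variant below simply delegates to the standard library. Cloning matters because connection setup may mutate per-connection fields such as ServerName, and mutating a shared *tls.Config would race. A sketch using the stdlib Clone that the go1.8 file wraps:

    package main

    import (
        "crypto/tls"
        "fmt"
    )

    func main() {
        base := &tls.Config{ServerName: "a.example.com", MinVersion: tls.VersionTLS12}
        perConn := base.Clone()               // what cloneTLSConfig does on go1.8+
        perConn.ServerName = "db.example.com" // safe: base is untouched
        fmt.Println(base.ServerName, perConn.ServerName)
    }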
diff --git a/src/vendor/github.com/go-sql-driver/mysql/utils_go18.go b/src/vendor/github.com/go-sql-driver/mysql/utils_go18.go
new file mode 100644
index 000000000..c35c2a6aa
--- /dev/null
+++ b/src/vendor/github.com/go-sql-driver/mysql/utils_go18.go
@@ -0,0 +1,50 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "crypto/tls"
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+)
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return c.Clone()
+}
+
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+ dargs := make([]driver.Value, len(named))
+ for n, param := range named {
+ if len(param.Name) > 0 {
+ // TODO: support the use of Named Parameters #561
+ return nil, errors.New("mysql: driver does not support the use of Named Parameters")
+ }
+ dargs[n] = param.Value
+ }
+ return dargs, nil
+}
+
+func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
+ switch sql.IsolationLevel(level) {
+ case sql.LevelRepeatableRead:
+ return "REPEATABLE READ", nil
+ case sql.LevelReadCommitted:
+ return "READ COMMITTED", nil
+ case sql.LevelReadUncommitted:
+ return "READ UNCOMMITTED", nil
+ case sql.LevelSerializable:
+ return "SERIALIZABLE", nil
+ default:
+ return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
+ }
+}
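
mapIsolationLevel is what connects database/sql's BeginTx options to the isolation-level string the driver sends to the server. A hedged caller-side sketch (the DSN is a placeholder and a reachable server is assumed):

    package main

    import (
        "context"
        "database/sql"

        _ "github.com/go-sql-driver/mysql"
    )

    func main() {
        db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test")
        if err != nil {
            panic(err)
        }
        defer db.Close()

        // Isolation flows through driver.TxOptions into mapIsolationLevel,
        // which turns it into the string "SERIALIZABLE".
        tx, err := db.BeginTx(context.Background(), &sql.TxOptions{
            Isolation: sql.LevelSerializable,
        })
        if err != nil {
            panic(err)
        }
        defer tx.Rollback()
    }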
diff --git a/src/vendor/github.com/gobwas/glob/bench.sh b/src/vendor/github.com/gobwas/glob/bench.sh
old mode 100755
new mode 100644
diff --git a/src/vendor/github.com/golang-migrate/migrate/docker-deploy.sh b/src/vendor/github.com/golang-migrate/migrate/docker-deploy.sh
old mode 100755
new mode 100644
diff --git a/src/vendor/github.com/golang/protobuf/LICENSE b/src/vendor/github.com/golang/protobuf/LICENSE
index 1b1b1921e..0f646931a 100644
--- a/src/vendor/github.com/golang/protobuf/LICENSE
+++ b/src/vendor/github.com/golang/protobuf/LICENSE
@@ -1,7 +1,4 @@
-Go support for Protocol Buffers - Google's data interchange format
-
Copyright 2010 The Go Authors. All rights reserved.
-https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
diff --git a/src/vendor/github.com/golang/protobuf/proto/Makefile b/src/vendor/github.com/golang/protobuf/proto/Makefile
deleted file mode 100644
index e2e0651a9..000000000
--- a/src/vendor/github.com/golang/protobuf/proto/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2010 The Go Authors. All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-install:
- go install
-
-test: install generate-test-pbs
- go test
-
-
-generate-test-pbs:
- make install
- make -C testdata
- protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
- make
diff --git a/src/vendor/github.com/golang/protobuf/proto/clone.go b/src/vendor/github.com/golang/protobuf/proto/clone.go
index e392575b3..3cd3249f7 100644
--- a/src/vendor/github.com/golang/protobuf/proto/clone.go
+++ b/src/vendor/github.com/golang/protobuf/proto/clone.go
@@ -35,22 +35,39 @@
package proto
import (
+ "fmt"
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
-func Clone(pb Message) Message {
- in := reflect.ValueOf(pb)
+func Clone(src Message) Message {
+ in := reflect.ValueOf(src)
if in.IsNil() {
- return pb
+ return src
}
-
out := reflect.New(in.Type().Elem())
- // out is empty so a merge is a deep copy.
- mergeStruct(out.Elem(), in.Elem())
- return out.Interface().(Message)
+ dst := out.Interface().(Message)
+ Merge(dst, src)
+ return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+ // Merge merges src into this message.
+ // Required and optional fields that are set in src will be set to that value in dst.
+ // Elements of repeated fields will be appended.
+ //
+ // Merge may panic if called with a different argument type than the receiver.
+ Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+ XXX_Merge(src Message)
}
// Merge merges src into dst.
@@ -58,17 +75,24 @@ func Clone(pb Message) Message {
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
+ if m, ok := dst.(Merger); ok {
+ m.Merge(src)
+ return
+ }
+
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
- // Explicit test prior to mergeStruct so that mistyped nils will fail
- panic("proto: type mismatch")
+ panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
}
if in.IsNil() {
- // Merging nil into non-nil is a quiet no-op
+ return // Merge from nil src is a noop
+ }
+ if m, ok := dst.(generatedMerger); ok {
+ m.XXX_Merge(src)
return
}
mergeStruct(out.Elem(), in.Elem())
@@ -84,7 +108,7 @@ func mergeStruct(out, in reflect.Value) {
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
- if emIn, ok := extendable(in.Addr().Interface()); ok {
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
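
Clone is now a thin wrapper over Merge, and Merge gains two fast paths: a message that implements the exported Merger interface, and a generated message with XXX_Merge. The sketch below exercises the Merger path with a hand-rolled type (hypothetical; real code uses protoc-generated messages):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    // toy is a hand-rolled message used only to show the Merger dispatch.
    type toy struct{ Words []string }

    func (t *toy) Reset()         { *t = toy{} }
    func (t *toy) String() string { return fmt.Sprint(t.Words) }
    func (*toy) ProtoMessage()    {}

    // Merge implements proto.Merger, so proto.Merge (and therefore
    // proto.Clone) calls this instead of reflecting over the struct.
    func (t *toy) Merge(src proto.Message) {
        t.Words = append(t.Words, src.(*toy).Words...)
    }

    func main() {
        a := &toy{Words: []string{"hello"}}
        b := proto.Clone(a).(*toy) // fresh *toy, then b.Merge(a)
        proto.Merge(b, &toy{Words: []string{"world"}})
        fmt.Println(a, b) // [hello] [hello world]
    }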
diff --git a/src/vendor/github.com/golang/protobuf/proto/decode.go b/src/vendor/github.com/golang/protobuf/proto/decode.go
index aa207298f..63b0f08be 100644
--- a/src/vendor/github.com/golang/protobuf/proto/decode.go
+++ b/src/vendor/github.com/golang/protobuf/proto/decode.go
@@ -39,8 +39,6 @@ import (
"errors"
"fmt"
"io"
- "os"
- "reflect"
)
// errOverflow is returned when an integer is too large to be represented.
@@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow")
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-// The fundamental decoders that interpret bytes on the wire.
-// Those that take integer types all return uint64 and are
-// therefore of type valueDecoder.
-
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
@@ -192,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
if b&0x80 == 0 {
goto done
}
- // x -= 0x80 << 63 // Always zero.
return 0, errOverflow
@@ -267,9 +260,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
return
}
-// These are not ValueDecoders: they produce an array of bytes or a string.
-// bytes, embedded messages
-
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
@@ -311,81 +301,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) {
return string(buf), nil
}
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-// If the protocol buffer has extensions, and the field matches, add it as an extension.
-// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
-func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
- oi := o.index
-
- err := o.skip(t, tag, wire)
- if err != nil {
- return err
- }
-
- if !unrecField.IsValid() {
- return nil
- }
-
- ptr := structPointer_Bytes(base, unrecField)
-
- // Add the skipped field to struct field
- obuf := o.buf
-
- o.buf = *ptr
- o.EncodeVarint(uint64(tag<<3 | wire))
- *ptr = append(o.buf, obuf[oi:o.index]...)
-
- o.buf = obuf
-
- return nil
-}
-
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
-
- var u uint64
- var err error
-
- switch wire {
- case WireVarint:
- _, err = o.DecodeVarint()
- case WireFixed64:
- _, err = o.DecodeFixed64()
- case WireBytes:
- _, err = o.DecodeRawBytes(false)
- case WireFixed32:
- _, err = o.DecodeFixed32()
- case WireStartGroup:
- for {
- u, err = o.DecodeVarint()
- if err != nil {
- break
- }
- fwire := int(u & 0x7)
- if fwire == WireEndGroup {
- break
- }
- ftag := int(u >> 3)
- err = o.skip(t, ftag, fwire)
- if err != nil {
- break
- }
- }
- default:
- err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
- }
- return err
-}
-
// Unmarshaler is the interface representing objects that can
-// unmarshal themselves. The method should reset the receiver before
-// decoding starts. The argument points to data that may be
+// unmarshal themselves. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
type Unmarshaler interface {
Unmarshal([]byte) error
}
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+ XXX_Unmarshal([]byte) error
+}
+
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
@@ -395,7 +333,13 @@ type Unmarshaler interface {
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
- return UnmarshalMerge(buf, pb)
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
@@ -405,8 +349,16 @@ func Unmarshal(buf []byte, pb Message) error {
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
- // If the object can unmarshal itself, let it.
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto has unfortunately been inconsistent
+ // about whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
@@ -422,12 +374,17 @@ func (p *Buffer) DecodeMessage(pb Message) error {
}
// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
func (p *Buffer) DecodeGroup(pb Message) error {
- typ, base, err := getbase(pb)
- if err != nil {
- return err
+ b := p.buf[p.index:]
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return io.ErrUnexpectedEOF
}
- return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+ err := Unmarshal(b[:x], pb)
+ p.index += y
+ return err
}
// Unmarshal parses the protocol buffer representation in the
@@ -438,533 +395,33 @@ func (p *Buffer) DecodeGroup(pb Message) error {
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
+ if u, ok := pb.(newUnmarshaler); ok {
+ err := u.XXX_Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto has unfortunately been inconsistent
+ // about whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
err := u.Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
- typ, base, err := getbase(pb)
- if err != nil {
- return err
- }
-
- err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
-
- if collectStats {
- stats.Decode++
- }
-
- return err
-}
-
-// unmarshalType does the work of unmarshaling a structure.
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
- var state errorState
- required, reqFields := prop.reqCount, uint64(0)
-
- var err error
- for err == nil && o.index < len(o.buf) {
- oi := o.index
- var u uint64
- u, err = o.DecodeVarint()
- if err != nil {
- break
- }
- wire := int(u & 0x7)
- if wire == WireEndGroup {
- if is_group {
- if required > 0 {
- // Not enough information to determine the exact field.
- // (See below.)
- return &RequiredNotSetError{"{Unknown}"}
- }
- return nil // input is satisfied
- }
- return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
- }
- tag := int(u >> 3)
- if tag <= 0 {
- return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
- }
- fieldnum, ok := prop.decoderTags.get(tag)
- if !ok {
- // Maybe it's an extension?
- if prop.extendable {
- if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
- if err = o.skip(st, tag, wire); err == nil {
- extmap := e.extensionsWrite()
- ext := extmap[int32(tag)] // may be missing
- ext.enc = append(ext.enc, o.buf[oi:o.index]...)
- extmap[int32(tag)] = ext
- }
- continue
- }
- }
- // Maybe it's a oneof?
- if prop.oneofUnmarshaler != nil {
- m := structPointer_Interface(base, st).(Message)
- // First return value indicates whether tag is a oneof field.
- ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
- if err == ErrInternalBadWireType {
- // Map the error to something more descriptive.
- // Do the formatting here to save generated code space.
- err = fmt.Errorf("bad wiretype for oneof field in %T", m)
- }
- if ok {
- continue
- }
- }
- err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
- continue
- }
- p := prop.Prop[fieldnum]
-
- if p.dec == nil {
- fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
- continue
- }
- dec := p.dec
- if wire != WireStartGroup && wire != p.WireType {
- if wire == WireBytes && p.packedDec != nil {
- // a packable field
- dec = p.packedDec
- } else {
- err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
- continue
- }
- }
- decErr := dec(o, p, base)
- if decErr != nil && !state.shouldContinue(decErr, p) {
- err = decErr
- }
- if err == nil && p.Required {
- // Successfully decoded a required field.
- if tag <= 64 {
- // use bitmap for fields 1-64 to catch field reuse.
- var mask uint64 = 1 << uint64(tag-1)
- if reqFields&mask == 0 {
- // new required field
- reqFields |= mask
- required--
- }
- } else {
- // This is imprecise. It can be fooled by a required field
- // with a tag > 64 that is encoded twice; that's very rare.
- // A fully correct implementation would require allocating
- // a data structure, which we would like to avoid.
- required--
- }
- }
- }
- if err == nil {
- if is_group {
- return io.ErrUnexpectedEOF
- }
- if state.err != nil {
- return state.err
- }
- if required > 0 {
- // Not enough information to determine the exact field. If we use extra
- // CPU, we could determine the field only if the missing required field
- // has a tag <= 64 and we check reqFields.
- return &RequiredNotSetError{"{Unknown}"}
- }
- }
- return err
-}
-
-// Individual type decoders
-// For each,
-// u is the decoded value,
-// v is a pointer to the field (pointer) in the struct
-
-// Sizes of the pools to allocate inside the Buffer.
-// The goal is modest amortization and allocation
-// on at least 16-byte boundaries.
-const (
- boolPoolSize = 16
- uint32PoolSize = 8
- uint64PoolSize = 4
-)
-
-// Decode a bool.
-func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- if len(o.bools) == 0 {
- o.bools = make([]bool, boolPoolSize)
- }
- o.bools[0] = u != 0
- *structPointer_Bool(base, p.field) = &o.bools[0]
- o.bools = o.bools[1:]
- return nil
-}
-
-func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- *structPointer_BoolVal(base, p.field) = u != 0
- return nil
-}
-
-// Decode an int32.
-func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
- return nil
-}
-
-func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
- return nil
-}
-
-// Decode an int64.
-func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word64_Set(structPointer_Word64(base, p.field), o, u)
- return nil
-}
-
-func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
- return nil
-}
-
-// Decode a string.
-func (o *Buffer) dec_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- *structPointer_String(base, p.field) = &s
- return nil
-}
-
-func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- *structPointer_StringVal(base, p.field) = s
- return nil
-}
-
-// Decode a slice of bytes ([]byte).
-func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
- b, err := o.DecodeRawBytes(true)
- if err != nil {
- return err
- }
- *structPointer_Bytes(base, p.field) = b
- return nil
-}
-
-// Decode a slice of bools ([]bool).
-func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v := structPointer_BoolSlice(base, p.field)
- *v = append(*v, u != 0)
- return nil
-}
-
-// Decode a slice of bools ([]bool) in packed format.
-func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
- v := structPointer_BoolSlice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded bools
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
-
- y := *v
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- y = append(y, u != 0)
- }
-
- *v = y
- return nil
-}
-
-// Decode a slice of int32s ([]int32).
-func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- structPointer_Word32Slice(base, p.field).Append(uint32(u))
- return nil
-}
-
-// Decode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32Slice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded int32s
-
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v.Append(uint32(u))
- }
- return nil
-}
-
-// Decode a slice of int64s ([]int64).
-func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
-
- structPointer_Word64Slice(base, p.field).Append(u)
- return nil
-}
-
-// Decode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64Slice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded int64s
-
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v.Append(u)
- }
- return nil
-}
-
-// Decode a slice of strings ([]string).
-func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- v := structPointer_StringSlice(base, p.field)
- *v = append(*v, s)
- return nil
-}
-
-// Decode a slice of slice of bytes ([][]byte).
-func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
- b, err := o.DecodeRawBytes(true)
- if err != nil {
- return err
- }
- v := structPointer_BytesSlice(base, p.field)
- *v = append(*v, b)
- return nil
-}
-
-// Decode a map field.
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
- raw, err := o.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- oi := o.index // index at the end of this map entry
- o.index -= len(raw) // move buffer back to start of map entry
-
- mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
- if mptr.Elem().IsNil() {
- mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
- }
- v := mptr.Elem() // map[K]V
-
- // Prepare addressable doubly-indirect placeholders for the key and value types.
- // See enc_new_map for why.
- keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
- keybase := toStructPointer(keyptr.Addr()) // **K
-
- var valbase structPointer
- var valptr reflect.Value
- switch p.mtype.Elem().Kind() {
- case reflect.Slice:
- // []byte
- var dummy []byte
- valptr = reflect.ValueOf(&dummy) // *[]byte
- valbase = toStructPointer(valptr) // *[]byte
- case reflect.Ptr:
- // message; valptr is **Msg; need to allocate the intermediate pointer
- valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
- valptr.Set(reflect.New(valptr.Type().Elem()))
- valbase = toStructPointer(valptr)
- default:
- // everything else
- valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
- valbase = toStructPointer(valptr.Addr()) // **V
- }
-
- // Decode.
- // This parses a restricted wire format, namely the encoding of a message
- // with two fields. See enc_new_map for the format.
- for o.index < oi {
- // tagcode for key and value properties are always a single byte
- // because they have tags 1 and 2.
- tagcode := o.buf[o.index]
- o.index++
- switch tagcode {
- case p.mkeyprop.tagcode[0]:
- if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
- return err
- }
- case p.mvalprop.tagcode[0]:
- if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
- return err
- }
- default:
- // TODO: Should we silently skip this instead?
- return fmt.Errorf("proto: bad map data tag %d", raw[0])
- }
- }
- keyelem, valelem := keyptr.Elem(), valptr.Elem()
- if !keyelem.IsValid() {
- keyelem = reflect.Zero(p.mtype.Key())
- }
- if !valelem.IsValid() {
- valelem = reflect.Zero(p.mtype.Elem())
- }
-
- v.SetMapIndex(keyelem, valelem)
- return nil
-}
-
-// Decode a group.
-func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
- bas := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(bas) {
- // allocate new nested message
- bas = toStructPointer(reflect.New(p.stype))
- structPointer_SetStructPointer(base, p.field, bas)
- }
- return o.unmarshalType(p.stype, p.sprop, true, bas)
-}
-
-// Decode an embedded message.
-func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
- raw, e := o.DecodeRawBytes(false)
- if e != nil {
- return e
- }
-
- bas := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(bas) {
- // allocate new nested message
- bas = toStructPointer(reflect.New(p.stype))
- structPointer_SetStructPointer(base, p.field, bas)
- }
-
- // If the object can unmarshal itself, let it.
- if p.isUnmarshaler {
- iv := structPointer_Interface(bas, p.stype)
- return iv.(Unmarshaler).Unmarshal(raw)
- }
-
- obuf := o.buf
- oi := o.index
- o.buf = raw
- o.index = 0
-
- err = o.unmarshalType(p.stype, p.sprop, false, bas)
- o.buf = obuf
- o.index = oi
-
- return err
-}
-
-// Decode a slice of embedded messages.
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
- return o.dec_slice_struct(p, false, base)
-}
-
-// Decode a slice of embedded groups.
-func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
- return o.dec_slice_struct(p, true, base)
-}
-
-// Decode a slice of structs ([]*struct).
-func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
- v := reflect.New(p.stype)
- bas := toStructPointer(v)
- structPointer_StructPointerSlice(base, p.field).Append(bas)
-
- if is_group {
- err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
- return err
- }
-
- raw, err := o.DecodeRawBytes(false)
- if err != nil {
- return err
- }
-
- // If the object can unmarshal itself, let it.
- if p.isUnmarshaler {
- iv := v.Interface()
- return iv.(Unmarshaler).Unmarshal(raw)
- }
-
- obuf := o.buf
- oi := o.index
- o.buf = raw
- o.index = 0
-
- err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
-
- o.buf = obuf
- o.index = oi
-
+ // Slow workaround for messages that aren't Unmarshalers.
+ // This includes some hand-coded .pb.go files and
+ // bootstrap protos.
+ // TODO: fix all of those and then add Unmarshal to
+ // the Message interface. Then:
+ // The cast above and code below can be deleted.
+ // The old unmarshaler can be deleted.
+ // Clients can call Unmarshal directly (can already do that, actually).
+ var info InternalMessageInfo
+ err := info.Unmarshal(pb, p.buf[p.index:])
+ p.index = len(p.buf)
return err
}
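
Buffer.Unmarshal now probes in order: XXX_Unmarshal (new generated code), the legacy Unmarshaler interface, and finally the reflection-based InternalMessageInfo fallback. Per the NOTE in the hunk, the Unmarshaler path does not reset the receiver: proto.Unmarshal resets first, proto.UnmarshalMerge does not. A sketch with a hypothetical self-unmarshaling type:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    // rawMsg is a made-up message that just captures its wire bytes.
    type rawMsg struct{ data []byte }

    func (m *rawMsg) Reset()         { m.data = nil }
    func (m *rawMsg) String() string { return fmt.Sprintf("%x", m.data) }
    func (*rawMsg) ProtoMessage()    {}

    // Unmarshal merges into the receiver; resetting is the caller's job.
    func (m *rawMsg) Unmarshal(b []byte) error {
        m.data = append(m.data, b...)
        return nil
    }

    func main() {
        var m rawMsg
        _ = proto.Unmarshal([]byte{0x08, 0x01}, &m)      // Reset, then 0801
        _ = proto.UnmarshalMerge([]byte{0x10, 0x02}, &m) // appends 1002
        fmt.Println(m.String()) // 08011002
    }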
diff --git a/src/vendor/github.com/golang/protobuf/proto/deprecated.go b/src/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 000000000..35b882c09
--- /dev/null
+++ b/src/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
diff --git a/src/vendor/github.com/golang/protobuf/proto/discard.go b/src/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 000000000..dea2617ce
--- /dev/null
+++ b/src/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type generatedDiscarder interface {
+ XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+ if m, ok := m.(generatedDiscarder); ok {
+ m.XXX_DiscardUnknown()
+ return
+ }
+ // TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+ // but the master branch has no implementation for InternalMessageInfo,
+ // so it would be more work to replicate that approach.
+ discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+ di := atomicLoadDiscardInfo(&a.discard)
+ if di == nil {
+ di = getDiscardInfo(reflect.TypeOf(m).Elem())
+ atomicStoreDiscardInfo(&a.discard, di)
+ }
+ di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []discardFieldInfo
+ unrecognized field
+}
+
+type discardFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+ discard func(src pointer)
+}
+
+var (
+ discardInfoMap = map[reflect.Type]*discardInfo{}
+ discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+ discardInfoLock.Lock()
+ defer discardInfoLock.Unlock()
+ di := discardInfoMap[t]
+ if di == nil {
+ di = &discardInfo{typ: t}
+ discardInfoMap[t] = di
+ }
+ return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&di.initialized) == 0 {
+ di.computeDiscardInfo()
+ }
+
+ for _, fi := range di.fields {
+ sfp := src.offset(fi.field)
+ fi.discard(sfp)
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+ // Ignore lock since DiscardUnknown is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ DiscardUnknown(m)
+ }
+ }
+ }
+
+ if di.unrecognized.IsValid() {
+ *src.offset(di.unrecognized).toBytes() = nil
+ }
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+ di.lock.Lock()
+ defer di.lock.Unlock()
+ if di.initialized != 0 {
+ return
+ }
+ t := di.typ
+ n := t.NumField()
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ dfi := discardFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+ case isSlice: // E.g., []*pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sps := src.getPointerSlice()
+ for _, sp := range sps {
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ default: // E.g., *pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+ default: // E.g., map[K]V
+ if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+ dfi.discard = func(src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ DiscardUnknown(val.Interface().(Message))
+ }
+ }
+ } else {
+ dfi.discard = func(pointer) {} // Noop
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ dfi.discard = func(src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ DiscardUnknown(sv.Interface().(Message))
+ }
+ }
+ }
+ }
+ default:
+ continue
+ }
+ di.fields = append(di.fields, dfi)
+ }
+
+ di.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ di.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+ v := reflect.ValueOf(m)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return
+ }
+ t := v.Type()
+
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ vf := v.Field(i)
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+ case isSlice: // E.g., []*pb.T
+ for j := 0; j < vf.Len(); j++ {
+ discardLegacy(vf.Index(j).Interface().(Message))
+ }
+ default: // E.g., *pb.T
+ discardLegacy(vf.Interface().(Message))
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+ default: // E.g., map[K]V
+ tv := vf.Type().Elem()
+ if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+ for _, key := range vf.MapKeys() {
+ val := vf.MapIndex(key)
+ discardLegacy(val.Interface().(Message))
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+ default: // E.g., test_proto.isCommunique_Union interface
+ if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+ vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+ if !vf.IsNil() {
+ vf = vf.Elem() // E.g., test_proto.Communique_Msg
+ vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+ if vf.Kind() == reflect.Ptr {
+ discardLegacy(vf.Interface().(Message))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+ if vf.Type() != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ vf.Set(reflect.ValueOf([]byte(nil)))
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(m); err == nil {
+ // Ignore lock since discardLegacy is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ discardLegacy(m)
+ }
+ }
+ }
+}
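
End to end, DiscardUnknown clears the bytes that Unmarshal stashed for fields the Go type does not know about. Messages without a generated XXX_DiscardUnknown fall back to discardLegacy, which reflects over the struct and nils out XXX_unrecognized. A sketch with a hand-written legacy-shaped message (the field layout is illustrative):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    // legacy mimics an old generated message: no XXX_DiscardUnknown method,
    // so DiscardUnknown takes the reflective discardLegacy path.
    type legacy struct {
        Name             *string
        XXX_unrecognized []byte
    }

    func (m *legacy) Reset()         { *m = legacy{} }
    func (m *legacy) String() string { return fmt.Sprint(m.XXX_unrecognized) }
    func (*legacy) ProtoMessage()    {}

    func main() {
        name := "x"
        m := &legacy{Name: &name, XXX_unrecognized: []byte{0x20, 0x01}}
        proto.DiscardUnknown(m)
        fmt.Println(m.XXX_unrecognized == nil) // true: unknown bytes dropped
    }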
diff --git a/src/vendor/github.com/golang/protobuf/proto/encode.go b/src/vendor/github.com/golang/protobuf/proto/encode.go
index 8b84d1b22..3abfed2cf 100644
--- a/src/vendor/github.com/golang/protobuf/proto/encode.go
+++ b/src/vendor/github.com/golang/protobuf/proto/encode.go
@@ -37,28 +37,9 @@ package proto
import (
"errors"
- "fmt"
"reflect"
- "sort"
)
-// RequiredNotSetError is the error returned if Marshal is called with
-// a protocol buffer struct whose required fields have not
-// all been initialized. It is also the error returned if Unmarshal is
-// called with an encoded protocol buffer that does not include all the
-// required fields.
-//
-// When printed, RequiredNotSetError reports the first unset required field in a
-// message. If the field cannot be precisely determined, it is reported as
-// "{Unknown}".
-type RequiredNotSetError struct {
- field string
-}
-
-func (e *RequiredNotSetError) Error() string {
- return fmt.Sprintf("proto: required field %q not set", e.field)
-}
-
var (
// errRepeatedHasNil is the error returned if Marshal is called with
// a struct with a repeated field containing a nil element.
@@ -82,10 +63,6 @@ var (
const maxVarintBytes = 10 // maximum length of a varint
-// maxMarshalSize is the largest allowed size of an encoded protobuf,
-// since C++ and Java use signed int32s for the size.
-const maxMarshalSize = 1<<31 - 1
-
// EncodeVarint returns the varint encoding of x.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
@@ -119,18 +96,27 @@ func (p *Buffer) EncodeVarint(x uint64) error {
// SizeVarint returns the varint encoding size of an integer.
func SizeVarint(x uint64) int {
- return sizeVarint(x)
-}
-
-func sizeVarint(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
+ switch {
+ case x < 1<<7:
+ return 1
+ case x < 1<<14:
+ return 2
+ case x < 1<<21:
+ return 3
+ case x < 1<<28:
+ return 4
+ case x < 1<<35:
+ return 5
+ case x < 1<<42:
+ return 6
+ case x < 1<<49:
+ return 7
+ case x < 1<<56:
+ return 8
+ case x < 1<<63:
+ return 9
}
- return n
+ return 10
}
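
The unrolled SizeVarint is a pure range check: each varint byte carries 7 payload bits, so anything below 1<<7 fits in one byte, below 1<<14 in two, and so on up to ten bytes once bit 63 is set. A quick cross-check against the actual encoder:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        for _, x := range []uint64{0, 127, 128, 300, 1 << 62, ^uint64(0)} {
            // The encoded length must agree with the switch-based size.
            fmt.Printf("%d -> %d bytes (SizeVarint: %d)\n",
                x, len(proto.EncodeVarint(x)), proto.SizeVarint(x))
        }
    }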
// EncodeFixed64 writes a 64-bit integer to the Buffer.
@@ -149,10 +135,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error {
return nil
}
-func sizeFixed64(x uint64) int {
- return 8
-}
-
// EncodeFixed32 writes a 32-bit integer to the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
@@ -165,20 +147,12 @@ func (p *Buffer) EncodeFixed32(x uint64) error {
return nil
}
-func sizeFixed32(x uint64) int {
- return 4
-}
-
// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
// to the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
// use signed number to get arithmetic right shift.
- return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
-}
-
-func sizeZigzag64(x uint64) int {
- return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
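
ZigZag maps signed integers to unsigned ones so that small magnitudes stay small on the wire: n encodes as (n << 1) ^ (n >> 63) with an arithmetic right shift, giving 0->0, -1->1, 1->2, -2->3. The extra uint64(...) wrapper added above is a no-op conversion of an already-uint64 expression; the bits are unchanged. A worked check:

    package main

    import "fmt"

    func zigzag64(n int64) uint64 {
        // The arithmetic shift smears the sign bit across all 64 bits,
        // matching EncodeZigzag64 above.
        return uint64((n << 1) ^ (n >> 63))
    }

    func main() {
        for _, n := range []int64{0, -1, 1, -2, 2} {
            fmt.Printf("%d -> %d\n", n, zigzag64(n)) // 0 1 2 3 4
        }
    }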
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
@@ -189,10 +163,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error {
return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
}
-func sizeZigzag32(x uint64) int {
- return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
@@ -202,11 +172,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error {
return nil
}
-func sizeRawBytes(b []byte) int {
- return sizeVarint(uint64(len(b))) +
- len(b)
-}
-
// EncodeStringBytes writes an encoded string to the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) EncodeStringBytes(s string) error {
@@ -215,319 +180,17 @@ func (p *Buffer) EncodeStringBytes(s string) error {
return nil
}
-func sizeStringBytes(s string) int {
- return sizeVarint(uint64(len(s))) +
- len(s)
-}
-
// Marshaler is the interface representing objects that can marshal themselves.
type Marshaler interface {
Marshal() ([]byte, error)
}
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, returning the data.
-func Marshal(pb Message) ([]byte, error) {
- // Can the object marshal itself?
- if m, ok := pb.(Marshaler); ok {
- return m.Marshal()
- }
- p := NewBuffer(nil)
- err := p.Marshal(pb)
- if p.buf == nil && err == nil {
- // Return a non-nil slice on success.
- return []byte{}, nil
- }
- return p.buf, err
-}
-
// EncodeMessage writes the protocol buffer to the Buffer,
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return ErrNil
- }
- if err == nil {
- var state errorState
- err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
- }
- return err
-}
-
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-func (p *Buffer) Marshal(pb Message) error {
- // Can the object marshal itself?
- if m, ok := pb.(Marshaler); ok {
- data, err := m.Marshal()
- p.buf = append(p.buf, data...)
- return err
- }
-
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return ErrNil
- }
- if err == nil {
- err = p.enc_struct(GetProperties(t.Elem()), base)
- }
-
- if collectStats {
- (stats).Encode++ // Parens are to work around a goimports bug.
- }
-
- if len(p.buf) > maxMarshalSize {
- return ErrTooLarge
- }
- return err
-}
-
-// Size returns the encoded size of a protocol buffer.
-func Size(pb Message) (n int) {
- // Can the object marshal itself? If so, Size is slow.
- // TODO: add Size to Marshaler, or add a Sizer interface.
- if m, ok := pb.(Marshaler); ok {
- b, _ := m.Marshal()
- return len(b)
- }
-
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return 0
- }
- if err == nil {
- n = size_struct(GetProperties(t.Elem()), base)
- }
-
- if collectStats {
- (stats).Size++ // Parens are to work around a goimports bug.
- }
-
- return
-}
-
-// Individual type encoders.
-
-// Encode a bool.
-func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
- v := *structPointer_Bool(base, p.field)
- if v == nil {
- return ErrNil
- }
- x := 0
- if *v {
- x = 1
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
- v := *structPointer_BoolVal(base, p.field)
- if !v {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, 1)
- return nil
-}
-
-func size_bool(p *Properties, base structPointer) int {
- v := *structPointer_Bool(base, p.field)
- if v == nil {
- return 0
- }
- return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-func size_proto3_bool(p *Properties, base structPointer) int {
- v := *structPointer_BoolVal(base, p.field)
- if !v && !p.oneof {
- return 0
- }
- return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-// Encode an int32.
-func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return ErrNil
- }
- x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32Val(base, p.field)
- x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func size_int32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return 0
- }
- x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-func size_proto3_int32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32Val(base, p.field)
- x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-// Encode a uint32.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return ErrNil
- }
- x := word32_Get(v)
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
- v := structPointer_Word32Val(base, p.field)
- x := word32Val_Get(v)
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func size_uint32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return 0
- }
- x := word32_Get(v)
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-func size_proto3_uint32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32Val(base, p.field)
- x := word32Val_Get(v)
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-// Encode an int64.
-func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64(base, p.field)
- if word64_IsNil(v) {
- return ErrNil
- }
- x := word64_Get(v)
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, x)
- return nil
-}
-
-func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64Val(base, p.field)
- x := word64Val_Get(v)
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, x)
- return nil
-}
-
-func size_int64(p *Properties, base structPointer) (n int) {
- v := structPointer_Word64(base, p.field)
- if word64_IsNil(v) {
- return 0
- }
- x := word64_Get(v)
- n += len(p.tagcode)
- n += p.valSize(x)
- return
-}
-
-func size_proto3_int64(p *Properties, base structPointer) (n int) {
- v := structPointer_Word64Val(base, p.field)
- x := word64Val_Get(v)
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(x)
- return
-}
-
-// Encode a string.
-func (o *Buffer) enc_string(p *Properties, base structPointer) error {
- v := *structPointer_String(base, p.field)
- if v == nil {
- return ErrNil
- }
- x := *v
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(x)
- return nil
-}
-
-func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
- v := *structPointer_StringVal(base, p.field)
- if v == "" {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(v)
- return nil
-}
-
-func size_string(p *Properties, base structPointer) (n int) {
- v := *structPointer_String(base, p.field)
- if v == nil {
- return 0
- }
- x := *v
- n += len(p.tagcode)
- n += sizeStringBytes(x)
- return
-}
-
-func size_proto3_string(p *Properties, base structPointer) (n int) {
- v := *structPointer_StringVal(base, p.field)
- if v == "" && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeStringBytes(v)
- return
+ siz := Size(pb)
+ p.EncodeVarint(uint64(siz))
+ return p.Marshal(pb)
}
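
For reference, a usage sketch of the new body: EncodeMessage now writes varint(Size(pb)) followed by the marshaled bytes. wrappers.StringValue is assumed here only as a convenient concrete Message; any generated type works:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        "github.com/golang/protobuf/ptypes/wrappers"
    )

    func main() {
        msg := &wrappers.StringValue{Value: "hello"}
        buf := proto.NewBuffer(nil)
        // Writes the varint-encoded length, then the message itself.
        if err := buf.EncodeMessage(msg); err != nil {
            panic(err)
        }
        fmt.Printf("%x\n", buf.Bytes())
    }
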
// All protocol buffer fields are nillable, but be careful.
@@ -538,825 +201,3 @@ func isNil(v reflect.Value) bool {
}
return false
}
-
-// Encode a message struct.
-func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
- var state errorState
- structp := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(structp) {
- return ErrNil
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, err := m.Marshal()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(data)
- return state.err
- }
-
- o.buf = append(o.buf, p.tagcode...)
- return o.enc_len_struct(p.sprop, structp, &state)
-}
-
-func size_struct_message(p *Properties, base structPointer) int {
- structp := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(structp) {
- return 0
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, _ := m.Marshal()
- n0 := len(p.tagcode)
- n1 := sizeRawBytes(data)
- return n0 + n1
- }
-
- n0 := len(p.tagcode)
- n1 := size_struct(p.sprop, structp)
- n2 := sizeVarint(uint64(n1)) // size of encoded length
- return n0 + n1 + n2
-}
-
-// Encode a group struct.
-func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
- var state errorState
- b := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(b) {
- return ErrNil
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
- err := o.enc_struct(p.sprop, b)
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
- return state.err
-}
-
-func size_struct_group(p *Properties, base structPointer) (n int) {
- b := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(b) {
- return 0
- }
-
- n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
- n += size_struct(p.sprop, b)
- n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
- return
-}
-
-// Encode a slice of bools ([]bool).
-func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return ErrNil
- }
- for _, x := range s {
- o.buf = append(o.buf, p.tagcode...)
- v := uint64(0)
- if x {
- v = 1
- }
- p.valEnc(o, v)
- }
- return nil
-}
-
-func size_slice_bool(p *Properties, base structPointer) int {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return 0
- }
- return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
-}
-
-// Encode a slice of bools ([]bool) in packed format.
-func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
- for _, x := range s {
- v := uint64(0)
- if x {
- v = 1
- }
- p.valEnc(o, v)
- }
- return nil
-}
-
-func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return 0
- }
- n += len(p.tagcode)
- n += sizeVarint(uint64(l))
- n += l // each bool takes exactly one byte
- return
-}
-
-// Encode a slice of bytes ([]byte).
-func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
- s := *structPointer_Bytes(base, p.field)
- if s == nil {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(s)
- return nil
-}
-
-func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
- s := *structPointer_Bytes(base, p.field)
- if len(s) == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(s)
- return nil
-}
-
-func size_slice_byte(p *Properties, base structPointer) (n int) {
- s := *structPointer_Bytes(base, p.field)
- if s == nil && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeRawBytes(s)
- return
-}
-
-func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
- s := *structPointer_Bytes(base, p.field)
- if len(s) == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeRawBytes(s)
- return
-}
-
-// Encode a slice of int32s ([]int32).
-func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- p.valEnc(o, uint64(x))
- }
- return nil
-}
-
-func size_slice_int32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- n += p.valSize(uint64(x))
- }
- return
-}
-
-// Encode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- p.valEnc(buf, uint64(x))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- bufSize += p.valSize(uint64(x))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of uint32s ([]uint32).
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- x := s.Index(i)
- p.valEnc(o, uint64(x))
- }
- return nil
-}
-
-func size_slice_uint32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- x := s.Index(i)
- n += p.valSize(uint64(x))
- }
- return
-}
-
-// Encode a slice of uint32s ([]uint32) in packed format.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- p.valEnc(buf, uint64(s.Index(i)))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- bufSize += p.valSize(uint64(s.Index(i)))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of int64s ([]int64).
-func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, s.Index(i))
- }
- return nil
-}
-
-func size_slice_int64(p *Properties, base structPointer) (n int) {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- n += p.valSize(s.Index(i))
- }
- return
-}
-
-// Encode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- p.valEnc(buf, s.Index(i))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- bufSize += p.valSize(s.Index(i))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of slice of bytes ([][]byte).
-func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
- ss := *structPointer_BytesSlice(base, p.field)
- l := len(ss)
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(ss[i])
- }
- return nil
-}
-
-func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
- ss := *structPointer_BytesSlice(base, p.field)
- l := len(ss)
- if l == 0 {
- return 0
- }
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- n += sizeRawBytes(ss[i])
- }
- return
-}
-
-// Encode a slice of strings ([]string).
-func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
- ss := *structPointer_StringSlice(base, p.field)
- l := len(ss)
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(ss[i])
- }
- return nil
-}
-
-func size_slice_string(p *Properties, base structPointer) (n int) {
- ss := *structPointer_StringSlice(base, p.field)
- l := len(ss)
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- n += sizeStringBytes(ss[i])
- }
- return
-}
-
-// Encode a slice of message structs ([]*struct).
-func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
- var state errorState
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- for i := 0; i < l; i++ {
- structp := s.Index(i)
- if structPointer_IsNil(structp) {
- return errRepeatedHasNil
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, err := m.Marshal()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(data)
- continue
- }
-
- o.buf = append(o.buf, p.tagcode...)
- err := o.enc_len_struct(p.sprop, structp, &state)
- if err != nil && !state.shouldContinue(err, nil) {
- if err == ErrNil {
- return errRepeatedHasNil
- }
- return err
- }
- }
- return state.err
-}
-
-func size_slice_struct_message(p *Properties, base structPointer) (n int) {
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- structp := s.Index(i)
- if structPointer_IsNil(structp) {
- return // return the size up to this point
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, _ := m.Marshal()
- n += sizeRawBytes(data)
- continue
- }
-
- n0 := size_struct(p.sprop, structp)
- n1 := sizeVarint(uint64(n0)) // size of encoded length
- n += n0 + n1
- }
- return
-}
-
-// Encode a slice of group structs ([]*struct).
-func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
- var state errorState
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- for i := 0; i < l; i++ {
- b := s.Index(i)
- if structPointer_IsNil(b) {
- return errRepeatedHasNil
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
-
- err := o.enc_struct(p.sprop, b)
-
- if err != nil && !state.shouldContinue(err, nil) {
- if err == ErrNil {
- return errRepeatedHasNil
- }
- return err
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
- }
- return state.err
-}
-
-func size_slice_struct_group(p *Properties, base structPointer) (n int) {
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
- n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
- for i := 0; i < l; i++ {
- b := s.Index(i)
- if structPointer_IsNil(b) {
- return // return size up to this point
- }
-
- n += size_struct(p.sprop, b)
- }
- return
-}
-
-// Encode an extension map.
-func (o *Buffer) enc_map(p *Properties, base structPointer) error {
- exts := structPointer_ExtMap(base, p.field)
- if err := encodeExtensionsMap(*exts); err != nil {
- return err
- }
-
- return o.enc_map_body(*exts)
-}
-
-func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
- exts := structPointer_Extensions(base, p.field)
-
- v, mu := exts.extensionsRead()
- if v == nil {
- return nil
- }
-
- mu.Lock()
- defer mu.Unlock()
- if err := encodeExtensionsMap(v); err != nil {
- return err
- }
-
- return o.enc_map_body(v)
-}
-
-func (o *Buffer) enc_map_body(v map[int32]Extension) error {
- // Fast-path for common cases: zero or one extensions.
- if len(v) <= 1 {
- for _, e := range v {
- o.buf = append(o.buf, e.enc...)
- }
- return nil
- }
-
- // Sort keys to provide a deterministic encoding.
- keys := make([]int, 0, len(v))
- for k := range v {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, k := range keys {
- o.buf = append(o.buf, v[int32(k)].enc...)
- }
- return nil
-}
-
-func size_map(p *Properties, base structPointer) int {
- v := structPointer_ExtMap(base, p.field)
- return extensionsMapSize(*v)
-}
-
-func size_exts(p *Properties, base structPointer) int {
- v := structPointer_Extensions(base, p.field)
- return extensionsSize(v)
-}
-
-// Encode a map field.
-func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
- var state errorState // XXX: or do we need to plumb this through?
-
- /*
- A map defined as
- map map_field = N;
- is encoded in the same way as
- message MapFieldEntry {
- key_type key = 1;
- value_type value = 2;
- }
- repeated MapFieldEntry map_field = N;
- */
-
- v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
- if v.Len() == 0 {
- return nil
- }
-
- keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
- enc := func() error {
- if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
- return err
- }
- if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
- return err
- }
- return nil
- }
-
- // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
- for _, key := range v.MapKeys() {
- val := v.MapIndex(key)
-
- keycopy.Set(key)
- valcopy.Set(val)
-
- o.buf = append(o.buf, p.tagcode...)
- if err := o.enc_len_thing(enc, &state); err != nil {
- return err
- }
- }
- return nil
-}
-
-func size_new_map(p *Properties, base structPointer) int {
- v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
-
- keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
- n := 0
- for _, key := range v.MapKeys() {
- val := v.MapIndex(key)
- keycopy.Set(key)
- valcopy.Set(val)
-
- // Tag codes for key and val are the responsibility of the sub-sizer.
- keysize := p.mkeyprop.size(p.mkeyprop, keybase)
- valsize := p.mvalprop.size(p.mvalprop, valbase)
- entry := keysize + valsize
- // Add on tag code and length of map entry itself.
- n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
- }
- return n
-}
-
-// mapEncodeScratch returns a new reflect.Value matching the map's value type,
-// and a structPointer suitable for passing to an encoder or sizer.
-func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
- // Prepare addressable doubly-indirect placeholders for the key and value types.
- // This is needed because the element-type encoders expect **T, but the map iteration produces T.
-
- keycopy = reflect.New(mapType.Key()).Elem() // addressable K
- keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
- keyptr.Set(keycopy.Addr()) //
- keybase = toStructPointer(keyptr.Addr()) // **K
-
- // Value types are more varied and require special handling.
- switch mapType.Elem().Kind() {
- case reflect.Slice:
- // []byte
- var dummy []byte
- valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
- valbase = toStructPointer(valcopy.Addr())
- case reflect.Ptr:
- // message; the generated field type is map[K]*Msg (so V is *Msg),
- // so we only need one level of indirection.
- valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
- valbase = toStructPointer(valcopy.Addr())
- default:
- // everything else
- valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
- valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
- valptr.Set(valcopy.Addr()) //
- valbase = toStructPointer(valptr.Addr()) // **V
- }
- return
-}
-
-// Encode a struct.
-func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
- var state errorState
- // Encode fields in tag order so that decoders may use optimizations
- // that depend on the ordering.
- // https://developers.google.com/protocol-buffers/docs/encoding#order
- for _, i := range prop.order {
- p := prop.Prop[i]
- if p.enc != nil {
- err := p.enc(o, p, base)
- if err != nil {
- if err == ErrNil {
- if p.Required && state.err == nil {
- state.err = &RequiredNotSetError{p.Name}
- }
- } else if err == errRepeatedHasNil {
- // Give more context to nil values in repeated fields.
- return errors.New("repeated field " + p.OrigName + " has nil element")
- } else if !state.shouldContinue(err, p) {
- return err
- }
- }
- if len(o.buf) > maxMarshalSize {
- return ErrTooLarge
- }
- }
- }
-
- // Do oneof fields.
- if prop.oneofMarshaler != nil {
- m := structPointer_Interface(base, prop.stype).(Message)
- if err := prop.oneofMarshaler(m, o); err == ErrNil {
- return errOneofHasNil
- } else if err != nil {
- return err
- }
- }
-
- // Add unrecognized fields at the end.
- if prop.unrecField.IsValid() {
- v := *structPointer_Bytes(base, prop.unrecField)
- if len(o.buf)+len(v) > maxMarshalSize {
- return ErrTooLarge
- }
- if len(v) > 0 {
- o.buf = append(o.buf, v...)
- }
- }
-
- return state.err
-}
-
-func size_struct(prop *StructProperties, base structPointer) (n int) {
- for _, i := range prop.order {
- p := prop.Prop[i]
- if p.size != nil {
- n += p.size(p, base)
- }
- }
-
- // Add unrecognized fields at the end.
- if prop.unrecField.IsValid() {
- v := *structPointer_Bytes(base, prop.unrecField)
- n += len(v)
- }
-
- // Factor in any oneof fields.
- if prop.oneofSizer != nil {
- m := structPointer_Interface(base, prop.stype).(Message)
- n += prop.oneofSizer(m)
- }
-
- return
-}
-
-var zeroes [20]byte // longer than any conceivable sizeVarint
-
-// Encode a struct, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
- return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
-}
-
-// Encode something, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
- iLen := len(o.buf)
- o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
- iMsg := len(o.buf)
- err := enc()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- lMsg := len(o.buf) - iMsg
- lLen := sizeVarint(uint64(lMsg))
- switch x := lLen - (iMsg - iLen); {
- case x > 0: // actual length is x bytes larger than the space we reserved
- // Move msg x bytes right.
- o.buf = append(o.buf, zeroes[:x]...)
- copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
- case x < 0: // actual length is x bytes smaller than the space we reserved
- // Move msg x bytes left.
- copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
- o.buf = o.buf[:len(o.buf)+x] // x is negative
- }
- // Encode the length in the reserved space.
- o.buf = o.buf[:iLen]
- o.EncodeVarint(uint64(lMsg))
- o.buf = o.buf[:len(o.buf)+lMsg]
- return state.err
-}
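
The reserve-and-shift dance above exists because the payload length is unknown until after encoding. For comparison, a simpler but allocation-heavier sketch that encodes the varint header into its own scratch array and prepends it:

    import "encoding/binary"

    // frame prepends the varint-encoded length of payload, the effect
    // enc_len_thing achieves in place without a second buffer.
    func frame(payload []byte) []byte {
        var hdr [binary.MaxVarintLen64]byte
        n := binary.PutUvarint(hdr[:], uint64(len(payload)))
        return append(hdr[:n:n], payload...)
    }
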
-
-// errorState maintains the first error that occurs and updates that error
-// with additional context.
-type errorState struct {
- err error
-}
-
-// shouldContinue reports whether encoding should continue upon encountering the
-// given error. If the error is RequiredNotSetError, shouldContinue returns true
-// and, if this is the first appearance of that error, remembers it for future
-// reporting.
-//
-// If prop is not nil, it may update any error with additional context about the
-// field with the error.
-func (s *errorState) shouldContinue(err error, prop *Properties) bool {
- // Ignore unset required fields.
- reqNotSet, ok := err.(*RequiredNotSetError)
- if !ok {
- return false
- }
- if s.err == nil {
- if prop != nil {
- err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
- }
- s.err = err
- }
- return true
-}
diff --git a/src/vendor/github.com/golang/protobuf/proto/equal.go b/src/vendor/github.com/golang/protobuf/proto/equal.go
index 2ed1cf596..f9b6e41b3 100644
--- a/src/vendor/github.com/golang/protobuf/proto/equal.go
+++ b/src/vendor/github.com/golang/protobuf/proto/equal.go
@@ -109,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool {
// set/unset mismatch
return false
}
- b1, ok := f1.Interface().(raw)
- if ok {
- b2 := f2.Interface().(raw)
- // RawMessage
- if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
- return false
- }
- continue
- }
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2, sprop.Prop[i]) {
@@ -146,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool {
u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
- if !bytes.Equal(u1, u2) {
- return false
- }
-
- return true
+ return bytes.Equal(u1, u2)
}
// v1 and v2 are known to have the same type.
@@ -259,7 +246,17 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
return false
}
- m1, m2 := e1.value, e2.value
+ m1 := extensionAsLegacyType(e1.value)
+ m2 := extensionAsLegacyType(e2.value)
+
+ if m1 == nil && m2 == nil {
+ // Both have only encoded form.
+ if bytes.Equal(e1.enc, e2.enc) {
+ continue
+ }
+ // The bytes are different, but the extensions might still be
+ // equal. We need to decode them to compare.
+ }
if m1 != nil && m2 != nil {
// Both are unencoded.
@@ -276,8 +273,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
desc = m[extNum]
}
if desc == nil {
+ // If both have only encoded form and the bytes are the same,
+ // it is handled above. We get here when the bytes are different.
+ // We don't know how to decode it, so just compare them as byte
+ // slices.
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
- continue
+ return false
}
var err error
if m1 == nil {
diff --git a/src/vendor/github.com/golang/protobuf/proto/extensions.go b/src/vendor/github.com/golang/protobuf/proto/extensions.go
index eaad21831..fa88add30 100644
--- a/src/vendor/github.com/golang/protobuf/proto/extensions.go
+++ b/src/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -38,6 +38,7 @@ package proto
import (
"errors"
"fmt"
+ "io"
"reflect"
"strconv"
"sync"
@@ -91,14 +92,29 @@ func (n notLocker) Unlock() {}
// extendable returns the extendableProto interface for the given generated proto message.
// If the proto message has the old extension format, it returns a wrapper that implements
// the extendableProto interface.
-func extendable(p interface{}) (extendableProto, bool) {
- if ep, ok := p.(extendableProto); ok {
- return ep, ok
+func extendable(p interface{}) (extendableProto, error) {
+ switch p := p.(type) {
+ case extendableProto:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return p, nil
+ case extendableProtoV1:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return extensionAdapter{p}, nil
}
- if ep, ok := p.(extendableProtoV1); ok {
- return extensionAdapter{ep}, ok
- }
- return nil, false
+ // Don't allocate a specific error containing %T:
+ // this is the hot path for Clone and MarshalText.
+ return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+ v := reflect.ValueOf(x)
+ return v.Kind() == reflect.Ptr && v.IsNil()
}
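
isNilPtr guards against Go's typed-nil pitfall: an interface holding a nil pointer is itself non-nil. A standalone illustration:

    package main

    import (
        "fmt"
        "reflect"
    )

    type msg struct{}

    func main() {
        var p *msg
        var i interface{} = p
        fmt.Println(i == nil) // false: the interface carries the type *msg
        v := reflect.ValueOf(i)
        fmt.Println(v.Kind() == reflect.Ptr && v.IsNil()) // true: what isNilPtr reports
    }
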
// XXX_InternalExtensions is an internal representation of proto extensions.
@@ -143,9 +159,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc
return e.p.extensionMap, &e.p.mu
}
-var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
-var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
-
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
@@ -172,15 +185,31 @@ type Extension struct {
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
- desc *ExtensionDesc
+ desc *ExtensionDesc
+
+ // value is a concrete value for the extension field. Let the type of
+ // desc.ExtensionType be the "API type" and the type of Extension.value
+ // be the "storage type". The API type and storage type are the same except:
+ // * For scalars (except []byte), the API type uses *T,
+ // while the storage type uses T.
+ // * For repeated fields, the API type uses []T, while the storage type
+ // uses *[]T.
+ //
+	// The reason for the divergence is so that the storage type more naturally
+	// matches what is expected when retrieving the values through the
+	// protobuf reflection APIs.
+ //
+ // The value may only be populated if desc is also populated.
value interface{}
- enc []byte
+
+ // enc is the raw bytes for the extension field.
+ enc []byte
}
// SetRawExtension is for testing only.
func SetRawExtension(base Message, id int32, b []byte) {
- epb, ok := extendable(base)
- if !ok {
+ epb, err := extendable(base)
+ if err != nil {
return
}
extmap := epb.extensionsWrite()
@@ -205,7 +234,7 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
pbi = ea.extendableProtoV1
}
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
- return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
@@ -250,85 +279,11 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
return prop
}
-// encode encodes any unmarshaled (unencoded) extensions in e.
-func encodeExtensions(e *XXX_InternalExtensions) error {
- m, mu := e.extensionsRead()
- if m == nil {
- return nil // fast path
- }
- mu.Lock()
- defer mu.Unlock()
- return encodeExtensionsMap(m)
-}
-
-// encode encodes any unmarshaled (unencoded) extensions in e.
-func encodeExtensionsMap(m map[int32]Extension) error {
- for k, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- et := reflect.TypeOf(e.desc.ExtensionType)
- props := extensionProperties(e.desc)
-
- p := NewBuffer(nil)
- // If e.value has type T, the encoder expects a *struct{ X T }.
- // Pass a *T with a zero field and hope it all works out.
- x := reflect.New(et)
- x.Elem().Set(reflect.ValueOf(e.value))
- if err := props.enc(p, props, toStructPointer(x)); err != nil {
- return err
- }
- e.enc = p.buf
- m[k] = e
- }
- return nil
-}
-
-func extensionsSize(e *XXX_InternalExtensions) (n int) {
- m, mu := e.extensionsRead()
- if m == nil {
- return 0
- }
- mu.Lock()
- defer mu.Unlock()
- return extensionsMapSize(m)
-}
-
-func extensionsMapSize(m map[int32]Extension) (n int) {
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- et := reflect.TypeOf(e.desc.ExtensionType)
- props := extensionProperties(e.desc)
-
- // If e.value has type T, the encoder expects a *struct{ X T }.
- // Pass a *T with a zero field and hope it all works out.
- x := reflect.New(et)
- x.Elem().Set(reflect.ValueOf(e.value))
- n += props.size(props, toStructPointer(x))
- }
- return
-}
-
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb Message, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
- epb, ok := extendable(pb)
- if !ok {
+ epb, err := extendable(pb)
+ if err != nil {
return false
}
extmap, mu := epb.extensionsRead()
@@ -336,15 +291,15 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool {
return false
}
mu.Lock()
- _, ok = extmap[extension.Field]
+ _, ok := extmap[extension.Field]
mu.Unlock()
return ok
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb Message, extension *ExtensionDesc) {
- epb, ok := extendable(pb)
- if !ok {
+ epb, err := extendable(pb)
+ if err != nil {
return
}
// TODO: Check types, field numbers, etc.?
@@ -352,16 +307,26 @@ func ClearExtension(pb Message, extension *ExtensionDesc) {
delete(extmap, extension.Field)
}
-// GetExtension parses and returns the given extension of pb.
-// If the extension is not present and has no default value it returns ErrMissingExtension.
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
- epb, ok := extendable(pb)
- if !ok {
- return nil, errors.New("proto: not an extendable proto")
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
}
- if err := checkExtensionTypes(epb, extension); err != nil {
- return nil, err
+ if extension.ExtendedType != nil {
+ // can only check type if this is a complete descriptor
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
}
emap, mu := epb.extensionsRead()
@@ -385,7 +350,12 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
- return e.value, nil
+ return extensionAsLegacyType(e.value), nil
+ }
+
+ if extension.ExtensionType == nil {
+ // incomplete descriptor
+ return e.enc, nil
}
v, err := decodeExtension(e.enc, extension)
@@ -395,16 +365,21 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
- e.value = v
+ e.value = extensionAsStorageType(v)
e.desc = extension
e.enc = nil
emap[extension.Field] = e
- return e.value, nil
+ return extensionAsLegacyType(e.value), nil
}
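
A hedged usage sketch of the behavior documented above; pb.Outer and pb.E_Name are hypothetical generated identifiers (an extendable proto2 message and a registered extension descriptor with ExtensionType *string), not defined in this diff, and the fragment assumes the proto, log, and fmt imports:

    m := &pb.Outer{} // hypothetical extendable message
    if err := proto.SetExtension(m, pb.E_Name, proto.String("x")); err != nil {
        log.Fatal(err)
    }
    v, err := proto.GetExtension(m, pb.E_Name)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(*v.(*string)) // "x": scalar extensions come back as *T
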
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ if extension.ExtensionType == nil {
+ // incomplete descriptor, so no default
+ return nil, ErrMissingExtension
+ }
+
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
@@ -439,31 +414,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
- o := NewBuffer(b)
-
t := reflect.TypeOf(extension.ExtensionType)
-
- props := extensionProperties(extension)
+ unmarshal := typeUnmarshaler(t, extension.Tag)
// t is a pointer to a struct, pointer to basic type or a slice.
- // Allocate a "field" to store the pointer/slice itself; the
- // pointer/slice will be stored here. We pass
- // the address of this field to props.dec.
- // This passes a zero field and a *t and lets props.dec
- // interpret it as a *struct{ x t }.
+ // Allocate space to store the pointer/slice.
value := reflect.New(t).Elem()
+ var err error
for {
- // Discard wire type and field number varint. It isn't needed.
- if _, err := o.DecodeVarint(); err != nil {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ wire := int(x) & 7
+
+ b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+ if err != nil {
return nil, err
}
- if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
- return nil, err
- }
-
- if o.index >= len(o.buf) {
+ if len(b) == 0 {
break
}
}
@@ -473,9 +445,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
- epb, ok := extendable(pb)
- if !ok {
- return nil, errors.New("proto: not an extendable proto")
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
}
extensions = make([]interface{}, len(es))
for i, e := range es {
@@ -494,9 +466,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
- epb, ok := extendable(pb)
- if !ok {
- return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
}
registeredExtensions := RegisteredExtensions(pb)
@@ -523,16 +495,16 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
- epb, ok := extendable(pb)
- if !ok {
- return errors.New("proto: not an extendable proto")
+ epb, err := extendable(pb)
+ if err != nil {
+ return err
}
if err := checkExtensionTypes(epb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
- return errors.New("proto: bad extension value type")
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
@@ -544,14 +516,14 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
}
extmap := epb.extensionsWrite()
- extmap[extension.Field] = Extension{desc: extension, value: value}
+ extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
return nil
}
// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
- epb, ok := extendable(pb)
- if !ok {
+ epb, err := extendable(pb)
+ if err != nil {
return
}
m := epb.extensionsWrite()
@@ -585,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) {
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
+
+// extensionAsLegacyType converts a value in the storage type to the API type.
+// See Extension.value.
+func extensionAsLegacyType(v interface{}) interface{} {
+ switch rv := reflect.ValueOf(v); rv.Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ // Represent primitive types as a pointer to the value.
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ case reflect.Ptr:
+ // Represent slice types as the value itself.
+ switch rv.Type().Elem().Kind() {
+ case reflect.Slice:
+ if rv.IsNil() {
+ v = reflect.Zero(rv.Type().Elem()).Interface()
+ } else {
+ v = rv.Elem().Interface()
+ }
+ }
+ }
+ return v
+}
+
+// extensionAsStorageType converts a value in the API type to the storage type.
+// See Extension.value.
+func extensionAsStorageType(v interface{}) interface{} {
+ switch rv := reflect.ValueOf(v); rv.Kind() {
+ case reflect.Ptr:
+ // Represent slice types as the value itself.
+ switch rv.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ if rv.IsNil() {
+ v = reflect.Zero(rv.Type().Elem()).Interface()
+ } else {
+ v = rv.Elem().Interface()
+ }
+ }
+ case reflect.Slice:
+ // Represent slice types as a pointer to the value.
+ if rv.Type().Elem().Kind() != reflect.Uint8 {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ }
+ }
+ return v
+}
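
Since both helpers are unexported, here is a standalone re-illustration of the scalar branch of extensionAsLegacyType rather than a call into it:

    package main

    import (
        "fmt"
        "reflect"
    )

    // asLegacyType mirrors the scalar case above: a stored T is returned
    // to callers as *T.
    func asLegacyType(v interface{}) interface{} {
        rv := reflect.ValueOf(v)
        switch rv.Kind() {
        case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32,
            reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
            p := reflect.New(rv.Type())
            p.Elem().Set(rv)
            return p.Interface()
        }
        return v
    }

    func main() {
        got := asLegacyType(int32(7))
        fmt.Printf("%T %v\n", got, *got.(*int32)) // *int32 7
    }
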
diff --git a/src/vendor/github.com/golang/protobuf/proto/lib.go b/src/vendor/github.com/golang/protobuf/proto/lib.go
index 1c225504a..fdd328bb7 100644
--- a/src/vendor/github.com/golang/protobuf/proto/lib.go
+++ b/src/vendor/github.com/golang/protobuf/proto/lib.go
@@ -273,6 +273,67 @@ import (
"sync"
)
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+ if e.field == "" {
+ return fmt.Sprintf("proto: required field not set")
+ }
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+ if e.field == "" {
+ return "proto: invalid UTF-8 detected"
+ }
+ return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+ return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+ if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+ return true
+ }
+ if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+ return true
+ }
+ return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether processing may continue:
+// it returns true for nil or non-fatal errors (retaining the first
+// non-fatal error in nf.E) and false for fatal errors.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+ if err == nil {
+ return true // not an error
+ }
+ if !isNonFatal(err) {
+ return false // fatal error
+ }
+ if nf.E == nil {
+ nf.E = err // store first instance of non-fatal error
+ }
+ return true
+}
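
From a caller's perspective, the non-fatal classification means a Marshal error need not always abort; a hedged sketch of tolerating the RequiredNotSet case (assumes the proto import):

    // marshalLenient returns whatever was encoded even when required
    // fields are unset, since that error is classified as non-fatal.
    func marshalLenient(msg proto.Message) ([]byte, error) {
        data, err := proto.Marshal(msg)
        if rns, ok := err.(interface{ RequiredNotSet() bool }); ok && rns.RequiredNotSet() {
            return data, nil // non-fatal: data holds all fields that were set
        }
        return data, err
    }
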
+
// Message is implemented by generated protocol buffer messages.
type Message interface {
Reset()
@@ -280,26 +341,6 @@ type Message interface {
ProtoMessage()
}
-// Stats records allocation details about the protocol buffer encoders
-// and decoders. Useful for tuning the library itself.
-type Stats struct {
- Emalloc uint64 // mallocs in encode
- Dmalloc uint64 // mallocs in decode
- Encode uint64 // number of encodes
- Decode uint64 // number of decodes
- Chit uint64 // number of cache hits
- Cmiss uint64 // number of cache misses
- Size uint64 // number of sizes
-}
-
-// Set to true to enable stats collection.
-const collectStats = false
-
-var stats Stats
-
-// GetStats returns a copy of the global Stats structure.
-func GetStats() Stats { return stats }
-
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
@@ -309,16 +350,7 @@ type Buffer struct {
buf []byte // encode/decode byte stream
index int // read point
- // pools of basic types to amortize allocation.
- bools []bool
- uint32s []uint32
- uint64s []uint64
-
- // extra pools, only used with pointer_reflect.go
- int32s []int32
- int64s []int64
- float32s []float32
- float64s []float64
+ deterministic bool
}
// NewBuffer allocates a new Buffer and initializes its internal data to
@@ -343,6 +375,30 @@ func (p *Buffer) SetBuf(s []byte) {
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+ p.deterministic = deterministic
+}
+
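
A usage sketch, where msg stands in for any generated message and the proto and log imports are assumed:

    buf := proto.NewBuffer(nil)
    buf.SetDeterministic(true)
    if err := buf.Marshal(msg); err != nil { // msg: assumed proto.Message
        log.Fatal(err)
    }
    // Within one binary, equal messages now produce identical bytes;
    // map entries are emitted in sorted key order.
    out := buf.Bytes()
    _ = out
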
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
@@ -831,22 +887,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes
return sf, false, nil
}
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
// Map fields may have key types of non-float scalars, strings and enums.
-// The easiest way to sort them in some deterministic order is to use fmt.
-// If this turns out to be inefficient we can always consider other options,
-// such as doing a Schwartzian transform.
-
func mapKeys(vs []reflect.Value) sort.Interface {
- s := mapKeySorter{
- vs: vs,
- // default Less function: textual comparison
- less: func(a, b reflect.Value) bool {
- return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
- },
- }
+ s := mapKeySorter{vs: vs}
- // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
- // numeric keys are sorted numerically.
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
if len(vs) == 0 {
return s
}
@@ -855,6 +901,12 @@ func mapKeys(vs []reflect.Value) sort.Interface {
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ case reflect.Bool:
+ s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+ case reflect.String:
+ s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+ default:
+ panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
}
return s
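
The same kind-specialized ordering can be expressed with sort.Slice; a standalone sketch for numeric keys:

    package main

    import (
        "fmt"
        "reflect"
        "sort"
    )

    func main() {
        m := map[int32]string{3: "c", 1: "a", 2: "b"}
        keys := reflect.ValueOf(m).MapKeys()
        // Numeric keys sort numerically, mirroring the Int32/Int64 case above.
        sort.Slice(keys, func(i, j int) bool { return keys[i].Int() < keys[j].Int() })
        for _, k := range keys {
            fmt.Println(k.Int(), m[int32(k.Int())])
        }
    }
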
@@ -888,10 +940,26 @@ func isProto3Zero(v reflect.Value) bool {
return false
}
-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion2 = true
+const (
+ // ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion3 = true
-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion1 = true
+ // ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion2 = true
+
+ // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+ // to assert that that code is compatible with this version of the proto package.
+ ProtoPackageIsVersion1 = true
+)
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+ marshal *marshalInfo
+ unmarshal *unmarshalInfo
+ merge *mergeInfo
+ discard *discardInfo
+}
diff --git a/src/vendor/github.com/golang/protobuf/proto/message_set.go b/src/vendor/github.com/golang/protobuf/proto/message_set.go
index fd982decd..f48a75676 100644
--- a/src/vendor/github.com/golang/protobuf/proto/message_set.go
+++ b/src/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -36,12 +36,7 @@ package proto
*/
import (
- "bytes"
- "encoding/json"
"errors"
- "fmt"
- "reflect"
- "sort"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -94,10 +89,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item {
}
func (ms *messageSet) Has(pb Message) bool {
- if ms.find(pb) != nil {
- return true
- }
- return false
+ return ms.find(pb) != nil
}
func (ms *messageSet) Unmarshal(pb Message) error {
@@ -147,50 +139,9 @@ func skipVarint(buf []byte) []byte {
return buf[i+1:]
}
-// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(exts interface{}) ([]byte, error) {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- if err := encodeExtensions(exts); err != nil {
- return nil, err
- }
- m, _ = exts.extensionsRead()
- case map[int32]Extension:
- if err := encodeExtensionsMap(exts); err != nil {
- return nil, err
- }
- m = exts
- default:
- return nil, errors.New("proto: not an extension map")
- }
-
- // Sort extension IDs to provide a deterministic encoding.
- // See also enc_map in encode.go.
- ids := make([]int, 0, len(m))
- for id := range m {
- ids = append(ids, int(id))
- }
- sort.Ints(ids)
-
- ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
- for _, id := range ids {
- e := m[int32(id)]
- // Remove the wire type and field number varint, as well as the length varint.
- msg := skipVarint(skipVarint(e.enc))
-
- ms.Item = append(ms.Item, &_MessageSet_Item{
- TypeId: Int32(int32(id)),
- Message: msg,
- })
- }
- return Marshal(ms)
-}
-
-// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
@@ -228,84 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
}
return nil
}
-
-// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
-// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
- var m map[int32]Extension
- switch exts := exts.(type) {
- case *XXX_InternalExtensions:
- m, _ = exts.extensionsRead()
- case map[int32]Extension:
- m = exts
- default:
- return nil, errors.New("proto: not an extension map")
- }
- var b bytes.Buffer
- b.WriteByte('{')
-
- // Process the map in key order for deterministic output.
- ids := make([]int32, 0, len(m))
- for id := range m {
- ids = append(ids, id)
- }
- sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
-
- for i, id := range ids {
- ext := m[id]
- if i > 0 {
- b.WriteByte(',')
- }
-
- msd, ok := messageSetMap[id]
- if !ok {
- // Unknown type; we can't render it, so skip it.
- continue
- }
- fmt.Fprintf(&b, `"[%s]":`, msd.name)
-
- x := ext.value
- if x == nil {
- x = reflect.New(msd.t.Elem()).Interface()
- if err := Unmarshal(ext.enc, x.(Message)); err != nil {
- return nil, err
- }
- }
- d, err := json.Marshal(x)
- if err != nil {
- return nil, err
- }
- b.Write(d)
- }
- b.WriteByte('}')
- return b.Bytes(), nil
-}
-
-// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
-// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
- // Common-case fast path.
- if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
- return nil
- }
-
- // This is fairly tricky, and it's not clear that it is needed.
- return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
-}
-
-// A global registry of types that can be used in a MessageSet.
-
-var messageSetMap = make(map[int32]messageSetDesc)
-
-type messageSetDesc struct {
- t reflect.Type // pointer to struct
- name string
-}
-
-// RegisterMessageSetType is called from the generated code.
-func RegisterMessageSetType(m Message, fieldNum int32, name string) {
- messageSetMap[fieldNum] = messageSetDesc{
- t: reflect.TypeOf(m),
- name: name,
- }
-}
diff --git a/src/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/src/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
index fb512e2e1..94fa9194a 100644
--- a/src/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
+++ b/src/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// +build appengine js
+// +build purego appengine js
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
@@ -38,32 +38,13 @@
package proto
import (
- "math"
"reflect"
+ "sync"
)
-// A structPointer is a pointer to a struct.
-type structPointer struct {
- v reflect.Value
-}
+const unsafeAllowed = false
-// toStructPointer returns a structPointer equivalent to the given reflect value.
-// The reflect value must itself be a pointer to a struct.
-func toStructPointer(v reflect.Value) structPointer {
- return structPointer{v}
-}
-
-// IsNil reports whether p is nil.
-func structPointer_IsNil(p structPointer) bool {
- return p.v.IsNil()
-}
-
-// Interface returns the struct pointer as an interface value.
-func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
- return p.v.Interface()
-}
-
-// A field identifies a field in a struct, accessible from a structPointer.
+// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
@@ -76,409 +57,304 @@ func toField(f *reflect.StructField) field {
// invalidField is an invalid field identifier.
var invalidField = field(nil)
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
-// field returns the given field in the struct as a reflect value.
-func structPointer_field(p structPointer, f field) reflect.Value {
- // Special case: an extension map entry with a value of type T
- // passes a *T to the struct-handling code with a zero field,
- // expecting that it will be treated as equivalent to *struct{ X T },
- // which has the same memory layout. We have to handle that case
- // specially, because reflect will panic if we call FieldByIndex on a
- // non-struct.
- if f == nil {
- return p.v.Elem()
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+ v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
+ v := reflect.ValueOf(*i)
+ u := reflect.New(v.Type())
+ u.Elem().Set(v)
+ if deref {
+ u = u.Elem()
}
-
- return p.v.Elem().FieldByIndex(f)
+ return pointer{v: u}
}
-// ifield returns the given field in the struct as an interface value.
-func structPointer_ifield(p structPointer, f field) interface{} {
- return structPointer_field(p, f).Addr().Interface()
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{v: v}
}
-// Bytes returns the address of a []byte field in the struct.
-func structPointer_Bytes(p structPointer, f field) *[]byte {
- return structPointer_ifield(p, f).(*[]byte)
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
}
-// BytesSlice returns the address of a [][]byte field in the struct.
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
- return structPointer_ifield(p, f).(*[][]byte)
-}
-
-// Bool returns the address of a *bool field in the struct.
-func structPointer_Bool(p structPointer, f field) **bool {
- return structPointer_ifield(p, f).(**bool)
-}
-
-// BoolVal returns the address of a bool field in the struct.
-func structPointer_BoolVal(p structPointer, f field) *bool {
- return structPointer_ifield(p, f).(*bool)
-}
-
-// BoolSlice returns the address of a []bool field in the struct.
-func structPointer_BoolSlice(p structPointer, f field) *[]bool {
- return structPointer_ifield(p, f).(*[]bool)
-}
-
-// String returns the address of a *string field in the struct.
-func structPointer_String(p structPointer, f field) **string {
- return structPointer_ifield(p, f).(**string)
-}
-
-// StringVal returns the address of a string field in the struct.
-func structPointer_StringVal(p structPointer, f field) *string {
- return structPointer_ifield(p, f).(*string)
-}
-
-// StringSlice returns the address of a []string field in the struct.
-func structPointer_StringSlice(p structPointer, f field) *[]string {
- return structPointer_ifield(p, f).(*[]string)
-}
-
-// Extensions returns the address of an extension map field in the struct.
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
- return structPointer_ifield(p, f).(*XXX_InternalExtensions)
-}
-
-// ExtMap returns the address of an extension map field in the struct.
-func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
- return structPointer_ifield(p, f).(*map[int32]Extension)
-}
-
-// NewAt returns the reflect.Value for a pointer to a field in the struct.
-func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
- return structPointer_field(p, f).Addr()
-}
-
-// SetStructPointer writes a *struct field in the struct.
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
- structPointer_field(p, f).Set(q.v)
-}
-
-// GetStructPointer reads a *struct field in the struct.
-func structPointer_GetStructPointer(p structPointer, f field) structPointer {
- return structPointer{structPointer_field(p, f)}
-}
-
-// StructPointerSlice the address of a []*struct field in the struct.
-func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
- return structPointerSlice{structPointer_field(p, f)}
-}
-
-// A structPointerSlice represents the address of a slice of pointers to structs
-// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
-type structPointerSlice struct {
- v reflect.Value
-}
-
-func (p structPointerSlice) Len() int { return p.v.Len() }
-func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
-func (p structPointerSlice) Append(q structPointer) {
- p.v.Set(reflect.Append(p.v, q.v))
-}
-
-var (
- int32Type = reflect.TypeOf(int32(0))
- uint32Type = reflect.TypeOf(uint32(0))
- float32Type = reflect.TypeOf(float32(0))
- int64Type = reflect.TypeOf(int64(0))
- uint64Type = reflect.TypeOf(uint64(0))
- float64Type = reflect.TypeOf(float64(0))
-)
-
-// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
-// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
-type word32 struct {
- v reflect.Value
-}
-
-// IsNil reports whether p is nil.
-func word32_IsNil(p word32) bool {
+func (p pointer) isNil() bool {
return p.v.IsNil()
}
-// Set sets p to point at a newly allocated word with bits set to x.
-func word32_Set(p word32, o *Buffer, x uint32) {
- t := p.v.Type().Elem()
- switch t {
- case int32Type:
- if len(o.int32s) == 0 {
- o.int32s = make([]int32, uint32PoolSize)
- }
- o.int32s[0] = int32(x)
- p.v.Set(reflect.ValueOf(&o.int32s[0]))
- o.int32s = o.int32s[1:]
- return
- case uint32Type:
- if len(o.uint32s) == 0 {
- o.uint32s = make([]uint32, uint32PoolSize)
- }
- o.uint32s[0] = x
- p.v.Set(reflect.ValueOf(&o.uint32s[0]))
- o.uint32s = o.uint32s[1:]
- return
- case float32Type:
- if len(o.float32s) == 0 {
- o.float32s = make([]float32, uint32PoolSize)
- }
- o.float32s[0] = math.Float32frombits(x)
- p.v.Set(reflect.ValueOf(&o.float32s[0]))
- o.float32s = o.float32s[1:]
- return
- }
-
- // must be enum
- p.v.Set(reflect.New(t))
- p.v.Elem().SetInt(int64(int32(x)))
-}
-
-// Get gets the bits pointed at by p, as a uint32.
-func word32_Get(p word32) uint32 {
- elem := p.v.Elem()
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
-}
-
-// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32(p structPointer, f field) word32 {
- return word32{structPointer_field(p, f)}
-}
-
-// A word32Val represents a field of type int32, uint32, float32, or enum.
-// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
-type word32Val struct {
- v reflect.Value
-}
-
-// Set sets *p to x.
-func word32Val_Set(p word32Val, x uint32) {
- switch p.v.Type() {
- case int32Type:
- p.v.SetInt(int64(x))
- return
- case uint32Type:
- p.v.SetUint(uint64(x))
- return
- case float32Type:
- p.v.SetFloat(float64(math.Float32frombits(x)))
- return
- }
-
- // must be enum
- p.v.SetInt(int64(int32(x)))
-}
-
-// Get gets the bits pointed at by p, as a uint32.
-func word32Val_Get(p word32Val) uint32 {
- elem := p.v
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
-}
-
-// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
-func structPointer_Word32Val(p structPointer, f field) word32Val {
- return word32Val{structPointer_field(p, f)}
-}
-
-// A word32Slice is a slice of 32-bit values.
-// That is, v.Type() is []int32, []uint32, []float32, or []enum.
-type word32Slice struct {
- v reflect.Value
-}
-
-func (p word32Slice) Append(x uint32) {
- n, m := p.v.Len(), p.v.Cap()
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+ n, m := s.Len(), s.Cap()
if n < m {
- p.v.SetLen(n + 1)
+ s.SetLen(n + 1)
} else {
- t := p.v.Type().Elem()
- p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
}
- elem := p.v.Index(n)
- switch elem.Kind() {
- case reflect.Int32:
- elem.SetInt(int64(int32(x)))
- case reflect.Uint32:
- elem.SetUint(uint64(x))
- case reflect.Float32:
- elem.SetFloat(float64(math.Float32frombits(x)))
+ return s.Index(n)
+}
+
+func (p pointer) toInt64() *int64 {
+ return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+ return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return p.v.Interface().(**int32)
+}
+ func (p pointer) toInt32Slice() *[]int32 {
+ return p.v.Interface().(*[]int32)
+}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().(*int32)
}
+ // an enum
+ return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ // Allocate value in a *int32. Possibly convert that to a *enum.
+ // Then assign it to a **int32 or **enum.
+ // Note: we can convert *int32 to *enum, but we can't convert
+ // **int32 to **enum!
+ p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
}
-func (p word32Slice) Len() int {
- return p.v.Len()
-}
-
-func (p word32Slice) Index(i int) uint32 {
- elem := p.v.Index(i)
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().([]int32)
}
- panic("unreachable")
+ // an enum
+ // Allocate a []int32, then assign []enum's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := p.v.Elem()
+ s := make([]int32, slice.Len())
+ for i := 0; i < slice.Len(); i++ {
+ s[i] = int32(slice.Index(i).Int())
+ }
+ return s
}
-// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
-func structPointer_Word32Slice(p structPointer, f field) word32Slice {
- return word32Slice{structPointer_field(p, f)}
-}
-
-// word64 is like word32 but for 64-bit values.
-type word64 struct {
- v reflect.Value
-}
-
-func word64_Set(p word64, o *Buffer, x uint64) {
- t := p.v.Type().Elem()
- switch t {
- case int64Type:
- if len(o.int64s) == 0 {
- o.int64s = make([]int64, uint64PoolSize)
- }
- o.int64s[0] = int64(x)
- p.v.Set(reflect.ValueOf(&o.int64s[0]))
- o.int64s = o.int64s[1:]
- return
- case uint64Type:
- if len(o.uint64s) == 0 {
- o.uint64s = make([]uint64, uint64PoolSize)
- }
- o.uint64s[0] = x
- p.v.Set(reflect.ValueOf(&o.uint64s[0]))
- o.uint64s = o.uint64s[1:]
- return
- case float64Type:
- if len(o.float64s) == 0 {
- o.float64s = make([]float64, uint64PoolSize)
- }
- o.float64s[0] = math.Float64frombits(x)
- p.v.Set(reflect.ValueOf(&o.float64s[0]))
- o.float64s = o.float64s[1:]
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ p.v.Elem().Set(reflect.ValueOf(v))
return
}
- panic("unreachable")
-}
-
-func word64_IsNil(p word64) bool {
- return p.v.IsNil()
-}
-
-func word64_Get(p word64) uint64 {
- elem := p.v.Elem()
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return elem.Uint()
- case reflect.Float64:
- return math.Float64bits(elem.Float())
+ // an enum
+ // Allocate a []enum, then assign []int32's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+ for i, x := range v {
+ slice.Index(i).SetInt(int64(x))
}
- panic("unreachable")
+ p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+ grow(p.v.Elem()).SetInt(int64(v))
}
-func structPointer_Word64(p structPointer, f field) word64 {
- return word64{structPointer_field(p, f)}
+func (p pointer) toUint64() *uint64 {
+ return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+ return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+ return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+ return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+ return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+ return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+ return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+ return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+ return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+ return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+ return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+ p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+ grow(p.v.Elem()).Set(q.v)
}
-// word64Val is like word32Val but for 64-bit values.
-type word64Val struct {
- v reflect.Value
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+ if p.v.IsNil() {
+ return nil
+ }
+ n := p.v.Elem().Len()
+ s := make([]pointer, n)
+ for i := 0; i < n; i++ {
+ s[i] = pointer{v: p.v.Elem().Index(i)}
+ }
+ return s
}
-func word64Val_Set(p word64Val, o *Buffer, x uint64) {
- switch p.v.Type() {
- case int64Type:
- p.v.SetInt(int64(x))
- return
- case uint64Type:
- p.v.SetUint(x)
- return
- case float64Type:
- p.v.SetFloat(math.Float64frombits(x))
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ if v == nil {
+ p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
return
}
- panic("unreachable")
-}
-
-func word64Val_Get(p word64Val) uint64 {
- elem := p.v
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return elem.Uint()
- case reflect.Float64:
- return math.Float64bits(elem.Float())
+ s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+ for _, p := range v {
+ s = reflect.Append(s, p.v)
}
- panic("unreachable")
+ p.v.Elem().Set(s)
}
-func structPointer_Word64Val(p structPointer, f field) word64Val {
- return word64Val{structPointer_field(p, f)}
-}
-
-type word64Slice struct {
- v reflect.Value
-}
-
-func (p word64Slice) Append(x uint64) {
- n, m := p.v.Len(), p.v.Cap()
- if n < m {
- p.v.SetLen(n + 1)
- } else {
- t := p.v.Type().Elem()
- p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
- }
- elem := p.v.Index(n)
- switch elem.Kind() {
- case reflect.Int64:
- elem.SetInt(int64(int64(x)))
- case reflect.Uint64:
- elem.SetUint(uint64(x))
- case reflect.Float64:
- elem.SetFloat(float64(math.Float64frombits(x)))
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ if p.v.Elem().IsNil() {
+ return pointer{v: p.v.Elem()}
}
+ return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
}
-func (p word64Slice) Len() int {
- return p.v.Len()
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ // TODO: check that p.v.Type().Elem() == t?
+ return p.v
}
-func (p word64Slice) Index(i int) uint64 {
- elem := p.v.Index(i)
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return uint64(elem.Uint())
- case reflect.Float64:
- return math.Float64bits(float64(elem.Float()))
- }
- panic("unreachable")
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
}
-func structPointer_Word64Slice(p structPointer, f field) word64Slice {
- return word64Slice{structPointer_field(p, f)}
-}
+var atomicLock sync.Mutex
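Note for reviewers: the rewritten pointer_reflect.go collapses the many `structPointer_*` accessors into a single generic `pointer` wrapping a `reflect.Value`; `offset` walks to a field via `FieldByIndex` and the typed accessors assert the result. It also gains the `atomicLoad*/atomicStore*` shims guarded by a global `sync.Mutex`, since this build avoids `unsafe` and therefore cannot use `atomic.LoadPointer`/`StorePointer` the way pointer_unsafe.go does. A minimal sketch of the core access pattern — the `msg` type is illustrative:

```go
package main

import (
	"fmt"
	"reflect"
)

// pointer wraps a reflect.Value of pointer type, as in the diff above.
type pointer struct{ v reflect.Value }

// field is a sequence of indices for reflect's FieldByIndex.
type field []int

// offset moves from a pointer-to-struct to a pointer-to-field.
func (p pointer) offset(f field) pointer {
	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
}

// toInt64 asserts the wrapped value back to a concrete *int64.
func (p pointer) toInt64() *int64 {
	return p.v.Interface().(*int64)
}

func main() {
	type msg struct{ A int64 }
	m := &msg{}
	p := pointer{v: reflect.ValueOf(m)}
	*p.offset(field{0}).toInt64() = 42 // write through the generic pointer
	fmt.Println(m.A)                   // 42
}
```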
diff --git a/src/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/src/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
index 6b5567d47..dbfffe071 100644
--- a/src/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ b/src/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// +build !appengine,!js
+// +build !purego,!appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe.
@@ -37,38 +37,13 @@ package proto
import (
"reflect"
+ "sync/atomic"
"unsafe"
)
-// NOTE: These type_Foo functions would more idiomatically be methods,
-// but Go does not allow methods on pointer types, and we must preserve
-// some pointer type for the garbage collector. We use these
-// funcs with clunky names as our poor approximation to methods.
-//
-// An alternative would be
-// type structPointer struct { p unsafe.Pointer }
-// but that does not registerize as well.
+const unsafeAllowed = true
-// A structPointer is a pointer to a struct.
-type structPointer unsafe.Pointer
-
-// toStructPointer returns a structPointer equivalent to the given reflect value.
-func toStructPointer(v reflect.Value) structPointer {
- return structPointer(unsafe.Pointer(v.Pointer()))
-}
-
-// IsNil reports whether p is nil.
-func structPointer_IsNil(p structPointer) bool {
- return p == nil
-}
-
-// Interface returns the struct pointer, assumed to have element type t,
-// as an interface value.
-func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
- return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
-}
-
-// A field identifies a field in a struct, accessible from a structPointer.
+// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr
@@ -80,191 +55,259 @@ func toField(f *reflect.StructField) field {
// invalidField is an invalid field identifier.
const invalidField = ^field(0)
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
- return f != ^field(0)
+ return f != invalidField
}
-// Bytes returns the address of a []byte field in the struct.
-func structPointer_Bytes(p structPointer, f field) *[]byte {
- return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+ p unsafe.Pointer
}
-// BytesSlice returns the address of a [][]byte field in the struct.
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
- return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ // Saves ~25ns over the equivalent:
+ // return valToPointer(reflect.ValueOf(*i))
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
-// Bool returns the address of a *bool field in the struct.
-func structPointer_Bool(p structPointer, f field) **bool {
- return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// BoolVal returns the address of a bool field in the struct.
-func structPointer_BoolVal(p structPointer, f field) *bool {
- return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// BoolSlice returns the address of a []bool field in the struct.
-func structPointer_BoolSlice(p structPointer, f field) *[]bool {
- return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// String returns the address of a *string field in the struct.
-func structPointer_String(p structPointer, f field) **string {
- return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// StringVal returns the address of a string field in the struct.
-func structPointer_StringVal(p structPointer, f field) *string {
- return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// StringSlice returns the address of a []string field in the struct.
-func structPointer_StringSlice(p structPointer, f field) *[]string {
- return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// ExtMap returns the address of an extension map field in the struct.
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
- return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
- return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// NewAt returns the reflect.Value for a pointer to a field in the struct.
-func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
- return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
-}
-
-// SetStructPointer writes a *struct field in the struct.
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
- *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
-}
-
-// GetStructPointer reads a *struct field in the struct.
-func structPointer_GetStructPointer(p structPointer, f field) structPointer {
- return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// StructPointerSlice the address of a []*struct field in the struct.
-func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
- return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
-type structPointerSlice []structPointer
-
-func (v *structPointerSlice) Len() int { return len(*v) }
-func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
-func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
-
-// A word32 is the address of a "pointer to 32-bit value" field.
-type word32 **uint32
-
-// IsNil reports whether *v is nil.
-func word32_IsNil(p word32) bool {
- return *p == nil
-}
-
-// Set sets *v to point at a newly allocated word set to x.
-func word32_Set(p word32, o *Buffer, x uint32) {
- if len(o.uint32s) == 0 {
- o.uint32s = make([]uint32, uint32PoolSize)
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
+ // Super-tricky - read or get the address of data word of interface value.
+ if isptr {
+ // The interface is of pointer type, thus it is a direct interface.
+ // The data word is the pointer data itself. We take its address.
+ p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ } else {
+ // The interface is not of pointer type. The data word is the pointer
+ // to the data.
+ p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
- o.uint32s[0] = x
- *p = &o.uint32s[0]
- o.uint32s = o.uint32s[1:]
-}
-
-// Get gets the value pointed at by *v.
-func word32_Get(p word32) uint32 {
- return **p
-}
-
-// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32(p structPointer, f field) word32 {
- return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// A word32Val is the address of a 32-bit value field.
-type word32Val *uint32
-
-// Set sets *p to x.
-func word32Val_Set(p word32Val, x uint32) {
- *p = x
-}
-
-// Get gets the value pointed at by p.
-func word32Val_Get(p word32Val) uint32 {
- return *p
-}
-
-// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32Val(p structPointer, f field) word32Val {
- return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
-}
-
-// A word32Slice is a slice of 32-bit values.
-type word32Slice []uint32
-
-func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
-func (v *word32Slice) Len() int { return len(*v) }
-func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
-
-// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
-func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
- return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
-
-// word64 is like word32 but for 64-bit values.
-type word64 **uint64
-
-func word64_Set(p word64, o *Buffer, x uint64) {
- if len(o.uint64s) == 0 {
- o.uint64s = make([]uint64, uint64PoolSize)
+ if deref {
+ p.p = *(*unsafe.Pointer)(p.p)
}
- o.uint64s[0] = x
- *p = &o.uint64s[0]
- o.uint64s = o.uint64s[1:]
+ return p
}
-func word64_IsNil(p word64) bool {
- return *p == nil
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{p: unsafe.Pointer(v.Pointer())}
}
-func word64_Get(p word64) uint64 {
- return **p
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ // For safety, we should panic if !f.IsValid(); however, calling panic causes
+ // this to no longer be inlineable, which is a serious performance cost.
+ /*
+ if !f.IsValid() {
+ panic("invalid field")
+ }
+ */
+ return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
}
-func structPointer_Word64(p structPointer, f field) word64 {
- return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+func (p pointer) isNil() bool {
+ return p.p == nil
}
-// word64Val is like word32Val but for 64-bit values.
-type word64Val *uint64
-
-func word64Val_Set(p word64Val, o *Buffer, x uint64) {
- *p = x
+func (p pointer) toInt64() *int64 {
+ return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+ return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+ return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+ return (*int32)(p.p)
}
-func word64Val_Get(p word64Val) uint64 {
- return *p
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return (**int32)(p.p)
+ }
+ func (p pointer) toInt32Slice() *[]int32 {
+ return (*[]int32)(p.p)
+ }
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ *(**int32)(p.p) = &v
}
-func structPointer_Word64Val(p structPointer, f field) word64Val {
- return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+ return *(*[]int32)(p.p)
}
-// word64Slice is like word32Slice but for 64-bit values.
-type word64Slice []uint64
-
-func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
-func (v *word64Slice) Len() int { return len(*v) }
-func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
-
-func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
- return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+ *(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+ s := (*[]int32)(p.p)
+ *s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+ return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+ return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+ return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+ return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+ return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+ return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+ return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+ return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+ return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+ return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+ return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+ return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+ return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+ return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We load it as []pointer.
+ return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We store it as []pointer.
+ *(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+ return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+ *(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+ s := (*[]unsafe.Pointer)(p.p)
+ *s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
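Note for reviewers: pointer_unsafe.go implements the same interface with raw `unsafe.Pointer` arithmetic — here a `field` is a byte offset rather than a `FieldByIndex` path, and the atomic shims use real `sync/atomic` pointer operations instead of a mutex. A minimal sketch of the offset-based access, mirroring the `offset` method added above; the `msg` struct is illustrative:

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// pointer wraps a raw unsafe.Pointer, as in the diff above.
type pointer struct{ p unsafe.Pointer }

// field is a byte offset from the start of the struct.
type field uintptr

// offset adds the field's byte offset to the struct's base address.
func (p pointer) offset(f field) pointer {
	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
}

// toInt64 reinterprets the address as a concrete *int64.
func (p pointer) toInt64() *int64 { return (*int64)(p.p) }

func main() {
	type msg struct {
		A int32
		B int64
	}
	m := &msg{}
	f, _ := reflect.TypeOf(msg{}).FieldByName("B")
	p := pointer{p: unsafe.Pointer(m)}
	*p.offset(field(f.Offset)).toInt64() = 42 // write field B by byte offset
	fmt.Println(m.B)                          // 42
}
```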
diff --git a/src/vendor/github.com/golang/protobuf/proto/properties.go b/src/vendor/github.com/golang/protobuf/proto/properties.go
index ec2289c00..79668ff5c 100644
--- a/src/vendor/github.com/golang/protobuf/proto/properties.go
+++ b/src/vendor/github.com/golang/protobuf/proto/properties.go
@@ -58,42 +58,6 @@ const (
WireFixed32 = 5
)
-const startSize = 10 // initial slice/string sizes
-
-// Encoders are defined in encode.go
-// An encoder outputs the full representation of a field, including its
-// tag and encoder type.
-type encoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueEncoder encodes a single integer in a particular encoding.
-type valueEncoder func(o *Buffer, x uint64) error
-
-// Sizers are defined in encode.go
-// A sizer returns the encoded size of a field, including its tag and encoder
-// type.
-type sizer func(prop *Properties, base structPointer) int
-
-// A valueSizer returns the encoded size of a single integer in a particular
-// encoding.
-type valueSizer func(x uint64) int
-
-// Decoders are defined in decode.go
-// A decoder creates a value from its wire representation.
-// Unrecognized subelements are saved in unrec.
-type decoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueDecoder decodes a single integer in a particular encoding.
-type valueDecoder func(o *Buffer) (x uint64, err error)
-
-// A oneofMarshaler does the marshaling for all oneof fields in a message.
-type oneofMarshaler func(Message, *Buffer) error
-
-// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
-type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
-
-// A oneofSizer does the sizing for all oneof fields in a message.
-type oneofSizer func(Message) int
-
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
@@ -140,13 +104,6 @@ type StructProperties struct {
decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order
- unrecField field // field id of the XXX_unrecognized []byte field
- extendable bool // is this an extendable proto
-
- oneofMarshaler oneofMarshaler
- oneofUnmarshaler oneofUnmarshaler
- oneofSizer oneofSizer
- stype reflect.Type
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
@@ -182,41 +139,24 @@ type Properties struct {
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
- proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ proto3 bool // whether this is known to be a proto3 field
oneof bool // whether this is a oneof field
Default string // default value
HasDefault bool // whether an explicit default was provided
- def_uint64 uint64
- enc encoder
- valEnc valueEncoder // set for bool and numeric types only
- field field
- tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
- tagbuf [8]byte
- stype reflect.Type // set for struct types only
- sprop *StructProperties // set for struct types only
- isMarshaler bool
- isUnmarshaler bool
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
- mtype reflect.Type // set for map types only
- mkeyprop *Properties // set for map types only
- mvalprop *Properties // set for map types only
-
- size sizer
- valSize valueSizer // set for bool and numeric types only
-
- dec decoder
- valDec valueDecoder // set for bool and numeric types only
-
- // If this is a packable field, this will be the decoder for the packed version of the field.
- packedDec decoder
+ mtype reflect.Type // set for map types only
+ MapKeyProp *Properties // set for map types only
+ MapValProp *Properties // set for map types only
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
- s = ","
+ s += ","
s += strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
@@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) {
switch p.Wire {
case "varint":
p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeVarint
- p.valDec = (*Buffer).DecodeVarint
- p.valSize = sizeVarint
case "fixed32":
p.WireType = WireFixed32
- p.valEnc = (*Buffer).EncodeFixed32
- p.valDec = (*Buffer).DecodeFixed32
- p.valSize = sizeFixed32
case "fixed64":
p.WireType = WireFixed64
- p.valEnc = (*Buffer).EncodeFixed64
- p.valDec = (*Buffer).DecodeFixed64
- p.valSize = sizeFixed64
case "zigzag32":
p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeZigzag32
- p.valDec = (*Buffer).DecodeZigzag32
- p.valSize = sizeZigzag32
case "zigzag64":
p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeZigzag64
- p.valDec = (*Buffer).DecodeZigzag64
- p.valSize = sizeZigzag64
case "bytes", "group":
p.WireType = WireBytes
// no numeric converter for non-numeric types
@@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) {
return
}
+outer:
for i := 2; i < len(fields); i++ {
f := fields[i]
switch {
@@ -326,256 +252,41 @@ func (p *Properties) Parse(s string) {
if i+1 < len(fields) {
// Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",")
- break
+ break outer
}
}
}
}
-func logNoSliceEnc(t1, t2 reflect.Type) {
- fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
-}
-
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-// Initialize the fields for encoding and decoding.
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
- p.enc = nil
- p.dec = nil
- p.size = nil
-
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
switch t1 := typ; t1.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
-
- // proto3 scalar types
-
- case reflect.Bool:
- p.enc = (*Buffer).enc_proto3_bool
- p.dec = (*Buffer).dec_proto3_bool
- p.size = size_proto3_bool
- case reflect.Int32:
- p.enc = (*Buffer).enc_proto3_int32
- p.dec = (*Buffer).dec_proto3_int32
- p.size = size_proto3_int32
- case reflect.Uint32:
- p.enc = (*Buffer).enc_proto3_uint32
- p.dec = (*Buffer).dec_proto3_int32 // can reuse
- p.size = size_proto3_uint32
- case reflect.Int64, reflect.Uint64:
- p.enc = (*Buffer).enc_proto3_int64
- p.dec = (*Buffer).dec_proto3_int64
- p.size = size_proto3_int64
- case reflect.Float32:
- p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
- p.dec = (*Buffer).dec_proto3_int32
- p.size = size_proto3_uint32
- case reflect.Float64:
- p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
- p.dec = (*Buffer).dec_proto3_int64
- p.size = size_proto3_int64
- case reflect.String:
- p.enc = (*Buffer).enc_proto3_string
- p.dec = (*Buffer).dec_proto3_string
- p.size = size_proto3_string
-
case reflect.Ptr:
- switch t2 := t1.Elem(); t2.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
- break
- case reflect.Bool:
- p.enc = (*Buffer).enc_bool
- p.dec = (*Buffer).dec_bool
- p.size = size_bool
- case reflect.Int32:
- p.enc = (*Buffer).enc_int32
- p.dec = (*Buffer).dec_int32
- p.size = size_int32
- case reflect.Uint32:
- p.enc = (*Buffer).enc_uint32
- p.dec = (*Buffer).dec_int32 // can reuse
- p.size = size_uint32
- case reflect.Int64, reflect.Uint64:
- p.enc = (*Buffer).enc_int64
- p.dec = (*Buffer).dec_int64
- p.size = size_int64
- case reflect.Float32:
- p.enc = (*Buffer).enc_uint32 // can just treat them as bits
- p.dec = (*Buffer).dec_int32
- p.size = size_uint32
- case reflect.Float64:
- p.enc = (*Buffer).enc_int64 // can just treat them as bits
- p.dec = (*Buffer).dec_int64
- p.size = size_int64
- case reflect.String:
- p.enc = (*Buffer).enc_string
- p.dec = (*Buffer).dec_string
- p.size = size_string
- case reflect.Struct:
+ if t1.Elem().Kind() == reflect.Struct {
p.stype = t1.Elem()
- p.isMarshaler = isMarshaler(t1)
- p.isUnmarshaler = isUnmarshaler(t1)
- if p.Wire == "bytes" {
- p.enc = (*Buffer).enc_struct_message
- p.dec = (*Buffer).dec_struct_message
- p.size = size_struct_message
- } else {
- p.enc = (*Buffer).enc_struct_group
- p.dec = (*Buffer).dec_struct_group
- p.size = size_struct_group
- }
}
case reflect.Slice:
- switch t2 := t1.Elem(); t2.Kind() {
- default:
- logNoSliceEnc(t1, t2)
- break
- case reflect.Bool:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_bool
- p.size = size_slice_packed_bool
- } else {
- p.enc = (*Buffer).enc_slice_bool
- p.size = size_slice_bool
- }
- p.dec = (*Buffer).dec_slice_bool
- p.packedDec = (*Buffer).dec_slice_packed_bool
- case reflect.Int32:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int32
- p.size = size_slice_packed_int32
- } else {
- p.enc = (*Buffer).enc_slice_int32
- p.size = size_slice_int32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case reflect.Uint32:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_uint32
- p.size = size_slice_packed_uint32
- } else {
- p.enc = (*Buffer).enc_slice_uint32
- p.size = size_slice_uint32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case reflect.Int64, reflect.Uint64:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int64
- p.size = size_slice_packed_int64
- } else {
- p.enc = (*Buffer).enc_slice_int64
- p.size = size_slice_int64
- }
- p.dec = (*Buffer).dec_slice_int64
- p.packedDec = (*Buffer).dec_slice_packed_int64
- case reflect.Uint8:
- p.dec = (*Buffer).dec_slice_byte
- if p.proto3 {
- p.enc = (*Buffer).enc_proto3_slice_byte
- p.size = size_proto3_slice_byte
- } else {
- p.enc = (*Buffer).enc_slice_byte
- p.size = size_slice_byte
- }
- case reflect.Float32, reflect.Float64:
- switch t2.Bits() {
- case 32:
- // can just treat them as bits
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_uint32
- p.size = size_slice_packed_uint32
- } else {
- p.enc = (*Buffer).enc_slice_uint32
- p.size = size_slice_uint32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case 64:
- // can just treat them as bits
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int64
- p.size = size_slice_packed_int64
- } else {
- p.enc = (*Buffer).enc_slice_int64
- p.size = size_slice_int64
- }
- p.dec = (*Buffer).dec_slice_int64
- p.packedDec = (*Buffer).dec_slice_packed_int64
- default:
- logNoSliceEnc(t1, t2)
- break
- }
- case reflect.String:
- p.enc = (*Buffer).enc_slice_string
- p.dec = (*Buffer).dec_slice_string
- p.size = size_slice_string
- case reflect.Ptr:
- switch t3 := t2.Elem(); t3.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
- break
- case reflect.Struct:
- p.stype = t2.Elem()
- p.isMarshaler = isMarshaler(t2)
- p.isUnmarshaler = isUnmarshaler(t2)
- if p.Wire == "bytes" {
- p.enc = (*Buffer).enc_slice_struct_message
- p.dec = (*Buffer).dec_slice_struct_message
- p.size = size_slice_struct_message
- } else {
- p.enc = (*Buffer).enc_slice_struct_group
- p.dec = (*Buffer).dec_slice_struct_group
- p.size = size_slice_struct_group
- }
- }
- case reflect.Slice:
- switch t2.Elem().Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
- break
- case reflect.Uint8:
- p.enc = (*Buffer).enc_slice_slice_byte
- p.dec = (*Buffer).dec_slice_slice_byte
- p.size = size_slice_slice_byte
- }
+ if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+ p.stype = t2.Elem()
}
case reflect.Map:
- p.enc = (*Buffer).enc_new_map
- p.dec = (*Buffer).dec_new_map
- p.size = size_new_map
-
p.mtype = t1
- p.mkeyprop = &Properties{}
- p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
- p.mvalprop = &Properties{}
+ p.MapKeyProp = &Properties{}
+ p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.MapValProp = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
- p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
- // precalculate tag code
- wire := p.WireType
- if p.Packed {
- wire = WireBytes
- }
- x := uint32(p.Tag)<<3 | uint32(wire)
- i := 0
- for i = 0; x > 127; i++ {
- p.tagbuf[i] = 0x80 | uint8(x&0x7F)
- x >>= 7
- }
- p.tagbuf[i] = uint8(x)
- p.tagcode = p.tagbuf[0 : i+1]
-
if p.stype != nil {
if lockGetProp {
p.sprop = GetProperties(p.stype)
@@ -586,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
}
var (
- marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
- unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
)
-// isMarshaler reports whether type t implements Marshaler.
-func isMarshaler(t reflect.Type) bool {
- // We're checking for (likely) pointer-receiver methods
- // so if t is not a pointer, something is very wrong.
- // The calls above only invoke isMarshaler on pointer types.
- if t.Kind() != reflect.Ptr {
- panic("proto: misuse of isMarshaler")
- }
- return t.Implements(marshalerType)
-}
-
-// isUnmarshaler reports whether type t implements Unmarshaler.
-func isUnmarshaler(t reflect.Type) bool {
- // We're checking for (likely) pointer-receiver methods
- // so if t is not a pointer, something is very wrong.
- // The calls above only invoke isUnmarshaler on pointer types.
- if t.Kind() != reflect.Ptr {
- panic("proto: misuse of isUnmarshaler")
- }
- return t.Implements(unmarshalerType)
-}
-
// Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.init(typ, name, tag, f, true)
@@ -621,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF
// "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
- if f != nil {
- p.field = toField(f)
- }
if tag == "" {
return
}
p.Parse(tag)
- p.setEncAndDec(typ, f, lockGetProp)
+ p.setFieldProps(typ, f, lockGetProp)
}
var (
@@ -649,9 +334,6 @@ func GetProperties(t reflect.Type) *StructProperties {
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
- if collectStats {
- stats.Chit++
- }
return sprop
}
@@ -661,26 +343,26 @@ func GetProperties(t reflect.Type) *StructProperties {
return sprop
}
+type (
+ oneofFuncsIface interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ oneofWrappersIface interface {
+ XXX_OneofWrappers() []interface{}
+ }
+)
+
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
- if collectStats {
- stats.Chit++
- }
return prop
}
- if collectStats {
- stats.Cmiss++
- }
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
propertiesMap[t] = prop
// build properties
- prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
- reflect.PtrTo(t).Implements(extendableProtoV1Type)
- prop.unrecField = invalidField
prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField())
@@ -690,17 +372,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
- if f.Name == "XXX_InternalExtensions" { // special case
- p.enc = (*Buffer).enc_exts
- p.dec = nil // not needed
- p.size = size_exts
- } else if f.Name == "XXX_extensions" { // special case
- p.enc = (*Buffer).enc_map
- p.dec = nil // not needed
- p.size = size_map
- } else if f.Name == "XXX_unrecognized" { // special case
- prop.unrecField = toField(&f)
- }
oneof := f.Tag.Get("protobuf_oneof") // special case
if oneof != "" {
// Oneof fields don't use the traditional protobuf tag.
@@ -715,22 +386,19 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
}
print("\n")
}
- if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
- fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
- }
}
// Re-order prop.order.
sort.Sort(prop)
- type oneofMessage interface {
- XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ var oots []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oots = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oots = m.XXX_OneofWrappers()
}
- if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
- var oots []interface{}
- prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
- prop.stype = t
-
+ if len(oots) > 0 {
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
@@ -779,30 +447,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
return prop
}
-// Return the Properties object for the x[0]'th field of the structure.
-func propByIndex(t reflect.Type, x []int) *Properties {
- if len(x) != 1 {
- fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
- return nil
- }
- prop := GetProperties(t)
- return prop.Prop[x[0]]
-}
-
-// Get the address and type of a pointer to a struct from an interface.
-func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
- if pb == nil {
- err = ErrNil
- return
- }
- // get the reflect type of the pointer to the struct.
- t = reflect.TypeOf(pb)
- // get the address of the struct.
- value := reflect.ValueOf(pb)
- b = toStructPointer(value)
- return
-}
-
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.
@@ -826,20 +470,42 @@ func EnumValueMap(enumType string) map[string]int32 {
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
- protoTypes = make(map[string]reflect.Type)
- revProtoTypes = make(map[reflect.Type]string)
+ protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
+ protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
+ revProtoTypes = make(map[reflect.Type]string)
)
// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
- if _, ok := protoTypes[name]; ok {
+ if _, ok := protoTypedNils[name]; ok {
// TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
- protoTypes[name] = t
+ if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+ // Generated code always calls RegisterType with nil x.
+ // This check is just for extra safety.
+ protoTypedNils[name] = x
+ } else {
+ protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+ }
+ revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+ if reflect.TypeOf(x).Kind() != reflect.Map {
+ panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+ }
+ if _, ok := protoMapTypes[name]; ok {
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoMapTypes[name] = t
revProtoTypes[t] = name
}
@@ -855,7 +521,14 @@ func MessageName(x Message) string {
}
// MessageType returns the message type (pointer to struct) for a named message.
-func MessageType(name string) reflect.Type { return protoTypes[name] }
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+ if t, ok := protoTypedNils[name]; ok {
+ return reflect.TypeOf(t)
+ }
+ return protoMapTypes[name]
+}
// A registry of all linked proto files.
var (
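Note for reviewers: with the hand-written encoder/decoder plumbing removed, `Properties.Parse` now only records metadata — wire encoding, tag number, and options — from struct tags like `protobuf:"varint,1,opt,name=field_name"`. A simplified sketch of that parsing shape follows; this is not the real method, which also handles defaults, enums, oneofs, and proto3 markers:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// props is a trimmed-down version of what Properties.Parse extracts.
type props struct {
	Wire     string
	Tag      int
	Required bool
	Optional bool
	OrigName string
}

// parse splits a protobuf struct tag of the form "wire,tag,opt,name=...".
func parse(tag string) (p props, err error) {
	fields := strings.Split(tag, ",")
	if len(fields) < 2 {
		return p, fmt.Errorf("tag has too few fields: %q", tag)
	}
	p.Wire = fields[0]
	p.Tag, err = strconv.Atoi(fields[1])
	if err != nil {
		return p, err
	}
	for _, f := range fields[2:] {
		switch {
		case f == "req":
			p.Required = true
		case f == "opt":
			p.Optional = true
		case strings.HasPrefix(f, "name="):
			p.OrigName = f[len("name="):]
		}
	}
	return p, nil
}

func main() {
	p, _ := parse("varint,1,opt,name=field_name")
	fmt.Printf("%+v\n", p) // {Wire:varint Tag:1 Required:false Optional:true OrigName:field_name}
}
```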
diff --git a/src/vendor/github.com/golang/protobuf/proto/table_marshal.go b/src/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 000000000..5cb11fa95
--- /dev/null
+++ b/src/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2776 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// A sizer takes a pointer to a field and the size of its tag, and computes
+// the size of the encoded data.
+type sizer func(pointer, int) int
+
+// A marshaler takes a byte slice, a pointer to a field, and its tag (in wire
+// format); it marshals the field to the end of the slice and returns the
+// slice and an error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
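+
+// Each field of a message ends up with one (sizer, marshaler) pair chosen by
+// typeMarshaler; e.g. an optional uint32 varint field is sized by
+// sizeVarint32Ptr and appended by appendVarint32Ptr.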
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+ typ reflect.Type
+ fields []*marshalFieldInfo
+ unrecognized field // offset of XXX_unrecognized
+ extensions field // offset of XXX_InternalExtensions
+ v1extensions field // offset of XXX_extensions
+ sizecache field // offset of XXX_sizecache
+ initialized int32 // 0 -- only typ is set, 1 -- fully initialized
+ messageset bool // uses message set wire format
+ hasmarshaler bool // has custom marshaler
+ sync.RWMutex // protect extElems map, also for initialization
+ extElems map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+ field field
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isPointer bool
+ required bool // field is required
+ name string // name of the field, for error reporting
+ oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+ deref bool // dereference the pointer before operating on it; implies isptr
+}
+
+var (
+ marshalInfoMap = map[reflect.Type]*marshalInfo{}
+ marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns is not necessarily initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+ marshalInfoLock.Lock()
+ u, ok := marshalInfoMap[t]
+ if !ok {
+ u = &marshalInfo{typ: t}
+ marshalInfoMap[t] = u
+ }
+ marshalInfoLock.Unlock()
+ return u
+}
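+
+// getMarshalInfo only reserves a slot in the map; the reflection-heavy work
+// happens lazily in computeMarshalInfo on the first size or marshal call.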
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+	// catch it. We don't want to crash in this case.
+ return 0
+ }
+ return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+	// catch it. We don't want to crash in this case.
+ return b, ErrNil
+ }
+ return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+ // u := a.marshal, but atomically.
+ // We use an atomic here to ensure memory consistency.
+ u := atomicLoadMarshalInfo(&a.marshal)
+ if u == nil {
+ // Get marshal information from type of message.
+ t := reflect.ValueOf(msg).Type()
+ if t.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+ }
+ u = getMarshalInfo(t.Elem())
+ // Store it in the cache for later users.
+ // a.marshal = u, but atomically.
+ atomicStoreMarshalInfo(&a.marshal, u)
+ }
+ return u
+}
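+
+// This is a double-checked cache: the first call for a message type pays for
+// a reflect lookup, then stores the *marshalInfo in the caller-provided
+// InternalMessageInfo so later calls are a single atomic load.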
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ n := 0
+ for _, f := range u.fields {
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ n += f.sizer(ptr.offset(f.field), f.tagsize)
+ }
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ n += u.sizeMessageSet(e)
+ } else {
+ n += u.sizeExtensions(e)
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ n += u.sizeV1Extensions(m)
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ n += len(s)
+ }
+ // cache the result for use in marshal
+ if u.sizecache.IsValid() {
+ atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+ }
+ return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the
+// message is not generated), it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+ if u.sizecache.IsValid() {
+ return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+ }
+ return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice,
+// appends the encoded data to the end of the slice, and returns the slice
+// and an error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, maps are marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b1, err := m.Marshal()
+ b = append(b, b1...)
+ return b, err
+ }
+
+ var err, errLater error
+	// The old marshaler encodes extensions at the beginning.
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ b, err = u.appendMessageSet(b, e, deterministic)
+ } else {
+ b, err = u.appendExtensions(b, e, deterministic)
+ }
+ if err != nil {
+ return b, err
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ b, err = u.appendV1Extensions(b, m, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ for _, f := range u.fields {
+ if f.required {
+ if ptr.offset(f.field).getPointer().isNil() {
+ // Required field is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name}
+ }
+ continue
+ }
+ }
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+ if err != nil {
+ if err1, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errLater == nil {
+ errLater = &RequiredNotSetError{f.name + "." + err1.field}
+ }
+ continue
+ }
+ if err == errRepeatedHasNil {
+ err = errors.New("proto: repeated field " + f.name + " has nil element")
+ }
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return b, err
+ }
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ b = append(b, s...)
+ }
+ return b, errLater
+}
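+
+// Note that required-not-set and invalid-UTF-8 problems are recorded in
+// errLater but do not abort marshaling, so the caller receives the complete
+// encoding of every marshalable field along with the first such error.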
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+ u.Lock()
+ defer u.Unlock()
+ if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+ return
+ }
+
+ t := u.typ
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.v1extensions = invalidField
+ u.sizecache = invalidField
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ u.hasmarshaler = true
+ atomic.StoreInt32(&u.initialized, 1)
+ return
+ }
+
+ // get oneof implementers
+ var oneofImplementers []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+
+ n := t.NumField()
+
+ // deal with XXX fields first
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if !strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ switch f.Name {
+ case "XXX_sizecache":
+ u.sizecache = toField(&f)
+ case "XXX_unrecognized":
+ u.unrecognized = toField(&f)
+ case "XXX_InternalExtensions":
+ u.extensions = toField(&f)
+ u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+ case "XXX_extensions":
+ u.v1extensions = toField(&f)
+ case "XXX_NoUnkeyedLiteral":
+ // nothing to do
+ default:
+ panic("unknown XXX field: " + f.Name)
+ }
+ n--
+ }
+
+ // normal fields
+ fields := make([]marshalFieldInfo, n) // batch allocation
+ u.fields = make([]*marshalFieldInfo, 0, n)
+ for i, j := 0, 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ field := &fields[j]
+ j++
+ field.name = f.Name
+ u.fields = append(u.fields, field)
+ if f.Tag.Get("protobuf_oneof") != "" {
+ field.computeOneofFieldInfo(&f, oneofImplementers)
+ continue
+ }
+ if f.Tag.Get("protobuf") == "" {
+ // field has no tag (not in generated message), ignore it
+ u.fields = u.fields[:len(u.fields)-1]
+ j--
+ continue
+ }
+ field.computeMarshalFieldInfo(&f)
+ }
+
+ // fields are marshaled in tag order on the wire.
+ sort.Sort(byTag(u.fields))
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
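+
+// After computeMarshalInfo runs once, u.fields holds one entry per real
+// struct field (the XXX_* bookkeeping fields are recorded separately as
+// offsets), sorted by wire tag so output follows field-number order.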
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int { return len(a) }
+func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+ // get from cache first
+ u.RLock()
+ e, ok := u.extElems[desc.Field]
+ u.RUnlock()
+ if ok {
+ return e
+ }
+
+ t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+ tags := strings.Split(desc.Tag, ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+ t = t.Elem()
+ }
+ sizer, marshaler := typeMarshaler(t, tags, false, false)
+ var deref bool
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ t = reflect.PtrTo(t)
+ deref = true
+ }
+ e = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ isptr: t.Kind() == reflect.Ptr,
+ deref: deref,
+ }
+
+ // update cache
+ u.Lock()
+ if u.extElems == nil {
+ u.extElems = make(map[int32]*marshalElemInfo)
+ }
+ u.extElems[desc.Field] = e
+ u.Unlock()
+ return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+	// Parse the protobuf tag of the field.
+	// The tag has the format "bytes,49,opt,name=foo,def=hello!".
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ if tags[0] == "" {
+ return
+ }
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if tags[2] == "req" {
+ fi.required = true
+ }
+ fi.setTag(f, tag, wt)
+ fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+ fi.field = toField(f)
+	fi.wiretag = math.MaxInt32 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+ fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+ ityp := f.Type // interface type
+ for _, o := range oneofImplementers {
+ t := reflect.TypeOf(o)
+ if !t.Implements(ityp) {
+ continue
+ }
+ sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+ tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+ fi.oneofElems[t.Elem()] = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ }
+ }
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+ switch encoding {
+ case "fixed32":
+ return WireFixed32
+ case "fixed64":
+ return WireFixed64
+ case "varint", "zigzag32", "zigzag64":
+ return WireVarint
+ case "bytes":
+ return WireBytes
+ case "group":
+ return WireStartGroup
+ }
+ panic("unknown wire type " + encoding)
+}
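+
+// A field's wire tag is fieldNumber<<3 | wiretype. For example, a "varint"
+// field with number 5 has wire tag 5<<3|0 = 0x28, and a "bytes" field with
+// number 1 has wire tag 1<<3|2 = 0x0a.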
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+ fi.field = toField(f)
+ fi.wiretag = uint64(tag)<<3 | wt
+ fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+ switch f.Type.Kind() {
+ case reflect.Map:
+ // map field
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeMapMarshaler(f)
+ return
+ case reflect.Ptr, reflect.Slice:
+ fi.isPointer = true
+ }
+ fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, the zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+ encoding := tags[0]
+
+ pointer := false
+ slice := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ packed := false
+ proto3 := false
+ validateUTF8 := true
+ for i := 2; i < len(tags); i++ {
+ if tags[i] == "packed" {
+ packed = true
+ }
+ if tags[i] == "proto3" {
+ proto3 = true
+ }
+ }
+ validateUTF8 = validateUTF8 && proto3
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return sizeBoolPtr, appendBoolPtr
+ }
+ if slice {
+ if packed {
+ return sizeBoolPackedSlice, appendBoolPackedSlice
+ }
+ return sizeBoolSlice, appendBoolSlice
+ }
+ if nozero {
+ return sizeBoolValueNoZero, appendBoolValueNoZero
+ }
+ return sizeBoolValue, appendBoolValue
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixed32Ptr, appendFixed32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed32PackedSlice, appendFixed32PackedSlice
+ }
+ return sizeFixed32Slice, appendFixed32Slice
+ }
+ if nozero {
+ return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+ }
+ return sizeFixed32Value, appendFixed32Value
+ case "varint":
+ if pointer {
+ return sizeVarint32Ptr, appendVarint32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint32PackedSlice, appendVarint32PackedSlice
+ }
+ return sizeVarint32Slice, appendVarint32Slice
+ }
+ if nozero {
+ return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+ }
+ return sizeVarint32Value, appendVarint32Value
+ }
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixedS32Ptr, appendFixedS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+ }
+ return sizeFixedS32Slice, appendFixedS32Slice
+ }
+ if nozero {
+ return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+ }
+ return sizeFixedS32Value, appendFixedS32Value
+ case "varint":
+ if pointer {
+ return sizeVarintS32Ptr, appendVarintS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+ }
+ return sizeVarintS32Slice, appendVarintS32Slice
+ }
+ if nozero {
+ return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+ }
+ return sizeVarintS32Value, appendVarintS32Value
+ case "zigzag32":
+ if pointer {
+ return sizeZigzag32Ptr, appendZigzag32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+ }
+ return sizeZigzag32Slice, appendZigzag32Slice
+ }
+ if nozero {
+ return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+ }
+ return sizeZigzag32Value, appendZigzag32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixed64Ptr, appendFixed64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed64PackedSlice, appendFixed64PackedSlice
+ }
+ return sizeFixed64Slice, appendFixed64Slice
+ }
+ if nozero {
+ return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+ }
+ return sizeFixed64Value, appendFixed64Value
+ case "varint":
+ if pointer {
+ return sizeVarint64Ptr, appendVarint64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint64PackedSlice, appendVarint64PackedSlice
+ }
+ return sizeVarint64Slice, appendVarint64Slice
+ }
+ if nozero {
+ return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+ }
+ return sizeVarint64Value, appendVarint64Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixedS64Ptr, appendFixedS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+ }
+ return sizeFixedS64Slice, appendFixedS64Slice
+ }
+ if nozero {
+ return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+ }
+ return sizeFixedS64Value, appendFixedS64Value
+ case "varint":
+ if pointer {
+ return sizeVarintS64Ptr, appendVarintS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+ }
+ return sizeVarintS64Slice, appendVarintS64Slice
+ }
+ if nozero {
+ return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+ }
+ return sizeVarintS64Value, appendVarintS64Value
+ case "zigzag64":
+ if pointer {
+ return sizeZigzag64Ptr, appendZigzag64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+ }
+ return sizeZigzag64Slice, appendZigzag64Slice
+ }
+ if nozero {
+ return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+ }
+ return sizeZigzag64Value, appendZigzag64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return sizeFloat32Ptr, appendFloat32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat32PackedSlice, appendFloat32PackedSlice
+ }
+ return sizeFloat32Slice, appendFloat32Slice
+ }
+ if nozero {
+ return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+ }
+ return sizeFloat32Value, appendFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return sizeFloat64Ptr, appendFloat64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat64PackedSlice, appendFloat64PackedSlice
+ }
+ return sizeFloat64Slice, appendFloat64Slice
+ }
+ if nozero {
+ return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+ }
+ return sizeFloat64Value, appendFloat64Value
+ case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return sizeStringPtr, appendUTF8StringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendUTF8StringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendUTF8StringValueNoZero
+ }
+ return sizeStringValue, appendUTF8StringValue
+ }
+ if pointer {
+ return sizeStringPtr, appendStringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendStringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendStringValueNoZero
+ }
+ return sizeStringValue, appendStringValue
+ case reflect.Slice:
+ if slice {
+ return sizeBytesSlice, appendBytesSlice
+ }
+ if oneof {
+		// A oneof bytes field may also have the "proto3" tag.
+ // We want to marshal it as a oneof field. Do this
+ // check before the proto3 check.
+ return sizeBytesOneof, appendBytesOneof
+ }
+ if proto3 {
+ return sizeBytes3, appendBytes3
+ }
+ return sizeBytes, appendBytes
+ case reflect.Struct:
+ switch encoding {
+ case "group":
+ if slice {
+ return makeGroupSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeGroupMarshaler(getMarshalInfo(t))
+ case "bytes":
+ if slice {
+ return makeMessageSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeMessageMarshaler(getMarshalInfo(t))
+ }
+ }
+ panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
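+
+// typeMarshaler is in effect a dispatch table: the field's Go kind, its wire
+// encoding, and the pointer/slice/packed/nozero flags together select one of
+// the specialized size*/append* functions below, so the hot path runs with
+// no per-call reflection.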
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
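+//
+// A packed repeated field is encoded as a single length-delimited record:
+// one tag, a varint byte count, then the raw elements; the *PackedSlice
+// sizers therefore add SizeVarint of the payload length plus one tagsize,
+// while the plain *Slice variants charge a full tag per element.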
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v) + tagsize
+ }
+ return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
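+// Zigzag encoding maps small-magnitude signed values to small varints:
+// 0->0, -1->1, 1->2, -2->3, 2->4; zigzag32(v) = (uint32(v)<<1)^uint32(v>>31)
+// with an arithmetic shift. The sizers and appenders below apply this
+// mapping before the ordinary varint encoding.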
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+ }
+ return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+ }
+ return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+ return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toBool()
+ if !v {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return 0
+ }
+ return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ if v == "" {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toStringSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if v == nil {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBytesSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24))
+ return b
+}
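+
+// Fixed-width values are written little-endian; for example,
+// appendFixed32(nil, 0x01020304) yields []byte{0x04, 0x03, 0x02, 0x01}.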
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56))
+ return b
+}
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+	// TODO: make the 1-byte (maybe 2-byte) case inline-able, once we
+	// have a non-leaf inliner.
+ switch {
+ case v < 1<<7:
+ b = append(b, byte(v))
+ case v < 1<<14:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte(v>>7))
+ case v < 1<<21:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte(v>>14))
+ case v < 1<<28:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte(v>>21))
+ case v < 1<<35:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte(v>>28))
+ case v < 1<<42:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte(v>>35))
+ case v < 1<<49:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte(v>>42))
+ case v < 1<<56:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte(v>>49))
+ case v < 1<<63:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte(v>>56))
+ default:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte((v>>56)&0x7f|0x80),
+ 1)
+ }
+ return b
+}
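+
+// Each varint byte carries 7 payload bits, low group first, with the high
+// bit set on every byte but the last; for example, appendVarint(nil, 300)
+// yields []byte{0xac, 0x02}.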
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, *p)
+ return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(*p))
+ return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(*p))
+ return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, *p)
+ return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(*p))
+ return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(*p))
+ return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, *p)
+ return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ if !v {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = append(b, 1)
+ return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ if *p {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(len(s)))
+ for _, v := range s {
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
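+
+// The appendUTF8String* variants below still write strings that fail
+// utf8.ValidString; they return errInvalidUTF8 after appending so that
+// marshal can record the problem in errLater without truncating the output.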
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ var invalidUTF8 bool
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ if !utf8.ValidString(v) {
+ invalidUTF8 = true
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ if invalidUTF8 {
+ return b, errInvalidUTF8
+ }
+ return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if v == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBytesSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ return u.size(p) + 2*tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ var err error
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, p, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ return b, err
+ }
+}
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ n += u.size(v) + 2*tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, v, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.size(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(p)
+ b = appendVarint(b, uint64(siz))
+ return u.marshal(b, p, deterministic)
+ }
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ siz := u.size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err error
+ var nerr nonFatal
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(v)
+ b = appendVarint(b, uint64(siz))
+ b, err = u.marshal(b, v, deterministic)
+
+ if !nerr.Merge(err) {
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+ // figure out key and value type
+ t := f.Type
+ keyType := t.Key()
+ valType := t.Elem()
+ keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+ valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+ keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+ valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+ keyWireTag := 1<<3 | wiretype(keyTags[0])
+ valWireTag := 2<<3 | wiretype(valTags[0])
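+ // Each map entry is marshaled as a nested message whose key is field 1
+ // and whose value is field 2, hence the fixed wiretags above.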
+
+ // We create an interface to get the addresses of the map key and value.
+ // If value is pointer-typed, the interface is a direct interface, the
+ // idata itself is the value. Otherwise, the idata is the pointer to the
+ // value.
+ // Key cannot be pointer-typed.
+ valIsPtr := valType.Kind() == reflect.Ptr
+
+ // If value is a message with nested maps, calling
+ // valSizer in marshal may be quadratic. We should use
+ // cached version in marshal (but not in size).
+ // If value is not message type, we don't have size cache,
+ // but it cannot be nested either. Just use valSizer.
+ valCachedSizer := valSizer
+ if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+ u := getMarshalInfo(valType.Elem())
+ valCachedSizer = func(ptr pointer, tagsize int) int {
+ // Same as message sizer, but use cache.
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.cachedsize(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ }
+ return func(ptr pointer, tagsize int) int {
+ m := ptr.asPointerTo(t).Elem() // the map
+ n := 0
+ for _, k := range m.MapKeys() {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
+ siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+ m := ptr.asPointerTo(t).Elem() // the map
+ var err error
+ keys := m.MapKeys()
+ if len(keys) > 1 && deterministic {
+ sort.Sort(mapKeys(keys))
+ }
+
+ var nerr nonFatal
+ for _, k := range keys {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
+ b = appendVarint(b, tag)
+ siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ b = appendVarint(b, uint64(siz))
+ b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+ if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+}
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+ // Oneof field is an interface. We need to get the actual data type on the fly.
+ t := f.Type
+ return func(ptr pointer, _ int) int {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return 0
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ e := fi.oneofElems[telem]
+ return e.sizer(p, e.tagsize)
+ },
+ func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return b, nil
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+ return b, errOneofHasNil
+ }
+ e := fi.oneofElems[telem]
+ return e.marshaler(b, p, e.wiretag, deterministic)
+ }
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// message set format is:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
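+//
+// For example, an item with type_id 100 and a 3-byte encoded message is
+// laid out as: 0x0b (start group), 0x10 0x64 (type_id = 100),
+// 0x1a 0x03 <3 payload bytes>, 0x0c (end group).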
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for id, e := range m {
+ n += 2 // start group, end group. tag = 1 (size=1)
+ n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ siz := len(msgWithLen)
+ n += siz + 1 // message, tag = 3 (size=1)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+ var nerr nonFatal
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for id, e := range m {
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ b = append(b, 1<<3|WireEndGroup)
+ }
+ return b, nerr.E
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, id := range keys {
+ e := m[int32(id)]
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ b = append(b, 1<<3|WireEndGroup)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+ if m == nil {
+ return 0
+ }
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ var err error
+ var nerr nonFatal
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr, ei.deref)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if !nerr.Merge(err) {
+ return b, err
+ }
+ }
+ return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+ XXX_Size() int
+ XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+ if m, ok := pb.(newMarshaler); ok {
+ return m.XXX_Size()
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, _ := m.Marshal()
+ return len(b)
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return 0
+ }
+ var info InternalMessageInfo
+ return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ b := make([]byte, 0, siz)
+ return m.XXX_Marshal(b, false)
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ return m.Marshal()
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return nil, ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ b := make([]byte, 0, siz)
+ return info.Marshal(b, pb, false)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+ var err error
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+ return err
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, err := m.Marshal()
+ p.buf = append(p.buf, b...)
+ return err
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+ return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
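+// The capacity at least doubles on each growth, so repeated appends have
+// amortized O(1) cost.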
+func (p *Buffer) grow(n int) {
+ need := len(p.buf) + n
+ if need <= cap(p.buf) {
+ return
+ }
+ newCap := len(p.buf) * 2
+ if newCap < need {
+ newCap = need
+ }
+ p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
diff --git a/src/vendor/github.com/golang/protobuf/proto/table_merge.go b/src/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 000000000..5525def6a
--- /dev/null
+++ b/src/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+ mi := atomicLoadMergeInfo(&a.merge)
+ if mi == nil {
+ mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+ atomicStoreMergeInfo(&a.merge, mi)
+ }
+ mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []mergeFieldInfo
+ unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+
+ // isPointer reports whether the value in the field is a pointer.
+ // This is true for the following situations:
+ // * Pointer to struct
+ // * Pointer to basic type (proto2 only)
+ // * Slice (first value in slice header is a pointer)
+ // * String (first value in string header is a pointer)
+ isPointer bool
+
+ // basicWidth reports the width of the field assuming that it is directly
+ // embedded in the struct (as is the case for basic types in proto3).
+ // The possible values are:
+ // 0: invalid
+ // 1: bool
+ // 4: int32, uint32, float32
+ // 8: int64, uint64, float64
+ basicWidth int
+
+ // merge merges the field's value from src into dst; dst and src are
+ // pointers to the types being merged.
+ merge func(dst, src pointer)
+}
+
+var (
+ mergeInfoMap = map[reflect.Type]*mergeInfo{}
+ mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+ mergeInfoLock.Lock()
+ defer mergeInfoLock.Unlock()
+ mi := mergeInfoMap[t]
+ if mi == nil {
+ mi = &mergeInfo{typ: t}
+ mergeInfoMap[t] = mi
+ }
+ return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+ if dst.isNil() {
+ panic("proto: nil destination")
+ }
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&mi.initialized) == 0 {
+ mi.computeMergeInfo()
+ }
+
+ for _, fi := range mi.fields {
+ sfp := src.offset(fi.field)
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+ continue
+ }
+ if fi.basicWidth > 0 {
+ switch {
+ case fi.basicWidth == 1 && !*sfp.toBool():
+ continue
+ case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+ continue
+ case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+ continue
+ }
+ }
+ }
+
+ dfp := dst.offset(fi.field)
+ fi.merge(dfp, sfp)
+ }
+
+ // TODO: Make this faster?
+ out := dst.asPointerTo(mi.typ).Elem()
+ in := src.asPointerTo(mi.typ).Elem()
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ if mi.unrecognized.IsValid() {
+ if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+ *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+ }
+ }
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+ mi.lock.Lock()
+ defer mi.lock.Unlock()
+ if mi.initialized != 0 {
+ return
+ }
+ t := mi.typ
+ n := t.NumField()
+
+ props := GetProperties(t)
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ mfi := mergeFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ switch tf.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.String:
+ // As a special case, we assume slices and strings are pointers
+ // since we know that the first field in the SliceHeader or
+ // StringHeader is a data pointer.
+ mfi.isPointer = true
+ case reflect.Bool:
+ mfi.basicWidth = 1
+ case reflect.Int32, reflect.Uint32, reflect.Float32:
+ mfi.basicWidth = 4
+ case reflect.Int64, reflect.Uint64, reflect.Float64:
+ mfi.basicWidth = 8
+ }
+ }
+
+ // Unwrap tf to get at its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + tf.Name())
+ }
+
+ switch tf.Kind() {
+ case reflect.Int32:
+ switch {
+ case isSlice: // E.g., []int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+ /*
+ sfsp := src.toInt32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int32{}
+ }
+ }
+ */
+ sfs := src.getInt32Slice()
+ if sfs != nil {
+ dfs := dst.getInt32Slice()
+ dfs = append(dfs, sfs...)
+ if dfs == nil {
+ dfs = []int32{}
+ }
+ dst.setInt32Slice(dfs)
+ }
+ }
+ case isPointer: // E.g., *int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+ /*
+ sfpp := src.toInt32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt32Ptr()
+ if *dfpp == nil {
+ *dfpp = Int32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ */
+ sfp := src.getInt32Ptr()
+ if sfp != nil {
+ dfp := dst.getInt32Ptr()
+ if dfp == nil {
+ dst.setInt32Ptr(*sfp)
+ } else {
+ *dfp = *sfp
+ }
+ }
+ }
+ default: // E.g., int32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt32(); v != 0 {
+ *dst.toInt32() = v
+ }
+ }
+ }
+ case reflect.Int64:
+ switch {
+ case isSlice: // E.g., []int64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toInt64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *int64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toInt64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt64Ptr()
+ if *dfpp == nil {
+ *dfpp = Int64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., int64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt64(); v != 0 {
+ *dst.toInt64() = v
+ }
+ }
+ }
+ case reflect.Uint32:
+ switch {
+ case isSlice: // E.g., []uint32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint32Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint32(); v != 0 {
+ *dst.toUint32() = v
+ }
+ }
+ }
+ case reflect.Uint64:
+ switch {
+ case isSlice: // E.g., []uint64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint64Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint64(); v != 0 {
+ *dst.toUint64() = v
+ }
+ }
+ }
+ case reflect.Float32:
+ switch {
+ case isSlice: // E.g., []float32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat32Ptr()
+ if *dfpp == nil {
+ *dfpp = Float32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat32(); v != 0 {
+ *dst.toFloat32() = v
+ }
+ }
+ }
+ case reflect.Float64:
+ switch {
+ case isSlice: // E.g., []float64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat64Ptr()
+ if *dfpp == nil {
+ *dfpp = Float64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat64(); v != 0 {
+ *dst.toFloat64() = v
+ }
+ }
+ }
+ case reflect.Bool:
+ switch {
+ case isSlice: // E.g., []bool
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toBoolSlice()
+ if *sfsp != nil {
+ dfsp := dst.toBoolSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []bool{}
+ }
+ }
+ }
+ case isPointer: // E.g., *bool
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toBoolPtr()
+ if *sfpp != nil {
+ dfpp := dst.toBoolPtr()
+ if *dfpp == nil {
+ *dfpp = Bool(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., bool
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toBool(); v {
+ *dst.toBool() = v
+ }
+ }
+ }
+ case reflect.String:
+ switch {
+ case isSlice: // E.g., []string
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toStringSlice()
+ if *sfsp != nil {
+ dfsp := dst.toStringSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []string{}
+ }
+ }
+ }
+ case isPointer: // E.g., *string
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toStringPtr()
+ if *sfpp != nil {
+ dfpp := dst.toStringPtr()
+ if *dfpp == nil {
+ *dfpp = String(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., string
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toString(); v != "" {
+ *dst.toString() = v
+ }
+ }
+ }
+ case reflect.Slice:
+ isProto3 := props.Prop[i].proto3
+ switch {
+ case isPointer:
+ panic("bad pointer in byte slice case in " + tf.Name())
+ case tf.Elem().Kind() != reflect.Uint8:
+ panic("bad element kind in byte slice case in " + tf.Name())
+ case isSlice: // E.g., [][]byte
+ mfi.merge = func(dst, src pointer) {
+ sbsp := src.toBytesSlice()
+ if *sbsp != nil {
+ dbsp := dst.toBytesSlice()
+ for _, sb := range *sbsp {
+ if sb == nil {
+ *dbsp = append(*dbsp, nil)
+ } else {
+ *dbsp = append(*dbsp, append([]byte{}, sb...))
+ }
+ }
+ if *dbsp == nil {
+ *dbsp = [][]byte{}
+ }
+ }
+ }
+ default: // E.g., []byte
+ mfi.merge = func(dst, src pointer) {
+ sbp := src.toBytes()
+ if *sbp != nil {
+ dbp := dst.toBytes()
+ if !isProto3 || len(*sbp) > 0 {
+ *dbp = append([]byte{}, *sbp...)
+ }
+ }
+ }
+ }
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("message field %s without pointer", tf))
+ case isSlice: // E.g., []*pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sps := src.getPointerSlice()
+ if sps != nil {
+ dps := dst.getPointerSlice()
+ for _, sp := range sps {
+ var dp pointer
+ if !sp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ mi.merge(dp, sp)
+ }
+ dps = append(dps, dp)
+ }
+ if dps == nil {
+ dps = []pointer{}
+ }
+ dst.setPointerSlice(dps)
+ }
+ }
+ default: // E.g., *pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ dp := dst.getPointer()
+ if dp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ dst.setPointer(dp)
+ }
+ mi.merge(dp, sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in map case in " + tf.Name())
+ default: // E.g., map[K]V
+ mfi.merge = func(dst, src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ dm := dst.asPointerTo(tf).Elem()
+ if dm.IsNil() {
+ dm.Set(reflect.MakeMap(tf))
+ }
+
+ switch tf.Elem().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(Clone(val.Interface().(Message)))
+ dm.SetMapIndex(key, val)
+ }
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ dm.SetMapIndex(key, val)
+ }
+ default: // Basic type (e.g., string)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ dm.SetMapIndex(key, val)
+ }
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in interface case in " + tf.Name())
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ mfi.merge = func(dst, src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ du := dst.asPointerTo(tf).Elem()
+ typ := su.Elem().Type()
+ if du.IsNil() || du.Elem().Type() != typ {
+ du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+ }
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ dv := du.Elem().Elem().Field(0)
+ if dv.Kind() == reflect.Ptr && dv.IsNil() {
+ dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ Merge(dv.Interface().(Message), sv.Interface().(Message))
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+ default: // Basic type (e.g., string)
+ dv.Set(sv)
+ }
+ }
+ }
+ }
+ default:
+ panic(fmt.Sprintf("merger not found for type:%s", tf))
+ }
+ mi.fields = append(mi.fields, mfi)
+ }
+
+ mi.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ mi.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&mi.initialized, 1)
+}
diff --git a/src/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/src/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 000000000..acee2fc52
--- /dev/null
+++ b/src/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2053 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+ // Load the unmarshal information for this message type.
+ // The atomic load ensures memory consistency.
+ u := atomicLoadUnmarshalInfo(&a.unmarshal)
+ if u == nil {
+ // Slow path: find unmarshal info for msg, update a with it.
+ u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+ atomicStoreUnmarshalInfo(&a.unmarshal, u)
+ }
+ // Then do the unmarshaling.
+ err := u.unmarshal(toPointer(&msg), b)
+ return err
+}
+
+type unmarshalInfo struct {
+ typ reflect.Type // type of the protobuf struct
+
+ // 0 = only typ field is initialized
+ // 1 = completely initialized
+ initialized int32
+ lock sync.Mutex // prevents double initialization
+ dense []unmarshalFieldInfo // fields indexed by tag #
+ sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
+ reqFields []string // names of required fields
+ reqMask uint64 // 1<<len(reqFields)-1 when all required fields are present
+ unrecognized field // offset of XXX_unrecognized
+ extensions field // offset of XXX_InternalExtensions
+ oldExtensions field // offset of XXX_extensions
+ extensionRanges []ExtensionRange // if non-nil, the extension ranges in this message
+ isMessageSet bool // if true, implies extensionRanges is non-nil
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+ // location of the field in the proto message structure.
+ field field
+
+ // function to unmarshal the data for the field.
+ unmarshal unmarshaler
+
+ // if a required field, contains a single set bit at this field's index
+ // in the required field list.
+ reqMask uint64
+
+ name string // name of the field, for error reporting
+}
+
+var (
+ unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
+ unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// efficiently used to unmarshal a message of the given type.
+// t is assumed to be a pointer to a structure of the message type.
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+ // It would be correct to return a new unmarshalInfo
+ // unconditionally. We would end up allocating one
+ // per occurrence of that type as a message or submessage.
+ // We use a cache here just to reduce memory usage.
+ unmarshalInfoLock.Lock()
+ defer unmarshalInfoLock.Unlock()
+ u := unmarshalInfoMap[t]
+ if u == nil {
+ u = &unmarshalInfo{typ: t}
+ // Note: we just set the type here. The rest of the fields
+ // will be initialized on first use.
+ unmarshalInfoMap[t] = u
+ }
+ return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeUnmarshalInfo()
+ }
+ if u.isMessageSet {
+ return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ }
+ var reqMask uint64 // bitmask of required fields we've seen
+ var errLater error
+ for len(b) > 0 {
+ // Read tag and wire type.
+ // Special case 1 and 2 byte varints.
+ var x uint64
+ if b[0] < 128 {
+ x = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ }
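+ // A field key is (field_number << 3) | wire_type: the tag is the upper
+ // bits, the wire type the low three bits.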
+ tag := x >> 3
+ wire := int(x) & 7
+
+ // Dispatch on the tag to one of the unmarshal* functions below.
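+ // Small tag numbers index directly into the dense slice; others fall
+ // back to the sparse map.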
+ var f unmarshalFieldInfo
+ if tag < uint64(len(u.dense)) {
+ f = u.dense[tag]
+ } else {
+ f = u.sparse[tag]
+ }
+ if fn := f.unmarshal; fn != nil {
+ var err error
+ b, err = fn(b, m.offset(f.field), wire)
+ if err == nil {
+ reqMask |= f.reqMask
+ continue
+ }
+ if r, ok := err.(*RequiredNotSetError); ok {
+ // Remember this error, but keep parsing. We need to produce
+ // a full parse even if a required field is missing.
+ if errLater == nil {
+ errLater = r
+ }
+ reqMask |= f.reqMask
+ continue
+ }
+ if err != errInternalBadWireType {
+ if err == errInvalidUTF8 {
+ if errLater == nil {
+ fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+ errLater = &invalidUTF8Error{fullName}
+ }
+ continue
+ }
+ return err
+ }
+ // Fragments with bad wire type are treated as unknown fields.
+ }
+
+ // Unknown tag.
+ if !u.unrecognized.IsValid() {
+ // Don't keep unrecognized data; just skip it.
+ var err error
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ // Keep unrecognized data around.
+ // maybe in extensions, maybe in the unrecognized field.
+ z := m.offset(u.unrecognized).toBytes()
+ var emap map[int32]Extension
+ var e Extension
+ for _, r := range u.extensionRanges {
+ if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+ if u.extensions.IsValid() {
+ mp := m.offset(u.extensions).toExtensions()
+ emap = mp.extensionsWrite()
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.oldExtensions.IsValid() {
+ p := m.offset(u.oldExtensions).toOldExtensions()
+ emap = *p
+ if emap == nil {
+ emap = map[int32]Extension{}
+ *p = emap
+ }
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ panic("no extensions field available")
+ }
+ }
+
+ // Use wire type to skip data.
+ var err error
+ b0 := b
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ *z = encodeVarint(*z, tag<<3|uint64(wire))
+ *z = append(*z, b0[:len(b0)-len(b)]...)
+
+ if emap != nil {
+ emap[int32(tag)] = e
+ }
+ }
+ if reqMask != u.reqMask && errLater == nil {
+ // A required field of this message is missing.
+ for _, n := range u.reqFields {
+ if reqMask&1 == 0 {
+ errLater = &RequiredNotSetError{n}
+ }
+ reqMask >>= 1
+ }
+ }
+ return errLater
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ if u.initialized != 0 {
+ return
+ }
+ t := u.typ
+ n := t.NumField()
+
+ // Set up the "not found" value for the unrecognized byte buffer.
+ // This is the default for proto3.
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.oldExtensions = invalidField
+
+ // List of the generated type and offset for each oneof field.
+ type oneofField struct {
+ ityp reflect.Type // interface type of oneof field
+ field field // offset in containing message
+ }
+ var oneofFields []oneofField
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if f.Name == "XXX_unrecognized" {
+ // The byte slice used to hold unrecognized input is special.
+ if f.Type != reflect.TypeOf(([]byte)(nil)) {
+ panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+ }
+ u.unrecognized = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_InternalExtensions" {
+ // Ditto here.
+ if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+ panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+ }
+ u.extensions = toField(&f)
+ if f.Tag.Get("protobuf_messageset") == "1" {
+ u.isMessageSet = true
+ }
+ continue
+ }
+ if f.Name == "XXX_extensions" {
+ // An older form of the extensions field.
+ if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+ panic("bad type for XXX_extensions field: " + f.Type.Name())
+ }
+ u.oldExtensions = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+ continue
+ }
+
+ oneof := f.Tag.Get("protobuf_oneof")
+ if oneof != "" {
+ oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+ // The rest of oneof processing happens below.
+ continue
+ }
+
+ tags := f.Tag.Get("protobuf")
+ tagArray := strings.Split(tags, ",")
+ if len(tagArray) < 2 {
+ panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+ }
+ tag, err := strconv.Atoi(tagArray[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tagArray[1])
+ }
+
+ name := ""
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ }
+
+ // Extract unmarshaling function from the field (its type and tags).
+ unmarshal := fieldUnmarshaler(&f)
+
+ // Required field?
+ var reqMask uint64
+ if tagArray[2] == "req" {
+ bit := len(u.reqFields)
+ u.reqFields = append(u.reqFields, name)
+ reqMask = uint64(1) << uint(bit)
+ // TODO: if we have more than 64 required fields, we end up
+ // not verifying that all required fields are present.
+ // Fix this, perhaps using a count of required fields?
+ }
+
+ // Store the info in the correct slot in the message.
+ u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+ }
+
+ // Find any types associated with oneof fields.
+ var oneofImplementers []interface{}
+ switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+ case oneofFuncsIface:
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ case oneofWrappersIface:
+ oneofImplementers = m.XXX_OneofWrappers()
+ }
+ for _, v := range oneofImplementers {
+ tptr := reflect.TypeOf(v) // *Msg_X
+ typ := tptr.Elem() // Msg_X
+
+ f := typ.Field(0) // oneof implementers have one field
+ baseUnmarshal := fieldUnmarshaler(&f)
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ fieldNum, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tags[1])
+ }
+ var name string
+ for _, tag := range tags {
+ if strings.HasPrefix(tag, "name=") {
+ name = strings.TrimPrefix(tag, "name=")
+ break
+ }
+ }
+
+ // Find the oneof field that this struct implements.
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(fieldNum, of.field, unmarshal, 0, name)
+ }
+ }
+
+ }
+
+ // Get extension ranges, if any.
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ if fn.IsValid() {
+ if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+ panic("a message with extensions, but no extensions field in " + t.Name())
+ }
+ u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+ }
+
+ // Explicitly disallow tag 0. This will ensure we flag an error
+ // when decoding a buffer of all zeros. Without this code, we
+ // would decode and skip an all-zero buffer of even length.
+ // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+ return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+ }, 0, "")
+
+ // Set mask for required field check.
+ u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+ i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+ n := u.typ.NumField()
+ if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag {
+ u.dense = append(u.dense, unmarshalFieldInfo{})
+ }
+ u.dense[tag] = i
+ return
+ }
+ if u.sparse == nil {
+ u.sparse = map[uint64]unmarshalFieldInfo{}
+ }
+ u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+ if f.Type.Kind() == reflect.Map {
+ return makeUnmarshalMap(f)
+ }
+ return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+ tagArray := strings.Split(tags, ",")
+ encoding := tagArray[0]
+ name := "unknown"
+ proto3 := false
+ validateUTF8 := true
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ if tag == "proto3" {
+ proto3 = true
+ }
+ }
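+ // Only proto3 string fields are validated as UTF-8 during unmarshal.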
+ validateUTF8 = validateUTF8 && proto3
+
+ // Figure out packaging (pointer, slice, or both)
+ slice := false
+ pointer := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ // We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + t.Name())
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return unmarshalBoolPtr
+ }
+ if slice {
+ return unmarshalBoolSlice
+ }
+ return unmarshalBoolValue
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixedS32Ptr
+ }
+ if slice {
+ return unmarshalFixedS32Slice
+ }
+ return unmarshalFixedS32Value
+ case "varint":
+ // this could be int32 or enum
+ if pointer {
+ return unmarshalInt32Ptr
+ }
+ if slice {
+ return unmarshalInt32Slice
+ }
+ return unmarshalInt32Value
+ case "zigzag32":
+ if pointer {
+ return unmarshalSint32Ptr
+ }
+ if slice {
+ return unmarshalSint32Slice
+ }
+ return unmarshalSint32Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixedS64Ptr
+ }
+ if slice {
+ return unmarshalFixedS64Slice
+ }
+ return unmarshalFixedS64Value
+ case "varint":
+ if pointer {
+ return unmarshalInt64Ptr
+ }
+ if slice {
+ return unmarshalInt64Slice
+ }
+ return unmarshalInt64Value
+ case "zigzag64":
+ if pointer {
+ return unmarshalSint64Ptr
+ }
+ if slice {
+ return unmarshalSint64Slice
+ }
+ return unmarshalSint64Value
+ }
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixed32Ptr
+ }
+ if slice {
+ return unmarshalFixed32Slice
+ }
+ return unmarshalFixed32Value
+ case "varint":
+ if pointer {
+ return unmarshalUint32Ptr
+ }
+ if slice {
+ return unmarshalUint32Slice
+ }
+ return unmarshalUint32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixed64Ptr
+ }
+ if slice {
+ return unmarshalFixed64Slice
+ }
+ return unmarshalFixed64Value
+ case "varint":
+ if pointer {
+ return unmarshalUint64Ptr
+ }
+ if slice {
+ return unmarshalUint64Slice
+ }
+ return unmarshalUint64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return unmarshalFloat32Ptr
+ }
+ if slice {
+ return unmarshalFloat32Slice
+ }
+ return unmarshalFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return unmarshalFloat64Ptr
+ }
+ if slice {
+ return unmarshalFloat64Slice
+ }
+ return unmarshalFloat64Value
+ case reflect.Map:
+ panic("map type in typeUnmarshaler in " + t.Name())
+ case reflect.Slice:
+ if pointer {
+ panic("bad pointer in slice case in " + t.Name())
+ }
+ if slice {
+ return unmarshalBytesSlice
+ }
+ return unmarshalBytesValue
+ case reflect.String:
+ if validateUTF8 {
+ if pointer {
+ return unmarshalUTF8StringPtr
+ }
+ if slice {
+ return unmarshalUTF8StringSlice
+ }
+ return unmarshalUTF8StringValue
+ }
+ if pointer {
+ return unmarshalStringPtr
+ }
+ if slice {
+ return unmarshalStringSlice
+ }
+ return unmarshalStringValue
+ case reflect.Struct:
+ // message or group field
+ if !pointer {
+ panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+ }
+ switch encoding {
+ case "bytes":
+ if slice {
+ return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+ case "group":
+ if slice {
+ return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+ }
+ }
+ panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
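+ // A packed repeated field is one length-delimited blob of back-to-back
+ // varints: read the byte length, then decode values until the
+ // sub-buffer is exhausted.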
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
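+ // Zigzag decoding: the low bit of x carries the sign; x>>1 recovers the
+ // magnitude and the arithmetic shift sign-extends that bit, mapping
+ // 0,1,2,3,... back to 0,-1,1,-2,...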
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64() = v
+ return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32() = v
+ return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ *f.toInt32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.setInt32Ptr(v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ // Note: any length varint is allowed, even though any sane
+ // encoder will use one byte.
+ // See https://github.com/golang/protobuf/issues/76
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // TODO: check if x>1? Tests seem to indicate no.
+ v := x != 0
+ *f.toBool() = v
+ return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ *f.toBoolPtr() = &v
+ return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ b = b[n:]
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64() = v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32() = v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toString() = v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ *f.toStringPtr() = &v
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ if !utf8.ValidString(v) {
+ return b[x:], errInvalidUTF8
+ }
+ return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // The use of append here is a trick which avoids the zeroing
+ // that would be required if we used a make/copy pair.
+ // We append to emptyBuf instead of nil because we want
+ // a non-nil result even when the length is 0.
+ v := append(emptyBuf[:], b[:x]...)
+ *f.toBytes() = v
+ return b[x:], nil
+}
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := append(emptyBuf[:], b[:x]...)
+ s := f.toBytesSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // First read the message field to see if something is there.
+ // The semantics of multiple submessages are weird. Instead of
+ // the last one winning (as it is for all other fields), multiple
+ // submessages are merged.
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+ t := f.Type
+ kt := t.Key()
+ vt := t.Elem()
+ unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+ unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // The map entry is a submessage. Figure out how big it is.
+ if w != WireBytes {
+ return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ r := b[x:] // unused data to return
+ b = b[:x] // data for map entry
+
+ // Note: we could use #keys * #values ~= 200 functions
+ // to do map decoding without reflection. Probably not worth it.
+ // Maps will be somewhat slow. Oh well.
+
+ // Read key and value from data.
+ var nerr nonFatal
+ k := reflect.New(kt)
+ v := reflect.New(vt)
+ for len(b) > 0 {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ wire := int(x) & 7
+ b = b[n:]
+
+ var err error
+ switch x >> 3 {
+ case 1:
+ b, err = unmarshalKey(b, valToPointer(k), wire)
+ case 2:
+ b, err = unmarshalVal(b, valToPointer(v), wire)
+ default:
+ err = errInternalBadWireType // skip unknown tag
+ }
+
+ if nerr.Merge(err) {
+ continue
+ }
+ if err != errInternalBadWireType {
+ return nil, err
+ }
+
+ // Skip past unknown fields.
+ b, err = skipField(b, wire)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Get map, allocate if needed.
+ m := f.asPointerTo(t).Elem() // an addressable map[K]T
+ if m.IsNil() {
+ m.Set(reflect.MakeMap(t))
+ }
+
+ // Insert into map.
+ m.SetMapIndex(k.Elem(), v.Elem())
+
+ return r, nerr.E
+ }
+}
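
The comment above notes that each map entry is decoded as an ordinary length-delimited submessage whose field 1 is the key and field 2 is the value. As a hedged illustration of what that unmarshaler sees on the wire, here is a hand-encoded `map<string,int32>` entry (the map field number 3 is an arbitrary example; only the inner field numbers 1 and 2 are fixed by the encoding):

```go
package main

import "fmt"

func main() {
	// One map entry {"hi": 7}, encoded as a submessage.
	entry := []byte{
		0x0a, 0x02, 'h', 'i', // field 1 (key), wire type 2 (bytes), length 2
		0x10, 0x07, // field 2 (value), wire type 0 (varint), value 7
	}
	// On the wire the entry is preceded by the map field's own tag and
	// length; for a map field with number 3 the tag byte is (3<<3)|2.
	msg := append([]byte{0x1a, byte(len(entry))}, entry...)
	fmt.Printf("%% x\n", msg) // 1a 06 0a 02 68 69 10 07
}
```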
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+// oneof F {
+// int64 X = 1;
+// float64 Y = 2;
+// }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+ sf := typ.Field(0)
+ field0 := toField(&sf)
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // Allocate holder for value.
+ v := reflect.New(typ)
+
+ // Unmarshal data into holder.
+ // We unmarshal into the first field of the holder object.
+ var err error
+ var nerr nonFatal
+ b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+ if !nerr.Merge(err) {
+ return nil, err
+ }
+
+ // Write pointer to holder into target field.
+ f.asPointerTo(ityp).Elem().Set(v)
+
+ return b, nerr.E
+ }
+}
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+ switch wire {
+ case WireVarint:
+ _, k := decodeVarint(b)
+ if k == 0 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[k:]
+ case WireFixed32:
+ if len(b) < 4 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[4:]
+ case WireFixed64:
+ if len(b) < 8 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[8:]
+ case WireBytes:
+ m, k := decodeVarint(b)
+ if k == 0 || uint64(len(b)-k) < m {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[uint64(k)+m:]
+ case WireStartGroup:
+ _, i := findEndGroup(b)
+ if i == -1 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[i:]
+ default:
+ return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+ }
+ return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+ depth := 1
+ i := 0
+ for {
+ x, n := decodeVarint(b[i:])
+ if n == 0 {
+ return -1, -1
+ }
+ j := i
+ i += n
+ switch x & 7 {
+ case WireVarint:
+ _, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ case WireFixed32:
+ if len(b)-4 < i {
+ return -1, -1
+ }
+ i += 4
+ case WireFixed64:
+ if len(b)-8 < i {
+ return -1, -1
+ }
+ i += 8
+ case WireBytes:
+ m, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ if uint64(len(b)-i) < m {
+ return -1, -1
+ }
+ i += int(m)
+ case WireStartGroup:
+ depth++
+ case WireEndGroup:
+ depth--
+ if depth == 0 {
+ return j, i
+ }
+ default:
+ return -1, -1
+ }
+ }
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+func encodeVarint(b []byte, x uint64) []byte {
+ for x >= 1<<7 {
+ b = append(b, byte(x&0x7f|0x80))
+ x >>= 7
+ }
+ return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
+func decodeVarint(b []byte) (uint64, int) {
+ var x, y uint64
+ if len(b) == 0 {
+ goto bad
+ }
+ x = uint64(b[0])
+ if x < 0x80 {
+ return x, 1
+ }
+ x -= 0x80
+
+ if len(b) <= 1 {
+ goto bad
+ }
+ y = uint64(b[1])
+ x += y << 7
+ if y < 0x80 {
+ return x, 2
+ }
+ x -= 0x80 << 7
+
+ if len(b) <= 2 {
+ goto bad
+ }
+ y = uint64(b[2])
+ x += y << 14
+ if y < 0x80 {
+ return x, 3
+ }
+ x -= 0x80 << 14
+
+ if len(b) <= 3 {
+ goto bad
+ }
+ y = uint64(b[3])
+ x += y << 21
+ if y < 0x80 {
+ return x, 4
+ }
+ x -= 0x80 << 21
+
+ if len(b) <= 4 {
+ goto bad
+ }
+ y = uint64(b[4])
+ x += y << 28
+ if y < 0x80 {
+ return x, 5
+ }
+ x -= 0x80 << 28
+
+ if len(b) <= 5 {
+ goto bad
+ }
+ y = uint64(b[5])
+ x += y << 35
+ if y < 0x80 {
+ return x, 6
+ }
+ x -= 0x80 << 35
+
+ if len(b) <= 6 {
+ goto bad
+ }
+ y = uint64(b[6])
+ x += y << 42
+ if y < 0x80 {
+ return x, 7
+ }
+ x -= 0x80 << 42
+
+ if len(b) <= 7 {
+ goto bad
+ }
+ y = uint64(b[7])
+ x += y << 49
+ if y < 0x80 {
+ return x, 8
+ }
+ x -= 0x80 << 49
+
+ if len(b) <= 8 {
+ goto bad
+ }
+ y = uint64(b[8])
+ x += y << 56
+ if y < 0x80 {
+ return x, 9
+ }
+ x -= 0x80 << 56
+
+ if len(b) <= 9 {
+ goto bad
+ }
+ y = uint64(b[9])
+ x += y << 63
+ if y < 2 {
+ return x, 10
+ }
+
+bad:
+ return 0, 0
+}
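
The unrolled `decodeVarint` above trades clarity for speed; the logic it implements is the standard base-128 varint, where each byte carries seven payload bits and the high bit marks continuation. A compact loop-based sketch of the same round trip (omitting the final-byte overflow check the vendored code performs):

```go
package main

import "fmt"

func encode(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80)) // low 7 bits, continuation bit set
		x >>= 7
	}
	return append(b, byte(x)) // final byte has the high bit clear
}

func decode(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		x |= uint64(b[i]&0x7f) << (7 * uint(i))
		if b[i] < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0 // truncated input (or varint longer than 10 bytes)
}

func main() {
	buf := encode(nil, 300) // encodes as ac 02
	v, n := decode(buf)
	fmt.Printf("%% x => %d (%d bytes)\n", buf, v, n)
}
```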
diff --git a/src/vendor/github.com/golang/protobuf/proto/text.go b/src/vendor/github.com/golang/protobuf/proto/text.go
index 965876bf0..1aaee725b 100644
--- a/src/vendor/github.com/golang/protobuf/proto/text.go
+++ b/src/vendor/github.com/golang/protobuf/proto/text.go
@@ -50,7 +50,6 @@ import (
var (
newline = []byte("\n")
spaces = []byte(" ")
- gtNewline = []byte(">\n")
endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'}
@@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error {
return nil
}
-// raw is the interface satisfied by RawMessage.
-type raw interface {
- Bytes() []byte
-}
-
func requiresQuotes(u string) bool {
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
for _, ch := range u {
@@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
props := sprops.Prop[i]
name := st.Field(i).Name
+ if name == "XXX_NoUnkeyedLiteral" {
+ continue
+ }
+
if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields:
// XXX_unrecognized []byte
@@ -355,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
- if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+ if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@@ -372,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
- if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+ if err := tm.writeAny(w, val, props.MapValProp); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
@@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
- if b, ok := fv.Interface().(raw); ok {
- if err := writeRaw(w, b.Bytes()); err != nil {
- return err
- }
- continue
- }
// Enums have a String method, so writeAny will work fine.
if err := tm.writeAny(w, fv, props); err != nil {
@@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
// Extensions (the XXX_extensions field).
pv := sv.Addr()
- if _, ok := extendable(pv.Interface()); ok {
+ if _, err := extendable(pv.Interface()); err == nil {
if err := tm.writeExtensions(w, pv); err != nil {
return err
}
@@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return nil
}
-// writeRaw writes an uninterpreted raw message.
-func writeRaw(w *textWriter, b []byte) error {
- if err := w.WriteByte('<'); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- if err := writeUnknownStruct(w, b); err != nil {
- return err
- }
- w.unindent()
- if err := w.WriteByte('>'); err != nil {
- return err
- }
- return nil
-}
-
// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
@@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
}
}
w.indent()
+ if v.CanAddr() {
+ // Calling v.Interface on a struct causes the reflect package to
+ // copy the entire struct. This is racy with the new Marshaler
+ // since we atomically update the XXX_sizecache.
+ //
+ // Thus, we retrieve a pointer to the struct if possible to avoid
+ // a race since v.Interface on the pointer doesn't copy the struct.
+ //
+ // If v is not addressable, then we are not worried about a race
+ // since it implies that the binary Marshaler cannot possibly be
+ // mutating this value.
+ v = v.Addr()
+ }
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if err != nil {
@@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
if _, err = w.Write(text); err != nil {
return err
}
- } else if err := tm.writeStruct(w, v); err != nil {
- return err
+ } else {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
}
w.unindent()
if err := w.WriteByte(ket); err != nil {
diff --git a/src/vendor/github.com/golang/protobuf/proto/text_parser.go b/src/vendor/github.com/golang/protobuf/proto/text_parser.go
index 5e14513f2..bb55a3af2 100644
--- a/src/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ b/src/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -206,7 +206,6 @@ func (p *textParser) advance() {
var (
errBadUTF8 = errors.New("proto: bad UTF-8")
- errBadHex = errors.New("proto: bad hexadecimal")
)
func unquoteC(s string, quote rune) (string, error) {
@@ -277,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) {
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ case '0', '1', '2', '3', '4', '5', '6', '7':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
- base := 8
- ss := s[:2]
+ ss := string(r) + s[:2]
s = s[2:]
- if r == 'x' || r == 'X' {
- base = 16
- } else {
- ss = string(r) + ss
- }
- i, err := strconv.ParseUint(ss, base, 8)
+ i, err := strconv.ParseUint(ss, 8, 8)
if err != nil {
- return "", "", err
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
}
return string([]byte{byte(i)}), s, nil
- case 'u', 'U':
- n := 4
- if r == 'U' {
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
n = 8
}
if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
- }
-
- bs := make([]byte, n/2)
- for i := 0; i < n; i += 2 {
- a, ok1 := unhex(s[i])
- b, ok2 := unhex(s[i+1])
- if !ok1 || !ok2 {
- return "", "", errBadHex
- }
- bs[i/2] = a<<4 | b
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
}
+ ss := s[:n]
s = s[n:]
- return string(bs), s, nil
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(i), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
-// Adapted from src/pkg/strconv/quote.go.
-func unhex(b byte) (v byte, ok bool) {
- switch {
- case '0' <= b && b <= '9':
- return b - '0', true
- case 'a' <= b && b <= 'f':
- return b - 'a' + 10, true
- case 'A' <= b && b <= 'F':
- return b - 'A' + 10, true
- }
- return 0, false
-}
-
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
@@ -644,17 +630,17 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
if err := p.consumeToken(":"); err != nil {
return err
}
- if err := p.readAny(key, props.mkeyprop); err != nil {
+ if err := p.readAny(key, props.MapKeyProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
case "value":
- if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
return err
}
- if err := p.readAny(val, props.mvalprop); err != nil {
+ if err := p.readAny(val, props.MapValProp); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
@@ -728,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) {
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
}
return strings.Join(parts, ""), nil
}
@@ -865,7 +854,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- fv.SetUint(x)
+ fv.SetUint(uint64(x))
return nil
}
case reflect.Uint64:
@@ -883,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok {
- err := um.UnmarshalText([]byte(s))
- return err
+ return um.UnmarshalText([]byte(s))
}
pb.Reset()
v := reflect.ValueOf(pb)
- if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
- return pe
- }
- return nil
+ return newTextParser(s).readStruct(v.Elem(), "")
}
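
The rewritten `unescape` above replaces the shared octal/hex path and the hand-rolled `unhex` helper with `strconv.ParseUint`: octal escapes parse base-8 into a single byte, while `\x`, `\u` and `\U` parse a fixed number of hex digits, yielding a raw byte for `\x` and a Unicode code point for `\u`/`\U`. A simplified sketch of the hex branch (illustrative only, not the vendored function):

```go
package main

import (
	"fmt"
	"strconv"
)

func decodeHexEscape(r rune, digits string) (string, error) {
	i, err := strconv.ParseUint(digits, 16, 64)
	if err != nil {
		return "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, digits)
	}
	if r == 'x' || r == 'X' {
		return string([]byte{byte(i)}), nil // \xNN yields one raw byte
	}
	return string(rune(i)), nil // \uNNNN and \UNNNNNNNN yield a code point
}

func main() {
	a, _ := decodeHexEscape('x', "41")   // "A"
	e, _ := decodeHexEscape('u', "00e9") // "é"
	fmt.Println(a, e)
}
```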
diff --git a/src/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/src/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index f34601723..78ee52334 100644
--- a/src/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/src/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -1,20 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/any.proto
-/*
-Package any is a generated protocol buffer package.
-
-It is generated from these files:
- google/protobuf/any.proto
-
-It has these top-level messages:
- Any
-*/
package any
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -25,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
@@ -108,17 +101,18 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// }
//
type Any struct {
- // A URL/resource name whose content describes the type of the
- // serialized protocol buffer message.
+ // A URL/resource name that uniquely identifies the type of the serialized
+ // protocol buffer message. The last segment of the URL's path must represent
+ // the fully qualified name of the type (as in
+ // `path/google.protobuf.Duration`). The name should be in a canonical form
+ // (e.g., leading "." is not accepted).
//
- // For URLs which use the scheme `http`, `https`, or no scheme, the
- // following restrictions and interpretations apply:
+ // In practice, teams usually precompile into the binary all types that they
+ // expect it to use in the context of Any. However, for URLs which use the
+ // scheme `http`, `https`, or no scheme, one can optionally set up a type
+ // server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
- // * The last segment of the URL's path must represent the fully
- // qualified name of the type (as in `path/google.protobuf.Duration`).
- // The name should be in a canonical form (e.g., leading "." is
- // not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
@@ -127,19 +121,47 @@ type Any struct {
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
+ // Note: this functionality is not currently available in the official
+ // protobuf release, and it is not used for type URLs beginning with
+ // type.googleapis.com.
+ //
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
- TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
+ TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Any) Reset() { *m = Any{} }
-func (m *Any) String() string { return proto.CompactTextString(m) }
-func (*Any) ProtoMessage() {}
-func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-func (*Any) XXX_WellKnownType() string { return "Any" }
+func (m *Any) Reset() { *m = Any{} }
+func (m *Any) String() string { return proto.CompactTextString(m) }
+func (*Any) ProtoMessage() {}
+func (*Any) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b53526c13ae22eb4, []int{0}
+}
+
+func (*Any) XXX_WellKnownType() string { return "Any" }
+
+func (m *Any) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Any.Unmarshal(m, b)
+}
+func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Any.Marshal(b, m, deterministic)
+}
+func (m *Any) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Any.Merge(m, src)
+}
+func (m *Any) XXX_Size() int {
+ return xxx_messageInfo_Any.Size(m)
+}
+func (m *Any) XXX_DiscardUnknown() {
+ xxx_messageInfo_Any.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Any proto.InternalMessageInfo
func (m *Any) GetTypeUrl() string {
if m != nil {
@@ -159,9 +181,9 @@ func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
-func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) }
+func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
-var fileDescriptor0 = []byte{
+var fileDescriptor_b53526c13ae22eb4 = []byte{
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
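
For callers, the regenerated `Any` is still packed and unpacked through the `ptypes` helpers that ship in the same module. A hedged usage sketch (helper names as of golang/protobuf 1.x; verify against the vendored copy in this tree):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	tspb "github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
	ts := &tspb.Timestamp{Seconds: 1500000000}

	// Pack a concrete message; TypeUrl is derived from the message's
	// fully qualified name.
	packed, err := ptypes.MarshalAny(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(packed.TypeUrl) // type.googleapis.com/google.protobuf.Timestamp

	// Unpack back into a typed message.
	var out tspb.Timestamp
	if err := ptypes.UnmarshalAny(packed, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Seconds)
}
```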
diff --git a/src/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/src/vendor/github.com/golang/protobuf/ptypes/any/any.proto
index c74866762..493294255 100644
--- a/src/vendor/github.com/golang/protobuf/ptypes/any/any.proto
+++ b/src/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -120,17 +120,18 @@ option objc_class_prefix = "GPB";
// }
//
message Any {
- // A URL/resource name whose content describes the type of the
- // serialized protocol buffer message.
+ // A URL/resource name that uniquely identifies the type of the serialized
+ // protocol buffer message. The last segment of the URL's path must represent
+ // the fully qualified name of the type (as in
+ // `path/google.protobuf.Duration`). The name should be in a canonical form
+ // (e.g., leading "." is not accepted).
//
- // For URLs which use the scheme `http`, `https`, or no scheme, the
- // following restrictions and interpretations apply:
+ // In practice, teams usually precompile into the binary all types that they
+ // expect it to use in the context of Any. However, for URLs which use the
+ // scheme `http`, `https`, or no scheme, one can optionally set up a type
+ // server that maps type URLs to message definitions as follows:
//
// * If no scheme is provided, `https` is assumed.
- // * The last segment of the URL's path must represent the fully
- // qualified name of the type (as in `path/google.protobuf.Duration`).
- // The name should be in a canonical form (e.g., leading "." is
- // not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
@@ -139,6 +140,10 @@ message Any {
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
+ // Note: this functionality is not currently available in the official
+ // protobuf release, and it is not used for type URLs beginning with
+ // type.googleapis.com.
+ //
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
diff --git a/src/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/src/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
index e23e4a25d..31cd846de 100644
--- a/src/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ b/src/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -1,20 +1,13 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/timestamp.proto
-/*
-Package timestamp is a generated protocol buffer package.
-
-It is generated from these files:
- google/protobuf/timestamp.proto
-
-It has these top-level messages:
- Timestamp
-*/
package timestamp
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -25,7 +18,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
@@ -90,7 +83,9 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required, though only UTC (as indicated by "Z") is presently supported.
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
@@ -101,27 +96,51 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
-// to obtain a formatter capable of generating timestamps in this format.
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
- Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Timestamp) Reset() { *m = Timestamp{} }
-func (m *Timestamp) String() string { return proto.CompactTextString(m) }
-func (*Timestamp) ProtoMessage() {}
-func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+func (m *Timestamp) Reset() { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage() {}
+func (*Timestamp) Descriptor() ([]byte, []int) {
+ return fileDescriptor_292007bbfe81227e, []int{0}
+}
+
+func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+
+func (m *Timestamp) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Timestamp.Unmarshal(m, b)
+}
+func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
+}
+func (m *Timestamp) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Timestamp.Merge(m, src)
+}
+func (m *Timestamp) XXX_Size() int {
+ return xxx_messageInfo_Timestamp.Size(m)
+}
+func (m *Timestamp) XXX_DiscardUnknown() {
+ xxx_messageInfo_Timestamp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Timestamp proto.InternalMessageInfo
func (m *Timestamp) GetSeconds() int64 {
if m != nil {
@@ -141,9 +160,9 @@ func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
-func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) }
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
-var fileDescriptor0 = []byte{
+var fileDescriptor_292007bbfe81227e = []byte{
// 191 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
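
The validity range and RFC 3339 formatting described in the comments are what the `ptypes` conversion helpers enforce. A hedged sketch of the round trip between `time.Time` and the regenerated `Timestamp` (helper names as in golang/protobuf 1.x):

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> *timestamp.Timestamp; errors outside the documented
	// 0001-01-01T00:00:00Z .. 9999-12-31T23:59:59Z range.
	ts, err := ptypes.TimestampProto(time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))
	if err != nil {
		panic(err)
	}
	fmt.Println(ts.Seconds, ts.Nanos)

	// And back again.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Format(time.RFC3339Nano)) // 2017-01-15T01:30:15.01Z
}
```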
diff --git a/src/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/src/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
index b7cbd1750..eafb3fa03 100644
--- a/src/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+++ b/src/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -103,7 +103,9 @@ option objc_class_prefix = "GPB";
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
-// is required, though only UTC (as indicated by "Z") is presently supported.
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
@@ -114,8 +116,8 @@ option objc_class_prefix = "GPB";
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
-// to obtain a formatter capable of generating timestamps in this format.
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {
diff --git a/src/vendor/github.com/gorilla/context/.travis.yml b/src/vendor/github.com/gorilla/context/.travis.yml
index faca4dad3..6f440f1e4 100644
--- a/src/vendor/github.com/gorilla/context/.travis.yml
+++ b/src/vendor/github.com/gorilla/context/.travis.yml
@@ -7,13 +7,13 @@ matrix:
- go: 1.4
- go: 1.5
- go: 1.6
+ - go: 1.7
+ - go: tip
+ allow_failures:
- go: tip
-
-install:
- - go get golang.org/x/tools/cmd/vet
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d .)
- - go tool vet .
+ - go vet $(go list ./... | grep -v /vendor/)
- go test -v -race ./...
diff --git a/src/vendor/github.com/gorilla/context/README.md b/src/vendor/github.com/gorilla/context/README.md
index c60a31b05..08f86693b 100644
--- a/src/vendor/github.com/gorilla/context/README.md
+++ b/src/vendor/github.com/gorilla/context/README.md
@@ -4,4 +4,7 @@ context
gorilla/context is a general purpose registry for global request variables.
+> Note: gorilla/context, having been born well before `context.Context` existed, does not play well
+> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`.
+
Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
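
As a concrete illustration of the alternative this note recommends on Go 1.7+, per-request values can be carried on the request's own context instead of gorilla/context's request-keyed map (the key type and value below are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

type ctxKey string

const userKey ctxKey = "user"

func withUser(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// WithContext shallow-copies the request; the value travels with
		// the copy, which is exactly what a map keyed on the original
		// *http.Request cannot track.
		ctx := context.WithValue(r.Context(), userKey, "alice")
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

func main() {
	http.Handle("/", withUser(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "user:", r.Context().Value(userKey))
	})))
	http.ListenAndServe(":8080", nil)
}
```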
diff --git a/src/vendor/github.com/gorilla/context/doc.go b/src/vendor/github.com/gorilla/context/doc.go
index 73c740031..448d1bfca 100644
--- a/src/vendor/github.com/gorilla/context/doc.go
+++ b/src/vendor/github.com/gorilla/context/doc.go
@@ -5,6 +5,12 @@
/*
Package context stores values shared during a request lifetime.
+Note: gorilla/context, having been born well before `context.Context` existed,
+does not play well with the shallow copying of the request that
+[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext)
+(added to net/http Go 1.7 onwards) performs. You should either use *just*
+gorilla/context, or moving forward, the new `http.Request.Context()`.
+
For example, a router can set variables extracted from the URL and later
application handlers can access those values, or it can be used to store
sessions values to be saved at the end of a request. There are several
diff --git a/src/vendor/github.com/gorilla/mux/.travis.yml b/src/vendor/github.com/gorilla/mux/.travis.yml
index 3302233f3..ad0935dbd 100644
--- a/src/vendor/github.com/gorilla/mux/.travis.yml
+++ b/src/vendor/github.com/gorilla/mux/.travis.yml
@@ -3,11 +3,12 @@ sudo: false
matrix:
include:
- - go: 1.5
- - go: 1.6
- - go: 1.7
- - go: 1.8
- - go: 1.9
+ - go: 1.5.x
+ - go: 1.6.x
+ - go: 1.7.x
+ - go: 1.8.x
+ - go: 1.9.x
+ - go: 1.10.x
- go: tip
allow_failures:
- go: tip
diff --git a/src/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md b/src/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..232be82e4
--- /dev/null
+++ b/src/vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md
@@ -0,0 +1,11 @@
+**What version of Go are you running?** (Paste the output of `go version`)
+
+
+**What version of gorilla/mux are you at?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`)
+
+
+**Describe your problem** (and what you have tried so far)
+
+
+**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it)
+
diff --git a/src/vendor/github.com/gorilla/mux/README.md b/src/vendor/github.com/gorilla/mux/README.md
index 67a79e00a..e424397ac 100644
--- a/src/vendor/github.com/gorilla/mux/README.md
+++ b/src/vendor/github.com/gorilla/mux/README.md
@@ -1,5 +1,5 @@
-gorilla/mux
-===
+# gorilla/mux
+
[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
@@ -27,6 +27,9 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv
* [Static Files](#static-files)
* [Registered URLs](#registered-urls)
* [Walking Routes](#walking-routes)
+* [Graceful Shutdown](#graceful-shutdown)
+* [Middleware](#middleware)
+* [Testing Handlers](#testing-handlers)
* [Full Example](#full-example)
---
@@ -45,11 +48,11 @@ Let's start registering a couple of URL paths and handlers:
```go
func main() {
- r := mux.NewRouter()
- r.HandleFunc("/", HomeHandler)
- r.HandleFunc("/products", ProductsHandler)
- r.HandleFunc("/articles", ArticlesHandler)
- http.Handle("/", r)
+ r := mux.NewRouter()
+ r.HandleFunc("/", HomeHandler)
+ r.HandleFunc("/products", ProductsHandler)
+ r.HandleFunc("/articles", ArticlesHandler)
+ http.Handle("/", r)
}
```
@@ -68,9 +71,9 @@ The names are used to create a map of route variables which can be retrieved cal
```go
func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
- vars := mux.Vars(r)
- w.WriteHeader(http.StatusOK)
- fmt.Fprintf(w, "Category: %v\n", vars["category"])
+ vars := mux.Vars(r)
+ w.WriteHeader(http.StatusOK)
+ fmt.Fprintf(w, "Category: %v\n", vars["category"])
}
```
@@ -122,7 +125,7 @@ r.Queries("key", "value")
```go
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
- return r.ProtoMajor == 0
+ return r.ProtoMajor == 0
})
```
@@ -176,91 +179,34 @@ s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
```
-### Listing Routes
-Routes on a mux can be listed using the Router.Walk method—useful for generating documentation:
-
-```go
-package main
-
-import (
- "fmt"
- "net/http"
- "strings"
-
- "github.com/gorilla/mux"
-)
-
-func handler(w http.ResponseWriter, r *http.Request) {
- return
-}
-
-func main() {
- r := mux.NewRouter()
- r.HandleFunc("/", handler)
- r.HandleFunc("/products", handler).Methods("POST")
- r.HandleFunc("/articles", handler).Methods("GET")
- r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
- r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
- r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
- t, err := route.GetPathTemplate()
- if err != nil {
- return err
- }
- qt, err := route.GetQueriesTemplates()
- if err != nil {
- return err
- }
- // p will contain regular expression is compatible with regular expression in Perl, Python, and other languages.
- // for instance the regular expression for path '/articles/{id}' will be '^/articles/(?P[^/]+)$'
- p, err := route.GetPathRegexp()
- if err != nil {
- return err
- }
- // qr will contain a list of regular expressions with the same semantics as GetPathRegexp,
- // just applied to the Queries pairs instead, e.g., 'Queries("surname", "{surname}") will return
- // {"^surname=(?P.*)$}. Where each combined query pair will have an entry in the list.
- qr, err := route.GetQueriesRegexp()
- if err != nil {
- return err
- }
- m, err := route.GetMethods()
- if err != nil {
- return err
- }
- fmt.Println(strings.Join(m, ","), strings.Join(qt, ","), strings.Join(qr, ","), t, p)
- return nil
- })
- http.Handle("/", r)
-}
-```
### Static Files
Note that the path provided to `PathPrefix()` represents a "wildcard": calling
`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
-request that matches "/static/*". This makes it easy to serve static files with mux:
+request that matches "/static/\*". This makes it easy to serve static files with mux:
```go
func main() {
- var dir string
+ var dir string
- flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
- flag.Parse()
- r := mux.NewRouter()
+ flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+ flag.Parse()
+ r := mux.NewRouter()
- // This will serve files under http://localhost:8000/static/
- r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+ // This will serve files under http://localhost:8000/static/
+ r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
- srv := &http.Server{
- Handler: r,
- Addr: "127.0.0.1:8000",
- // Good practice: enforce timeouts for servers you create!
- WriteTimeout: 15 * time.Second,
- ReadTimeout: 15 * time.Second,
- }
+ srv := &http.Server{
+ Handler: r,
+ Addr: "127.0.0.1:8000",
+ // Good practice: enforce timeouts for servers you create!
+ WriteTimeout: 15 * time.Second,
+ ReadTimeout: 15 * time.Second,
+ }
- log.Fatal(srv.ListenAndServe())
+ log.Fatal(srv.ListenAndServe())
}
```
@@ -345,42 +291,330 @@ url, err := r.Get("article").URL("subdomain", "news",
The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
the following prints all of the registered routes:
+```go
+package main
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ return
+}
+
+func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+ r.HandleFunc("/products", handler).Methods("POST")
+ r.HandleFunc("/articles", handler).Methods("GET")
+ r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
+ r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
+ err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+ pathTemplate, err := route.GetPathTemplate()
+ if err == nil {
+ fmt.Println("ROUTE:", pathTemplate)
+ }
+ pathRegexp, err := route.GetPathRegexp()
+ if err == nil {
+ fmt.Println("Path regexp:", pathRegexp)
+ }
+ queriesTemplates, err := route.GetQueriesTemplates()
+ if err == nil {
+ fmt.Println("Queries templates:", strings.Join(queriesTemplates, ","))
+ }
+ queriesRegexps, err := route.GetQueriesRegexp()
+ if err == nil {
+ fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ","))
+ }
+ methods, err := route.GetMethods()
+ if err == nil {
+ fmt.Println("Methods:", strings.Join(methods, ","))
+ }
+ fmt.Println()
+ return nil
+ })
+
+ if err != nil {
+ fmt.Println(err)
+ }
+
+ http.Handle("/", r)
+}
+```
+
+### Graceful Shutdown
+
+Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
+
+```go
+package main
+
+import (
+ "context"
+ "flag"
+ "log"
+ "net/http"
+ "os"
+ "os/signal"
+ "time"
+
+ "github.com/gorilla/mux"
+)
+
+func main() {
+ var wait time.Duration
+    flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully waits for existing connections to finish - e.g. 15s or 1m")
+ flag.Parse()
+
+ r := mux.NewRouter()
+ // Add your routes as needed
+
+ srv := &http.Server{
+ Addr: "0.0.0.0:8080",
+ // Good practice to set timeouts to avoid Slowloris attacks.
+ WriteTimeout: time.Second * 15,
+ ReadTimeout: time.Second * 15,
+ IdleTimeout: time.Second * 60,
+ Handler: r, // Pass our instance of gorilla/mux in.
+ }
+
+ // Run our server in a goroutine so that it doesn't block.
+ go func() {
+ if err := srv.ListenAndServe(); err != nil {
+ log.Println(err)
+ }
+ }()
+
+ c := make(chan os.Signal, 1)
+    // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C).
+    // SIGKILL, SIGQUIT or SIGTERM (Ctrl+\) will not be caught.
+ signal.Notify(c, os.Interrupt)
+
+ // Block until we receive our signal.
+ <-c
+
+ // Create a deadline to wait for.
+ ctx, cancel := context.WithTimeout(context.Background(), wait)
+ defer cancel()
+ // Doesn't block if no connections, but will otherwise wait
+ // until the timeout deadline.
+ srv.Shutdown(ctx)
+ // Optionally, you could run srv.Shutdown in a goroutine and block on
+ // <-ctx.Done() if your application should wait for other services
+ // to finalize based on context cancellation.
+ log.Println("shutting down")
+ os.Exit(0)
+}
+```
+
+### Middleware
+
+Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters.
+Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
+
+Mux middlewares are defined using the de facto standard type:
+
+```go
+type MiddlewareFunc func(http.Handler) http.Handler
+```
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as a parameter to the MiddlewareFunc. This takes advantage of closures being able to access variables from the context where they are created, while retaining the signature enforced by the receivers.
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+```go
+func loggingMiddleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Do stuff here
+ log.Println(r.RequestURI)
+ // Call the next handler, which can be another middleware in the chain, or the final handler.
+ next.ServeHTTP(w, r)
+ })
+}
+```
+
+Middlewares can be added to a router using `Router.Use()`:
+
```go
r := mux.NewRouter()
r.HandleFunc("/", handler)
-r.HandleFunc("/products", handler).Methods("POST")
-r.HandleFunc("/articles", handler).Methods("GET")
-r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
-r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
-r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
- t, err := route.GetPathTemplate()
+r.Use(loggingMiddleware)
+```
+
+A more complex authentication middleware, which maps session token to users, could be written as:
+
+```go
+// Define our struct
+type authenticationMiddleware struct {
+ tokenUsers map[string]string
+}
+
+// Initialize it somewhere
+func (amw *authenticationMiddleware) Populate() {
+ amw.tokenUsers["00000000"] = "user0"
+ amw.tokenUsers["aaaaaaaa"] = "userA"
+ amw.tokenUsers["05f717e5"] = "randomUser"
+ amw.tokenUsers["deadbeef"] = "user0"
+}
+
+// Middleware function, which will be called for each request
+func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ token := r.Header.Get("X-Session-Token")
+
+ if user, found := amw.tokenUsers[token]; found {
+ // We found the token in our map
+ log.Printf("Authenticated user %s\n", user)
+ // Pass down the request to the next middleware (or final handler)
+ next.ServeHTTP(w, r)
+ } else {
+ // Write an error and stop the handler chain
+ http.Error(w, "Forbidden", http.StatusForbidden)
+ }
+ })
+}
+```
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+
+amw := authenticationMiddleware{}
+amw.Populate()
+
+r.Use(amw.Middleware)
+```
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
+
+### Testing Handlers
+
+Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
+
+First, our simple HTTP handler:
+
+```go
+// endpoints.go
+package main
+
+import (
+    "io"
+    "log"
+    "net/http"
+
+    "github.com/gorilla/mux"
+)
+
+func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
+    // A very simple health check. Set headers before writing the status code,
+    // otherwise they are silently dropped.
+    w.Header().Set("Content-Type", "application/json")
+    w.WriteHeader(http.StatusOK)
+
+ // In the future we could report back on the status of our DB, or our cache
+ // (e.g. Redis) by performing a simple PING, and include them in the response.
+ io.WriteString(w, `{"alive": true}`)
+}
+
+func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/health", HealthCheckHandler)
+
+ log.Fatal(http.ListenAndServe("localhost:8080", r))
+}
+```
+
+Our test code:
+
+```go
+// endpoints_test.go
+package main
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func TestHealthCheckHandler(t *testing.T) {
+ // Create a request to pass to our handler. We don't have any query parameters for now, so we'll
+ // pass 'nil' as the third parameter.
+ req, err := http.NewRequest("GET", "/health", nil)
if err != nil {
- return err
+ t.Fatal(err)
}
- qt, err := route.GetQueriesTemplates()
- if err != nil {
- return err
+
+ // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
+ rr := httptest.NewRecorder()
+ handler := http.HandlerFunc(HealthCheckHandler)
+
+ // Our handlers satisfy http.Handler, so we can call their ServeHTTP method
+ // directly and pass in our Request and ResponseRecorder.
+ handler.ServeHTTP(rr, req)
+
+ // Check the status code is what we expect.
+ if status := rr.Code; status != http.StatusOK {
+ t.Errorf("handler returned wrong status code: got %v want %v",
+ status, http.StatusOK)
}
- // p will contain a regular expression that is compatible with regular expressions in Perl, Python, and other languages.
- // For example, the regular expression for path '/articles/{id}' will be '^/articles/(?P[^/]+)$'.
- p, err := route.GetPathRegexp()
- if err != nil {
- return err
+
+ // Check the response body is what we expect.
+ expected := `{"alive": true}`
+ if rr.Body.String() != expected {
+ t.Errorf("handler returned unexpected body: got %v want %v",
+ rr.Body.String(), expected)
}
- // qr will contain a list of regular expressions with the same semantics as GetPathRegexp,
- // just applied to the Queries pairs instead, e.g., 'Queries("surname", "{surname}") will return
- // {"^surname=(?P.*)$}. Where each combined query pair will have an entry in the list.
- qr, err := route.GetQueriesRegexp()
- if err != nil {
- return err
+}
+```
+
+In the case that our routes have [variables](#examples), we can pass those in the request. We could write
+[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple
+possible route variables as needed.
+
+```go
+// endpoints.go
+func main() {
+ r := mux.NewRouter()
+ // A route with a route variable:
+ r.HandleFunc("/metrics/{type}", MetricsHandler)
+
+ log.Fatal(http.ListenAndServe("localhost:8080", r))
+}
+```
+
+Our test file, with a table-driven test of `routeVariables`:
+
+```go
+// endpoints_test.go
+func TestMetricsHandler(t *testing.T) {
+ tt := []struct{
+ routeVariable string
+ shouldPass bool
+ }{
+ {"goroutines", true},
+ {"heap", true},
+ {"counters", true},
+ {"queries", true},
+ {"adhadaeqm3k", false},
}
- m, err := route.GetMethods()
- if err != nil {
- return err
+
+ for _, tc := range tt {
+ path := fmt.Sprintf("/metrics/%s", tc.routeVariable)
+ req, err := http.NewRequest("GET", path, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ rr := httptest.NewRecorder()
+
+ // Need to create a router that we can pass the request through so that the vars will be added to the context
+ router := mux.NewRouter()
+ router.HandleFunc("/metrics/{type}", MetricsHandler)
+ router.ServeHTTP(rr, req)
+
+ // In this case, our MetricsHandler returns a non-200 response
+ // for a route variable it doesn't know about.
+ if rr.Code == http.StatusOK && !tc.shouldPass {
+ t.Errorf("handler should have failed on routeVariable %s: got %v want %v",
+ tc.routeVariable, rr.Code, http.StatusOK)
+ }
}
- fmt.Println(strings.Join(m, ","), strings.Join(qt, ","), strings.Join(qr, ","), t, p)
- return nil
-})
+}
```
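+
+This vendored version also adds a `SetURLVars` test helper (see `test_helpers.go`
+in this change) for calling a handler directly in tests: it injects route
+variables without routing the request. A minimal sketch:
+
+```go
+req, err := http.NewRequest("GET", "/metrics/goroutines", nil)
+if err != nil {
+    t.Fatal(err)
+}
+// Inject the route variable that the router would normally extract.
+req = mux.SetURLVars(req, map[string]string{"type": "goroutines"})
+
+rr := httptest.NewRecorder()
+http.HandlerFunc(MetricsHandler).ServeHTTP(rr, req)
+```
+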
## Full Example
@@ -391,22 +625,22 @@ Here's a complete, runnable example of a small `mux` based server:
package main
import (
- "net/http"
- "log"
- "github.com/gorilla/mux"
+ "net/http"
+ "log"
+ "github.com/gorilla/mux"
)
func YourHandler(w http.ResponseWriter, r *http.Request) {
- w.Write([]byte("Gorilla!\n"))
+ w.Write([]byte("Gorilla!\n"))
}
func main() {
- r := mux.NewRouter()
- // Routes consist of a path and a handler function.
- r.HandleFunc("/", YourHandler)
+ r := mux.NewRouter()
+ // Routes consist of a path and a handler function.
+ r.HandleFunc("/", YourHandler)
- // Bind to a port and pass our router in
- log.Fatal(http.ListenAndServe(":8000", r))
+ // Bind to a port and pass our router in
+ log.Fatal(http.ListenAndServe(":8000", r))
}
```
diff --git a/src/vendor/github.com/gorilla/mux/doc.go b/src/vendor/github.com/gorilla/mux/doc.go
index cce30b2f0..38957deea 100644
--- a/src/vendor/github.com/gorilla/mux/doc.go
+++ b/src/vendor/github.com/gorilla/mux/doc.go
@@ -238,5 +238,69 @@ as well:
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
+
+Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking.
+
+ type MiddlewareFunc func(http.Handler) http.Handler
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created).
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+ func simpleMw(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Do stuff here
+ log.Println(r.RequestURI)
+ // Call the next handler, which can be another middleware in the chain, or the final handler.
+ next.ServeHTTP(w, r)
+ })
+ }
+
+Middlewares can be added to a router using `Router.Use()`:
+
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+ r.Use(simpleMw)
+
+A more complex authentication middleware, which maps session token to users, could be written as:
+
+ // Define our struct
+ type authenticationMiddleware struct {
+ tokenUsers map[string]string
+ }
+
+ // Initialize it somewhere
+ func (amw *authenticationMiddleware) Populate() {
+ amw.tokenUsers["00000000"] = "user0"
+ amw.tokenUsers["aaaaaaaa"] = "userA"
+ amw.tokenUsers["05f717e5"] = "randomUser"
+ amw.tokenUsers["deadbeef"] = "user0"
+ }
+
+ // Middleware function, which will be called for each request
+ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ token := r.Header.Get("X-Session-Token")
+
+ if user, found := amw.tokenUsers[token]; found {
+ // We found the token in our map
+ log.Printf("Authenticated user %s\n", user)
+ next.ServeHTTP(w, r)
+ } else {
+ http.Error(w, "Forbidden", http.StatusForbidden)
+ }
+ })
+ }
+
+ r := mux.NewRouter()
+ r.HandleFunc("/", handler)
+
+ amw := authenticationMiddleware{}
+ amw.Populate()
+
+ r.Use(amw.Middleware)
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
+
*/
package mux
diff --git a/src/vendor/github.com/gorilla/mux/middleware.go b/src/vendor/github.com/gorilla/mux/middleware.go
new file mode 100644
index 000000000..ceb812cee
--- /dev/null
+++ b/src/vendor/github.com/gorilla/mux/middleware.go
@@ -0,0 +1,72 @@
+package mux
+
+import (
+ "net/http"
+ "strings"
+)
+
+// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler.
+// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed
+// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
+type MiddlewareFunc func(http.Handler) http.Handler
+
+// middleware interface is anything which implements a MiddlewareFunc named Middleware.
+type middleware interface {
+ Middleware(handler http.Handler) http.Handler
+}
+
+// Middleware allows MiddlewareFunc to implement the middleware interface.
+func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
+ return mw(handler)
+}
+
+// Use appends a MiddlewareFunc to the chain. Middlewares can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) Use(mwf ...MiddlewareFunc) {
+ for _, fn := range mwf {
+ r.middlewares = append(r.middlewares, fn)
+ }
+}
+
+// useInterface appends a middleware to the chain. Middlewares can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) useInterface(mw middleware) {
+ r.middlewares = append(r.middlewares, mw)
+}
+
+// CORSMethodMiddleware sets the Access-Control-Allow-Methods response header
+// on a request, by matching routes based only on paths. It also handles
+// OPTIONS requests, by setting Access-Control-Allow-Methods, and then
+// returning without calling the next http handler.
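+//
+// A usage sketch (fooHandler is a hypothetical handler), attaching the
+// middleware to the same router whose routes it inspects:
+//
+//     r := NewRouter()
+//     r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut)
+//     r.Use(CORSMethodMiddleware(r))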
+func CORSMethodMiddleware(r *Router) MiddlewareFunc {
+ return func(next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ var allMethods []string
+
+ err := r.Walk(func(route *Route, _ *Router, _ []*Route) error {
+ for _, m := range route.matchers {
+ if _, ok := m.(*routeRegexp); ok {
+ if m.Match(req, &RouteMatch{}) {
+ methods, err := route.GetMethods()
+ if err != nil {
+ return err
+ }
+
+ allMethods = append(allMethods, methods...)
+ }
+ break
+ }
+ }
+ return nil
+ })
+
+ if err == nil {
+ w.Header().Set("Access-Control-Allow-Methods", strings.Join(append(allMethods, "OPTIONS"), ","))
+
+ if req.Method == "OPTIONS" {
+ return
+ }
+ }
+
+ next.ServeHTTP(w, req)
+ })
+ }
+}
diff --git a/src/vendor/github.com/gorilla/mux/mux.go b/src/vendor/github.com/gorilla/mux/mux.go
index 49de78923..4bbafa51d 100644
--- a/src/vendor/github.com/gorilla/mux/mux.go
+++ b/src/vendor/github.com/gorilla/mux/mux.go
@@ -13,8 +13,11 @@ import (
)
var (
+ // ErrMethodMismatch is returned when the method in the request does not match
+ // the method defined against the route.
ErrMethodMismatch = errors.New("method is not allowed")
- ErrNotFound = errors.New("no matching route was found")
+ // ErrNotFound is returned when no route match is found.
+ ErrNotFound = errors.New("no matching route was found")
)
// NewRouter returns a new router instance.
@@ -63,6 +66,8 @@ type Router struct {
KeepContext bool
// see Router.UseEncodedPath(). This defines a flag for all routes.
useEncodedPath bool
+ // Slice of middlewares to be called after a match is found
+ middlewares []middleware
}
// Match attempts to match the given request against the router's registered routes.
@@ -79,6 +84,12 @@ type Router struct {
func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
for _, route := range r.routes {
if route.Match(req, match) {
+ // Build middleware chain if no error was found
+ if match.MatchErr == nil {
+ for i := len(r.middlewares) - 1; i >= 0; i-- {
+ match.Handler = r.middlewares[i].Middleware(match.Handler)
+ }
+ }
return true
}
}
@@ -87,9 +98,9 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
if r.MethodNotAllowedHandler != nil {
match.Handler = r.MethodNotAllowedHandler
return true
- } else {
- return false
}
+
+ return false
}
// Closest match for a router (includes sub-routers)
@@ -147,6 +158,7 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if !r.KeepContext {
defer contextClear(req)
}
+
handler.ServeHTTP(w, req)
}
@@ -164,13 +176,18 @@ func (r *Router) GetRoute(name string) *Route {
// StrictSlash defines the trailing slash behavior for new routes. The initial
// value is false.
//
-// When true, if the route path is "/path/", accessing "/path" will redirect
+// When true, if the route path is "/path/", accessing "/path" will perform a redirect
// to the former and vice versa. In other words, your application will always
// see the path as specified in the route.
//
// When false, if the route path is "/path", accessing "/path/" will not match
// this route and vice versa.
//
+// The redirect is an HTTP 301 (Moved Permanently). Note that when this is set for
+// routes with a non-idempotent method (e.g. POST, PUT), the subsequent redirected
+// request will be made as a GET by most clients. Use middleware or client settings
+// to modify this behaviour as needed.
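+//
+// For example, with StrictSlash(true) and a route registered at "/path/",
+// a GET request to "/path" receives a 301 redirect to "/path/".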
+//
// Special case: when a route sets a path prefix using the PathPrefix() method,
// strict slash is ignored for that route because the redirect behavior can't
// be determined from a prefix alone. However, any subrouters created from that
@@ -196,10 +213,6 @@ func (r *Router) SkipClean(value bool) *Router {
// UseEncodedPath tells the router to match the encoded original path
// to the routes.
// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
-// This behavior has the drawback of needing to match routes against
-// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix)
-// to r.URL.Path will not affect routing when this flag is on and thus may
-// induce unintended behavior.
//
// If not called, the router will match the unencoded path to the routes.
// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
diff --git a/src/vendor/github.com/gorilla/mux/regexp.go b/src/vendor/github.com/gorilla/mux/regexp.go
index e83213b7d..2b57e5627 100644
--- a/src/vendor/github.com/gorilla/mux/regexp.go
+++ b/src/vendor/github.com/gorilla/mux/regexp.go
@@ -14,6 +14,20 @@ import (
"strings"
)
+type routeRegexpOptions struct {
+ strictSlash bool
+ useEncodedPath bool
+}
+
+type regexpType int
+
+const (
+ regexpTypePath regexpType = 0
+ regexpTypeHost regexpType = 1
+ regexpTypePrefix regexpType = 2
+ regexpTypeQuery regexpType = 3
+)
+
// newRouteRegexp parses a route template and returns a routeRegexp,
// used to match a host, a path or a query string.
//
@@ -24,7 +38,7 @@ import (
// Previously we accepted only Python-like identifiers for variable
// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
// name and pattern can't be empty, and names can't contain a colon.
-func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) {
+func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
// Check if it is well-formed.
idxs, errBraces := braceIndices(tpl)
if errBraces != nil {
@@ -34,19 +48,18 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
template := tpl
// Now let's parse it.
defaultPattern := "[^/]+"
- if matchQuery {
+ if typ == regexpTypeQuery {
defaultPattern = ".*"
- } else if matchHost {
+ } else if typ == regexpTypeHost {
defaultPattern = "[^.]+"
- matchPrefix = false
}
// Only match strict slash if not matching
- if matchPrefix || matchHost || matchQuery {
- strictSlash = false
+ if typ != regexpTypePath {
+ options.strictSlash = false
}
// Set a flag for strictSlash.
endSlash := false
- if strictSlash && strings.HasSuffix(tpl, "/") {
+ if options.strictSlash && strings.HasSuffix(tpl, "/") {
tpl = tpl[:len(tpl)-1]
endSlash = true
}
@@ -88,16 +101,16 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
// Add the remaining.
raw := tpl[end:]
pattern.WriteString(regexp.QuoteMeta(raw))
- if strictSlash {
+ if options.strictSlash {
pattern.WriteString("[/]?")
}
- if matchQuery {
+ if typ == regexpTypeQuery {
// Add the default pattern if the query value is empty
if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
pattern.WriteString(defaultPattern)
}
}
- if !matchPrefix {
+ if typ != regexpTypePrefix {
pattern.WriteByte('$')
}
reverse.WriteString(raw)
@@ -118,15 +131,13 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
// Done!
return &routeRegexp{
- template: template,
- matchHost: matchHost,
- matchQuery: matchQuery,
- strictSlash: strictSlash,
- useEncodedPath: useEncodedPath,
- regexp: reg,
- reverse: reverse.String(),
- varsN: varsN,
- varsR: varsR,
+ template: template,
+ regexpType: typ,
+ options: options,
+ regexp: reg,
+ reverse: reverse.String(),
+ varsN: varsN,
+ varsR: varsR,
}, nil
}
@@ -135,15 +146,10 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
type routeRegexp struct {
// The unmodified template.
template string
- // True for host match, false for path or query string match.
- matchHost bool
- // True for query string match, false for path and host match.
- matchQuery bool
- // The strictSlash value defined on the route, but disabled if PathPrefix was used.
- strictSlash bool
- // Determines whether to use encoded req.URL.EnscapedPath() or unencoded
- // req.URL.Path for path matching
- useEncodedPath bool
+ // The type of match
+ regexpType regexpType
+ // Options for matching
+ options routeRegexpOptions
// Expanded regexp.
regexp *regexp.Regexp
// Reverse template.
@@ -156,12 +162,12 @@ type routeRegexp struct {
// Match matches the regexp against the URL host or path.
func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
- if !r.matchHost {
- if r.matchQuery {
+ if r.regexpType != regexpTypeHost {
+ if r.regexpType == regexpTypeQuery {
return r.matchQueryString(req)
}
path := req.URL.Path
- if r.useEncodedPath {
+ if r.options.useEncodedPath {
path = req.URL.EscapedPath()
}
return r.regexp.MatchString(path)
@@ -178,7 +184,7 @@ func (r *routeRegexp) url(values map[string]string) (string, error) {
if !ok {
return "", fmt.Errorf("mux: missing route variable %q", v)
}
- if r.matchQuery {
+ if r.regexpType == regexpTypeQuery {
value = url.QueryEscape(value)
}
urlValues[k] = value
@@ -203,7 +209,7 @@ func (r *routeRegexp) url(values map[string]string) (string, error) {
// For a URL with foo=bar&baz=ding, we return only the relevant key
// value pair for the routeRegexp.
func (r *routeRegexp) getURLQuery(req *http.Request) string {
- if !r.matchQuery {
+ if r.regexpType != regexpTypeQuery {
return ""
}
templateKey := strings.SplitN(r.template, "=", 2)[0]
@@ -280,7 +286,7 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route)
if len(matches) > 0 {
extractVars(path, matches, v.path.varsN, m.Vars)
// Check if we should redirect.
- if v.path.strictSlash {
+ if v.path.options.strictSlash {
p1 := strings.HasSuffix(path, "/")
p2 := strings.HasSuffix(v.path.template, "/")
if p1 != p2 {
diff --git a/src/vendor/github.com/gorilla/mux/route.go b/src/vendor/github.com/gorilla/mux/route.go
index 69aeae791..a591d7354 100644
--- a/src/vendor/github.com/gorilla/mux/route.go
+++ b/src/vendor/github.com/gorilla/mux/route.go
@@ -43,6 +43,8 @@ type Route struct {
buildVarsFunc BuildVarsFunc
}
+// SkipClean reports whether path cleaning is enabled for this route via
+// Router.SkipClean.
func (r *Route) SkipClean() bool {
return r.skipClean
}
@@ -75,6 +77,8 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
if match.MatchErr == ErrMethodMismatch {
// We found a route which matches request method, clear MatchErr
match.MatchErr = nil
+			// Then override the mismatched handler
+ match.Handler = r.handler
}
// Yay, we have a match. Let's collect some info about it.
@@ -169,12 +173,12 @@ func (r *Route) addMatcher(m matcher) *Route {
}
// addRegexpMatcher adds a host or path matcher and builder to a route.
-func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
+func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
if r.err != nil {
return r.err
}
r.regexp = r.getRegexpGroup()
- if !matchHost && !matchQuery {
+ if typ == regexpTypePath || typ == regexpTypePrefix {
if len(tpl) > 0 && tpl[0] != '/' {
return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
}
@@ -182,7 +186,10 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery
tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
}
}
- rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath)
+ rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
+ strictSlash: r.strictSlash,
+ useEncodedPath: r.useEncodedPath,
+ })
if err != nil {
return err
}
@@ -191,7 +198,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery
return err
}
}
- if matchHost {
+ if typ == regexpTypeHost {
if r.regexp.path != nil {
if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
return err
@@ -204,7 +211,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery
return err
}
}
- if matchQuery {
+ if typ == regexpTypeQuery {
r.regexp.queries = append(r.regexp.queries, rr)
} else {
r.regexp.path = rr
@@ -256,7 +263,8 @@ func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
// "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if both the request header matches both regular expressions.
-// It the value is an empty string, it will match any value if the key is set.
+// If the value is an empty string, it will match any value if the key is set.
+// Use the start and end of string anchors (^ and $) to match an exact value.
func (r *Route) HeadersRegexp(pairs ...string) *Route {
if r.err == nil {
var headers map[string]*regexp.Regexp
@@ -286,7 +294,7 @@ func (r *Route) HeadersRegexp(pairs ...string) *Route {
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Host(tpl string) *Route {
- r.err = r.addRegexpMatcher(tpl, true, false, false)
+ r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
return r
}
@@ -346,7 +354,7 @@ func (r *Route) Methods(methods ...string) *Route {
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Path(tpl string) *Route {
- r.err = r.addRegexpMatcher(tpl, false, false, false)
+ r.err = r.addRegexpMatcher(tpl, regexpTypePath)
return r
}
@@ -362,7 +370,7 @@ func (r *Route) Path(tpl string) *Route {
// Also note that the setting of Router.StrictSlash() has no effect on routes
// with a PathPrefix matcher.
func (r *Route) PathPrefix(tpl string) *Route {
- r.err = r.addRegexpMatcher(tpl, false, true, false)
+ r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
return r
}
@@ -393,7 +401,7 @@ func (r *Route) Queries(pairs ...string) *Route {
return nil
}
for i := 0; i < length; i += 2 {
- if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
+ if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
return r
}
}
@@ -616,7 +624,7 @@ func (r *Route) GetPathRegexp() (string, error) {
// route queries.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
-// An empty list will be returned if the route does not have queries.
+// An error will be returned if the route does not have queries.
func (r *Route) GetQueriesRegexp() ([]string, error) {
if r.err != nil {
return nil, r.err
@@ -635,7 +643,7 @@ func (r *Route) GetQueriesRegexp() ([]string, error) {
// query matching.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
-// An empty list will be returned if the route does not define queries.
+// An error will be returned if the route does not define queries.
func (r *Route) GetQueriesTemplates() ([]string, error) {
if r.err != nil {
return nil, r.err
@@ -653,7 +661,7 @@ func (r *Route) GetQueriesTemplates() ([]string, error) {
// GetMethods returns the methods the route matches against
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
-// An empty list will be returned if route does not have methods.
+// An error will be returned if route does not have methods.
func (r *Route) GetMethods() ([]string, error) {
if r.err != nil {
return nil, r.err
@@ -663,7 +671,7 @@ func (r *Route) GetMethods() ([]string, error) {
return []string(methods), nil
}
}
- return nil, nil
+ return nil, errors.New("mux: route doesn't have methods")
}
// GetHostTemplate returns the template used to build the
diff --git a/src/vendor/github.com/gorilla/mux/test_helpers.go b/src/vendor/github.com/gorilla/mux/test_helpers.go
new file mode 100644
index 000000000..32ecffde4
--- /dev/null
+++ b/src/vendor/github.com/gorilla/mux/test_helpers.go
@@ -0,0 +1,19 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import "net/http"
+
+// SetURLVars sets the URL variables for the given request, to be accessed via
+// mux.Vars for testing route behaviour. Arguments are not modified; a shallow
+// copy is returned.
+//
+// This API should only be used for testing purposes; it provides a way to
+// inject variables into the request context. Alternatively, URL variables
+// can be set by making a route that captures the required variables,
+// starting a server and sending the request to that server.
+func SetURLVars(r *http.Request, val map[string]string) *http.Request {
+ return setVars(r, val)
+}
diff --git a/src/vendor/github.com/graph-gophers/dataloader/.gitignore b/src/vendor/github.com/graph-gophers/dataloader/.gitignore
new file mode 100644
index 000000000..48b8bf907
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/.gitignore
@@ -0,0 +1 @@
+vendor/
diff --git a/src/vendor/github.com/graph-gophers/dataloader/.travis.yml b/src/vendor/github.com/graph-gophers/dataloader/.travis.yml
new file mode 100644
index 000000000..c29df16e3
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+
+go:
+ - 1.8
+ - 1.x
+
+install:
+ - go get -u github.com/golang/dep/...
+ - dep ensure
+
+script:
+ - go test -v -race -coverprofile=coverage.txt -covermode=atomic
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/src/vendor/github.com/graph-gophers/dataloader/Gopkg.lock b/src/vendor/github.com/graph-gophers/dataloader/Gopkg.lock
new file mode 100644
index 000000000..2930c6946
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/Gopkg.lock
@@ -0,0 +1,33 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ branch = "master"
+ name = "github.com/hashicorp/golang-lru"
+ packages = [".","simplelru"]
+ revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6"
+
+[[projects]]
+ name = "github.com/opentracing/opentracing-go"
+ packages = [".","log"]
+ revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
+ version = "v1.0.2"
+
+[[projects]]
+ name = "github.com/patrickmn/go-cache"
+ packages = ["."]
+ revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
+ version = "v2.1.0"
+
+[[projects]]
+ branch = "master"
+ name = "golang.org/x/net"
+ packages = ["context"]
+ revision = "a8b9294777976932365dabb6640cf1468d95c70f"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "a0b8606d9f2ed9df7e69cae570c65c7d7b090bb7a08f58d3535b584693d44da9"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/src/vendor/github.com/graph-gophers/dataloader/Gopkg.toml b/src/vendor/github.com/graph-gophers/dataloader/Gopkg.toml
new file mode 100644
index 000000000..167458d03
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/Gopkg.toml
@@ -0,0 +1,34 @@
+
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+
+[[constraint]]
+ branch = "master"
+ name = "github.com/hashicorp/golang-lru"
+
+[[constraint]]
+ name = "github.com/opentracing/opentracing-go"
+ version = "1.0.2"
+
+[[constraint]]
+ name = "github.com/patrickmn/go-cache"
+ version = "2.1.0"
diff --git a/src/vendor/github.com/graph-gophers/dataloader/LICENSE b/src/vendor/github.com/graph-gophers/dataloader/LICENSE
new file mode 100644
index 000000000..2a74dcf87
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 Nick Randall
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/src/vendor/github.com/graph-gophers/dataloader/MIGRATE.md b/src/vendor/github.com/graph-gophers/dataloader/MIGRATE.md
new file mode 100644
index 000000000..aad37e84c
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/MIGRATE.md
@@ -0,0 +1,88 @@
+## Upgrade from v1 to v2
+The only difference between v1 and v2 is that we added use of [context](https://golang.org/pkg/context).
+
+```diff
+- loader.Load(key string) Thunk
++ loader.Load(ctx context.Context, key string) Thunk
+- loader.LoadMany(keys []string) ThunkMany
++ loader.LoadMany(ctx context.Context, keys []string) ThunkMany
+```
+
+```diff
+- type BatchFunc func([]string) []*Result
++ type BatchFunc func(context.Context, []string) []*Result
+```
+
+## Upgrade from v2 to v3
+```diff
+// dataloader.Interface has added context.Context to its methods
+- loader.Prime(key string, value interface{}) Interface
++ loader.Prime(ctx context.Context, key string, value interface{}) Interface
+- loader.Clear(key string) Interface
++ loader.Clear(ctx context.Context, key string) Interface
+```
+
+```diff
+// the Cache interface has added context.Context to its methods
+type Cache interface {
+- Get(string) (Thunk, bool)
++ Get(context.Context, string) (Thunk, bool)
+- Set(string, Thunk)
++ Set(context.Context, string, Thunk)
+- Delete(string) bool
++ Delete(context.Context, string) bool
+ Clear()
+}
+```
+
+## Upgrade from v3 to v4
+```diff
+// dataloader.Interface now allows interface{} as a key rather than string
+- loader.Load(context.Context, key string) Thunk
++ loader.Load(ctx context.Context, key interface{}) Thunk
+- loader.LoadMany(context.Context, key []string) ThunkMany
++ loader.LoadMany(ctx context.Context, keys []interface{}) ThunkMany
+- loader.Prime(context.Context, key string, value interface{}) Interface
++ loader.Prime(ctx context.Context, key interface{}, value interface{}) Interface
+- loader.Clear(context.Context, key string) Interface
++ loader.Clear(ctx context.Context, key interface{}) Interface
+```
+
+```diff
+// cache interface now allows interface{} as key instead of string
+type Cache interface {
+- Get(context.Context, string) (Thunk, bool)
++ Get(context.Context, interface{}) (Thunk, bool)
+- Set(context.Context, string, Thunk)
++ Set(context.Context, interface{}, Thunk)
+- Delete(context.Context, string) bool
++ Delete(context.Context, interface{}) bool
+ Clear()
+}
+```
+
+## Upgrade from v4 to v5
+```diff
+// dataloader.Interface now takes the Key type rather than interface{}
+- loader.Load(context.Context, key interface{}) Thunk
++ loader.Load(ctx context.Context, key Key) Thunk
+- loader.LoadMany(context.Context, key []interface{}) ThunkMany
++ loader.LoadMany(ctx context.Context, keys Keys) ThunkMany
+- loader.Prime(context.Context, key interface{}, value interface{}) Interface
++ loader.Prime(ctx context.Context, key Key, value interface{}) Interface
+- loader.Clear(context.Context, key interface{}) Interface
++ loader.Clear(ctx context.Context, key Key) Interface
+```
+
+```diff
+// the Cache interface now takes Key instead of interface{}
+type Cache interface {
+- Get(context.Context, interface{}) (Thunk, bool)
++ Get(context.Context, Key) (Thunk, bool)
+- Set(context.Context, interface{}, Thunk)
++ Set(context.Context, Key, Thunk)
+- Delete(context.Context, interface{}) bool
++ Delete(context.Context, Key) bool
+ Clear()
+}
+```
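+
+At a call site, the v4 to v5 change typically looks like the following sketch,
+using the package's `StringKey` convenience wrapper (the key value here is
+illustrative):
+
+```diff
+- thunk := loader.Load(ctx, "user-1")
++ thunk := loader.Load(ctx, dataloader.StringKey("user-1"))
+```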
diff --git a/src/vendor/github.com/graph-gophers/dataloader/README.md b/src/vendor/github.com/graph-gophers/dataloader/README.md
new file mode 100644
index 000000000..a196d1f5b
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/README.md
@@ -0,0 +1,48 @@
+# DataLoader
+[![GoDoc](https://godoc.org/gopkg.in/graph-gophers/dataloader.v3?status.svg)](https://godoc.org/github.com/graph-gophers/dataloader)
+[![Build Status](https://travis-ci.org/graph-gophers/dataloader.svg?branch=master)](https://travis-ci.org/graph-gophers/dataloader)
+
+This is an implementation of [Facebook's DataLoader](https://github.com/facebook/dataloader) in Golang.
+
+## Install
+`go get -u github.com/graph-gophers/dataloader`
+
+## Usage
+```go
+// setup batch function
+batchFn := func(ctx context.Context, keys dataloader.Keys) []*dataloader.Result {
+ var results []*dataloader.Result
+    // do some async work to get data for the specified keys
+    // append the resolved values to this list
+ return results
+}
+
+// create Loader with an in-memory cache
+loader := dataloader.NewBatchedLoader(batchFn)
+
+/**
+ * Use loader
+ *
+ * A thunk is a function returned from a function that is a
+ * closure over a value (in this case an interface value and error).
+ * When called, it will block until the value is resolved.
+ */
+thunk := loader.Load(context.TODO(), dataloader.StringKey("key1")) // StringKey is a convenience type that wraps a string to implement the `Key` interface
+result, err := thunk()
+if err != nil {
+ // handle data error
+}
+
+log.Printf("value: %#v", result)
+```
+
+### Don't need/want to use context?
+You're welcome to install the v1 version of this library.
+
+## Cache
+This implementation contains a very basic cache that is intended only to be used for short-lived DataLoaders (i.e. DataLoaders that only exist for the life of an HTTP request). You may use your own implementation if you want.
+
+> It also has a `NoCache` type that implements the cache interface with no-op methods, for when you do not wish to cache anything.
+
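+To batch without memoization, a minimal sketch wiring the provided `NoCache`
+type in through the `WithCache` option:
+
+```go
+// Every Load now reaches the batch function; nothing is memoized between calls.
+loader := dataloader.NewBatchedLoader(batchFn, dataloader.WithCache(&dataloader.NoCache{}))
+```
+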
+## Examples
+There are a few basic examples in the example folder.
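+
+A batch function must return exactly one `*Result` per key, in the same order as
+the input keys. A minimal sketch of that contract (`fetchUser` is a hypothetical
+data-access helper, not part of this package):
+
+```go
+batchFn := func(ctx context.Context, keys dataloader.Keys) []*dataloader.Result {
+    results := make([]*dataloader.Result, 0, len(keys))
+    for _, key := range keys {
+        // A real implementation would issue one batched query for all keys;
+        // fetchUser stands in for that data access.
+        user, err := fetchUser(ctx, key.String())
+        results = append(results, &dataloader.Result{Data: user, Error: err})
+    }
+    return results
+}
+```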
diff --git a/src/vendor/github.com/graph-gophers/dataloader/cache.go b/src/vendor/github.com/graph-gophers/dataloader/cache.go
new file mode 100644
index 000000000..4d82e4c86
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/cache.go
@@ -0,0 +1,28 @@
+package dataloader
+
+import "context"
+
+// The Cache interface. If a custom cache is provided, it must implement this interface.
+type Cache interface {
+ Get(context.Context, Key) (Thunk, bool)
+ Set(context.Context, Key, Thunk)
+ Delete(context.Context, Key) bool
+ Clear()
+}
+
+// NoCache implements the Cache interface where all methods are no-ops.
+// This is useful for when you don't want to cache items but still
+// want to use a data loader.
+type NoCache struct{}
+
+// Get is a NOOP
+func (c *NoCache) Get(context.Context, Key) (Thunk, bool) { return nil, false }
+
+// Set is a NOOP
+func (c *NoCache) Set(context.Context, Key, Thunk) { return }
+
+// Delete is a NOOP
+func (c *NoCache) Delete(context.Context, Key) bool { return false }
+
+// Clear is a NOOP
+func (c *NoCache) Clear() { return }
diff --git a/src/vendor/github.com/graph-gophers/dataloader/codecov.yml b/src/vendor/github.com/graph-gophers/dataloader/codecov.yml
new file mode 100644
index 000000000..0b85876fe
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/codecov.yml
@@ -0,0 +1,26 @@
+codecov:
+ notify:
+ require_ci_to_pass: true
+comment:
+ behavior: default
+ layout: header, diff
+ require_changes: false
+coverage:
+ precision: 2
+ range:
+ - 70.0
+ - 100.0
+ round: down
+ status:
+ changes: false
+ patch: true
+ project: true
+parsers:
+ gcov:
+ branch_detection:
+ conditional: true
+ loop: true
+ macro: false
+ method: false
+ javascript:
+ enable_partials: false
diff --git a/src/vendor/github.com/graph-gophers/dataloader/dataloader.go b/src/vendor/github.com/graph-gophers/dataloader/dataloader.go
new file mode 100644
index 000000000..c31788b7e
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/dataloader.go
@@ -0,0 +1,492 @@
+// Package dataloader is an implementation of Facebook's DataLoader in Go.
+// See https://github.com/facebook/dataloader for more information.
+package dataloader
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "runtime"
+ "sync"
+ "time"
+)
+
+// Interface is a `DataLoader` interface which defines a public API for loading data from a particular
+// data back-end with unique keys such as the `id` column of a SQL table or
+// document name in a MongoDB database, given a batch loading function.
+//
+// Each `DataLoader` instance should contain a unique memoized cache. Use caution when
+// used in long-lived applications or those which serve many users with
+// different access permissions and consider creating a new instance per
+// web request.
+type Interface interface {
+ Load(context.Context, Key) Thunk
+ LoadMany(context.Context, Keys) ThunkMany
+ Clear(context.Context, Key) Interface
+ ClearAll() Interface
+ Prime(ctx context.Context, key Key, value interface{}) Interface
+}
+
+// BatchFunc is a function which, when given a slice of keys (Keys), returns a slice of `results`.
+// It's important that the length of the input keys matches the length of the output results.
+//
+// The keys passed to this function are guaranteed to be unique
+type BatchFunc func(context.Context, Keys) []*Result
+
+// Result is the data structure that a BatchFunc returns.
+// It contains the resolved data, and any errors that may have occurred while fetching the data.
+type Result struct {
+ Data interface{}
+ Error error
+}
+
+// ResultMany is used by the LoadMany method.
+// It contains a list of resolved data and a list of errors.
+// The lengths of the data list and error list will match, and elements at each index correspond to each other.
+type ResultMany struct {
+ Data []interface{}
+ Error []error
+}
+
+// Loader implements the dataloader.Interface.
+type Loader struct {
+ // the batch function to be used by this loader
+ batchFn BatchFunc
+
+ // the maximum batch size. Set to 0 if you want it to be unbounded.
+ batchCap int
+
+    // the internal cache. This package contains a basic cache implementation but any custom cache
+ // implementation could be used as long as it implements the `Cache` interface.
+ cacheLock sync.Mutex
+ cache Cache
+ // should we clear the cache on each batch?
+ // this would allow batching but no long term caching
+ clearCacheOnBatch bool
+
+ // count of queued up items
+ count int
+
+ // the maximum input queue size. Set to 0 if you want it to be unbounded.
+ inputCap int
+
+ // the amount of time to wait before triggering a batch
+ wait time.Duration
+
+ // lock to protect the batching operations
+ batchLock sync.Mutex
+
+ // current batcher
+ curBatcher *batcher
+
+ // used to close the sleeper of the current batcher
+ endSleeper chan bool
+
+ // used by tests to prevent logs
+ silent bool
+
+ // can be set to trace calls to dataloader
+ tracer Tracer
+}
+
+// Thunk is a function that will block until the value (*Result) it contains is resolved.
+// After the value it contains is resolved, this function will return the result.
+// This function can be called many times, much like a Promise in other languages.
+// The value will only need to be resolved once so subsequent calls will return immediately.
+type Thunk func() (interface{}, error)
+
+// ThunkMany is much like the Thunk func type but it contains a list of results.
+type ThunkMany func() ([]interface{}, []error)
+
+// type used on the input channel
+type batchRequest struct {
+ key Key
+ channel chan *Result
+}
+
+// Option allows for configuration of Loader fields.
+type Option func(*Loader)
+
+// WithCache sets the BatchedLoader cache. Defaults to InMemoryCache if a Cache is not set.
+func WithCache(c Cache) Option {
+ return func(l *Loader) {
+ l.cache = c
+ }
+}
+
+// WithBatchCapacity sets the batch capacity. Default is 0 (unbounded).
+func WithBatchCapacity(c int) Option {
+ return func(l *Loader) {
+ l.batchCap = c
+ }
+}
+
+// WithInputCapacity sets the input capacity. Default is 1000.
+func WithInputCapacity(c int) Option {
+ return func(l *Loader) {
+ l.inputCap = c
+ }
+}
+
+// WithWait sets the amount of time to wait before triggering a batch.
+// Default duration is 16 milliseconds.
+func WithWait(d time.Duration) Option {
+ return func(l *Loader) {
+ l.wait = d
+ }
+}
+
+// WithClearCacheOnBatch allows batching of items but no long term caching.
+// It accomplishes this by clearing the cache after each batch operation.
+func WithClearCacheOnBatch() Option {
+ return func(l *Loader) {
+ l.cacheLock.Lock()
+ l.clearCacheOnBatch = true
+ l.cacheLock.Unlock()
+ }
+}
+
+// withSilentLogger turns off log messages. It's used by the tests.
+func withSilentLogger() Option {
+ return func(l *Loader) {
+ l.silent = true
+ }
+}
+
+// WithTracer allows tracing of calls to Load and LoadMany
+func WithTracer(tracer Tracer) Option {
+ return func(l *Loader) {
+ l.tracer = tracer
+ }
+}
+
+// WithOpenTracingTracer allows tracing of calls to Load and LoadMany
+func WithOpenTracingTracer() Option {
+ return WithTracer(&OpenTracingTracer{})
+}
+
+// NewBatchedLoader constructs a new Loader with given options.
+func NewBatchedLoader(batchFn BatchFunc, opts ...Option) *Loader {
+ loader := &Loader{
+ batchFn: batchFn,
+ inputCap: 1000,
+ wait: 16 * time.Millisecond,
+ }
+
+ // Apply options
+ for _, apply := range opts {
+ apply(loader)
+ }
+
+ // Set defaults
+ if loader.cache == nil {
+ loader.cache = NewCache()
+ }
+
+ if loader.tracer == nil {
+ loader.tracer = &NoopTracer{}
+ }
+
+ return loader
+}
+
+// Load loads/resolves the given key, returning a Thunk that will block until the value and error are available
+func (l *Loader) Load(originalContext context.Context, key Key) Thunk {
+ ctx, finish := l.tracer.TraceLoad(originalContext, key)
+
+ c := make(chan *Result, 1)
+ var result struct {
+ mu sync.RWMutex
+ value *Result
+ }
+
+ // lock to prevent duplicate keys coming in before item has been added to cache.
+ l.cacheLock.Lock()
+ if v, ok := l.cache.Get(ctx, key); ok {
+ defer finish(v)
+ defer l.cacheLock.Unlock()
+ return v
+ }
+
+ thunk := func() (interface{}, error) {
+ result.mu.RLock()
+ resultNotSet := result.value == nil
+ result.mu.RUnlock()
+
+ if resultNotSet {
+ result.mu.Lock()
+ if v, ok := <-c; ok {
+ result.value = v
+ }
+ result.mu.Unlock()
+ }
+ result.mu.RLock()
+ defer result.mu.RUnlock()
+ return result.value.Data, result.value.Error
+ }
+ defer finish(thunk)
+
+ l.cache.Set(ctx, key, thunk)
+ l.cacheLock.Unlock()
+
+    // this is sent to the batch fn. It contains the key and the channel on which
+    // to return the result
+ req := &batchRequest{key, c}
+
+ l.batchLock.Lock()
+ // start the batch window if it hasn't already started.
+ if l.curBatcher == nil {
+ l.curBatcher = l.newBatcher(l.silent, l.tracer)
+ // start the current batcher batch function
+ go l.curBatcher.batch(originalContext)
+ // start a sleeper for the current batcher
+ l.endSleeper = make(chan bool)
+ go l.sleeper(l.curBatcher, l.endSleeper)
+ }
+
+ l.curBatcher.input <- req
+
+ // if we need to keep track of the count (max batch), then do so.
+ if l.batchCap > 0 {
+ l.count++
+ // if we hit our limit, force the batch to start
+ if l.count == l.batchCap {
+ // end the batcher synchronously here because another call to Load
+ // may concurrently happen and needs to go to a new batcher.
+ l.curBatcher.end()
+ // end the sleeper for the current batcher.
+ // this is to stop the goroutine without waiting for the
+ // sleeper timeout.
+ close(l.endSleeper)
+ l.reset()
+ }
+ }
+ l.batchLock.Unlock()
+
+ return thunk
+}
+
+// LoadMany loads multiple keys, returning a thunk (type: ThunkMany) that will resolve the keys passed in.
+func (l *Loader) LoadMany(originalContext context.Context, keys Keys) ThunkMany {
+ ctx, finish := l.tracer.TraceLoadMany(originalContext, keys)
+
+ var (
+ length = len(keys)
+ data = make([]interface{}, length)
+ errors = make([]error, length)
+ c = make(chan *ResultMany, 1)
+ wg sync.WaitGroup
+ )
+
+ resolve := func(ctx context.Context, i int) {
+ defer wg.Done()
+ thunk := l.Load(ctx, keys[i])
+ result, err := thunk()
+ data[i] = result
+ errors[i] = err
+ }
+
+ wg.Add(length)
+ for i := range keys {
+ go resolve(ctx, i)
+ }
+
+ go func() {
+ wg.Wait()
+
+ // errs is nil unless there exists a non-nil error.
+ // This prevents dataloader from returning a slice of all-nil errors.
+ var errs []error
+ for _, e := range errors {
+ if e != nil {
+ errs = errors
+ break
+ }
+ }
+
+ c <- &ResultMany{Data: data, Error: errs}
+ close(c)
+ }()
+
+ var result struct {
+ mu sync.RWMutex
+ value *ResultMany
+ }
+
+ thunkMany := func() ([]interface{}, []error) {
+ result.mu.RLock()
+ resultNotSet := result.value == nil
+ result.mu.RUnlock()
+
+ if resultNotSet {
+ result.mu.Lock()
+ if v, ok := <-c; ok {
+ result.value = v
+ }
+ result.mu.Unlock()
+ }
+ result.mu.RLock()
+ defer result.mu.RUnlock()
+ return result.value.Data, result.value.Error
+ }
+
+ defer finish(thunkMany)
+ return thunkMany
+}
+
+// Clear clears the value at `key` from the cache, if it exists. Returns self for method chaining.
+func (l *Loader) Clear(ctx context.Context, key Key) Interface {
+ l.cacheLock.Lock()
+ l.cache.Delete(ctx, key)
+ l.cacheLock.Unlock()
+ return l
+}
+
+// ClearAll clears the entire cache. To be used when some event results in unknown invalidations.
+// Returns self for method chaining.
+func (l *Loader) ClearAll() Interface {
+ l.cacheLock.Lock()
+ l.cache.Clear()
+ l.cacheLock.Unlock()
+ return l
+}
+
+// Prime adds the provided key and value to the cache. If the key already exists, no change is made.
+// Returns self for method chaining
+func (l *Loader) Prime(ctx context.Context, key Key, value interface{}) Interface {
+ if _, ok := l.cache.Get(ctx, key); !ok {
+ thunk := func() (interface{}, error) {
+ return value, nil
+ }
+ l.cache.Set(ctx, key, thunk)
+ }
+ return l
+}
+
+func (l *Loader) reset() {
+ l.count = 0
+ l.curBatcher = nil
+
+ if l.clearCacheOnBatch {
+ l.cache.Clear()
+ }
+}
+
+type batcher struct {
+ input chan *batchRequest
+ batchFn BatchFunc
+ finished bool
+ silent bool
+ tracer Tracer
+}
+
+// newBatcher returns a batcher for the current requests
+// all the batcher methods must be protected by a global batchLock
+func (l *Loader) newBatcher(silent bool, tracer Tracer) *batcher {
+ return &batcher{
+ input: make(chan *batchRequest, l.inputCap),
+ batchFn: l.batchFn,
+ silent: silent,
+ tracer: tracer,
+ }
+}
+
+// stop receiving input and process batch function
+func (b *batcher) end() {
+ if !b.finished {
+ close(b.input)
+ b.finished = true
+ }
+}
+
+// execute the batch of all items in queue
+func (b *batcher) batch(originalContext context.Context) {
+ var (
+ keys = make(Keys, 0)
+ reqs = make([]*batchRequest, 0)
+ items = make([]*Result, 0)
+ panicErr interface{}
+ )
+
+ for item := range b.input {
+ keys = append(keys, item.key)
+ reqs = append(reqs, item)
+ }
+
+ ctx, finish := b.tracer.TraceBatch(originalContext, keys)
+ defer finish(items)
+
+ func() {
+ defer func() {
+ if r := recover(); r != nil {
+ panicErr = r
+ if b.silent {
+ return
+ }
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ log.Printf("Dataloader: Panic received in batch function:: %v\n%s", panicErr, buf)
+ }
+ }()
+ items = b.batchFn(ctx, keys)
+ }()
+
+ if panicErr != nil {
+ for _, req := range reqs {
+ req.channel <- &Result{Error: fmt.Errorf("Panic received in batch function: %v", panicErr)}
+ close(req.channel)
+ }
+ return
+ }
+
+ if len(items) != len(keys) {
+ err := &Result{Error: fmt.Errorf(`
+ The batch function supplied did not return an array of responses
+ the same length as the array of keys.
+
+ Keys:
+ %v
+
+ Values:
+ %v
+ `, keys, items)}
+
+ for _, req := range reqs {
+ req.channel <- err
+ close(req.channel)
+ }
+
+ return
+ }
+
+ for i, req := range reqs {
+ req.channel <- items[i]
+ close(req.channel)
+ }
+}
+
+// wait the appropriate amount of time for the provided batcher
+func (l *Loader) sleeper(b *batcher, close chan bool) {
+ select {
+ // used by batch to close early. usually triggered by max batch size
+ case <-close:
+ return
+    // otherwise fall through once the wait window has elapsed
+ case <-time.After(l.wait):
+ }
+
+ // reset
+ // this is protected by the batchLock to avoid closing the batcher input
+ // channel while Load is inserting a request
+ l.batchLock.Lock()
+ b.end()
+
+ // We can end here also if the batcher has already been closed and a
+ // new one has been created. So reset the loader state only if the batcher
+ // is the current one
+ if l.curBatcher == b {
+ l.reset()
+ }
+ l.batchLock.Unlock()
+}
diff --git a/src/vendor/github.com/graph-gophers/dataloader/inMemoryCache.go b/src/vendor/github.com/graph-gophers/dataloader/inMemoryCache.go
new file mode 100644
index 000000000..f540554cd
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/inMemoryCache.go
@@ -0,0 +1,65 @@
+// +build !go1.9
+
+package dataloader
+
+import (
+ "context"
+ "sync"
+)
+
+// InMemoryCache is an in memory implementation of the Cache interface.
+// This simple implementation is well suited for
+// a "per-request" dataloader (i.e. one that only lives
+// for the life of an http request) but it is not well suited
+// for long-lived cached items.
+type InMemoryCache struct {
+ items map[string]Thunk
+ mu sync.RWMutex
+}
+
+// NewCache constructs a new InMemoryCache
+func NewCache() *InMemoryCache {
+ items := make(map[string]Thunk)
+ return &InMemoryCache{
+ items: items,
+ }
+}
+
+// Set sets the `value` at `key` in the cache
+func (c *InMemoryCache) Set(_ context.Context, key Key, value Thunk) {
+ c.mu.Lock()
+ c.items[key.String()] = value
+ c.mu.Unlock()
+}
+
+// Get gets the value at `key` if it exists, returning the value (or nil) and a bool
+// indicating whether the value was found
+func (c *InMemoryCache) Get(_ context.Context, key Key) (Thunk, bool) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ item, found := c.items[key.String()]
+ if !found {
+ return nil, false
+ }
+
+ return item, true
+}
+
+// Delete deletes item at `key` from cache
+func (c *InMemoryCache) Delete(ctx context.Context, key Key) bool {
+ if _, found := c.Get(ctx, key); found {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ delete(c.items, key.String())
+ return true
+ }
+ return false
+}
+
+// Clear clears the entire cache
+func (c *InMemoryCache) Clear() {
+ c.mu.Lock()
+ c.items = map[string]Thunk{}
+ c.mu.Unlock()
+}
diff --git a/src/vendor/github.com/graph-gophers/dataloader/inMemoryCache_go19.go b/src/vendor/github.com/graph-gophers/dataloader/inMemoryCache_go19.go
new file mode 100644
index 000000000..07ce6ce44
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/inMemoryCache_go19.go
@@ -0,0 +1,57 @@
+// +build go1.9
+
+package dataloader
+
+import (
+ "context"
+ "sync"
+)
+
+// InMemoryCache is an in memory implementation of the Cache interface.
+// This simple implementation is well suited for
+// a "per-request" dataloader (i.e. one that only lives
+// for the life of an http request) but it is not well suited
+// for long-lived cached items.
+type InMemoryCache struct {
+ items *sync.Map
+}
+
+// NewCache constructs a new InMemoryCache
+func NewCache() *InMemoryCache {
+ return &InMemoryCache{
+ items: &sync.Map{},
+ }
+}
+
+// Set sets the `value` at `key` in the cache
+func (c *InMemoryCache) Set(_ context.Context, key Key, value Thunk) {
+ c.items.Store(key.String(), value)
+}
+
+// Get gets the value at `key` if it exists, returning the value (or nil) and a bool
+// indicating whether the value was found
+func (c *InMemoryCache) Get(_ context.Context, key Key) (Thunk, bool) {
+ item, found := c.items.Load(key.String())
+ if !found {
+ return nil, false
+ }
+
+ return item.(Thunk), true
+}
+
+// Delete deletes item at `key` from cache
+func (c *InMemoryCache) Delete(_ context.Context, key Key) bool {
+ if _, found := c.items.Load(key.String()); found {
+ c.items.Delete(key.String())
+ return true
+ }
+ return false
+}
+
+// Clear clears the entire cache
+func (c *InMemoryCache) Clear() {
+ c.items.Range(func(key, _ interface{}) bool {
+ c.items.Delete(key)
+ return true
+ })
+}
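
Both cache variants above satisfy the package's Cache interface (Get, Set, Delete, Clear). As an illustration of swapping in a different policy, a no-op cache that disables memoization entirely might look like the sketch below; noopCache is hypothetical, and the upstream library wires a replacement in through a loader option (WithCache in upstream releases; treat the exact option name as an assumption here):

    package main

    import (
        "context"

        "github.com/graph-gophers/dataloader"
    )

    // noopCache never stores anything, so every Load reaches the batch function.
    type noopCache struct{}

    func (noopCache) Get(_ context.Context, _ dataloader.Key) (dataloader.Thunk, bool) {
        return nil, false
    }
    func (noopCache) Set(_ context.Context, _ dataloader.Key, _ dataloader.Thunk) {}
    func (noopCache) Delete(_ context.Context, _ dataloader.Key) bool             { return false }
    func (noopCache) Clear()                                                      {}

    // compile-time check that noopCache satisfies the Cache interface
    var _ dataloader.Cache = noopCache{}
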
diff --git a/src/vendor/github.com/graph-gophers/dataloader/key.go b/src/vendor/github.com/graph-gophers/dataloader/key.go
new file mode 100644
index 000000000..57f66f4b3
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/key.go
@@ -0,0 +1,39 @@
+package dataloader
+
+// Key is the interface that all keys need to implement
+type Key interface {
+ // String returns a guaranteed unique string that can be used to identify an object
+ String() string
+ // Raw returns the raw, underlying value of the key
+ Raw() interface{}
+}
+
+// Keys wraps a slice of Key types to provide some convenience methods.
+type Keys []Key
+
+// Keys returns the list of strings. One for each "Key" in the list
+func (l Keys) Keys() []string {
+ list := make([]string, len(l))
+ for i := range l {
+ list[i] = l[i].String()
+ }
+ return list
+}
+
+// StringKey implements the Key interface for a string
+type StringKey string
+
+// String is an identity method. Used to implement the Key interface's String method
+func (k StringKey) String() string { return string(k) }
+
+// Raw is an identity method. Used to implement the Key interface's Raw method
+func (k StringKey) Raw() interface{} { return k }
+
+// NewKeysFromStrings converts a `[]string` to a `Keys` ([]Key)
+func NewKeysFromStrings(strings []string) Keys {
+ list := make(Keys, len(strings))
+ for i := range strings {
+ list[i] = StringKey(strings[i])
+ }
+ return list
+}
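
Since Key is a two-method interface, adapting it to other identifier types is straightforward. A hypothetical integer key, mirroring StringKey above:

    package main

    import (
        "strconv"

        "github.com/graph-gophers/dataloader"
    )

    // IntKey implements Key for integer identifiers.
    type IntKey int

    // String renders the id as a unique string, as the Key contract requires.
    func (k IntKey) String() string { return strconv.Itoa(int(k)) }

    // Raw returns the untouched integer for use inside the batch function.
    func (k IntKey) Raw() interface{} { return int(k) }

    // compile-time check that IntKey satisfies Key
    var _ dataloader.Key = IntKey(0)
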
diff --git a/src/vendor/github.com/graph-gophers/dataloader/trace.go b/src/vendor/github.com/graph-gophers/dataloader/trace.go
new file mode 100644
index 000000000..17e7bc9ca
--- /dev/null
+++ b/src/vendor/github.com/graph-gophers/dataloader/trace.go
@@ -0,0 +1,78 @@
+package dataloader
+
+import (
+ "context"
+
+ opentracing "github.com/opentracing/opentracing-go"
+)
+
+type TraceLoadFinishFunc func(Thunk)
+type TraceLoadManyFinishFunc func(ThunkMany)
+type TraceBatchFinishFunc func([]*Result)
+
+// Tracer is an interface that may be used to implement tracing.
+type Tracer interface {
+ // TraceLoad will trace the calls to Load
+ TraceLoad(ctx context.Context, key Key) (context.Context, TraceLoadFinishFunc)
+ // TraceLoadMany will trace the calls to LoadMany
+ TraceLoadMany(ctx context.Context, keys Keys) (context.Context, TraceLoadManyFinishFunc)
+ // TraceBatch will trace data loader batches
+ TraceBatch(ctx context.Context, keys Keys) (context.Context, TraceBatchFinishFunc)
+}
+
+// OpenTracingTracer implements a Tracer that can be used with the OpenTracing standard.
+type OpenTracingTracer struct{}
+
+// TraceLoad will trace a call to dataloader.Load with OpenTracing
+func (OpenTracingTracer) TraceLoad(ctx context.Context, key Key) (context.Context, TraceLoadFinishFunc) {
+ span, spanCtx := opentracing.StartSpanFromContext(ctx, "Dataloader: load")
+
+ span.SetTag("dataloader.key", key.String())
+
+ return spanCtx, func(thunk Thunk) {
+ // TODO: is there anything we should do with the results?
+ span.Finish()
+ }
+}
+
+// TraceLoadMany will trace a call to dataloader.LoadMany with Open Tracing
+func (OpenTracingTracer) TraceLoadMany(ctx context.Context, keys Keys) (context.Context, TraceLoadManyFinishFunc) {
+ span, spanCtx := opentracing.StartSpanFromContext(ctx, "Dataloader: loadmany")
+
+ span.SetTag("dataloader.keys", keys.Keys())
+
+ return spanCtx, func(thunk ThunkMany) {
+ // TODO: is there anything we should do with the results?
+ span.Finish()
+ }
+}
+
+// TraceBatch will trace a dataloader batch with OpenTracing
+func (OpenTracingTracer) TraceBatch(ctx context.Context, keys Keys) (context.Context, TraceBatchFinishFunc) {
+ span, spanCtx := opentracing.StartSpanFromContext(ctx, "Dataloader: batch")
+
+ span.SetTag("dataloader.keys", keys.Keys())
+
+ return spanCtx, func(results []*Result) {
+ // TODO: is there anything we should do with the results?
+ span.Finish()
+ }
+}
+
+// NoopTracer is the default (noop) tracer
+type NoopTracer struct{}
+
+// TraceLoad is a noop function
+func (NoopTracer) TraceLoad(ctx context.Context, key Key) (context.Context, TraceLoadFinishFunc) {
+ return ctx, func(Thunk) {}
+}
+
+// TraceLoadMany is a noop function
+func (NoopTracer) TraceLoadMany(ctx context.Context, keys Keys) (context.Context, TraceLoadManyFinishFunc) {
+ return ctx, func(ThunkMany) {}
+}
+
+// TraceBatch is a noop function
+func (NoopTracer) TraceBatch(ctx context.Context, keys Keys) (context.Context, TraceBatchFinishFunc) {
+ return ctx, func(result []*Result) {}
+}
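
Custom tracers only need to satisfy the three-method Tracer interface above. A sketch of a logging tracer follows; logTracer is hypothetical, and upstream exposes a WithTracer loader option for wiring one in (the option itself is outside this hunk):

    package main

    import (
        "context"
        "log"

        "github.com/graph-gophers/dataloader"
    )

    // logTracer logs when each dataloader phase starts and finishes.
    type logTracer struct{}

    func (logTracer) TraceLoad(ctx context.Context, key dataloader.Key) (context.Context, dataloader.TraceLoadFinishFunc) {
        log.Printf("load start: %s", key.String())
        return ctx, func(dataloader.Thunk) { log.Printf("load done: %s", key.String()) }
    }

    func (logTracer) TraceLoadMany(ctx context.Context, keys dataloader.Keys) (context.Context, dataloader.TraceLoadManyFinishFunc) {
        log.Printf("loadMany start: %v", keys.Keys())
        return ctx, func(dataloader.ThunkMany) { log.Printf("loadMany done") }
    }

    func (logTracer) TraceBatch(ctx context.Context, keys dataloader.Keys) (context.Context, dataloader.TraceBatchFinishFunc) {
        log.Printf("batch start: %v", keys.Keys())
        return ctx, func(results []*dataloader.Result) { log.Printf("batch done: %d results", len(results)) }
    }

    // compile-time check that logTracer satisfies Tracer
    var _ dataloader.Tracer = logTracer{}
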
diff --git a/src/vendor/github.com/jmespath/go-jmespath/.gitignore b/src/vendor/github.com/jmespath/go-jmespath/.gitignore
new file mode 100644
index 000000000..5091fb073
--- /dev/null
+++ b/src/vendor/github.com/jmespath/go-jmespath/.gitignore
@@ -0,0 +1,4 @@
+/jpgo
+jmespath-fuzz.zip
+cpu.out
+go-jmespath.test
diff --git a/src/vendor/github.com/jmespath/go-jmespath/.travis.yml b/src/vendor/github.com/jmespath/go-jmespath/.travis.yml
new file mode 100644
index 000000000..1f9807757
--- /dev/null
+++ b/src/vendor/github.com/jmespath/go-jmespath/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+sudo: false
+
+go:
+ - 1.4
+
+install: go get -v -t ./...
+script: make test
diff --git a/src/vendor/github.com/jmespath/go-jmespath/LICENSE b/src/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 000000000..b03310a91
--- /dev/null
+++ b/src/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/src/vendor/github.com/jmespath/go-jmespath/Makefile b/src/vendor/github.com/jmespath/go-jmespath/Makefile
new file mode 100644
index 000000000..a828d2848
--- /dev/null
+++ b/src/vendor/github.com/jmespath/go-jmespath/Makefile
@@ -0,0 +1,44 @@
+
+CMD = jpgo
+
+help:
+ @echo "Please use \`make ' where is one of"
+ @echo " test to run all the tests"
+ @echo " build to build the library and jp executable"
+ @echo " generate to run codegen"
+
+
+generate:
+ go generate ./...
+
+build:
+ rm -f $(CMD)
+ go build ./...
+ rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+ mv cmd/$(CMD)/$(CMD) .
+
+test:
+ go test -v ./...
+
+check:
+ go vet ./...
+ @echo "golint ./..."
+ @lint=`golint ./...`; \
+ lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+ echo "$$lint"; \
+ if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+ go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+ go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+ go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
+
+bench:
+ go test -bench . -cpuprofile cpu.out
+
+pprof-cpu:
+ go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/src/vendor/github.com/jmespath/go-jmespath/README.md b/src/vendor/github.com/jmespath/go-jmespath/README.md
new file mode 100644
index 000000000..187ef676d
--- /dev/null
+++ b/src/vendor/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,7 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+See http://jmespath.org for more info.
diff --git a/src/vendor/github.com/jmespath/go-jmespath/api.go b/src/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 000000000..8e26ffeec
--- /dev/null
+++ b/src/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMES path query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+ ast ASTNode
+ intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+ return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+ jmespath, err := Compile(expression)
+ if err != nil {
+ panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+ }
+ return jmespath
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+ return jp.intr.Execute(jp.ast, data)
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func Search(expression string, data interface{}) (interface{}, error) {
+ intr := newInterpreter()
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(ast, data)
+}
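
The api.go surface above is small: Search for one-shot queries, Compile or MustCompile for reuse. A minimal sketch (the query and data are illustrative):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/jmespath/go-jmespath"
    )

    func main() {
        var data interface{}
        _ = json.Unmarshal([]byte(`{"foo": {"bar": [1, 2, 3]}}`), &data)

        // One-shot evaluation: parse and interpret in a single call.
        result, err := jmespath.Search("foo.bar[1]", data)
        fmt.Println(result, err) // 2 <nil>

        // Compile once and reuse; a *JMESPath is safe for concurrent use.
        jp := jmespath.MustCompile("foo.bar[-1]")
        last, _ := jp.Search(data)
        fmt.Println(last) // 3
    }
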
diff --git a/src/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/src/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
new file mode 100644
index 000000000..1cd2d239c
--- /dev/null
+++ b/src/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type astNodeType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
+
+var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
+
+func (i astNodeType) String() string {
+ if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
+ return fmt.Sprintf("astNodeType(%d)", i)
+ }
+ return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
+}
diff --git a/src/vendor/github.com/jmespath/go-jmespath/functions.go b/src/vendor/github.com/jmespath/go-jmespath/functions.go
new file mode 100644
index 000000000..9b7cd89b4
--- /dev/null
+++ b/src/vendor/github.com/jmespath/go-jmespath/functions.go
@@ -0,0 +1,842 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type jpFunction func(arguments []interface{}) (interface{}, error)
+
+type jpType string
+
+const (
+ jpUnknown jpType = "unknown"
+ jpNumber jpType = "number"
+ jpString jpType = "string"
+ jpArray jpType = "array"
+ jpObject jpType = "object"
+ jpArrayNumber jpType = "array[number]"
+ jpArrayString jpType = "array[string]"
+ jpExpref jpType = "expref"
+ jpAny jpType = "any"
+)
+
+type functionEntry struct {
+ name string
+ arguments []argSpec
+ handler jpFunction
+ hasExpRef bool
+}
+
+type argSpec struct {
+ types []jpType
+ variadic bool
+}
+
+type byExprString struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprString) Len() int {
+ return len(a.items)
+}
+func (a *byExprString) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprString) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
+
+type byExprFloat struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprFloat) Len() int {
+ return len(a.items)
+}
+func (a *byExprFloat) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprFloat) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
+
+type functionCaller struct {
+ functionTable map[string]functionEntry
+}
+
+func newFunctionCaller() *functionCaller {
+ caller := &functionCaller{}
+ caller.functionTable = map[string]functionEntry{
+ "length": {
+ name: "length",
+ arguments: []argSpec{
+ {types: []jpType{jpString, jpArray, jpObject}},
+ },
+ handler: jpfLength,
+ },
+ "starts_with": {
+ name: "starts_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfStartsWith,
+ },
+ "abs": {
+ name: "abs",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfAbs,
+ },
+ "avg": {
+ name: "avg",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfAvg,
+ },
+ "ceil": {
+ name: "ceil",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfCeil,
+ },
+ "contains": {
+ name: "contains",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfContains,
+ },
+ "ends_with": {
+ name: "ends_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfEndsWith,
+ },
+ "floor": {
+ name: "floor",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfFloor,
+ },
+ "map": {
+ name: "amp",
+ arguments: []argSpec{
+ {types: []jpType{jpExpref}},
+ {types: []jpType{jpArray}},
+ },
+ handler: jpfMap,
+ hasExpRef: true,
+ },
+ "max": {
+ name: "max",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMax,
+ },
+ "merge": {
+ name: "merge",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}, variadic: true},
+ },
+ handler: jpfMerge,
+ },
+ "max_by": {
+ name: "max_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMaxBy,
+ hasExpRef: true,
+ },
+ "sum": {
+ name: "sum",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfSum,
+ },
+ "min": {
+ name: "min",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMin,
+ },
+ "min_by": {
+ name: "min_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMinBy,
+ hasExpRef: true,
+ },
+ "type": {
+ name: "type",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfType,
+ },
+ "keys": {
+ name: "keys",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfKeys,
+ },
+ "values": {
+ name: "values",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfValues,
+ },
+ "sort": {
+ name: "sort",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayString, jpArrayNumber}},
+ },
+ handler: jpfSort,
+ },
+ "sort_by": {
+ name: "sort_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfSortBy,
+ hasExpRef: true,
+ },
+ "join": {
+ name: "join",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpArrayString}},
+ },
+ handler: jpfJoin,
+ },
+ "reverse": {
+ name: "reverse",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ },
+ handler: jpfReverse,
+ },
+ "to_array": {
+ name: "to_array",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToArray,
+ },
+ "to_string": {
+ name: "to_string",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToString,
+ },
+ "to_number": {
+ name: "to_number",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToNumber,
+ },
+ "not_null": {
+ name: "not_null",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}, variadic: true},
+ },
+ handler: jpfNotNull,
+ },
+ }
+ return caller
+}
+
+func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
+ if len(e.arguments) == 0 {
+ return arguments, nil
+ }
+ if !e.arguments[len(e.arguments)-1].variadic {
+ if len(e.arguments) != len(arguments) {
+ return nil, errors.New("incorrect number of args")
+ }
+ for i, spec := range e.arguments {
+ userArg := arguments[i]
+ err := spec.typeCheck(userArg)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arguments, nil
+ }
+ if len(arguments) < len(e.arguments) {
+ return nil, errors.New("Invalid arity.")
+ }
+ return arguments, nil
+}
+
+func (a *argSpec) typeCheck(arg interface{}) error {
+ for _, t := range a.types {
+ switch t {
+ case jpNumber:
+ if _, ok := arg.(float64); ok {
+ return nil
+ }
+ case jpString:
+ if _, ok := arg.(string); ok {
+ return nil
+ }
+ case jpArray:
+ if isSliceType(arg) {
+ return nil
+ }
+ case jpObject:
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil
+ }
+ case jpArrayNumber:
+ if _, ok := toArrayNum(arg); ok {
+ return nil
+ }
+ case jpArrayString:
+ if _, ok := toArrayStr(arg); ok {
+ return nil
+ }
+ case jpAny:
+ return nil
+ case jpExpref:
+ if _, ok := arg.(expRef); ok {
+ return nil
+ }
+ }
+ }
+ return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
+}
+
+func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
+ entry, ok := f.functionTable[name]
+ if !ok {
+ return nil, errors.New("unknown function: " + name)
+ }
+ resolvedArgs, err := entry.resolveArgs(arguments)
+ if err != nil {
+ return nil, err
+ }
+ if entry.hasExpRef {
+ var extra []interface{}
+ extra = append(extra, intr)
+ resolvedArgs = append(extra, resolvedArgs...)
+ }
+ return entry.handler(resolvedArgs)
+}
+
+func jpfAbs(arguments []interface{}) (interface{}, error) {
+ num := arguments[0].(float64)
+ return math.Abs(num), nil
+}
+
+func jpfLength(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if c, ok := arg.(string); ok {
+ return float64(utf8.RuneCountInString(c)), nil
+ } else if isSliceType(arg) {
+ v := reflect.ValueOf(arg)
+ return float64(v.Len()), nil
+ } else if c, ok := arg.(map[string]interface{}); ok {
+ return float64(len(c)), nil
+ }
+ return nil, errors.New("could not compute length()")
+}
+
+func jpfStartsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ prefix := arguments[1].(string)
+ return strings.HasPrefix(search, prefix), nil
+}
+
+func jpfAvg(arguments []interface{}) (interface{}, error) {
+ // We've already type checked the value so we can safely use
+ // type assertions.
+ args := arguments[0].([]interface{})
+ length := float64(len(args))
+ numerator := 0.0
+ for _, n := range args {
+ numerator += n.(float64)
+ }
+ return numerator / length, nil
+}
+func jpfCeil(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Ceil(val), nil
+}
+func jpfContains(arguments []interface{}) (interface{}, error) {
+ search := arguments[0]
+ el := arguments[1]
+ if searchStr, ok := search.(string); ok {
+ if elStr, ok := el.(string); ok {
+ return strings.Index(searchStr, elStr) != -1, nil
+ }
+ return false, nil
+ }
+ // Otherwise this is a generic contains for []interface{}
+ general := search.([]interface{})
+ for _, item := range general {
+ if item == el {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+func jpfEndsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ suffix := arguments[1].(string)
+ return strings.HasSuffix(search, suffix), nil
+}
+func jpfFloor(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Floor(val), nil
+}
+func jpfMap(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ exp := arguments[1].(expRef)
+ node := exp.ref
+ arr := arguments[2].([]interface{})
+ mapped := make([]interface{}, 0, len(arr))
+ for _, value := range arr {
+ current, err := intr.Execute(node, value)
+ if err != nil {
+ return nil, err
+ }
+ mapped = append(mapped, current)
+ }
+ return mapped, nil
+}
+func jpfMax(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ // Otherwise we're dealing with a max() of strings.
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+ final := make(map[string]interface{})
+ for _, m := range arguments {
+ mapped := m.(map[string]interface{})
+ for key, value := range mapped {
+ final[key] = value
+ }
+ }
+ return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ switch t := start.(type) {
+ case float64:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ case string:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ default:
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+ items, _ := toArrayNum(arguments[0])
+ sum := 0.0
+ for _, item := range items {
+ sum += item
+ }
+ return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if t, ok := start.(float64); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else if t, ok := start.(string); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if _, ok := arg.(float64); ok {
+ return "number", nil
+ }
+ if _, ok := arg.(string); ok {
+ return "string", nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return "array", nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return "object", nil
+ }
+ if arg == nil {
+ return "null", nil
+ }
+ if arg == true || arg == false {
+ return "boolean", nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for key := range arg {
+ collected = append(collected, key)
+ }
+ return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for _, value := range arg {
+ collected = append(collected, value)
+ }
+ return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ d := sort.Float64Slice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+ }
+ // Otherwise we're dealing with sort()'ing strings.
+ items, _ := toArrayStr(arguments[0])
+ d := sort.StringSlice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return arr, nil
+ } else if len(arr) == 1 {
+ return arr, nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := start.(float64); ok {
+ sortable := &byExprFloat{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else if _, ok := start.(string); ok {
+ sortable := &byExprString{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+ sep := arguments[0].(string)
+ // We can't just do arguments[1].([]string), we have to
+ // manually convert each item to a string.
+ arrayStr := []string{}
+ for _, item := range arguments[1].([]interface{}) {
+ arrayStr = append(arrayStr, item.(string))
+ }
+ return strings.Join(arrayStr, sep), nil
+}
+func jpfReverse(arguments []interface{}) (interface{}, error) {
+ if s, ok := arguments[0].(string); ok {
+ r := []rune(s)
+ for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
+ r[i], r[j] = r[j], r[i]
+ }
+ return string(r), nil
+ }
+ items := arguments[0].([]interface{})
+ length := len(items)
+ reversed := make([]interface{}, length)
+ for i, item := range items {
+ reversed[length-(i+1)] = item
+ }
+ return reversed, nil
+}
+func jpfToArray(arguments []interface{}) (interface{}, error) {
+ if _, ok := arguments[0].([]interface{}); ok {
+ return arguments[0], nil
+ }
+ return arguments[:1:1], nil
+}
+func jpfToString(arguments []interface{}) (interface{}, error) {
+ if v, ok := arguments[0].(string); ok {
+ return v, nil
+ }
+ result, err := json.Marshal(arguments[0])
+ if err != nil {
+ return nil, err
+ }
+ return string(result), nil
+}
+func jpfToNumber(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if v, ok := arg.(float64); ok {
+ return v, nil
+ }
+ if v, ok := arg.(string); ok {
+ conv, err := strconv.ParseFloat(v, 64)
+ if err != nil {
+ return nil, nil
+ }
+ return conv, nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return nil, nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil, nil
+ }
+ if arg == nil {
+ return nil, nil
+ }
+ if arg == true || arg == false {
+ return nil, nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfNotNull(arguments []interface{}) (interface{}, error) {
+ for _, arg := range arguments {
+ if arg != nil {
+ return arg, nil
+ }
+ }
+ return nil, nil
+}
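
The function table above is exercised through ordinary expressions; expression references (the expref type) are written with a leading &. A sketch using max_by and sort_by on illustrative data (note that JSON-style numbers must be float64):

    package main

    import (
        "fmt"

        "github.com/jmespath/go-jmespath"
    )

    func main() {
        data := map[string]interface{}{
            "people": []interface{}{
                map[string]interface{}{"name": "a", "age": 30.0},
                map[string]interface{}{"name": "b", "age": 25.0},
            },
        }

        // max_by resolves the &age expression reference against each element.
        oldest, err := jmespath.Search("max_by(people, &age).name", data)
        fmt.Println(oldest, err) // a <nil>

        // sort_by performs a stable sort keyed on the referenced expression.
        sorted, _ := jmespath.Search("sort_by(people, &age)[*].name", data)
        fmt.Println(sorted) // [b a]
    }
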
diff --git a/src/vendor/github.com/jmespath/go-jmespath/interpreter.go b/src/vendor/github.com/jmespath/go-jmespath/interpreter.go
new file mode 100644
index 000000000..13c74604c
--- /dev/null
+++ b/src/vendor/github.com/jmespath/go-jmespath/interpreter.go
@@ -0,0 +1,418 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+/* This is a tree based interpreter. It walks the AST and directly
+ interprets the AST to search through a JSON document.
+*/
+
+type treeInterpreter struct {
+ fCall *functionCaller
+}
+
+func newInterpreter() *treeInterpreter {
+ interpreter := treeInterpreter{}
+ interpreter.fCall = newFunctionCaller()
+ return &interpreter
+}
+
+type expRef struct {
+ ref ASTNode
+}
+
+// Execute takes an ASTNode and input data and interprets the AST directly.
+// It will produce the result of applying the JMESPath expression associated
+// with the ASTNode to the input data "value".
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
+ switch node.nodeType {
+ case ASTComparator:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ right, err := intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ switch node.value {
+ case tEQ:
+ return objsEqual(left, right), nil
+ case tNE:
+ return !objsEqual(left, right), nil
+ }
+ leftNum, ok := left.(float64)
+ if !ok {
+ return nil, nil
+ }
+ rightNum, ok := right.(float64)
+ if !ok {
+ return nil, nil
+ }
+ switch node.value {
+ case tGT:
+ return leftNum > rightNum, nil
+ case tGTE:
+ return leftNum >= rightNum, nil
+ case tLT:
+ return leftNum < rightNum, nil
+ case tLTE:
+ return leftNum <= rightNum, nil
+ }
+ case ASTExpRef:
+ return expRef{ref: node.children[0]}, nil
+ case ASTFunctionExpression:
+ resolvedArgs := []interface{}{}
+ for _, arg := range node.children {
+ current, err := intr.Execute(arg, value)
+ if err != nil {
+ return nil, err
+ }
+ resolvedArgs = append(resolvedArgs, current)
+ }
+ return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
+ case ASTField:
+ if m, ok := value.(map[string]interface{}); ok {
+ key := node.value.(string)
+ return m[key], nil
+ }
+ return intr.fieldFromStruct(node.value.(string), value)
+ case ASTFilterProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.filterProjectionWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ for _, element := range sliceType {
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+ case ASTFlatten:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ // If we can't type convert to []interface{}, there's
+ // a chance this could still work via reflection if we're
+ // dealing with user provided types.
+ if isSliceType(left) {
+ return intr.flattenWithReflection(left)
+ }
+ return nil, nil
+ }
+ flattened := []interface{}{}
+ for _, element := range sliceType {
+ if elementSlice, ok := element.([]interface{}); ok {
+ flattened = append(flattened, elementSlice...)
+ } else if isSliceType(element) {
+ reflectFlat := []interface{}{}
+ v := reflect.ValueOf(element)
+ for i := 0; i < v.Len(); i++ {
+ reflectFlat = append(reflectFlat, v.Index(i).Interface())
+ }
+ flattened = append(flattened, reflectFlat...)
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+ case ASTIdentity, ASTCurrentNode:
+ return value, nil
+ case ASTIndex:
+ if sliceType, ok := value.([]interface{}); ok {
+ index := node.value.(int)
+ if index < 0 {
+ index += len(sliceType)
+ }
+ if index < len(sliceType) && index >= 0 {
+ return sliceType[index], nil
+ }
+ return nil, nil
+ }
+ // Otherwise try via reflection.
+ rv := reflect.ValueOf(value)
+ if rv.Kind() == reflect.Slice {
+ index := node.value.(int)
+ if index < 0 {
+ index += rv.Len()
+ }
+ if index < rv.Len() && index >= 0 {
+ v := rv.Index(index)
+ return v.Interface(), nil
+ }
+ }
+ return nil, nil
+ case ASTKeyValPair:
+ return intr.Execute(node.children[0], value)
+ case ASTLiteral:
+ return node.value, nil
+ case ASTMultiSelectHash:
+ if value == nil {
+ return nil, nil
+ }
+ collected := make(map[string]interface{})
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ key := child.value.(string)
+ collected[key] = current
+ }
+ return collected, nil
+ case ASTMultiSelectList:
+ if value == nil {
+ return nil, nil
+ }
+ collected := []interface{}{}
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ collected = append(collected, current)
+ }
+ return collected, nil
+ case ASTOrExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ matched, err = intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return matched, nil
+ case ASTAndExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return matched, nil
+ }
+ return intr.Execute(node.children[1], value)
+ case ASTNotExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return true, nil
+ }
+ return false, nil
+ case ASTPipe:
+ result := value
+ var err error
+ for _, child := range node.children {
+ result, err = intr.Execute(child, result)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return result, nil
+ case ASTProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.projectWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ collected := []interface{}{}
+ var current interface{}
+ for _, element := range sliceType {
+ current, err = intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ case ASTSubexpression, ASTIndexExpression:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(node.children[1], left)
+ case ASTSlice:
+ sliceType, ok := value.([]interface{})
+ if !ok {
+ if isSliceType(value) {
+ return intr.sliceWithReflection(node, value)
+ }
+ return nil, nil
+ }
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ return slice(sliceType, sliceParams)
+ case ASTValueProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ mapType, ok := left.(map[string]interface{})
+ if !ok {
+ return nil, nil
+ }
+ values := make([]interface{}, 0, len(mapType))
+ for _, value := range mapType {
+ values = append(values, value)
+ }
+ collected := []interface{}{}
+ for _, element := range values {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ }
+ return nil, errors.New("Unknown AST node: " + node.nodeType.String())
+}
+
+func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
+ rv := reflect.ValueOf(value)
+ first, n := utf8.DecodeRuneInString(key)
+ fieldName := string(unicode.ToUpper(first)) + key[n:]
+ if rv.Kind() == reflect.Struct {
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ } else if rv.Kind() == reflect.Ptr {
+ // Handle multiple levels of indirection?
+ if rv.IsNil() {
+ return nil, nil
+ }
+ rv = rv.Elem()
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ }
+ return nil, nil
+}
+
+func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ flattened := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ if reflect.TypeOf(element).Kind() == reflect.Slice {
+ // Then insert the contents of the element
+ // slice into the flattened slice,
+ // i.e. flattened = append(flattened, mySlice...)
+ elementV := reflect.ValueOf(element)
+ for j := 0; j < elementV.Len(); j++ {
+ flattened = append(
+ flattened, elementV.Index(j).Interface())
+ }
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+}
+
+func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ final := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ final = append(final, element)
+ }
+ return slice(final, sliceParams)
+}
+
+func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+}
+
+func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if result != nil {
+ collected = append(collected, result)
+ }
+ }
+ return collected, nil
+}
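
Beyond map[string]interface{} documents, the reflection paths above (fieldFromStruct, projectWithReflection, and friends) let expressions run against plain Go structs; lower-case identifiers are upper-cased to match exported fields. A sketch with a hypothetical struct type:

    package main

    import (
        "fmt"

        "github.com/jmespath/go-jmespath"
    )

    type item struct {
        Name string
        Size float64
    }

    func main() {
        data := []item{{Name: "a", Size: 1}, {Name: "b", Size: 2}}

        // "name" is upper-cased by fieldFromStruct and resolves to the
        // exported field Name on each projected element.
        names, err := jmespath.Search("[*].name", data)
        fmt.Println(names, err) // [a b] <nil>
    }
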
diff --git a/src/vendor/github.com/jmespath/go-jmespath/lexer.go b/src/vendor/github.com/jmespath/go-jmespath/lexer.go
new file mode 100644
index 000000000..817900c8f
--- /dev/null
+++ b/src/vendor/github.com/jmespath/go-jmespath/lexer.go
@@ -0,0 +1,420 @@
+package jmespath
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type token struct {
+ tokenType tokType
+ value string
+ position int
+ length int
+}
+
+type tokType int
+
+const eof = -1
+
+// Lexer contains information about the expression being tokenized.
+type Lexer struct {
+ expression string // The expression provided by the user.
+ currentPos int // The current position in the string.
+ lastWidth int // The width of the current rune, used by back().
+ buf bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+ msg string // Error message displayed to user
+ Expression string // Expression that generated a SyntaxError
+ Offset int // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+ // In the future, it would be good to underline the specific
+ // location where the error occurred.
+ return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
+func (e SyntaxError) HighlightLocation() string {
+ return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
+}
+
+//go:generate stringer -type=tokType
+const (
+ tUnknown tokType = iota
+ tStar
+ tDot
+ tFilter
+ tFlatten
+ tLparen
+ tRparen
+ tLbracket
+ tRbracket
+ tLbrace
+ tRbrace
+ tOr
+ tPipe
+ tNumber
+ tUnquotedIdentifier
+ tQuotedIdentifier
+ tComma
+ tColon
+ tLT
+ tLTE
+ tGT
+ tGTE
+ tEQ
+ tNE
+ tJSONLiteral
+ tStringLiteral
+ tCurrent
+ tExpref
+ tAnd
+ tNot
+ tEOF
+)
+
+var basicTokens = map[rune]tokType{
+ '.': tDot,
+ '*': tStar,
+ ',': tComma,
+ ':': tColon,
+ '{': tLbrace,
+ '}': tRbrace,
+ ']': tRbracket, // tLbracket not included because it could be "[]"
+ '(': tLparen,
+ ')': tRparen,
+ '@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
+
+var whiteSpace = map[rune]bool{
+ ' ': true, '\t': true, '\n': true, '\r': true,
+}
+
+func (t token) String() string {
+ return fmt.Sprintf("Token{%+v, %s, %d, %d}",
+ t.tokenType, t.value, t.position, t.length)
+}
+
+// NewLexer creates a new JMESPath lexer.
+func NewLexer() *Lexer {
+ lexer := Lexer{}
+ return &lexer
+}
+
+func (lexer *Lexer) next() rune {
+ if lexer.currentPos >= len(lexer.expression) {
+ lexer.lastWidth = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
+ lexer.lastWidth = w
+ lexer.currentPos += w
+ return r
+}
+
+func (lexer *Lexer) back() {
+ lexer.currentPos -= lexer.lastWidth
+}
+
+func (lexer *Lexer) peek() rune {
+ t := lexer.next()
+ lexer.back()
+ return t
+}
+
+// tokenize takes an expression and returns corresponding tokens.
+func (lexer *Lexer) tokenize(expression string) ([]token, error) {
+ var tokens []token
+ lexer.expression = expression
+ lexer.currentPos = 0
+ lexer.lastWidth = 0
+loop:
+ for {
+ r := lexer.next()
+ if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
+ t := lexer.consumeUnquotedIdentifier()
+ tokens = append(tokens, t)
+ } else if val, ok := basicTokens[r]; ok {
+ // Basic single char token.
+ t := token{
+ tokenType: val,
+ value: string(r),
+ position: lexer.currentPos - lexer.lastWidth,
+ length: 1,
+ }
+ tokens = append(tokens, t)
+ } else if r == '-' || (r >= '0' && r <= '9') {
+ t := lexer.consumeNumber()
+ tokens = append(tokens, t)
+ } else if r == '[' {
+ t := lexer.consumeLBracket()
+ tokens = append(tokens, t)
+ } else if r == '"' {
+ t, err := lexer.consumeQuotedIdentifier()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '\'' {
+ t, err := lexer.consumeRawStringLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '`' {
+ t, err := lexer.consumeLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '|' {
+ t := lexer.matchOrElse(r, '|', tOr, tPipe)
+ tokens = append(tokens, t)
+ } else if r == '<' {
+ t := lexer.matchOrElse(r, '=', tLTE, tLT)
+ tokens = append(tokens, t)
+ } else if r == '>' {
+ t := lexer.matchOrElse(r, '=', tGTE, tGT)
+ tokens = append(tokens, t)
+ } else if r == '!' {
+ t := lexer.matchOrElse(r, '=', tNE, tNot)
+ tokens = append(tokens, t)
+ } else if r == '=' {
+ t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
+ tokens = append(tokens, t)
+ } else if r == '&' {
+ t := lexer.matchOrElse(r, '&', tAnd, tExpref)
+ tokens = append(tokens, t)
+ } else if r == eof {
+ break loop
+ } else if _, ok := whiteSpace[r]; ok {
+ // Ignore whitespace
+ } else {
+ return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
+ }
+ }
+ tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
+ return tokens, nil
+}
+
+// Consume characters until the ending rune "r" is reached.
+// If the end of the expression is reached before seeing the
+// terminating rune "r", then an error is returned.
+// If no error occurs then the matching substring is returned.
+// The returned string will not include the ending rune.
+func (lexer *Lexer) consumeUntil(end rune) (string, error) {
+ start := lexer.currentPos
+ current := lexer.next()
+ for current != end && current != eof {
+ if current == '\\' && lexer.peek() != eof {
+ lexer.next()
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return "", SyntaxError{
+ msg: "Unclosed delimiter: " + string(end),
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
+}
+
+func (lexer *Lexer) consumeLiteral() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('`')
+ if err != nil {
+ return token{}, err
+ }
+ value = strings.Replace(value, "\\`", "`", -1)
+ return token{
+ tokenType: tJSONLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
+ start := lexer.currentPos
+ currentIndex := start
+ current := lexer.next()
+ for current != '\'' && lexer.peek() != eof {
+ if current == '\\' && lexer.peek() == '\'' {
+ chunk := lexer.expression[currentIndex : lexer.currentPos-1]
+ lexer.buf.WriteString(chunk)
+ lexer.buf.WriteString("'")
+ lexer.next()
+ currentIndex = lexer.currentPos
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return token{}, SyntaxError{
+ msg: "Unclosed delimiter: '",
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ if currentIndex < lexer.currentPos {
+ lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+ }
+ value := lexer.buf.String()
+ // Reset the buffer so it can be reused.
+ lexer.buf.Reset()
+ return token{
+ tokenType: tStringLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: lexer.expression,
+ Offset: lexer.currentPos - 1,
+ }
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tPipe, "|" -> tOr.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == second {
+ t = token{
+ tokenType: matchedType,
+ value: string(first) + string(second),
+ position: start,
+ length: 2,
+ }
+ } else {
+ lexer.back()
+ t = token{
+ tokenType: singleCharType,
+ value: string(first),
+ position: start,
+ length: 1,
+ }
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+ // There are three options here:
+ // 1. A filter expression "[?"
+ // 2. A flatten operator "[]"
+ // 3. A bare lbracket "["
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == '?' {
+ t = token{
+ tokenType: tFilter,
+ value: "[?",
+ position: start,
+ length: 2,
+ }
+ } else if nextRune == ']' {
+ t = token{
+ tokenType: tFlatten,
+ value: "[]",
+ position: start,
+ length: 2,
+ }
+ } else {
+ t = token{
+ tokenType: tLbracket,
+ value: "[",
+ position: start,
+ length: 1,
+ }
+ lexer.back()
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('"')
+ if err != nil {
+ return token{}, err
+ }
+ var decoded string
+ asJSON := []byte("\"" + value + "\"")
+ if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil {
+ return token{}, err
+ }
+ return token{
+ tokenType: tQuotedIdentifier,
+ value: decoded,
+ position: start - 1,
+ length: len(decoded),
+ }, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+ // Consume runes until we reach the end of an unquoted
+ // identifier.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tUnquotedIdentifier,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
+
+func (lexer *Lexer) consumeNumber() token {
+ // Consume runes until we reach something that's not a number.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < '0' || r > '9' {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tNumber,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
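
Lexing failures surface as the SyntaxError type above, whose HighlightLocation helper draws a caret under the offending character. A sketch (the malformed expression is arbitrary):

    package main

    import (
        "fmt"

        "github.com/jmespath/go-jmespath"
    )

    func main() {
        _, err := jmespath.Compile("foo % bar") // '%' is not a legal JMESPath token
        if syntaxErr, ok := err.(jmespath.SyntaxError); ok {
            fmt.Println(syntaxErr.Error())             // SyntaxError: Unknown char: '%'
            fmt.Println(syntaxErr.HighlightLocation()) // caret under the '%'
        }
    }
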
diff --git a/src/vendor/github.com/jmespath/go-jmespath/parser.go b/src/vendor/github.com/jmespath/go-jmespath/parser.go
new file mode 100644
index 000000000..1240a1755
--- /dev/null
+++ b/src/vendor/github.com/jmespath/go-jmespath/parser.go
@@ -0,0 +1,603 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type astNodeType int
+
+//go:generate stringer -type astNodeType
+const (
+ ASTEmpty astNodeType = iota
+ ASTComparator
+ ASTCurrentNode
+ ASTExpRef
+ ASTFunctionExpression
+ ASTField
+ ASTFilterProjection
+ ASTFlatten
+ ASTIdentity
+ ASTIndex
+ ASTIndexExpression
+ ASTKeyValPair
+ ASTLiteral
+ ASTMultiSelectHash
+ ASTMultiSelectList
+ ASTOrExpression
+ ASTAndExpression
+ ASTNotExpression
+ ASTPipe
+ ASTProjection
+ ASTSubexpression
+ ASTSlice
+ ASTValueProjection
+)
+
+// ASTNode represents the abstract syntax tree of a JMESPath expression.
+type ASTNode struct {
+ nodeType astNodeType
+ value interface{}
+ children []ASTNode
+}
+
+func (node ASTNode) String() string {
+ return node.PrettyPrint(0)
+}
+
+// PrettyPrint will pretty print the parsed AST.
+// The AST is an implementation detail and this pretty print
+// function is provided as a convenience method to help with
+// debugging. You should not rely on its output as the internal
+// structure of the AST may change at any time.
+func (node ASTNode) PrettyPrint(indent int) string {
+ spaces := strings.Repeat(" ", indent)
+ output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
+ nextIndent := indent + 2
+ if node.value != nil {
+ if converted, ok := node.value.(fmt.Stringer); ok {
+ // Account for things like comparator nodes
+ // that are enums with a String() method.
+ output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
+ } else {
+ output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
+ }
+ }
+ lastIndex := len(node.children)
+ if lastIndex > 0 {
+ output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
+ childIndent := nextIndent + 2
+ for _, elem := range node.children {
+ output += elem.PrettyPrint(childIndent)
+ }
+ output += fmt.Sprintf("%s}\n", strings.Repeat(" ", nextIndent))
+ }
+ output += fmt.Sprintf("%s}\n", spaces)
+ return output
+}
+
+var bindingPowers = map[tokType]int{
+ tEOF: 0,
+ tUnquotedIdentifier: 0,
+ tQuotedIdentifier: 0,
+ tRbracket: 0,
+ tRparen: 0,
+ tComma: 0,
+ tRbrace: 0,
+ tNumber: 0,
+ tCurrent: 0,
+ tExpref: 0,
+ tColon: 0,
+ tPipe: 1,
+ tOr: 2,
+ tAnd: 3,
+ tEQ: 5,
+ tLT: 5,
+ tLTE: 5,
+ tGT: 5,
+ tGTE: 5,
+ tNE: 5,
+ tFlatten: 9,
+ tStar: 20,
+ tFilter: 21,
+ tDot: 40,
+ tNot: 45,
+ tLbrace: 50,
+ tLbracket: 55,
+ tLparen: 60,
+}
+
+// Parser holds state about the current expression being parsed.
+type Parser struct {
+ expression string
+ tokens []token
+ index int
+}
+
+// NewParser creates a new JMESPath parser.
+func NewParser() *Parser {
+ p := Parser{}
+ return &p
+}
+
+// Parse will compile a JMESPath expression.
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+ lexer := NewLexer()
+ p.expression = expression
+ p.index = 0
+ tokens, err := lexer.tokenize(expression)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ p.tokens = tokens
+ parsed, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() != tEOF {
+ return ASTNode{}, p.syntaxError(fmt.Sprintf(
+			"Unexpected token at the end of the expression: %s", p.current()))
+ }
+ return parsed, nil
+}
+
+func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
+ var err error
+ leftToken := p.lookaheadToken(0)
+ p.advance()
+ leftNode, err := p.nud(leftToken)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken := p.current()
+ for bindingPower < bindingPowers[currentToken] {
+ p.advance()
+ leftNode, err = p.led(currentToken, leftNode)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken = p.current()
+ }
+ return leftNode, nil
+}
+
+func (p *Parser) parseIndexExpression() (ASTNode, error) {
+ if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
+ return p.parseSliceExpression()
+ }
+ indexStr := p.lookaheadToken(0).value
+ parsedInt, err := strconv.Atoi(indexStr)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
+ p.advance()
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return indexNode, nil
+}
+
+func (p *Parser) parseSliceExpression() (ASTNode, error) {
+ parts := []*int{nil, nil, nil}
+ index := 0
+ current := p.current()
+ for current != tRbracket && index < 3 {
+ if current == tColon {
+ index++
+ p.advance()
+ } else if current == tNumber {
+ parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ parts[index] = &parsedInt
+ p.advance()
+ } else {
+ return ASTNode{}, p.syntaxError(
+				"Expected tColon or tNumber, received: " + p.current().String())
+ }
+ current = p.current()
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTSlice,
+ value: parts,
+ }, nil
+}
+
+func (p *Parser) match(tokenType tokType) error {
+ if p.current() == tokenType {
+ p.advance()
+ return nil
+ }
+ return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
+func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
+ switch tokenType {
+ case tDot:
+ if p.current() != tStar {
+ right, err := p.parseDotRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTSubexpression,
+ children: []ASTNode{node, right},
+ }, err
+ }
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTValueProjection,
+ children: []ASTNode{node, right},
+ }, err
+ case tPipe:
+ right, err := p.parseExpression(bindingPowers[tPipe])
+ return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
+ case tOr:
+ right, err := p.parseExpression(bindingPowers[tOr])
+ return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
+ case tAnd:
+ right, err := p.parseExpression(bindingPowers[tAnd])
+ return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
+ case tLparen:
+ name := node.value
+ var args []ASTNode
+ for p.current() != tRparen {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tComma {
+ if err := p.match(tComma); err != nil {
+ return ASTNode{}, err
+ }
+ }
+ args = append(args, expression)
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTFunctionExpression,
+ value: name,
+ children: args,
+ }, nil
+ case tFilter:
+ return p.parseFilter(node)
+ case tFlatten:
+ left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{left, right},
+ }, err
+ case tEQ, tNE, tGT, tGTE, tLT, tLTE:
+ right, err := p.parseExpression(bindingPowers[tokenType])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTComparator,
+ value: tokenType,
+ children: []ASTNode{node, right},
+ }, nil
+ case tLbracket:
+ tokenType := p.current()
+ var right ASTNode
+ var err error
+ if tokenType == tNumber || tokenType == tColon {
+ right, err = p.parseIndexExpression()
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.projectIfSlice(node, right)
+ }
+ // Otherwise this is a projection.
+ if err := p.match(tStar); err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{node, right},
+ }, nil
+ }
+ return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
+}
+
+func (p *Parser) nud(token token) (ASTNode, error) {
+ switch token.tokenType {
+ case tJSONLiteral:
+ var parsed interface{}
+ err := json.Unmarshal([]byte(token.value), &parsed)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
+ case tStringLiteral:
+ return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
+ case tUnquotedIdentifier:
+ return ASTNode{
+ nodeType: ASTField,
+ value: token.value,
+ }, nil
+ case tQuotedIdentifier:
+ node := ASTNode{nodeType: ASTField, value: token.value}
+ if p.current() == tLparen {
+ return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
+ }
+ return node, nil
+ case tStar:
+ left := ASTNode{nodeType: ASTIdentity}
+ var right ASTNode
+ var err error
+ if p.current() == tRbracket {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ }
+ return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
+ case tFilter:
+ return p.parseFilter(ASTNode{nodeType: ASTIdentity})
+ case tLbrace:
+ return p.parseMultiSelectHash()
+ case tFlatten:
+ left := ASTNode{
+ nodeType: ASTFlatten,
+ children: []ASTNode{{nodeType: ASTIdentity}},
+ }
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
+ case tLbracket:
+ tokenType := p.current()
+		if tokenType == tNumber || tokenType == tColon {
+			right, err := p.parseIndexExpression()
+			if err != nil {
+				return ASTNode{}, err
+			}
+ return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
+ } else if tokenType == tStar && p.lookahead(1) == tRbracket {
+ p.advance()
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{{nodeType: ASTIdentity}, right},
+ }, nil
+ } else {
+ return p.parseMultiSelectList()
+ }
+ case tCurrent:
+ return ASTNode{nodeType: ASTCurrentNode}, nil
+ case tExpref:
+ expression, err := p.parseExpression(bindingPowers[tExpref])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+ case tNot:
+ expression, err := p.parseExpression(bindingPowers[tNot])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+ case tLparen:
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return expression, nil
+ case tEOF:
+ return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+ }
+
+ return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
+func (p *Parser) parseMultiSelectList() (ASTNode, error) {
+ var expressions []ASTNode
+ for {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ expressions = append(expressions, expression)
+ if p.current() == tRbracket {
+ break
+ }
+ err = p.match(tComma)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+ err := p.match(tRbracket)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectList,
+ children: expressions,
+ }, nil
+}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+ var children []ASTNode
+ for {
+ keyToken := p.lookaheadToken(0)
+ if err := p.match(tUnquotedIdentifier); err != nil {
+ if err := p.match(tQuotedIdentifier); err != nil {
+ return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+ }
+ }
+ keyName := keyToken.value
+ err := p.match(tColon)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ value, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ node := ASTNode{
+ nodeType: ASTKeyValPair,
+ value: keyName,
+ children: []ASTNode{value},
+ }
+ children = append(children, node)
+ if p.current() == tComma {
+			err := p.match(tComma)
+			if err != nil {
+				return ASTNode{}, err
+			}
+		} else if p.current() == tRbrace {
+			err := p.match(tRbrace)
+			if err != nil {
+				return ASTNode{}, err
+			}
+ break
+ }
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectHash,
+ children: children,
+ }, nil
+}
+
+func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
+ indexExpr := ASTNode{
+ nodeType: ASTIndexExpression,
+ children: []ASTNode{left, right},
+ }
+ if right.nodeType == ASTSlice {
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{indexExpr, right},
+ }, err
+ }
+ return indexExpr, nil
+}
+func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
+ var right, condition ASTNode
+ var err error
+ condition, err = p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tFlatten {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tFilter])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+
+ return ASTNode{
+ nodeType: ASTFilterProjection,
+ children: []ASTNode{node, right, condition},
+ }, nil
+}
+
+func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
+ lookahead := p.current()
+ if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
+ return p.parseExpression(bindingPower)
+ } else if lookahead == tLbracket {
+ if err := p.match(tLbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectList()
+ } else if lookahead == tLbrace {
+ if err := p.match(tLbrace); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectHash()
+ }
+ return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
+}
+
+func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
+ current := p.current()
+ if bindingPowers[current] < 10 {
+ return ASTNode{nodeType: ASTIdentity}, nil
+ } else if current == tLbracket {
+ return p.parseExpression(bindingPower)
+ } else if current == tFilter {
+ return p.parseExpression(bindingPower)
+ } else if current == tDot {
+ err := p.match(tDot)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseDotRHS(bindingPower)
+ } else {
+ return ASTNode{}, p.syntaxError("Error")
+ }
+}
+
+func (p *Parser) lookahead(number int) tokType {
+ return p.lookaheadToken(number).tokenType
+}
+
+func (p *Parser) current() tokType {
+ return p.lookahead(0)
+}
+
+func (p *Parser) lookaheadToken(number int) token {
+ return p.tokens[p.index+number]
+}
+
+func (p *Parser) advance() {
+ p.index++
+}
+
+func tokensOneOf(elements []tokType, token tokType) bool {
+ for _, elem := range elements {
+ if elem == token {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *Parser) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: p.lookaheadToken(0).position,
+ }
+}
+
+// Create a SyntaxError based on the provided token.
+// This differs from syntaxError() which creates a SyntaxError
+// based on the current lookahead token.
+func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: t.position,
+ }
+}
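
For orientation, the `bindingPowers` table above drives a classic Pratt (top-down operator precedence) parser: `parseExpression` keeps extending the left node while the next token binds tighter than the caller's binding power. A small usage sketch, assuming the vendored package is imported under its upstream path:

```go
package main

import (
	"fmt"

	jmespath "github.com/jmespath/go-jmespath"
)

func main() {
	p := jmespath.NewParser()
	// "." (binding power 40) binds tighter than "|" (1), so this parses
	// as (foo.bar) | (baz[0]) rather than foo.(bar | baz[0]).
	ast, err := p.Parse("foo.bar | baz[0]")
	if err != nil {
		panic(err)
	}
	fmt.Println(ast.PrettyPrint(0)) // debugging aid only; the AST layout may change
}
```
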
diff --git a/src/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/src/vendor/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644
index 000000000..dae79cbdf
--- /dev/null
+++ b/src/vendor/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+ if i < 0 || i >= tokType(len(_tokType_index)-1) {
+ return fmt.Sprintf("tokType(%d)", i)
+ }
+ return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}
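
The generated `String()` above uses stringer's packed-string trick: all names are concatenated into one constant and sliced by cumulative offsets, avoiding a separate string header per value. The same technique by hand, for an illustrative three-value enum:

```go
package main

import "fmt"

type color int

const (
	red color = iota
	green
	blue
)

// One packed constant plus an offset table replaces a []string.
const colorName = "RedGreenBlue"

var colorIndex = [...]uint8{0, 3, 8, 12}

func (c color) String() string {
	if c < 0 || int(c) >= len(colorIndex)-1 {
		return fmt.Sprintf("color(%d)", c)
	}
	return colorName[colorIndex[c]:colorIndex[c+1]]
}

func main() {
	fmt.Println(green) // Green
}
```
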
diff --git a/src/vendor/github.com/jmespath/go-jmespath/util.go b/src/vendor/github.com/jmespath/go-jmespath/util.go
new file mode 100644
index 000000000..ddc1b7d7d
--- /dev/null
+++ b/src/vendor/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+)
+
+// isFalse determines if an object is false based on the JMESPath spec.
+// JMESPath defines false values to be any of:
+// - An empty string, array, or hash.
+// - The boolean value false.
+// - nil
+func isFalse(value interface{}) bool {
+ switch v := value.(type) {
+ case bool:
+ return !v
+ case []interface{}:
+ return len(v) == 0
+ case map[string]interface{}:
+ return len(v) == 0
+ case string:
+ return len(v) == 0
+ case nil:
+ return true
+ }
+ // Try the reflection cases before returning false.
+ rv := reflect.ValueOf(value)
+ switch rv.Kind() {
+ case reflect.Struct:
+ // A struct type will never be false, even if
+ // all of its values are the zero type.
+ return false
+ case reflect.Slice, reflect.Map:
+ return rv.Len() == 0
+ case reflect.Ptr:
+ if rv.IsNil() {
+ return true
+ }
+ // If it's a pointer type, we'll try to deref the pointer
+ // and evaluate the pointer value for isFalse.
+ element := rv.Elem()
+ return isFalse(element.Interface())
+ }
+ return false
+}
+
+// objsEqual is a generic object equality check.
+// It will take two arbitrary objects and recursively determine
+// if they are equal.
+func objsEqual(left interface{}, right interface{}) bool {
+ return reflect.DeepEqual(left, right)
+}
+
+// sliceParam refers to a single part of a slice.
+// A slice consists of a start, a stop, and a step, similar to
+// python slices.
+type sliceParam struct {
+ N int
+ Specified bool
+}
+
+// slice supports [start:stop:step]-style slicing as defined by JMESPath.
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
+ computed, err := computeSliceParams(len(slice), parts)
+ if err != nil {
+ return nil, err
+ }
+ start, stop, step := computed[0], computed[1], computed[2]
+ result := []interface{}{}
+ if step > 0 {
+ for i := start; i < stop; i += step {
+ result = append(result, slice[i])
+ }
+ } else {
+ for i := start; i > stop; i += step {
+ result = append(result, slice[i])
+ }
+ }
+ return result, nil
+}
+
+func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
+ var start, stop, step int
+ if !parts[2].Specified {
+ step = 1
+ } else if parts[2].N == 0 {
+ return nil, errors.New("Invalid slice, step cannot be 0")
+ } else {
+ step = parts[2].N
+ }
+	stepValueNegative := step < 0
+
+ if !parts[0].Specified {
+ if stepValueNegative {
+ start = length - 1
+ } else {
+ start = 0
+ }
+ } else {
+ start = capSlice(length, parts[0].N, step)
+ }
+
+ if !parts[1].Specified {
+ if stepValueNegative {
+ stop = -1
+ } else {
+ stop = length
+ }
+ } else {
+ stop = capSlice(length, parts[1].N, step)
+ }
+ return []int{start, stop, step}, nil
+}
+
+func capSlice(length int, actual int, step int) int {
+ if actual < 0 {
+ actual += length
+ if actual < 0 {
+ if step < 0 {
+ actual = -1
+ } else {
+ actual = 0
+ }
+ }
+ } else if actual >= length {
+ if step < 0 {
+ actual = length - 1
+ } else {
+ actual = length
+ }
+ }
+ return actual
+}
+
+// toArrayNum converts an empty interface type to a slice of float64.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false.
+func toArrayNum(data interface{}) ([]float64, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]float64, len(d))
+ for i, el := range d {
+ item, ok := el.(float64)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+// toArrayStr converts an empty interface type to a slice of strings.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false. If the input data could be entirely
+// converted, then the converted data, along with a second value of true,
+// will be returned.
+func toArrayStr(data interface{}) ([]string, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]string, len(d))
+ for i, el := range d {
+ item, ok := el.(string)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+func isSliceType(v interface{}) bool {
+ if v == nil {
+ return false
+ }
+ return reflect.TypeOf(v).Kind() == reflect.Slice
+}
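
`slice` and `computeSliceParams` above implement Python-style `[start:stop:step]` semantics, including negative steps and index capping. A self-contained sketch of the observable behavior (these helpers are unexported, so this mirrors rather than calls them, and assumes already-normalized indices):

```go
package main

import "fmt"

// pySlice applies normalized start/stop/step values the way the
// vendored slice() does: exclusive stop, direction set by step's sign.
func pySlice(in []int, start, stop, step int) []int {
	out := []int{}
	if step > 0 {
		for i := start; i < stop; i += step {
			out = append(out, in[i])
		}
	} else {
		for i := start; i > stop; i += step {
			out = append(out, in[i])
		}
	}
	return out
}

func main() {
	xs := []int{0, 1, 2, 3, 4}
	fmt.Println(pySlice(xs, 0, 5, 2))   // [0 2 4]     like xs[::2]
	fmt.Println(pySlice(xs, 4, -1, -1)) // [4 3 2 1 0] like xs[::-1]
}
```
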
diff --git a/src/vendor/github.com/json-iterator/go/build.sh b/src/vendor/github.com/json-iterator/go/build.sh
old mode 100755
new mode 100644
diff --git a/src/vendor/github.com/json-iterator/go/test.sh b/src/vendor/github.com/json-iterator/go/test.sh
old mode 100755
new mode 100644
diff --git a/src/vendor/github.com/justinas/alice/.travis.yml b/src/vendor/github.com/justinas/alice/.travis.yml
new file mode 100644
index 000000000..dc6bea671
--- /dev/null
+++ b/src/vendor/github.com/justinas/alice/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+matrix:
+ include:
+ - go: 1.0.x
+ - go: 1.1.x
+ - go: 1.2.x
+ - go: 1.3.x
+ - go: 1.4.x
+ - go: 1.5.x
+ - go: 1.6.x
+ - go: 1.7.x
+ - go: 1.8.x
+ - go: 1.9.x
+ - go: tip
+ allow_failures:
+ - go: tip
diff --git a/src/vendor/github.com/justinas/alice/LICENSE b/src/vendor/github.com/justinas/alice/LICENSE
new file mode 100644
index 000000000..0d0d352ec
--- /dev/null
+++ b/src/vendor/github.com/justinas/alice/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Justinas Stankevicius
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/src/vendor/github.com/justinas/alice/README.md b/src/vendor/github.com/justinas/alice/README.md
new file mode 100644
index 000000000..e4f9157c0
--- /dev/null
+++ b/src/vendor/github.com/justinas/alice/README.md
@@ -0,0 +1,98 @@
+# Alice
+
+[![GoDoc](https://godoc.org/github.com/golang/gddo?status.svg)](http://godoc.org/github.com/justinas/alice)
+[![Build Status](https://travis-ci.org/justinas/alice.svg?branch=master)](https://travis-ci.org/justinas/alice)
+[![Coverage](http://gocover.io/_badge/github.com/justinas/alice)](http://gocover.io/github.com/justinas/alice)
+
+Alice provides a convenient way to chain
+your HTTP middleware functions and the app handler.
+
+In short, it transforms
+
+```go
+Middleware1(Middleware2(Middleware3(App)))
+```
+
+to
+
+```go
+alice.New(Middleware1, Middleware2, Middleware3).Then(App)
+```
+
+### Why?
+
+None of the other middleware chaining solutions
+behaves exactly like Alice.
+Alice is as minimal as it gets:
+in essence, it's just a for loop that does the wrapping for you.
+
+Check out [this blog post](http://justinas.org/alice-painless-middleware-chaining-for-go/)
+for an explanation of how Alice is different from other chaining solutions.
+
+### Usage
+
+Your middleware constructors should have the form of
+
+```go
+func (http.Handler) http.Handler
+```
+
+Some middleware provide this out of the box.
+For ones that don't, it's trivial to write one yourself.
+
+```go
+func myStripPrefix(h http.Handler) http.Handler {
+ return http.StripPrefix("/old", h)
+}
+```
+
+This complete example shows the full power of Alice.
+
+```go
+package main
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/throttled/throttled"
+ "github.com/justinas/alice"
+ "github.com/justinas/nosurf"
+)
+
+func timeoutHandler(h http.Handler) http.Handler {
+ return http.TimeoutHandler(h, 1*time.Second, "timed out")
+}
+
+func myApp(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Hello world!"))
+}
+
+func main() {
+ th := throttled.Interval(throttled.PerSec(10), 1, &throttled.VaryBy{Path: true}, 50)
+ myHandler := http.HandlerFunc(myApp)
+
+ chain := alice.New(th.Throttle, timeoutHandler, nosurf.NewPure).Then(myHandler)
+ http.ListenAndServe(":8000", chain)
+}
+```
+
+Here, the request will pass [throttled](https://github.com/PuerkitoBio/throttled) first,
+then an http.TimeoutHandler we've set up,
+then [nosurf](https://github.com/justinas/nosurf)
+and will finally reach our handler.
+
+Note that Alice makes **no guarantees** about
+how one or another piece of middleware will behave.
+Once it passes execution to the outer layer of middleware,
+it has no say in whether middleware will execute the inner handlers.
+This is intentional behavior.
+
+Alice works with Go 1.0 and higher.
+
+### Contributing
+
+0. Find an issue that bugs you / open a new one.
+1. Discuss.
+2. Branch off, commit, test.
+3. Make a pull request / attach the commits to the issue.
diff --git a/src/vendor/github.com/justinas/alice/chain.go b/src/vendor/github.com/justinas/alice/chain.go
new file mode 100644
index 000000000..da0e2b580
--- /dev/null
+++ b/src/vendor/github.com/justinas/alice/chain.go
@@ -0,0 +1,112 @@
+// Package alice provides a convenient way to chain http handlers.
+package alice
+
+import "net/http"
+
+// A constructor for a piece of middleware.
+// Some middleware use this constructor out of the box,
+// so in most cases you can just pass somepackage.New
+type Constructor func(http.Handler) http.Handler
+
+// Chain acts as a list of http.Handler constructors.
+// Chain is effectively immutable:
+// once created, it will always hold
+// the same set of constructors in the same order.
+type Chain struct {
+ constructors []Constructor
+}
+
+// New creates a new chain,
+// memorizing the given list of middleware constructors.
+// New serves no other function;
+// constructors are only called upon a call to Then().
+func New(constructors ...Constructor) Chain {
+ return Chain{append(([]Constructor)(nil), constructors...)}
+}
+
+// Then chains the middleware and returns the final http.Handler.
+// New(m1, m2, m3).Then(h)
+// is equivalent to:
+// m1(m2(m3(h)))
+// When the request comes in, it will be passed to m1, then m2, then m3
+// and finally, the given handler
+// (assuming every middleware calls the following one).
+//
+// A chain can be safely reused by calling Then() several times.
+// stdStack := alice.New(ratelimitHandler, csrfHandler)
+// indexPipe = stdStack.Then(indexHandler)
+// authPipe = stdStack.Then(authHandler)
+// Note that constructors are called on every call to Then()
+// and thus several instances of the same middleware will be created
+// when a chain is reused in this way.
+// For proper middleware, this should cause no problems.
+//
+// Then() treats nil as http.DefaultServeMux.
+func (c Chain) Then(h http.Handler) http.Handler {
+ if h == nil {
+ h = http.DefaultServeMux
+ }
+
+ for i := range c.constructors {
+ h = c.constructors[len(c.constructors)-1-i](h)
+ }
+
+ return h
+}
+
+// ThenFunc works identically to Then, but takes
+// a HandlerFunc instead of a Handler.
+//
+// The following two statements are equivalent:
+// c.Then(http.HandlerFunc(fn))
+// c.ThenFunc(fn)
+//
+// ThenFunc provides all the guarantees of Then.
+func (c Chain) ThenFunc(fn http.HandlerFunc) http.Handler {
+ if fn == nil {
+ return c.Then(nil)
+ }
+ return c.Then(fn)
+}
+
+// Append extends a chain, adding the specified constructors
+// as the last ones in the request flow.
+//
+// Append returns a new chain, leaving the original one untouched.
+//
+// stdChain := alice.New(m1, m2)
+// extChain := stdChain.Append(m3, m4)
+// // requests in stdChain go m1 -> m2
+// // requests in extChain go m1 -> m2 -> m3 -> m4
+func (c Chain) Append(constructors ...Constructor) Chain {
+ newCons := make([]Constructor, 0, len(c.constructors)+len(constructors))
+ newCons = append(newCons, c.constructors...)
+ newCons = append(newCons, constructors...)
+
+ return Chain{newCons}
+}
+
+// Extend extends a chain by adding the specified chain
+// as the last one in the request flow.
+//
+// Extend returns a new chain, leaving the original one untouched.
+//
+// stdChain := alice.New(m1, m2)
+// ext1Chain := alice.New(m3, m4)
+// ext2Chain := stdChain.Extend(ext1Chain)
+// // requests in stdChain go m1 -> m2
+// // requests in ext1Chain go m3 -> m4
+// // requests in ext2Chain go m1 -> m2 -> m3 -> m4
+//
+// Another example:
+// aHtmlAfterNosurf := alice.New(m2)
+// aHtml := alice.New(m1, func(h http.Handler) http.Handler {
+// csrf := nosurf.New(h)
+// csrf.SetFailureHandler(aHtmlAfterNosurf.ThenFunc(csrfFail))
+// return csrf
+// }).Extend(aHtmlAfterNosurf)
+//	 // requests to aHtml hitting nosurf's success handler go m1 -> nosurf -> m2 -> target-handler
+//	 // requests to aHtml hitting nosurf's failure handler go m1 -> nosurf -> m2 -> csrfFail
+func (c Chain) Extend(chain Chain) Chain {
+ return c.Append(chain.constructors...)
+}
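
Since the package docs above lean on snippets, here is a complete, runnable chain showing `Append` building on an existing chain (the middleware names are illustrative):

```go
package main

import (
	"log"
	"net/http"

	"github.com/justinas/alice"
)

func logging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		log.Println(r.Method, r.URL.Path)
		next.ServeHTTP(w, r)
	})
}

func noCache(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Cache-Control", "no-store")
		next.ServeHTTP(w, r)
	})
}

func main() {
	std := alice.New(logging)  // requests go: logging -> handler
	ext := std.Append(noCache) // requests go: logging -> noCache -> handler
	http.Handle("/", ext.ThenFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```
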
diff --git a/src/vendor/github.com/lib/pq/.travis.sh b/src/vendor/github.com/lib/pq/.travis.sh
old mode 100755
new mode 100644
index ead01df73..21a526443
--- a/src/vendor/github.com/lib/pq/.travis.sh
+++ b/src/vendor/github.com/lib/pq/.travis.sh
@@ -71,12 +71,6 @@ postgresql_uninstall() {
}
megacheck_install() {
- # Megacheck is Go 1.6+, so skip if Go 1.5.
- if [[ "$(go version)" =~ "go1.5" ]]
- then
- echo "megacheck not supported, skipping installation"
- return 0
- fi
# Lock megacheck version at $MEGACHECK_VERSION to prevent spontaneous
# new error messages in old code.
go get -d honnef.co/go/tools/...
@@ -86,13 +80,7 @@ megacheck_install() {
}
golint_install() {
- # Golint is Go 1.6+, so skip if Go 1.5.
- if [[ "$(go version)" =~ "go1.5" ]]
- then
- echo "golint not supported, skipping installation"
- return 0
- fi
- go get github.com/golang/lint/golint
+ go get golang.org/x/lint/golint
}
$1
diff --git a/src/vendor/github.com/lib/pq/.travis.yml b/src/vendor/github.com/lib/pq/.travis.yml
index 79c59a81d..f0305809f 100644
--- a/src/vendor/github.com/lib/pq/.travis.yml
+++ b/src/vendor/github.com/lib/pq/.travis.yml
@@ -1,10 +1,9 @@
language: go
go:
- - 1.6.x
- - 1.7.x
- - 1.8.x
- 1.9.x
+ - 1.10.x
+ - 1.11.x
- master
sudo: true
@@ -15,7 +14,7 @@ env:
- PQGOSSLTESTS=1
- PQSSLCERTTEST_PATH=$PWD/certs
- PGHOST=127.0.0.1
- - MEGACHECK_VERSION=2017.2.1
+ - MEGACHECK_VERSION=2017.2.2
matrix:
- PGVERSION=10
- PGVERSION=9.6
@@ -45,13 +44,7 @@ script:
- >
goimports -d -e $(find -name '*.go') | awk '{ print } END { exit NR == 0 ? 0 : 1 }'
- go vet ./...
- # For compatibility with Go 1.5, launch only if megacheck is present.
- - >
- which megacheck > /dev/null && megacheck -go 1.5 ./...
- || echo 'megacheck is not supported, skipping check'
- # For compatibility with Go 1.5, launch only if golint is present.
- - >
- which golint > /dev/null && golint ./...
- || echo 'golint is not supported, skipping check'
+ - megacheck -go 1.9 ./...
+ - golint ./...
- PQTEST_BINARY_PARAMETERS=no go test -race -v ./...
- PQTEST_BINARY_PARAMETERS=yes go test -race -v ./...
diff --git a/src/vendor/github.com/lib/pq/README.md b/src/vendor/github.com/lib/pq/README.md
index 781c89eea..385fe7350 100644
--- a/src/vendor/github.com/lib/pq/README.md
+++ b/src/vendor/github.com/lib/pq/README.md
@@ -10,22 +10,11 @@
## Docs
For detailed documentation and basic usage examples, please see the package
-documentation at <http://godoc.org/github.com/lib/pq>.
+documentation at <https://godoc.org/github.com/lib/pq>.
## Tests
-`go test` is used for testing. A running PostgreSQL server is
-required, with the ability to log in. The default database to connect
-to test with is "pqgotest," but it can be overridden using environment
-variables.
-
-Example:
-
- PGHOST=/run/postgresql go test github.com/lib/pq
-
-Optionally, a benchmark suite can be run as part of the tests:
-
- PGHOST=/run/postgresql go test -bench .
+`go test` is used for testing. See [TESTS.md](TESTS.md) for more details.
## Features
diff --git a/src/vendor/github.com/lib/pq/TESTS.md b/src/vendor/github.com/lib/pq/TESTS.md
new file mode 100644
index 000000000..f05021115
--- /dev/null
+++ b/src/vendor/github.com/lib/pq/TESTS.md
@@ -0,0 +1,33 @@
+# Tests
+
+## Running Tests
+
+`go test` is used for testing. A running PostgreSQL
+server is required, with the ability to log in. The
+default database for tests is "pqgotest" on "localhost",
+but these can be overridden using [environment
+variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html).
+
+Example:
+
+ PGHOST=/run/postgresql go test
+
+## Benchmarks
+
+A benchmark suite can be run as part of the tests:
+
+ go test -bench .
+
+## Example setup (Docker)
+
+Run a postgres container:
+
+```
+docker run -p 5432:5432 postgres
+```
+
+Run tests:
+
+```
+PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test
+```
diff --git a/src/vendor/github.com/lib/pq/conn.go b/src/vendor/github.com/lib/pq/conn.go
index de6e5c17c..62551a142 100644
--- a/src/vendor/github.com/lib/pq/conn.go
+++ b/src/vendor/github.com/lib/pq/conn.go
@@ -2,7 +2,9 @@ package pq
import (
"bufio"
+ "context"
"crypto/md5"
+ "crypto/sha256"
"database/sql"
"database/sql/driver"
"encoding/binary"
@@ -20,6 +22,7 @@ import (
"unicode"
"github.com/lib/pq/oid"
+ "github.com/lib/pq/scram"
)
// Common error types
@@ -89,13 +92,24 @@ type Dialer interface {
DialTimeout(network, address string, timeout time.Duration) (net.Conn, error)
}
-type defaultDialer struct{}
-
-func (d defaultDialer) Dial(ntw, addr string) (net.Conn, error) {
- return net.Dial(ntw, addr)
+type DialerContext interface {
+ DialContext(ctx context.Context, network, address string) (net.Conn, error)
}
-func (d defaultDialer) DialTimeout(ntw, addr string, timeout time.Duration) (net.Conn, error) {
- return net.DialTimeout(ntw, addr, timeout)
+
+type defaultDialer struct {
+ d net.Dialer
+}
+
+func (d defaultDialer) Dial(network, address string) (net.Conn, error) {
+ return d.d.Dial(network, address)
+}
+func (d defaultDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+ return d.DialContext(ctx, network, address)
+}
+func (d defaultDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+ return d.d.DialContext(ctx, network, address)
}
type conn struct {
@@ -244,90 +258,35 @@ func (cn *conn) writeBuf(b byte) *writeBuf {
}
}
-// Open opens a new connection to the database. name is a connection string.
+// Open opens a new connection to the database. dsn is a connection string.
// Most users should only use it through database/sql package from the standard
// library.
-func Open(name string) (_ driver.Conn, err error) {
- return DialOpen(defaultDialer{}, name)
+func Open(dsn string) (_ driver.Conn, err error) {
+ return DialOpen(defaultDialer{}, dsn)
}
// DialOpen opens a new connection to the database using a dialer.
-func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
+func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) {
+ c, err := NewConnector(dsn)
+ if err != nil {
+ return nil, err
+ }
+ c.dialer = d
+ return c.open(context.Background())
+}
+
+func (c *Connector) open(ctx context.Context) (cn *conn, err error) {
// Handle any panics during connection initialization. Note that we
// specifically do *not* want to use errRecover(), as that would turn any
// connection errors into ErrBadConns, hiding the real error message from
// the user.
defer errRecoverNoErrBadConn(&err)
- o := make(values)
+ o := c.opts
- // A number of defaults are applied here, in this order:
- //
- // * Very low precedence defaults applied in every situation
- // * Environment variables
- // * Explicitly passed connection information
- o["host"] = "localhost"
- o["port"] = "5432"
- // N.B.: Extra float digits should be set to 3, but that breaks
- // Postgres 8.4 and older, where the max is 2.
- o["extra_float_digits"] = "2"
- for k, v := range parseEnviron(os.Environ()) {
- o[k] = v
- }
-
- if strings.HasPrefix(name, "postgres://") || strings.HasPrefix(name, "postgresql://") {
- name, err = ParseURL(name)
- if err != nil {
- return nil, err
- }
- }
-
- if err := parseOpts(name, o); err != nil {
- return nil, err
- }
-
- // Use the "fallback" application name if necessary
- if fallback, ok := o["fallback_application_name"]; ok {
- if _, ok := o["application_name"]; !ok {
- o["application_name"] = fallback
- }
- }
-
- // We can't work with any client_encoding other than UTF-8 currently.
- // However, we have historically allowed the user to set it to UTF-8
- // explicitly, and there's no reason to break such programs, so allow that.
- // Note that the "options" setting could also set client_encoding, but
- // parsing its value is not worth it. Instead, we always explicitly send
- // client_encoding as a separate run-time parameter, which should override
- // anything set in options.
- if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) {
- return nil, errors.New("client_encoding must be absent or 'UTF8'")
- }
- o["client_encoding"] = "UTF8"
- // DateStyle needs a similar treatment.
- if datestyle, ok := o["datestyle"]; ok {
- if datestyle != "ISO, MDY" {
- panic(fmt.Sprintf("setting datestyle must be absent or %v; got %v",
- "ISO, MDY", datestyle))
- }
- } else {
- o["datestyle"] = "ISO, MDY"
- }
-
- // If a user is not provided by any other means, the last
- // resort is to use the current operating system provided user
- // name.
- if _, ok := o["user"]; !ok {
- u, err := userCurrent()
- if err != nil {
- return nil, err
- }
- o["user"] = u
- }
-
- cn := &conn{
+ cn = &conn{
opts: o,
- dialer: d,
+ dialer: c.dialer,
}
err = cn.handleDriverSettings(o)
if err != nil {
@@ -335,12 +294,17 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
}
cn.handlePgpass(o)
- cn.c, err = dial(d, o)
+ cn.c, err = dial(ctx, c.dialer, o)
if err != nil {
return nil, err
}
- // cn.ssl and cn.startup panic on error. Make sure we don't leak cn.c.
+ err = cn.ssl(o)
+ if err != nil {
+ return nil, err
+ }
+
+ // cn.startup panics on error. Make sure we don't leak cn.c.
panicking := true
defer func() {
if panicking {
@@ -348,7 +312,6 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
}
}()
- cn.ssl(o)
cn.buf = bufio.NewReader(cn.c)
cn.startup(o)
@@ -360,10 +323,10 @@ func DialOpen(d Dialer, name string) (_ driver.Conn, err error) {
return cn, err
}
-func dial(d Dialer, o values) (net.Conn, error) {
- ntw, addr := network(o)
+func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) {
+ network, address := network(o)
// SSL is not necessary or supported over UNIX domain sockets
- if ntw == "unix" {
+ if network == "unix" {
o["sslmode"] = "disable"
}
@@ -374,19 +337,30 @@ func dial(d Dialer, o values) (net.Conn, error) {
return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err)
}
duration := time.Duration(seconds) * time.Second
+
// connect_timeout should apply to the entire connection establishment
// procedure, so we both use a timeout for the TCP connection
// establishment and set a deadline for doing the initial handshake.
// The deadline is then reset after startup() is done.
deadline := time.Now().Add(duration)
- conn, err := d.DialTimeout(ntw, addr, duration)
+ var conn net.Conn
+ if dctx, ok := d.(DialerContext); ok {
+ ctx, cancel := context.WithTimeout(ctx, duration)
+ defer cancel()
+ conn, err = dctx.DialContext(ctx, network, address)
+ } else {
+ conn, err = d.DialTimeout(network, address, duration)
+ }
if err != nil {
return nil, err
}
err = conn.SetDeadline(deadline)
return conn, err
}
- return d.Dial(ntw, addr)
+ if dctx, ok := d.(DialerContext); ok {
+ return dctx.DialContext(ctx, network, address)
+ }
+ return d.Dial(network, address)
}
func network(o values) (string, string) {
@@ -700,7 +674,7 @@ func (cn *conn) simpleQuery(q string) (res *rows, err error) {
// res might be non-nil here if we received a previous
// CommandComplete, but that's fine; just overwrite it
res = &rows{cn: cn}
- res.colNames, res.colFmts, res.colTyps = parsePortalRowDescribe(r)
+ res.rowsHeader = parsePortalRowDescribe(r)
// To work around a bug in QueryRow in Go 1.2 and earlier, wait
// until the first DataRow has been received.
@@ -857,17 +831,15 @@ func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) {
cn.readParseResponse()
cn.readBindResponse()
rows := &rows{cn: cn}
- rows.colNames, rows.colFmts, rows.colTyps = cn.readPortalDescribeResponse()
+ rows.rowsHeader = cn.readPortalDescribeResponse()
cn.postExecuteWorkaround()
return rows, nil
}
st := cn.prepareTo(query, "")
st.exec(args)
return &rows{
- cn: cn,
- colNames: st.colNames,
- colTyps: st.colTyps,
- colFmts: st.colFmts,
+ cn: cn,
+ rowsHeader: st.rowsHeader,
}, nil
}
@@ -988,7 +960,6 @@ func (cn *conn) recv() (t byte, r *readBuf) {
if err != nil {
panic(err)
}
-
switch t {
case 'E':
panic(parseError(r))
@@ -1029,30 +1000,35 @@ func (cn *conn) recv1() (t byte, r *readBuf) {
return t, r
}
-func (cn *conn) ssl(o values) {
- upgrade := ssl(o)
+func (cn *conn) ssl(o values) error {
+ upgrade, err := ssl(o)
+ if err != nil {
+ return err
+ }
+
if upgrade == nil {
// Nothing to do
- return
+ return nil
}
w := cn.writeBuf(0)
w.int32(80877103)
- if err := cn.sendStartupPacket(w); err != nil {
- panic(err)
+ if err = cn.sendStartupPacket(w); err != nil {
+ return err
}
b := cn.scratch[:1]
- _, err := io.ReadFull(cn.c, b)
+ _, err = io.ReadFull(cn.c, b)
if err != nil {
- panic(err)
+ return err
}
if b[0] != 'S' {
- panic(ErrSSLNotSupported)
+ return ErrSSLNotSupported
}
- cn.c = upgrade(cn.c)
+ cn.c, err = upgrade(cn.c)
+ return err
}
// isDriverSetting returns true iff a setting is purely for configuring the
@@ -1154,6 +1130,55 @@ func (cn *conn) auth(r *readBuf, o values) {
if r.int32() != 0 {
errorf("unexpected authentication response: %q", t)
}
+ case 10:
+ sc := scram.NewClient(sha256.New, o["user"], o["password"])
+ sc.Step(nil)
+ if sc.Err() != nil {
+ errorf("SCRAM-SHA-256 error: %s", sc.Err().Error())
+ }
+ scOut := sc.Out()
+
+ w := cn.writeBuf('p')
+ w.string("SCRAM-SHA-256")
+ w.int32(len(scOut))
+ w.bytes(scOut)
+ cn.send(w)
+
+ t, r := cn.recv()
+ if t != 'R' {
+ errorf("unexpected password response: %q", t)
+ }
+
+ if r.int32() != 11 {
+ errorf("unexpected authentication response: %q", t)
+ }
+
+ nextStep := r.next(len(*r))
+ sc.Step(nextStep)
+ if sc.Err() != nil {
+ errorf("SCRAM-SHA-256 error: %s", sc.Err().Error())
+ }
+
+ scOut = sc.Out()
+ w = cn.writeBuf('p')
+ w.bytes(scOut)
+ cn.send(w)
+
+ t, r = cn.recv()
+ if t != 'R' {
+ errorf("unexpected password response: %q", t)
+ }
+
+ if r.int32() != 12 {
+ errorf("unexpected authentication response: %q", t)
+ }
+
+ nextStep = r.next(len(*r))
+ sc.Step(nextStep)
+ if sc.Err() != nil {
+ errorf("SCRAM-SHA-256 error: %s", sc.Err().Error())
+ }
+
default:
errorf("unknown authentication response: %d", code)
}
@@ -1171,12 +1196,10 @@ var colFmtDataAllBinary = []byte{0, 1, 0, 1}
var colFmtDataAllText = []byte{0, 0}
type stmt struct {
- cn *conn
- name string
- colNames []string
- colFmts []format
+ cn *conn
+ name string
+ rowsHeader
colFmtData []byte
- colTyps []fieldDesc
paramTyps []oid.Oid
closed bool
}
@@ -1222,10 +1245,8 @@ func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) {
st.exec(v)
return &rows{
- cn: st.cn,
- colNames: st.colNames,
- colTyps: st.colTyps,
- colFmts: st.colFmts,
+ cn: st.cn,
+ rowsHeader: st.rowsHeader,
}, nil
}
@@ -1335,16 +1356,22 @@ func (cn *conn) parseComplete(commandTag string) (driver.Result, string) {
return driver.RowsAffected(n), commandTag
}
-type rows struct {
- cn *conn
- finish func()
+type rowsHeader struct {
colNames []string
colTyps []fieldDesc
colFmts []format
- done bool
- rb readBuf
- result driver.Result
- tag string
+}
+
+type rows struct {
+ cn *conn
+ finish func()
+ rowsHeader
+ done bool
+ rb readBuf
+ result driver.Result
+ tag string
+
+ next *rowsHeader
}
func (rs *rows) Close() error {
@@ -1431,7 +1458,8 @@ func (rs *rows) Next(dest []driver.Value) (err error) {
}
return
case 'T':
- rs.colNames, rs.colFmts, rs.colTyps = parsePortalRowDescribe(&rs.rb)
+ next := parsePortalRowDescribe(&rs.rb)
+ rs.next = &next
return io.EOF
default:
errorf("unexpected message after execute: %q", t)
@@ -1440,10 +1468,16 @@ func (rs *rows) Next(dest []driver.Value) (err error) {
}
func (rs *rows) HasNextResultSet() bool {
- return !rs.done
+	return rs.next != nil && !rs.done
}
func (rs *rows) NextResultSet() error {
+ if rs.next == nil {
+ return io.EOF
+ }
+ rs.rowsHeader = *rs.next
+ rs.next = nil
return nil
}
@@ -1621,13 +1655,13 @@ func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames [
}
}
-func (cn *conn) readPortalDescribeResponse() (colNames []string, colFmts []format, colTyps []fieldDesc) {
+func (cn *conn) readPortalDescribeResponse() rowsHeader {
t, r := cn.recv1()
switch t {
case 'T':
return parsePortalRowDescribe(r)
case 'n':
- return nil, nil, nil
+ return rowsHeader{}
case 'E':
err := parseError(r)
cn.readReadyForQuery()
@@ -1733,11 +1767,11 @@ func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDe
return
}
-func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, colTyps []fieldDesc) {
+func parsePortalRowDescribe(r *readBuf) rowsHeader {
n := r.int16()
- colNames = make([]string, n)
- colFmts = make([]format, n)
- colTyps = make([]fieldDesc, n)
+ colNames := make([]string, n)
+ colFmts := make([]format, n)
+ colTyps := make([]fieldDesc, n)
for i := range colNames {
colNames[i] = r.string()
r.next(6)
@@ -1746,7 +1780,11 @@ func parsePortalRowDescribe(r *readBuf) (colNames []string, colFmts []format, co
colTyps[i].Mod = r.int32()
colFmts[i] = format(r.int16())
}
- return
+ return rowsHeader{
+ colNames: colNames,
+ colFmts: colFmts,
+ colTyps: colTyps,
+ }
}
// parseEnviron tries to mimic some of libpq's environment handling
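
The new `DialerContext` interface above is detected via a type assertion in `dial()`, so existing `Dialer` implementations keep working while context-aware ones gain cancellation. A sketch of a custom dialer that satisfies both interfaces (the DSN is a placeholder):

```go
package main

import (
	"context"
	"log"
	"net"
	"time"

	"github.com/lib/pq"
)

// traceDialer satisfies pq.Dialer and pq.DialerContext, so dial()
// prefers DialContext and the connection honors ctx cancellation.
type traceDialer struct{ d net.Dialer }

func (t traceDialer) Dial(network, address string) (net.Conn, error) {
	return t.d.Dial(network, address)
}

func (t traceDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return t.d.DialContext(ctx, network, address)
}

func (t traceDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
	log.Printf("dialing %s %s", network, address) // hook point for tracing
	return t.d.DialContext(ctx, network, address)
}

func main() {
	conn, err := pq.DialOpen(traceDialer{}, "postgres://user:pass@localhost/db?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```
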
diff --git a/src/vendor/github.com/lib/pq/conn_go18.go b/src/vendor/github.com/lib/pq/conn_go18.go
index ab97a104d..0fdd06a61 100644
--- a/src/vendor/github.com/lib/pq/conn_go18.go
+++ b/src/vendor/github.com/lib/pq/conn_go18.go
@@ -1,5 +1,3 @@
-// +build go1.8
-
package pq
import (
@@ -9,6 +7,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "time"
)
// Implement the "QueryerContext" interface
@@ -76,13 +75,32 @@ func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx,
return tx, nil
}
+func (cn *conn) Ping(ctx context.Context) error {
+ if finish := cn.watchCancel(ctx); finish != nil {
+ defer finish()
+ }
+ rows, err := cn.simpleQuery("SELECT 'lib/pq ping test';")
+ if err != nil {
+ return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger
+ }
+ rows.Close()
+ return nil
+}
+
func (cn *conn) watchCancel(ctx context.Context) func() {
if done := ctx.Done(); done != nil {
finished := make(chan struct{})
go func() {
select {
case <-done:
- _ = cn.cancel()
+ // At this point the function level context is canceled,
+ // so it must not be used for the additional network
+ // request to cancel the query.
+ // Create a new context to pass into the dial.
+ ctxCancel, cancel := context.WithTimeout(context.Background(), time.Second*10)
+ defer cancel()
+
+ _ = cn.cancel(ctxCancel)
finished <- struct{}{}
case <-finished:
}
@@ -97,8 +115,8 @@ func (cn *conn) watchCancel(ctx context.Context) func() {
return nil
}
-func (cn *conn) cancel() error {
- c, err := dial(cn.dialer, cn.opts)
+func (cn *conn) cancel(ctx context.Context) error {
+ c, err := dial(ctx, cn.dialer, cn.opts)
if err != nil {
return err
}
@@ -108,7 +126,10 @@ func (cn *conn) cancel() error {
can := conn{
c: c,
}
- can.ssl(cn.opts)
+ err = can.ssl(cn.opts)
+ if err != nil {
+ return err
+ }
w := can.writeBuf(0)
w.int32(80877102) // cancel request code
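
`watchCancel` above implements context cancellation the PostgreSQL way: a second connection is dialed purely to deliver the cancel request (code 80877102) for the running query. A sketch of how this surfaces through `database/sql`, assuming a reachable server behind a placeholder DSN:

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"time"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://user:pass@localhost/db?sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// pg_sleep outlives the deadline, so the driver opens its side
	// connection and cancels the statement server-side.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	_, err = db.ExecContext(ctx, "SELECT pg_sleep(5)")
	fmt.Println(err) // typically: pq: canceling statement due to user request
}
```
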
diff --git a/src/vendor/github.com/lib/pq/connector.go b/src/vendor/github.com/lib/pq/connector.go
new file mode 100644
index 000000000..2f8ced673
--- /dev/null
+++ b/src/vendor/github.com/lib/pq/connector.go
@@ -0,0 +1,110 @@
+package pq
+
+import (
+ "context"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+)
+
+// Connector represents a fixed configuration for the pq driver with a given
+// name. Connector satisfies the database/sql/driver Connector interface and
+// can be used to create any number of DB Conn's via the database/sql OpenDB
+// function.
+//
+// See https://golang.org/pkg/database/sql/driver/#Connector.
+// See https://golang.org/pkg/database/sql/#OpenDB.
+type Connector struct {
+ opts values
+ dialer Dialer
+}
+
+// Connect returns a connection to the database using the fixed configuration
+// of this Connector. Context is not used.
+func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) {
+ return c.open(ctx)
+}
+
+// Driver returns the underlying driver of this Connector.
+func (c *Connector) Driver() driver.Driver {
+ return &Driver{}
+}
+
+// NewConnector returns a connector for the pq driver in a fixed configuration
+// with the given dsn. The returned connector can be used to create any number
+// of equivalent Conn's. The returned connector is intended to be used with
+// database/sql.OpenDB.
+//
+// See https://golang.org/pkg/database/sql/driver/#Connector.
+// See https://golang.org/pkg/database/sql/#OpenDB.
+func NewConnector(dsn string) (*Connector, error) {
+ var err error
+ o := make(values)
+
+ // A number of defaults are applied here, in this order:
+ //
+ // * Very low precedence defaults applied in every situation
+ // * Environment variables
+ // * Explicitly passed connection information
+ o["host"] = "localhost"
+ o["port"] = "5432"
+ // N.B.: Extra float digits should be set to 3, but that breaks
+ // Postgres 8.4 and older, where the max is 2.
+ o["extra_float_digits"] = "2"
+ for k, v := range parseEnviron(os.Environ()) {
+ o[k] = v
+ }
+
+ if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") {
+ dsn, err = ParseURL(dsn)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if err := parseOpts(dsn, o); err != nil {
+ return nil, err
+ }
+
+ // Use the "fallback" application name if necessary
+ if fallback, ok := o["fallback_application_name"]; ok {
+ if _, ok := o["application_name"]; !ok {
+ o["application_name"] = fallback
+ }
+ }
+
+ // We can't work with any client_encoding other than UTF-8 currently.
+ // However, we have historically allowed the user to set it to UTF-8
+ // explicitly, and there's no reason to break such programs, so allow that.
+ // Note that the "options" setting could also set client_encoding, but
+ // parsing its value is not worth it. Instead, we always explicitly send
+ // client_encoding as a separate run-time parameter, which should override
+ // anything set in options.
+ if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) {
+ return nil, errors.New("client_encoding must be absent or 'UTF8'")
+ }
+ o["client_encoding"] = "UTF8"
+ // DateStyle needs a similar treatment.
+ if datestyle, ok := o["datestyle"]; ok {
+ if datestyle != "ISO, MDY" {
+ return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle)
+ }
+ } else {
+ o["datestyle"] = "ISO, MDY"
+ }
+
+ // If a user is not provided by any other means, the last
+ // resort is to use the current operating system provided user
+ // name.
+ if _, ok := o["user"]; !ok {
+ u, err := userCurrent()
+ if err != nil {
+ return nil, err
+ }
+ o["user"] = u
+ }
+
+ return &Connector{opts: o, dialer: defaultDialer{}}, nil
+}
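
`NewConnector` pairs with `database/sql.OpenDB` (Go 1.10+): the DSN is parsed once, and the fixed configuration is reused for every pooled connection instead of being re-parsed per open. A minimal sketch with a placeholder DSN:

```go
package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

func main() {
	connector, err := pq.NewConnector("postgres://user:pass@localhost/db?sslmode=disable")
	if err != nil {
		log.Fatal(err) // DSN errors surface here, before any dialing
	}
	db := sql.OpenDB(connector)
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```
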
diff --git a/src/vendor/github.com/lib/pq/doc.go b/src/vendor/github.com/lib/pq/doc.go
index a1b029713..2a60054e2 100644
--- a/src/vendor/github.com/lib/pq/doc.go
+++ b/src/vendor/github.com/lib/pq/doc.go
@@ -239,7 +239,7 @@ for more information). Note that the channel name will be truncated to 63
bytes by the PostgreSQL server.
You can find a complete, working example of Listener usage at
-http://godoc.org/github.com/lib/pq/example/listen.
+https://godoc.org/github.com/lib/pq/example/listen.
*/
package pq
diff --git a/src/vendor/github.com/lib/pq/error.go b/src/vendor/github.com/lib/pq/error.go
index 6928d9670..96aae29c6 100644
--- a/src/vendor/github.com/lib/pq/error.go
+++ b/src/vendor/github.com/lib/pq/error.go
@@ -460,6 +460,11 @@ func errorf(s string, args ...interface{}) {
panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)))
}
+// TODO(ainar-g) Rename to errorf after removing panics.
+func fmterrorf(s string, args ...interface{}) error {
+ return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))
+}
+
func errRecoverNoErrBadConn(err *error) {
e := recover()
if e == nil {
@@ -488,7 +493,8 @@ func (c *conn) errRecover(err *error) {
*err = v
}
case *net.OpError:
- *err = driver.ErrBadConn
+ c.bad = true
+ *err = v
case error:
if v == io.EOF || v.(error).Error() == "remote error: handshake failure" {
*err = driver.ErrBadConn
diff --git a/src/vendor/github.com/lib/pq/go.mod b/src/vendor/github.com/lib/pq/go.mod
new file mode 100644
index 000000000..edf0b343f
--- /dev/null
+++ b/src/vendor/github.com/lib/pq/go.mod
@@ -0,0 +1 @@
+module github.com/lib/pq
diff --git a/src/vendor/github.com/lib/pq/notify.go b/src/vendor/github.com/lib/pq/notify.go
index 304e081fe..850bb9040 100644
--- a/src/vendor/github.com/lib/pq/notify.go
+++ b/src/vendor/github.com/lib/pq/notify.go
@@ -725,6 +725,9 @@ func (l *Listener) Close() error {
}
l.isClosed = true
+ // Unblock calls to Listen()
+ l.reconnectCond.Broadcast()
+
return nil
}
@@ -784,7 +787,7 @@ func (l *Listener) listenerConnLoop() {
}
l.emitEvent(ListenerEventDisconnected, err)
- time.Sleep(nextReconnect.Sub(time.Now()))
+ time.Sleep(time.Until(nextReconnect))
}
}
diff --git a/src/vendor/github.com/lib/pq/scram/scram.go b/src/vendor/github.com/lib/pq/scram/scram.go
new file mode 100644
index 000000000..5d0358f8f
--- /dev/null
+++ b/src/vendor/github.com/lib/pq/scram/scram.go
@@ -0,0 +1,264 @@
+// Copyright (c) 2014 - Gustavo Niemeyer
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+// list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802.
+//
+// http://tools.ietf.org/html/rfc5802
+//
+package scram
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "hash"
+ "strconv"
+ "strings"
+)
+
+// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc).
+//
+// A Client may be used within a SASL conversation with logic resembling:
+//
+// var in []byte
+// var client = scram.NewClient(sha1.New, user, pass)
+// for client.Step(in) {
+// out := client.Out()
+// // send out to server
+// in := serverOut
+// }
+// if client.Err() != nil {
+// // auth failed
+// }
+//
+type Client struct {
+ newHash func() hash.Hash
+
+ user string
+ pass string
+ step int
+ out bytes.Buffer
+ err error
+
+ clientNonce []byte
+ serverNonce []byte
+ saltedPass []byte
+ authMsg bytes.Buffer
+}
+
+// NewClient returns a new SCRAM-* client with the provided hash algorithm.
+//
+// For SCRAM-SHA-256, for example, use:
+//
+// client := scram.NewClient(sha256.New, user, pass)
+//
+func NewClient(newHash func() hash.Hash, user, pass string) *Client {
+ c := &Client{
+ newHash: newHash,
+ user: user,
+ pass: pass,
+ }
+ c.out.Grow(256)
+ c.authMsg.Grow(256)
+ return c
+}
+
+// Out returns the data to be sent to the server in the current step.
+func (c *Client) Out() []byte {
+ if c.out.Len() == 0 {
+ return nil
+ }
+ return c.out.Bytes()
+}
+
+// Err returns the error that occurred, or nil if there were no errors.
+func (c *Client) Err() error {
+ return c.err
+}
+
+// SetNonce sets the client nonce to the provided value.
+// If not set, the nonce is generated automatically out of crypto/rand on the first step.
+func (c *Client) SetNonce(nonce []byte) {
+ c.clientNonce = nonce
+}
+
+var escaper = strings.NewReplacer("=", "=3D", ",", "=2C")
+
+// Step processes the incoming data from the server and makes the
+// next round of data for the server available via Client.Out.
+// Step returns false if there are no errors and more data is
+// still expected.
+func (c *Client) Step(in []byte) bool {
+ c.out.Reset()
+ if c.step > 2 || c.err != nil {
+ return false
+ }
+ c.step++
+ switch c.step {
+ case 1:
+ c.err = c.step1(in)
+ case 2:
+ c.err = c.step2(in)
+ case 3:
+ c.err = c.step3(in)
+ }
+ return c.step > 2 || c.err != nil
+}
+
+func (c *Client) step1(in []byte) error {
+ if len(c.clientNonce) == 0 {
+ const nonceLen = 16
+ buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen))
+ if _, err := rand.Read(buf[:nonceLen]); err != nil {
+ return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err)
+ }
+ c.clientNonce = buf[nonceLen:]
+ b64.Encode(c.clientNonce, buf[:nonceLen])
+ }
+ c.authMsg.WriteString("n=")
+ escaper.WriteString(&c.authMsg, c.user)
+ c.authMsg.WriteString(",r=")
+ c.authMsg.Write(c.clientNonce)
+
+ c.out.WriteString("n,,")
+ c.out.Write(c.authMsg.Bytes())
+ return nil
+}
+
+var b64 = base64.StdEncoding
+
+func (c *Client) step2(in []byte) error {
+ c.authMsg.WriteByte(',')
+ c.authMsg.Write(in)
+
+ fields := bytes.Split(in, []byte(","))
+ if len(fields) != 3 {
+ return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in)
+ }
+ if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 {
+ return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0])
+ }
+ if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 {
+ return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1])
+ }
+ if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 {
+ return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2])
+ }
+
+ c.serverNonce = fields[0][2:]
+ if !bytes.HasPrefix(c.serverNonce, c.clientNonce) {
+ return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce)
+ }
+
+ salt := make([]byte, b64.DecodedLen(len(fields[1][2:])))
+ n, err := b64.Decode(salt, fields[1][2:])
+ if err != nil {
+ return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1])
+ }
+ salt = salt[:n]
+ iterCount, err := strconv.Atoi(string(fields[2][2:]))
+ if err != nil {
+ return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2])
+ }
+ c.saltPassword(salt, iterCount)
+
+ c.authMsg.WriteString(",c=biws,r=")
+ c.authMsg.Write(c.serverNonce)
+
+ c.out.WriteString("c=biws,r=")
+ c.out.Write(c.serverNonce)
+ c.out.WriteString(",p=")
+ c.out.Write(c.clientProof())
+ return nil
+}
+
+func (c *Client) step3(in []byte) error {
+ var isv, ise bool
+ var fields = bytes.Split(in, []byte(","))
+ if len(fields) == 1 {
+ isv = bytes.HasPrefix(fields[0], []byte("v="))
+ ise = bytes.HasPrefix(fields[0], []byte("e="))
+ }
+ if ise {
+ return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:])
+ } else if !isv {
+ return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in)
+ }
+ if !bytes.Equal(c.serverSignature(), fields[0][2:]) {
+ return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:])
+ }
+ return nil
+}
+
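+// saltPassword computes SaltedPassword = Hi(password, salt, iterCount)
+// from RFC 5802: U1 = HMAC(password, salt || INT(1)), Un = HMAC(password, U(n-1)),
+// and Hi is the XOR of U1..Un (PBKDF2 with HMAC as the PRF, one output block).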
+func (c *Client) saltPassword(salt []byte, iterCount int) {
+ mac := hmac.New(c.newHash, []byte(c.pass))
+ mac.Write(salt)
+ mac.Write([]byte{0, 0, 0, 1})
+ ui := mac.Sum(nil)
+ hi := make([]byte, len(ui))
+ copy(hi, ui)
+ for i := 1; i < iterCount; i++ {
+ mac.Reset()
+ mac.Write(ui)
+ mac.Sum(ui[:0])
+ for j, b := range ui {
+ hi[j] ^= b
+ }
+ }
+ c.saltedPass = hi
+}
+
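+// clientProof computes ClientProof = ClientKey XOR HMAC(StoredKey, AuthMessage),
+// with ClientKey = HMAC(SaltedPassword, "Client Key") and StoredKey = H(ClientKey),
+// returning it base64-encoded as the SCRAM wire format requires.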
+func (c *Client) clientProof() []byte {
+ mac := hmac.New(c.newHash, c.saltedPass)
+ mac.Write([]byte("Client Key"))
+ clientKey := mac.Sum(nil)
+ hash := c.newHash()
+ hash.Write(clientKey)
+ storedKey := hash.Sum(nil)
+ mac = hmac.New(c.newHash, storedKey)
+ mac.Write(c.authMsg.Bytes())
+ clientProof := mac.Sum(nil)
+ for i, b := range clientKey {
+ clientProof[i] ^= b
+ }
+ clientProof64 := make([]byte, b64.EncodedLen(len(clientProof)))
+ b64.Encode(clientProof64, clientProof)
+ return clientProof64
+}
+
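+// serverSignature computes ServerSignature = HMAC(ServerKey, AuthMessage),
+// with ServerKey = HMAC(SaltedPassword, "Server Key"); step3 compares it
+// against the server's final message to authenticate the server itself.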
+func (c *Client) serverSignature() []byte {
+ mac := hmac.New(c.newHash, c.saltedPass)
+ mac.Write([]byte("Server Key"))
+ serverKey := mac.Sum(nil)
+
+ mac = hmac.New(c.newHash, serverKey)
+ mac.Write(c.authMsg.Bytes())
+ serverSignature := mac.Sum(nil)
+
+ encoded := make([]byte, b64.EncodedLen(len(serverSignature)))
+ b64.Encode(encoded, serverSignature)
+ return encoded
+}
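
The vendored SCRAM client above drives the three-message exchange through Step, Out, and Err. Below is a minimal sketch of a full conversation, assuming the package is importable from its vendored path github.com/lib/pq/scram; roundTrip is a hypothetical stand-in for the transport (inside lib/pq the payloads actually travel in PostgreSQL's SASL protocol messages):

```go
package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/lib/pq/scram" // assumed vendored import path
)

// roundTrip is a hypothetical helper: it would send out to the server
// and return the server's SASL reply. Elided here.
func roundTrip(out []byte) []byte {
	return nil
}

func main() {
	client := scram.NewClient(sha256.New, "user", "secret")
	var in []byte
	// Step returns false while more data is still expected, so loop
	// until it reports completion (or an error), then check Err.
	for !client.Step(in) {
		in = roundTrip(client.Out())
	}
	if err := client.Err(); err != nil {
		fmt.Println("auth failed:", err)
		return
	}
	fmt.Println("SCRAM authentication succeeded")
}
```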
diff --git a/src/vendor/github.com/lib/pq/ssl.go b/src/vendor/github.com/lib/pq/ssl.go
index 7deb30436..d90208455 100644
--- a/src/vendor/github.com/lib/pq/ssl.go
+++ b/src/vendor/github.com/lib/pq/ssl.go
@@ -12,7 +12,7 @@ import (
// ssl generates a function to upgrade a net.Conn based on the "sslmode" and
// related settings. The function is nil when no upgrade should take place.
-func ssl(o values) func(net.Conn) net.Conn {
+func ssl(o values) (func(net.Conn) (net.Conn, error), error) {
verifyCaOnly := false
tlsConf := tls.Config{}
switch mode := o["sslmode"]; mode {
@@ -45,29 +45,44 @@ func ssl(o values) func(net.Conn) net.Conn {
case "verify-full":
tlsConf.ServerName = o["host"]
case "disable":
- return nil
+ return nil, nil
default:
- errorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode)
+ return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode)
}
- sslClientCertificates(&tlsConf, o)
- sslCertificateAuthority(&tlsConf, o)
- sslRenegotiation(&tlsConf)
+ err := sslClientCertificates(&tlsConf, o)
+ if err != nil {
+ return nil, err
+ }
+ err = sslCertificateAuthority(&tlsConf, o)
+ if err != nil {
+ return nil, err
+ }
- return func(conn net.Conn) net.Conn {
+ // Accept renegotiation requests initiated by the backend.
+ //
+ // Renegotiation was deprecated then removed from PostgreSQL 9.5, but
+ // the default configuration of older versions has it enabled. Redshift
+ // also initiates renegotiations and cannot be reconfigured.
+ tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient
+
+ return func(conn net.Conn) (net.Conn, error) {
client := tls.Client(conn, &tlsConf)
if verifyCaOnly {
- sslVerifyCertificateAuthority(client, &tlsConf)
+ err := sslVerifyCertificateAuthority(client, &tlsConf)
+ if err != nil {
+ return nil, err
+ }
}
- return client
- }
+ return client, nil
+ }, nil
}
// sslClientCertificates adds the certificate specified in the "sslcert" and
// "sslkey" settings, or if they aren't set, from the .postgresql directory
// in the user's home directory. The configured files must exist and have
// the correct permissions.
-func sslClientCertificates(tlsConf *tls.Config, o values) {
+func sslClientCertificates(tlsConf *tls.Config, o values) error {
// user.Current() might fail when cross-compiling. We have to ignore the
// error and continue without home directory defaults, since we wouldn't
// know from where to load them.
@@ -82,13 +97,13 @@ func sslClientCertificates(tlsConf *tls.Config, o values) {
}
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045
if len(sslcert) == 0 {
- return
+ return nil
}
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054
if _, err := os.Stat(sslcert); os.IsNotExist(err) {
- return
+ return nil
} else if err != nil {
- panic(err)
+ return err
}
// In libpq, the ssl key is only loaded if the setting is not blank.
@@ -101,19 +116,21 @@ func sslClientCertificates(tlsConf *tls.Config, o values) {
if len(sslkey) > 0 {
if err := sslKeyPermissions(sslkey); err != nil {
- panic(err)
+ return err
}
}
cert, err := tls.LoadX509KeyPair(sslcert, sslkey)
if err != nil {
- panic(err)
+ return err
}
+
tlsConf.Certificates = []tls.Certificate{cert}
+ return nil
}
// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting.
-func sslCertificateAuthority(tlsConf *tls.Config, o values) {
+func sslCertificateAuthority(tlsConf *tls.Config, o values) error {
// In libpq, the root certificate is only loaded if the setting is not blank.
//
// https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951
@@ -122,22 +139,24 @@ func sslCertificateAuthority(tlsConf *tls.Config, o values) {
cert, err := ioutil.ReadFile(sslrootcert)
if err != nil {
- panic(err)
+ return err
}
if !tlsConf.RootCAs.AppendCertsFromPEM(cert) {
- errorf("couldn't parse pem in sslrootcert")
+ return fmterrorf("couldn't parse pem in sslrootcert")
}
}
+
+ return nil
}
// sslVerifyCertificateAuthority carries out a TLS handshake to the server and
// verifies the presented certificate against the CA, i.e. the one specified in
// sslrootcert or the system CA if sslrootcert was not specified.
-func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) {
+func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error {
err := client.Handshake()
if err != nil {
- panic(err)
+ return err
}
certs := client.ConnectionState().PeerCertificates
opts := x509.VerifyOptions{
@@ -152,7 +171,5 @@ func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) {
opts.Intermediates.AddCert(cert)
}
_, err = certs[0].Verify(opts)
- if err != nil {
- panic(err)
- }
+ return err
}
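
With this change, ssl() surfaces configuration problems as returned errors instead of panicking, and the upgrade closure it hands back can fail as well. A sketch of driving the new signature from inside package pq; upgradeConn is illustrative, not the actual lib/pq call site:

```go
package pq

import "net"

// upgradeConn shows the three outcomes of the refactored ssl():
// a configuration error, (nil, nil) for sslmode=disable, or an
// upgrade function that itself may fail during the TLS handshake.
func upgradeConn(conn net.Conn, o values) (net.Conn, error) {
	upgrade, err := ssl(o)
	if err != nil {
		return nil, err // e.g. unsupported sslmode, bad certificates
	}
	if upgrade == nil {
		return conn, nil // sslmode=disable: keep the plaintext connection
	}
	return upgrade(conn) // handshake errors now propagate instead of panicking
}
```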
diff --git a/src/vendor/github.com/lib/pq/ssl_go1.7.go b/src/vendor/github.com/lib/pq/ssl_go1.7.go
deleted file mode 100644
index d7ba43b32..000000000
--- a/src/vendor/github.com/lib/pq/ssl_go1.7.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build go1.7
-
-package pq
-
-import "crypto/tls"
-
-// Accept renegotiation requests initiated by the backend.
-//
-// Renegotiation was deprecated then removed from PostgreSQL 9.5, but
-// the default configuration of older versions has it enabled. Redshift
-// also initiates renegotiations and cannot be reconfigured.
-func sslRenegotiation(conf *tls.Config) {
- conf.Renegotiation = tls.RenegotiateFreelyAsClient
-}
diff --git a/src/vendor/github.com/lib/pq/ssl_renegotiation.go b/src/vendor/github.com/lib/pq/ssl_renegotiation.go
deleted file mode 100644
index 85ed5e437..000000000
--- a/src/vendor/github.com/lib/pq/ssl_renegotiation.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !go1.7
-
-package pq
-
-import "crypto/tls"
-
-// Renegotiation is not supported by crypto/tls until Go 1.7.
-func sslRenegotiation(*tls.Config) {}
diff --git a/src/vendor/github.com/mattn/go-runewidth/.travis.yml b/src/vendor/github.com/mattn/go-runewidth/.travis.yml
new file mode 100644
index 000000000..5c9c2a30f
--- /dev/null
+++ b/src/vendor/github.com/mattn/go-runewidth/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+go:
+ - tip
+before_install:
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+script:
+ - $HOME/gopath/bin/goveralls -repotoken lAKAWPzcGsD3A8yBX3BGGtRUdJ6CaGERL
diff --git a/src/vendor/github.com/mattn/go-runewidth/LICENSE b/src/vendor/github.com/mattn/go-runewidth/LICENSE
new file mode 100644
index 000000000..91b5cef30
--- /dev/null
+++ b/src/vendor/github.com/mattn/go-runewidth/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/src/vendor/github.com/mattn/go-runewidth/README.mkd b/src/vendor/github.com/mattn/go-runewidth/README.mkd
new file mode 100644
index 000000000..66663a94b
--- /dev/null
+++ b/src/vendor/github.com/mattn/go-runewidth/README.mkd
@@ -0,0 +1,27 @@
+go-runewidth
+============
+
+[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth)
+[![Coverage Status](https://coveralls.io/repos/mattn/go-runewidth/badge.png?branch=HEAD)](https://coveralls.io/r/mattn/go-runewidth?branch=HEAD)
+[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth)
+[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth)
+
+Provides functions to get the fixed width of a character or string.
+
+Usage
+-----
+
+```go
+runewidth.StringWidth("つのだ☆HIRO") == 12
+```
+
+
+Author
+------
+
+Yasuhiro Matsumoto
+
+License
+-------
+
+under the MIT License: http://mattn.mit-license.org/2013
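
The package whose README appears above also exposes per-rune widths, truncation, and padding; everything used here is defined in runewidth.go below. A small usage sketch (the noted widths assume a CJK locale, since ☆ has ambiguous width):

```go
package main

import (
	"fmt"

	"github.com/mattn/go-runewidth"
)

func main() {
	// Widths are terminal display cells, not bytes or runes.
	fmt.Println(runewidth.StringWidth("つのだ☆HIRO")) // 12 in a CJK locale
	fmt.Println(runewidth.RuneWidth('世'))             // 2

	// Truncate to at most 10 cells, appending the tail when cut.
	fmt.Println(runewidth.Truncate("長い長いテキスト", 10, "..."))

	// Pad to a fixed column width for aligned output.
	fmt.Println(runewidth.FillRight("abc", 6) + "|")
}
```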
diff --git a/src/vendor/github.com/mattn/go-runewidth/runewidth.go b/src/vendor/github.com/mattn/go-runewidth/runewidth.go
new file mode 100644
index 000000000..3cb94106f
--- /dev/null
+++ b/src/vendor/github.com/mattn/go-runewidth/runewidth.go
@@ -0,0 +1,977 @@
+package runewidth
+
+import (
+ "os"
+)
+
+var (
+	// EastAsianWidth is set to true when the current locale is CJK
+	EastAsianWidth bool
+
+	// ZeroWidthJoiner is a flag enabling UTR#51 zero-width-joiner handling
+	ZeroWidthJoiner bool
+
+	// DefaultCondition is a Condition initialized from the current locale
+	DefaultCondition = &Condition{}
+)
+
+func init() {
+ handleEnv()
+}
+
+func handleEnv() {
+ env := os.Getenv("RUNEWIDTH_EASTASIAN")
+ if env == "" {
+ EastAsianWidth = IsEastAsian()
+ } else {
+ EastAsianWidth = env == "1"
+ }
+ // update DefaultCondition
+ DefaultCondition.EastAsianWidth = EastAsianWidth
+ DefaultCondition.ZeroWidthJoiner = ZeroWidthJoiner
+}
+
+type interval struct {
+ first rune
+ last rune
+}
+
+type table []interval
+
+func inTables(r rune, ts ...table) bool {
+ for _, t := range ts {
+ if inTable(r, t) {
+ return true
+ }
+ }
+ return false
+}
+
+// inTable reports whether r falls within one of t's sorted, inclusive
+// ranges, using binary search.
+func inTable(r rune, t table) bool {
+ if r < t[0].first {
+ return false
+ }
+
+ bot := 0
+ top := len(t) - 1
+ for top >= bot {
+ mid := (bot + top) >> 1
+
+ switch {
+ case t[mid].last < r:
+ bot = mid + 1
+ case t[mid].first > r:
+ top = mid - 1
+ default:
+ return true
+ }
+ }
+
+ return false
+}
+
+var private = table{
+ {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD},
+}
+
+var nonprint = table{
+ {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD},
+ {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F},
+ {0x2028, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF},
+ {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF},
+}
+
+var combining = table{
+ {0x0300, 0x036F}, {0x0483, 0x0489}, {0x0591, 0x05BD},
+ {0x05BF, 0x05BF}, {0x05C1, 0x05C2}, {0x05C4, 0x05C5},
+ {0x05C7, 0x05C7}, {0x0610, 0x061A}, {0x064B, 0x065F},
+ {0x0670, 0x0670}, {0x06D6, 0x06DC}, {0x06DF, 0x06E4},
+ {0x06E7, 0x06E8}, {0x06EA, 0x06ED}, {0x0711, 0x0711},
+ {0x0730, 0x074A}, {0x07A6, 0x07B0}, {0x07EB, 0x07F3},
+ {0x0816, 0x0819}, {0x081B, 0x0823}, {0x0825, 0x0827},
+ {0x0829, 0x082D}, {0x0859, 0x085B}, {0x08D4, 0x08E1},
+ {0x08E3, 0x0903}, {0x093A, 0x093C}, {0x093E, 0x094F},
+ {0x0951, 0x0957}, {0x0962, 0x0963}, {0x0981, 0x0983},
+ {0x09BC, 0x09BC}, {0x09BE, 0x09C4}, {0x09C7, 0x09C8},
+ {0x09CB, 0x09CD}, {0x09D7, 0x09D7}, {0x09E2, 0x09E3},
+ {0x0A01, 0x0A03}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42},
+ {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51},
+ {0x0A70, 0x0A71}, {0x0A75, 0x0A75}, {0x0A81, 0x0A83},
+ {0x0ABC, 0x0ABC}, {0x0ABE, 0x0AC5}, {0x0AC7, 0x0AC9},
+ {0x0ACB, 0x0ACD}, {0x0AE2, 0x0AE3}, {0x0B01, 0x0B03},
+ {0x0B3C, 0x0B3C}, {0x0B3E, 0x0B44}, {0x0B47, 0x0B48},
+ {0x0B4B, 0x0B4D}, {0x0B56, 0x0B57}, {0x0B62, 0x0B63},
+ {0x0B82, 0x0B82}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8},
+ {0x0BCA, 0x0BCD}, {0x0BD7, 0x0BD7}, {0x0C00, 0x0C03},
+ {0x0C3E, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D},
+ {0x0C55, 0x0C56}, {0x0C62, 0x0C63}, {0x0C81, 0x0C83},
+ {0x0CBC, 0x0CBC}, {0x0CBE, 0x0CC4}, {0x0CC6, 0x0CC8},
+ {0x0CCA, 0x0CCD}, {0x0CD5, 0x0CD6}, {0x0CE2, 0x0CE3},
+ {0x0D01, 0x0D03}, {0x0D3E, 0x0D44}, {0x0D46, 0x0D48},
+ {0x0D4A, 0x0D4D}, {0x0D57, 0x0D57}, {0x0D62, 0x0D63},
+ {0x0D82, 0x0D83}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4},
+ {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, {0x0DF2, 0x0DF3},
+ {0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E},
+ {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC},
+ {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35},
+ {0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F3E, 0x0F3F},
+ {0x0F71, 0x0F84}, {0x0F86, 0x0F87}, {0x0F8D, 0x0F97},
+ {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102B, 0x103E},
+ {0x1056, 0x1059}, {0x105E, 0x1060}, {0x1062, 0x1064},
+ {0x1067, 0x106D}, {0x1071, 0x1074}, {0x1082, 0x108D},
+ {0x108F, 0x108F}, {0x109A, 0x109D}, {0x135D, 0x135F},
+ {0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753},
+ {0x1772, 0x1773}, {0x17B4, 0x17D3}, {0x17DD, 0x17DD},
+ {0x180B, 0x180D}, {0x1885, 0x1886}, {0x18A9, 0x18A9},
+ {0x1920, 0x192B}, {0x1930, 0x193B}, {0x1A17, 0x1A1B},
+ {0x1A55, 0x1A5E}, {0x1A60, 0x1A7C}, {0x1A7F, 0x1A7F},
+ {0x1AB0, 0x1ABE}, {0x1B00, 0x1B04}, {0x1B34, 0x1B44},
+ {0x1B6B, 0x1B73}, {0x1B80, 0x1B82}, {0x1BA1, 0x1BAD},
+ {0x1BE6, 0x1BF3}, {0x1C24, 0x1C37}, {0x1CD0, 0x1CD2},
+ {0x1CD4, 0x1CE8}, {0x1CED, 0x1CED}, {0x1CF2, 0x1CF4},
+ {0x1CF8, 0x1CF9}, {0x1DC0, 0x1DF5}, {0x1DFB, 0x1DFF},
+ {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2D7F, 0x2D7F},
+ {0x2DE0, 0x2DFF}, {0x302A, 0x302F}, {0x3099, 0x309A},
+ {0xA66F, 0xA672}, {0xA674, 0xA67D}, {0xA69E, 0xA69F},
+ {0xA6F0, 0xA6F1}, {0xA802, 0xA802}, {0xA806, 0xA806},
+ {0xA80B, 0xA80B}, {0xA823, 0xA827}, {0xA880, 0xA881},
+ {0xA8B4, 0xA8C5}, {0xA8E0, 0xA8F1}, {0xA926, 0xA92D},
+ {0xA947, 0xA953}, {0xA980, 0xA983}, {0xA9B3, 0xA9C0},
+ {0xA9E5, 0xA9E5}, {0xAA29, 0xAA36}, {0xAA43, 0xAA43},
+ {0xAA4C, 0xAA4D}, {0xAA7B, 0xAA7D}, {0xAAB0, 0xAAB0},
+ {0xAAB2, 0xAAB4}, {0xAAB7, 0xAAB8}, {0xAABE, 0xAABF},
+ {0xAAC1, 0xAAC1}, {0xAAEB, 0xAAEF}, {0xAAF5, 0xAAF6},
+ {0xABE3, 0xABEA}, {0xABEC, 0xABED}, {0xFB1E, 0xFB1E},
+ {0xFE00, 0xFE0F}, {0xFE20, 0xFE2F}, {0x101FD, 0x101FD},
+ {0x102E0, 0x102E0}, {0x10376, 0x1037A}, {0x10A01, 0x10A03},
+ {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, {0x10A38, 0x10A3A},
+ {0x10A3F, 0x10A3F}, {0x10AE5, 0x10AE6}, {0x11000, 0x11002},
+ {0x11038, 0x11046}, {0x1107F, 0x11082}, {0x110B0, 0x110BA},
+ {0x11100, 0x11102}, {0x11127, 0x11134}, {0x11173, 0x11173},
+ {0x11180, 0x11182}, {0x111B3, 0x111C0}, {0x111CA, 0x111CC},
+ {0x1122C, 0x11237}, {0x1123E, 0x1123E}, {0x112DF, 0x112EA},
+ {0x11300, 0x11303}, {0x1133C, 0x1133C}, {0x1133E, 0x11344},
+ {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11357, 0x11357},
+ {0x11362, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374},
+ {0x11435, 0x11446}, {0x114B0, 0x114C3}, {0x115AF, 0x115B5},
+ {0x115B8, 0x115C0}, {0x115DC, 0x115DD}, {0x11630, 0x11640},
+ {0x116AB, 0x116B7}, {0x1171D, 0x1172B}, {0x11C2F, 0x11C36},
+ {0x11C38, 0x11C3F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6},
+ {0x16AF0, 0x16AF4}, {0x16B30, 0x16B36}, {0x16F51, 0x16F7E},
+ {0x16F8F, 0x16F92}, {0x1BC9D, 0x1BC9E}, {0x1D165, 0x1D169},
+ {0x1D16D, 0x1D172}, {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B},
+ {0x1D1AA, 0x1D1AD}, {0x1D242, 0x1D244}, {0x1DA00, 0x1DA36},
+ {0x1DA3B, 0x1DA6C}, {0x1DA75, 0x1DA75}, {0x1DA84, 0x1DA84},
+ {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006},
+ {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024},
+ {0x1E026, 0x1E02A}, {0x1E8D0, 0x1E8D6}, {0x1E944, 0x1E94A},
+ {0xE0100, 0xE01EF},
+}
+
+var doublewidth = table{
+ {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A},
+ {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3},
+ {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653},
+ {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1},
+ {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5},
+ {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA},
+ {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA},
+ {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B},
+ {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E},
+ {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797},
+ {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C},
+ {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99},
+ {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB},
+ {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF},
+ {0x3105, 0x312D}, {0x3131, 0x318E}, {0x3190, 0x31BA},
+ {0x31C0, 0x31E3}, {0x31F0, 0x321E}, {0x3220, 0x3247},
+ {0x3250, 0x32FE}, {0x3300, 0x4DBF}, {0x4E00, 0xA48C},
+ {0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3},
+ {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52},
+ {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60},
+ {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE0}, {0x17000, 0x187EC},
+ {0x18800, 0x18AF2}, {0x1B000, 0x1B001}, {0x1F004, 0x1F004},
+ {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A},
+ {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248},
+ {0x1F250, 0x1F251}, {0x1F300, 0x1F320}, {0x1F32D, 0x1F335},
+ {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, {0x1F3A0, 0x1F3CA},
+ {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, {0x1F3F4, 0x1F3F4},
+ {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, {0x1F442, 0x1F4FC},
+ {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, {0x1F550, 0x1F567},
+ {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4},
+ {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC},
+ {0x1F6D0, 0x1F6D2}, {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6F6},
+ {0x1F910, 0x1F91E}, {0x1F920, 0x1F927}, {0x1F930, 0x1F930},
+ {0x1F933, 0x1F93E}, {0x1F940, 0x1F94B}, {0x1F950, 0x1F95E},
+ {0x1F980, 0x1F991}, {0x1F9C0, 0x1F9C0}, {0x20000, 0x2FFFD},
+ {0x30000, 0x3FFFD},
+}
+
+var ambiguous = table{
+ {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8},
+ {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4},
+ {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6},
+ {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1},
+ {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED},
+ {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA},
+ {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101},
+ {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B},
+ {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133},
+ {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144},
+ {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153},
+ {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE},
+ {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4},
+ {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA},
+ {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261},
+ {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB},
+ {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB},
+ {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F},
+ {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1},
+ {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F},
+ {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016},
+ {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022},
+ {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033},
+ {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E},
+ {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084},
+ {0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105},
+ {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116},
+ {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B},
+ {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B},
+ {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199},
+ {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4},
+ {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203},
+ {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F},
+ {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A},
+ {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225},
+ {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237},
+ {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C},
+ {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267},
+ {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283},
+ {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299},
+ {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312},
+ {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573},
+ {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1},
+ {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7},
+ {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8},
+ {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5},
+ {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609},
+ {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E},
+ {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661},
+ {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D},
+ {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF},
+ {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1},
+ {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1},
+ {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC},
+ {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F},
+ {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF},
+ {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A},
+ {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D},
+ {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF},
+ {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD},
+}
+
+var emoji = table{
+ {0x203C, 0x203C}, {0x2049, 0x2049}, {0x2122, 0x2122},
+ {0x2139, 0x2139}, {0x2194, 0x2199}, {0x21A9, 0x21AA},
+ {0x231A, 0x231B}, {0x2328, 0x2328}, {0x23CF, 0x23CF},
+ {0x23E9, 0x23F3}, {0x23F8, 0x23FA}, {0x24C2, 0x24C2},
+ {0x25AA, 0x25AB}, {0x25B6, 0x25B6}, {0x25C0, 0x25C0},
+ {0x25FB, 0x25FE}, {0x2600, 0x2604}, {0x260E, 0x260E},
+ {0x2611, 0x2611}, {0x2614, 0x2615}, {0x2618, 0x2618},
+ {0x261D, 0x261D}, {0x2620, 0x2620}, {0x2622, 0x2623},
+ {0x2626, 0x2626}, {0x262A, 0x262A}, {0x262E, 0x262F},
+ {0x2638, 0x263A}, {0x2640, 0x2640}, {0x2642, 0x2642},
+ {0x2648, 0x2653}, {0x265F, 0x2660}, {0x2663, 0x2663},
+ {0x2665, 0x2666}, {0x2668, 0x2668}, {0x267B, 0x267B},
+ {0x267E, 0x267F}, {0x2692, 0x2697}, {0x2699, 0x2699},
+ {0x269B, 0x269C}, {0x26A0, 0x26A1}, {0x26AA, 0x26AB},
+ {0x26B0, 0x26B1}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5},
+ {0x26C8, 0x26C8}, {0x26CE, 0x26CF}, {0x26D1, 0x26D1},
+ {0x26D3, 0x26D4}, {0x26E9, 0x26EA}, {0x26F0, 0x26F5},
+ {0x26F7, 0x26FA}, {0x26FD, 0x26FD}, {0x2702, 0x2702},
+ {0x2705, 0x2705}, {0x2708, 0x270D}, {0x270F, 0x270F},
+ {0x2712, 0x2712}, {0x2714, 0x2714}, {0x2716, 0x2716},
+ {0x271D, 0x271D}, {0x2721, 0x2721}, {0x2728, 0x2728},
+ {0x2733, 0x2734}, {0x2744, 0x2744}, {0x2747, 0x2747},
+ {0x274C, 0x274C}, {0x274E, 0x274E}, {0x2753, 0x2755},
+ {0x2757, 0x2757}, {0x2763, 0x2764}, {0x2795, 0x2797},
+ {0x27A1, 0x27A1}, {0x27B0, 0x27B0}, {0x27BF, 0x27BF},
+ {0x2934, 0x2935}, {0x2B05, 0x2B07}, {0x2B1B, 0x2B1C},
+ {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x3030, 0x3030},
+ {0x303D, 0x303D}, {0x3297, 0x3297}, {0x3299, 0x3299},
+ {0x1F004, 0x1F004}, {0x1F0CF, 0x1F0CF}, {0x1F170, 0x1F171},
+ {0x1F17E, 0x1F17F}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A},
+ {0x1F1E6, 0x1F1FF}, {0x1F201, 0x1F202}, {0x1F21A, 0x1F21A},
+ {0x1F22F, 0x1F22F}, {0x1F232, 0x1F23A}, {0x1F250, 0x1F251},
+ {0x1F300, 0x1F321}, {0x1F324, 0x1F393}, {0x1F396, 0x1F397},
+ {0x1F399, 0x1F39B}, {0x1F39E, 0x1F3F0}, {0x1F3F3, 0x1F3F5},
+ {0x1F3F7, 0x1F4FD}, {0x1F4FF, 0x1F53D}, {0x1F549, 0x1F54E},
+ {0x1F550, 0x1F567}, {0x1F56F, 0x1F570}, {0x1F573, 0x1F57A},
+ {0x1F587, 0x1F587}, {0x1F58A, 0x1F58D}, {0x1F590, 0x1F590},
+ {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A5}, {0x1F5A8, 0x1F5A8},
+ {0x1F5B1, 0x1F5B2}, {0x1F5BC, 0x1F5BC}, {0x1F5C2, 0x1F5C4},
+ {0x1F5D1, 0x1F5D3}, {0x1F5DC, 0x1F5DE}, {0x1F5E1, 0x1F5E1},
+ {0x1F5E3, 0x1F5E3}, {0x1F5E8, 0x1F5E8}, {0x1F5EF, 0x1F5EF},
+ {0x1F5F3, 0x1F5F3}, {0x1F5FA, 0x1F64F}, {0x1F680, 0x1F6C5},
+ {0x1F6CB, 0x1F6D2}, {0x1F6E0, 0x1F6E5}, {0x1F6E9, 0x1F6E9},
+ {0x1F6EB, 0x1F6EC}, {0x1F6F0, 0x1F6F0}, {0x1F6F3, 0x1F6F9},
+ {0x1F910, 0x1F93A}, {0x1F93C, 0x1F93E}, {0x1F940, 0x1F945},
+ {0x1F947, 0x1F970}, {0x1F973, 0x1F976}, {0x1F97A, 0x1F97A},
+ {0x1F97C, 0x1F9A2}, {0x1F9B0, 0x1F9B9}, {0x1F9C0, 0x1F9C2},
+ {0x1F9D0, 0x1F9FF},
+}
+
+var notassigned = table{
+ {0x0378, 0x0379}, {0x0380, 0x0383}, {0x038B, 0x038B},
+ {0x038D, 0x038D}, {0x03A2, 0x03A2}, {0x0530, 0x0530},
+ {0x0557, 0x0558}, {0x0560, 0x0560}, {0x0588, 0x0588},
+ {0x058B, 0x058C}, {0x0590, 0x0590}, {0x05C8, 0x05CF},
+ {0x05EB, 0x05EF}, {0x05F5, 0x05FF}, {0x061D, 0x061D},
+ {0x070E, 0x070E}, {0x074B, 0x074C}, {0x07B2, 0x07BF},
+ {0x07FB, 0x07FF}, {0x082E, 0x082F}, {0x083F, 0x083F},
+ {0x085C, 0x085D}, {0x085F, 0x089F}, {0x08B5, 0x08B5},
+ {0x08BE, 0x08D3}, {0x0984, 0x0984}, {0x098D, 0x098E},
+ {0x0991, 0x0992}, {0x09A9, 0x09A9}, {0x09B1, 0x09B1},
+ {0x09B3, 0x09B5}, {0x09BA, 0x09BB}, {0x09C5, 0x09C6},
+ {0x09C9, 0x09CA}, {0x09CF, 0x09D6}, {0x09D8, 0x09DB},
+ {0x09DE, 0x09DE}, {0x09E4, 0x09E5}, {0x09FC, 0x0A00},
+ {0x0A04, 0x0A04}, {0x0A0B, 0x0A0E}, {0x0A11, 0x0A12},
+ {0x0A29, 0x0A29}, {0x0A31, 0x0A31}, {0x0A34, 0x0A34},
+ {0x0A37, 0x0A37}, {0x0A3A, 0x0A3B}, {0x0A3D, 0x0A3D},
+ {0x0A43, 0x0A46}, {0x0A49, 0x0A4A}, {0x0A4E, 0x0A50},
+ {0x0A52, 0x0A58}, {0x0A5D, 0x0A5D}, {0x0A5F, 0x0A65},
+ {0x0A76, 0x0A80}, {0x0A84, 0x0A84}, {0x0A8E, 0x0A8E},
+ {0x0A92, 0x0A92}, {0x0AA9, 0x0AA9}, {0x0AB1, 0x0AB1},
+ {0x0AB4, 0x0AB4}, {0x0ABA, 0x0ABB}, {0x0AC6, 0x0AC6},
+ {0x0ACA, 0x0ACA}, {0x0ACE, 0x0ACF}, {0x0AD1, 0x0ADF},
+ {0x0AE4, 0x0AE5}, {0x0AF2, 0x0AF8}, {0x0AFA, 0x0B00},
+ {0x0B04, 0x0B04}, {0x0B0D, 0x0B0E}, {0x0B11, 0x0B12},
+ {0x0B29, 0x0B29}, {0x0B31, 0x0B31}, {0x0B34, 0x0B34},
+ {0x0B3A, 0x0B3B}, {0x0B45, 0x0B46}, {0x0B49, 0x0B4A},
+ {0x0B4E, 0x0B55}, {0x0B58, 0x0B5B}, {0x0B5E, 0x0B5E},
+ {0x0B64, 0x0B65}, {0x0B78, 0x0B81}, {0x0B84, 0x0B84},
+ {0x0B8B, 0x0B8D}, {0x0B91, 0x0B91}, {0x0B96, 0x0B98},
+ {0x0B9B, 0x0B9B}, {0x0B9D, 0x0B9D}, {0x0BA0, 0x0BA2},
+ {0x0BA5, 0x0BA7}, {0x0BAB, 0x0BAD}, {0x0BBA, 0x0BBD},
+ {0x0BC3, 0x0BC5}, {0x0BC9, 0x0BC9}, {0x0BCE, 0x0BCF},
+ {0x0BD1, 0x0BD6}, {0x0BD8, 0x0BE5}, {0x0BFB, 0x0BFF},
+ {0x0C04, 0x0C04}, {0x0C0D, 0x0C0D}, {0x0C11, 0x0C11},
+ {0x0C29, 0x0C29}, {0x0C3A, 0x0C3C}, {0x0C45, 0x0C45},
+ {0x0C49, 0x0C49}, {0x0C4E, 0x0C54}, {0x0C57, 0x0C57},
+ {0x0C5B, 0x0C5F}, {0x0C64, 0x0C65}, {0x0C70, 0x0C77},
+ {0x0C84, 0x0C84}, {0x0C8D, 0x0C8D}, {0x0C91, 0x0C91},
+ {0x0CA9, 0x0CA9}, {0x0CB4, 0x0CB4}, {0x0CBA, 0x0CBB},
+ {0x0CC5, 0x0CC5}, {0x0CC9, 0x0CC9}, {0x0CCE, 0x0CD4},
+ {0x0CD7, 0x0CDD}, {0x0CDF, 0x0CDF}, {0x0CE4, 0x0CE5},
+ {0x0CF0, 0x0CF0}, {0x0CF3, 0x0D00}, {0x0D04, 0x0D04},
+ {0x0D0D, 0x0D0D}, {0x0D11, 0x0D11}, {0x0D3B, 0x0D3C},
+ {0x0D45, 0x0D45}, {0x0D49, 0x0D49}, {0x0D50, 0x0D53},
+ {0x0D64, 0x0D65}, {0x0D80, 0x0D81}, {0x0D84, 0x0D84},
+ {0x0D97, 0x0D99}, {0x0DB2, 0x0DB2}, {0x0DBC, 0x0DBC},
+ {0x0DBE, 0x0DBF}, {0x0DC7, 0x0DC9}, {0x0DCB, 0x0DCE},
+ {0x0DD5, 0x0DD5}, {0x0DD7, 0x0DD7}, {0x0DE0, 0x0DE5},
+ {0x0DF0, 0x0DF1}, {0x0DF5, 0x0E00}, {0x0E3B, 0x0E3E},
+ {0x0E5C, 0x0E80}, {0x0E83, 0x0E83}, {0x0E85, 0x0E86},
+ {0x0E89, 0x0E89}, {0x0E8B, 0x0E8C}, {0x0E8E, 0x0E93},
+ {0x0E98, 0x0E98}, {0x0EA0, 0x0EA0}, {0x0EA4, 0x0EA4},
+ {0x0EA6, 0x0EA6}, {0x0EA8, 0x0EA9}, {0x0EAC, 0x0EAC},
+ {0x0EBA, 0x0EBA}, {0x0EBE, 0x0EBF}, {0x0EC5, 0x0EC5},
+ {0x0EC7, 0x0EC7}, {0x0ECE, 0x0ECF}, {0x0EDA, 0x0EDB},
+ {0x0EE0, 0x0EFF}, {0x0F48, 0x0F48}, {0x0F6D, 0x0F70},
+ {0x0F98, 0x0F98}, {0x0FBD, 0x0FBD}, {0x0FCD, 0x0FCD},
+ {0x0FDB, 0x0FFF}, {0x10C6, 0x10C6}, {0x10C8, 0x10CC},
+ {0x10CE, 0x10CF}, {0x1249, 0x1249}, {0x124E, 0x124F},
+ {0x1257, 0x1257}, {0x1259, 0x1259}, {0x125E, 0x125F},
+ {0x1289, 0x1289}, {0x128E, 0x128F}, {0x12B1, 0x12B1},
+ {0x12B6, 0x12B7}, {0x12BF, 0x12BF}, {0x12C1, 0x12C1},
+ {0x12C6, 0x12C7}, {0x12D7, 0x12D7}, {0x1311, 0x1311},
+ {0x1316, 0x1317}, {0x135B, 0x135C}, {0x137D, 0x137F},
+ {0x139A, 0x139F}, {0x13F6, 0x13F7}, {0x13FE, 0x13FF},
+ {0x169D, 0x169F}, {0x16F9, 0x16FF}, {0x170D, 0x170D},
+ {0x1715, 0x171F}, {0x1737, 0x173F}, {0x1754, 0x175F},
+ {0x176D, 0x176D}, {0x1771, 0x1771}, {0x1774, 0x177F},
+ {0x17DE, 0x17DF}, {0x17EA, 0x17EF}, {0x17FA, 0x17FF},
+ {0x180F, 0x180F}, {0x181A, 0x181F}, {0x1878, 0x187F},
+ {0x18AB, 0x18AF}, {0x18F6, 0x18FF}, {0x191F, 0x191F},
+ {0x192C, 0x192F}, {0x193C, 0x193F}, {0x1941, 0x1943},
+ {0x196E, 0x196F}, {0x1975, 0x197F}, {0x19AC, 0x19AF},
+ {0x19CA, 0x19CF}, {0x19DB, 0x19DD}, {0x1A1C, 0x1A1D},
+ {0x1A5F, 0x1A5F}, {0x1A7D, 0x1A7E}, {0x1A8A, 0x1A8F},
+ {0x1A9A, 0x1A9F}, {0x1AAE, 0x1AAF}, {0x1ABF, 0x1AFF},
+ {0x1B4C, 0x1B4F}, {0x1B7D, 0x1B7F}, {0x1BF4, 0x1BFB},
+ {0x1C38, 0x1C3A}, {0x1C4A, 0x1C4C}, {0x1C89, 0x1CBF},
+ {0x1CC8, 0x1CCF}, {0x1CF7, 0x1CF7}, {0x1CFA, 0x1CFF},
+ {0x1DF6, 0x1DFA}, {0x1F16, 0x1F17}, {0x1F1E, 0x1F1F},
+ {0x1F46, 0x1F47}, {0x1F4E, 0x1F4F}, {0x1F58, 0x1F58},
+ {0x1F5A, 0x1F5A}, {0x1F5C, 0x1F5C}, {0x1F5E, 0x1F5E},
+ {0x1F7E, 0x1F7F}, {0x1FB5, 0x1FB5}, {0x1FC5, 0x1FC5},
+ {0x1FD4, 0x1FD5}, {0x1FDC, 0x1FDC}, {0x1FF0, 0x1FF1},
+ {0x1FF5, 0x1FF5}, {0x1FFF, 0x1FFF}, {0x2065, 0x2065},
+ {0x2072, 0x2073}, {0x208F, 0x208F}, {0x209D, 0x209F},
+ {0x20BF, 0x20CF}, {0x20F1, 0x20FF}, {0x218C, 0x218F},
+ {0x23FF, 0x23FF}, {0x2427, 0x243F}, {0x244B, 0x245F},
+ {0x2B74, 0x2B75}, {0x2B96, 0x2B97}, {0x2BBA, 0x2BBC},
+ {0x2BC9, 0x2BC9}, {0x2BD2, 0x2BEB}, {0x2BF0, 0x2BFF},
+ {0x2C2F, 0x2C2F}, {0x2C5F, 0x2C5F}, {0x2CF4, 0x2CF8},
+ {0x2D26, 0x2D26}, {0x2D28, 0x2D2C}, {0x2D2E, 0x2D2F},
+ {0x2D68, 0x2D6E}, {0x2D71, 0x2D7E}, {0x2D97, 0x2D9F},
+ {0x2DA7, 0x2DA7}, {0x2DAF, 0x2DAF}, {0x2DB7, 0x2DB7},
+ {0x2DBF, 0x2DBF}, {0x2DC7, 0x2DC7}, {0x2DCF, 0x2DCF},
+ {0x2DD7, 0x2DD7}, {0x2DDF, 0x2DDF}, {0x2E45, 0x2E7F},
+ {0x2E9A, 0x2E9A}, {0x2EF4, 0x2EFF}, {0x2FD6, 0x2FEF},
+ {0x2FFC, 0x2FFF}, {0x3040, 0x3040}, {0x3097, 0x3098},
+ {0x3100, 0x3104}, {0x312E, 0x3130}, {0x318F, 0x318F},
+ {0x31BB, 0x31BF}, {0x31E4, 0x31EF}, {0x321F, 0x321F},
+ {0x32FF, 0x32FF}, {0x4DB6, 0x4DBF}, {0x9FD6, 0x9FFF},
+ {0xA48D, 0xA48F}, {0xA4C7, 0xA4CF}, {0xA62C, 0xA63F},
+ {0xA6F8, 0xA6FF}, {0xA7AF, 0xA7AF}, {0xA7B8, 0xA7F6},
+ {0xA82C, 0xA82F}, {0xA83A, 0xA83F}, {0xA878, 0xA87F},
+ {0xA8C6, 0xA8CD}, {0xA8DA, 0xA8DF}, {0xA8FE, 0xA8FF},
+ {0xA954, 0xA95E}, {0xA97D, 0xA97F}, {0xA9CE, 0xA9CE},
+ {0xA9DA, 0xA9DD}, {0xA9FF, 0xA9FF}, {0xAA37, 0xAA3F},
+ {0xAA4E, 0xAA4F}, {0xAA5A, 0xAA5B}, {0xAAC3, 0xAADA},
+ {0xAAF7, 0xAB00}, {0xAB07, 0xAB08}, {0xAB0F, 0xAB10},
+ {0xAB17, 0xAB1F}, {0xAB27, 0xAB27}, {0xAB2F, 0xAB2F},
+ {0xAB66, 0xAB6F}, {0xABEE, 0xABEF}, {0xABFA, 0xABFF},
+ {0xD7A4, 0xD7AF}, {0xD7C7, 0xD7CA}, {0xD7FC, 0xD7FF},
+ {0xFA6E, 0xFA6F}, {0xFADA, 0xFAFF}, {0xFB07, 0xFB12},
+ {0xFB18, 0xFB1C}, {0xFB37, 0xFB37}, {0xFB3D, 0xFB3D},
+ {0xFB3F, 0xFB3F}, {0xFB42, 0xFB42}, {0xFB45, 0xFB45},
+ {0xFBC2, 0xFBD2}, {0xFD40, 0xFD4F}, {0xFD90, 0xFD91},
+ {0xFDC8, 0xFDEF}, {0xFDFE, 0xFDFF}, {0xFE1A, 0xFE1F},
+ {0xFE53, 0xFE53}, {0xFE67, 0xFE67}, {0xFE6C, 0xFE6F},
+ {0xFE75, 0xFE75}, {0xFEFD, 0xFEFE}, {0xFF00, 0xFF00},
+ {0xFFBF, 0xFFC1}, {0xFFC8, 0xFFC9}, {0xFFD0, 0xFFD1},
+ {0xFFD8, 0xFFD9}, {0xFFDD, 0xFFDF}, {0xFFE7, 0xFFE7},
+ {0xFFEF, 0xFFF8}, {0xFFFE, 0xFFFF}, {0x1000C, 0x1000C},
+ {0x10027, 0x10027}, {0x1003B, 0x1003B}, {0x1003E, 0x1003E},
+ {0x1004E, 0x1004F}, {0x1005E, 0x1007F}, {0x100FB, 0x100FF},
+ {0x10103, 0x10106}, {0x10134, 0x10136}, {0x1018F, 0x1018F},
+ {0x1019C, 0x1019F}, {0x101A1, 0x101CF}, {0x101FE, 0x1027F},
+ {0x1029D, 0x1029F}, {0x102D1, 0x102DF}, {0x102FC, 0x102FF},
+ {0x10324, 0x1032F}, {0x1034B, 0x1034F}, {0x1037B, 0x1037F},
+ {0x1039E, 0x1039E}, {0x103C4, 0x103C7}, {0x103D6, 0x103FF},
+ {0x1049E, 0x1049F}, {0x104AA, 0x104AF}, {0x104D4, 0x104D7},
+ {0x104FC, 0x104FF}, {0x10528, 0x1052F}, {0x10564, 0x1056E},
+ {0x10570, 0x105FF}, {0x10737, 0x1073F}, {0x10756, 0x1075F},
+ {0x10768, 0x107FF}, {0x10806, 0x10807}, {0x10809, 0x10809},
+ {0x10836, 0x10836}, {0x10839, 0x1083B}, {0x1083D, 0x1083E},
+ {0x10856, 0x10856}, {0x1089F, 0x108A6}, {0x108B0, 0x108DF},
+ {0x108F3, 0x108F3}, {0x108F6, 0x108FA}, {0x1091C, 0x1091E},
+ {0x1093A, 0x1093E}, {0x10940, 0x1097F}, {0x109B8, 0x109BB},
+ {0x109D0, 0x109D1}, {0x10A04, 0x10A04}, {0x10A07, 0x10A0B},
+ {0x10A14, 0x10A14}, {0x10A18, 0x10A18}, {0x10A34, 0x10A37},
+ {0x10A3B, 0x10A3E}, {0x10A48, 0x10A4F}, {0x10A59, 0x10A5F},
+ {0x10AA0, 0x10ABF}, {0x10AE7, 0x10AEA}, {0x10AF7, 0x10AFF},
+ {0x10B36, 0x10B38}, {0x10B56, 0x10B57}, {0x10B73, 0x10B77},
+ {0x10B92, 0x10B98}, {0x10B9D, 0x10BA8}, {0x10BB0, 0x10BFF},
+ {0x10C49, 0x10C7F}, {0x10CB3, 0x10CBF}, {0x10CF3, 0x10CF9},
+ {0x10D00, 0x10E5F}, {0x10E7F, 0x10FFF}, {0x1104E, 0x11051},
+ {0x11070, 0x1107E}, {0x110C2, 0x110CF}, {0x110E9, 0x110EF},
+ {0x110FA, 0x110FF}, {0x11135, 0x11135}, {0x11144, 0x1114F},
+ {0x11177, 0x1117F}, {0x111CE, 0x111CF}, {0x111E0, 0x111E0},
+ {0x111F5, 0x111FF}, {0x11212, 0x11212}, {0x1123F, 0x1127F},
+ {0x11287, 0x11287}, {0x11289, 0x11289}, {0x1128E, 0x1128E},
+ {0x1129E, 0x1129E}, {0x112AA, 0x112AF}, {0x112EB, 0x112EF},
+ {0x112FA, 0x112FF}, {0x11304, 0x11304}, {0x1130D, 0x1130E},
+ {0x11311, 0x11312}, {0x11329, 0x11329}, {0x11331, 0x11331},
+ {0x11334, 0x11334}, {0x1133A, 0x1133B}, {0x11345, 0x11346},
+ {0x11349, 0x1134A}, {0x1134E, 0x1134F}, {0x11351, 0x11356},
+ {0x11358, 0x1135C}, {0x11364, 0x11365}, {0x1136D, 0x1136F},
+ {0x11375, 0x113FF}, {0x1145A, 0x1145A}, {0x1145C, 0x1145C},
+ {0x1145E, 0x1147F}, {0x114C8, 0x114CF}, {0x114DA, 0x1157F},
+ {0x115B6, 0x115B7}, {0x115DE, 0x115FF}, {0x11645, 0x1164F},
+ {0x1165A, 0x1165F}, {0x1166D, 0x1167F}, {0x116B8, 0x116BF},
+ {0x116CA, 0x116FF}, {0x1171A, 0x1171C}, {0x1172C, 0x1172F},
+ {0x11740, 0x1189F}, {0x118F3, 0x118FE}, {0x11900, 0x11ABF},
+ {0x11AF9, 0x11BFF}, {0x11C09, 0x11C09}, {0x11C37, 0x11C37},
+ {0x11C46, 0x11C4F}, {0x11C6D, 0x11C6F}, {0x11C90, 0x11C91},
+ {0x11CA8, 0x11CA8}, {0x11CB7, 0x11FFF}, {0x1239A, 0x123FF},
+ {0x1246F, 0x1246F}, {0x12475, 0x1247F}, {0x12544, 0x12FFF},
+ {0x1342F, 0x143FF}, {0x14647, 0x167FF}, {0x16A39, 0x16A3F},
+ {0x16A5F, 0x16A5F}, {0x16A6A, 0x16A6D}, {0x16A70, 0x16ACF},
+ {0x16AEE, 0x16AEF}, {0x16AF6, 0x16AFF}, {0x16B46, 0x16B4F},
+ {0x16B5A, 0x16B5A}, {0x16B62, 0x16B62}, {0x16B78, 0x16B7C},
+ {0x16B90, 0x16EFF}, {0x16F45, 0x16F4F}, {0x16F7F, 0x16F8E},
+ {0x16FA0, 0x16FDF}, {0x16FE1, 0x16FFF}, {0x187ED, 0x187FF},
+ {0x18AF3, 0x1AFFF}, {0x1B002, 0x1BBFF}, {0x1BC6B, 0x1BC6F},
+ {0x1BC7D, 0x1BC7F}, {0x1BC89, 0x1BC8F}, {0x1BC9A, 0x1BC9B},
+ {0x1BCA4, 0x1CFFF}, {0x1D0F6, 0x1D0FF}, {0x1D127, 0x1D128},
+ {0x1D1E9, 0x1D1FF}, {0x1D246, 0x1D2FF}, {0x1D357, 0x1D35F},
+ {0x1D372, 0x1D3FF}, {0x1D455, 0x1D455}, {0x1D49D, 0x1D49D},
+ {0x1D4A0, 0x1D4A1}, {0x1D4A3, 0x1D4A4}, {0x1D4A7, 0x1D4A8},
+ {0x1D4AD, 0x1D4AD}, {0x1D4BA, 0x1D4BA}, {0x1D4BC, 0x1D4BC},
+ {0x1D4C4, 0x1D4C4}, {0x1D506, 0x1D506}, {0x1D50B, 0x1D50C},
+ {0x1D515, 0x1D515}, {0x1D51D, 0x1D51D}, {0x1D53A, 0x1D53A},
+ {0x1D53F, 0x1D53F}, {0x1D545, 0x1D545}, {0x1D547, 0x1D549},
+ {0x1D551, 0x1D551}, {0x1D6A6, 0x1D6A7}, {0x1D7CC, 0x1D7CD},
+ {0x1DA8C, 0x1DA9A}, {0x1DAA0, 0x1DAA0}, {0x1DAB0, 0x1DFFF},
+ {0x1E007, 0x1E007}, {0x1E019, 0x1E01A}, {0x1E022, 0x1E022},
+ {0x1E025, 0x1E025}, {0x1E02B, 0x1E7FF}, {0x1E8C5, 0x1E8C6},
+ {0x1E8D7, 0x1E8FF}, {0x1E94B, 0x1E94F}, {0x1E95A, 0x1E95D},
+ {0x1E960, 0x1EDFF}, {0x1EE04, 0x1EE04}, {0x1EE20, 0x1EE20},
+ {0x1EE23, 0x1EE23}, {0x1EE25, 0x1EE26}, {0x1EE28, 0x1EE28},
+ {0x1EE33, 0x1EE33}, {0x1EE38, 0x1EE38}, {0x1EE3A, 0x1EE3A},
+ {0x1EE3C, 0x1EE41}, {0x1EE43, 0x1EE46}, {0x1EE48, 0x1EE48},
+ {0x1EE4A, 0x1EE4A}, {0x1EE4C, 0x1EE4C}, {0x1EE50, 0x1EE50},
+ {0x1EE53, 0x1EE53}, {0x1EE55, 0x1EE56}, {0x1EE58, 0x1EE58},
+ {0x1EE5A, 0x1EE5A}, {0x1EE5C, 0x1EE5C}, {0x1EE5E, 0x1EE5E},
+ {0x1EE60, 0x1EE60}, {0x1EE63, 0x1EE63}, {0x1EE65, 0x1EE66},
+ {0x1EE6B, 0x1EE6B}, {0x1EE73, 0x1EE73}, {0x1EE78, 0x1EE78},
+ {0x1EE7D, 0x1EE7D}, {0x1EE7F, 0x1EE7F}, {0x1EE8A, 0x1EE8A},
+ {0x1EE9C, 0x1EEA0}, {0x1EEA4, 0x1EEA4}, {0x1EEAA, 0x1EEAA},
+ {0x1EEBC, 0x1EEEF}, {0x1EEF2, 0x1EFFF}, {0x1F02C, 0x1F02F},
+ {0x1F094, 0x1F09F}, {0x1F0AF, 0x1F0B0}, {0x1F0C0, 0x1F0C0},
+ {0x1F0D0, 0x1F0D0}, {0x1F0F6, 0x1F0FF}, {0x1F10D, 0x1F10F},
+ {0x1F12F, 0x1F12F}, {0x1F16C, 0x1F16F}, {0x1F1AD, 0x1F1E5},
+ {0x1F203, 0x1F20F}, {0x1F23C, 0x1F23F}, {0x1F249, 0x1F24F},
+ {0x1F252, 0x1F2FF}, {0x1F6D3, 0x1F6DF}, {0x1F6ED, 0x1F6EF},
+ {0x1F6F7, 0x1F6FF}, {0x1F774, 0x1F77F}, {0x1F7D5, 0x1F7FF},
+ {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, {0x1F85A, 0x1F85F},
+ {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F90F}, {0x1F91F, 0x1F91F},
+ {0x1F928, 0x1F92F}, {0x1F931, 0x1F932}, {0x1F93F, 0x1F93F},
+ {0x1F94C, 0x1F94F}, {0x1F95F, 0x1F97F}, {0x1F992, 0x1F9BF},
+ {0x1F9C1, 0x1FFFF}, {0x2A6D7, 0x2A6FF}, {0x2B735, 0x2B73F},
+ {0x2B81E, 0x2B81F}, {0x2CEA2, 0x2F7FF}, {0x2FA1E, 0xE0000},
+ {0xE0002, 0xE001F}, {0xE0080, 0xE00FF}, {0xE01F0, 0xEFFFF},
+ {0xFFFFE, 0xFFFFF},
+}
+
+var neutral = table{
+ {0x0000, 0x001F}, {0x007F, 0x00A0}, {0x00A9, 0x00A9},
+ {0x00AB, 0x00AB}, {0x00B5, 0x00B5}, {0x00BB, 0x00BB},
+ {0x00C0, 0x00C5}, {0x00C7, 0x00CF}, {0x00D1, 0x00D6},
+ {0x00D9, 0x00DD}, {0x00E2, 0x00E5}, {0x00E7, 0x00E7},
+ {0x00EB, 0x00EB}, {0x00EE, 0x00EF}, {0x00F1, 0x00F1},
+ {0x00F4, 0x00F6}, {0x00FB, 0x00FB}, {0x00FD, 0x00FD},
+ {0x00FF, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112},
+ {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A},
+ {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E},
+ {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C},
+ {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A},
+ {0x016C, 0x01CD}, {0x01CF, 0x01CF}, {0x01D1, 0x01D1},
+ {0x01D3, 0x01D3}, {0x01D5, 0x01D5}, {0x01D7, 0x01D7},
+ {0x01D9, 0x01D9}, {0x01DB, 0x01DB}, {0x01DD, 0x0250},
+ {0x0252, 0x0260}, {0x0262, 0x02C3}, {0x02C5, 0x02C6},
+ {0x02C8, 0x02C8}, {0x02CC, 0x02CC}, {0x02CE, 0x02CF},
+ {0x02D1, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE},
+ {0x02E0, 0x02FF}, {0x0370, 0x0377}, {0x037A, 0x037F},
+ {0x0384, 0x038A}, {0x038C, 0x038C}, {0x038E, 0x0390},
+ {0x03AA, 0x03B0}, {0x03C2, 0x03C2}, {0x03CA, 0x0400},
+ {0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F},
+ {0x0531, 0x0556}, {0x0559, 0x055F}, {0x0561, 0x0587},
+ {0x0589, 0x058A}, {0x058D, 0x058F}, {0x0591, 0x05C7},
+ {0x05D0, 0x05EA}, {0x05F0, 0x05F4}, {0x0600, 0x061C},
+ {0x061E, 0x070D}, {0x070F, 0x074A}, {0x074D, 0x07B1},
+ {0x07C0, 0x07FA}, {0x0800, 0x082D}, {0x0830, 0x083E},
+ {0x0840, 0x085B}, {0x085E, 0x085E}, {0x08A0, 0x08B4},
+ {0x08B6, 0x08BD}, {0x08D4, 0x0983}, {0x0985, 0x098C},
+ {0x098F, 0x0990}, {0x0993, 0x09A8}, {0x09AA, 0x09B0},
+ {0x09B2, 0x09B2}, {0x09B6, 0x09B9}, {0x09BC, 0x09C4},
+ {0x09C7, 0x09C8}, {0x09CB, 0x09CE}, {0x09D7, 0x09D7},
+ {0x09DC, 0x09DD}, {0x09DF, 0x09E3}, {0x09E6, 0x09FB},
+ {0x0A01, 0x0A03}, {0x0A05, 0x0A0A}, {0x0A0F, 0x0A10},
+ {0x0A13, 0x0A28}, {0x0A2A, 0x0A30}, {0x0A32, 0x0A33},
+ {0x0A35, 0x0A36}, {0x0A38, 0x0A39}, {0x0A3C, 0x0A3C},
+ {0x0A3E, 0x0A42}, {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D},
+ {0x0A51, 0x0A51}, {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E},
+ {0x0A66, 0x0A75}, {0x0A81, 0x0A83}, {0x0A85, 0x0A8D},
+ {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0},
+ {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5},
+ {0x0AC7, 0x0AC9}, {0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0},
+ {0x0AE0, 0x0AE3}, {0x0AE6, 0x0AF1}, {0x0AF9, 0x0AF9},
+ {0x0B01, 0x0B03}, {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10},
+ {0x0B13, 0x0B28}, {0x0B2A, 0x0B30}, {0x0B32, 0x0B33},
+ {0x0B35, 0x0B39}, {0x0B3C, 0x0B44}, {0x0B47, 0x0B48},
+ {0x0B4B, 0x0B4D}, {0x0B56, 0x0B57}, {0x0B5C, 0x0B5D},
+ {0x0B5F, 0x0B63}, {0x0B66, 0x0B77}, {0x0B82, 0x0B83},
+ {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90}, {0x0B92, 0x0B95},
+ {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F},
+ {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9},
+ {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD},
+ {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA},
+ {0x0C00, 0x0C03}, {0x0C05, 0x0C0C}, {0x0C0E, 0x0C10},
+ {0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, {0x0C3D, 0x0C44},
+ {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56},
+ {0x0C58, 0x0C5A}, {0x0C60, 0x0C63}, {0x0C66, 0x0C6F},
+ {0x0C78, 0x0C83}, {0x0C85, 0x0C8C}, {0x0C8E, 0x0C90},
+ {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9},
+ {0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD},
+ {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3},
+ {0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D01, 0x0D03},
+ {0x0D05, 0x0D0C}, {0x0D0E, 0x0D10}, {0x0D12, 0x0D3A},
+ {0x0D3D, 0x0D44}, {0x0D46, 0x0D48}, {0x0D4A, 0x0D4F},
+ {0x0D54, 0x0D63}, {0x0D66, 0x0D7F}, {0x0D82, 0x0D83},
+ {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1}, {0x0DB3, 0x0DBB},
+ {0x0DBD, 0x0DBD}, {0x0DC0, 0x0DC6}, {0x0DCA, 0x0DCA},
+ {0x0DCF, 0x0DD4}, {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF},
+ {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF4}, {0x0E01, 0x0E3A},
+ {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82}, {0x0E84, 0x0E84},
+ {0x0E87, 0x0E88}, {0x0E8A, 0x0E8A}, {0x0E8D, 0x0E8D},
+ {0x0E94, 0x0E97}, {0x0E99, 0x0E9F}, {0x0EA1, 0x0EA3},
+ {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EA7}, {0x0EAA, 0x0EAB},
+ {0x0EAD, 0x0EB9}, {0x0EBB, 0x0EBD}, {0x0EC0, 0x0EC4},
+ {0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9},
+ {0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C},
+ {0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC},
+ {0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7},
+ {0x10CD, 0x10CD}, {0x10D0, 0x10FF}, {0x1160, 0x1248},
+ {0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258},
+ {0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D},
+ {0x1290, 0x12B0}, {0x12B2, 0x12B5}, {0x12B8, 0x12BE},
+ {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6},
+ {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A},
+ {0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5},
+ {0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8},
+ {0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736},
+ {0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770},
+ {0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9},
+ {0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819},
+ {0x1820, 0x1877}, {0x1880, 0x18AA}, {0x18B0, 0x18F5},
+ {0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B},
+ {0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974},
+ {0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA},
+ {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C},
+ {0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD},
+ {0x1AB0, 0x1ABE}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C},
+ {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49},
+ {0x1C4D, 0x1C88}, {0x1CC0, 0x1CC7}, {0x1CD0, 0x1CF6},
+ {0x1CF8, 0x1CF9}, {0x1D00, 0x1DF5}, {0x1DFB, 0x1F15},
+ {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D},
+ {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B},
+ {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4},
+ {0x1FB6, 0x1FC4}, {0x1FC6, 0x1FD3}, {0x1FD6, 0x1FDB},
+ {0x1FDD, 0x1FEF}, {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFE},
+ {0x2000, 0x200F}, {0x2011, 0x2012}, {0x2017, 0x2017},
+ {0x201A, 0x201B}, {0x201E, 0x201F}, {0x2023, 0x2023},
+ {0x2028, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034},
+ {0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064},
+ {0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080},
+ {0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8},
+ {0x20AA, 0x20AB}, {0x20AD, 0x20BE}, {0x20D0, 0x20F0},
+ {0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108},
+ {0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120},
+ {0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152},
+ {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F},
+ {0x217A, 0x2188}, {0x218A, 0x218B}, {0x219A, 0x21B7},
+ {0x21BA, 0x21D1}, {0x21D3, 0x21D3}, {0x21D5, 0x21E6},
+ {0x21E8, 0x21FF}, {0x2201, 0x2201}, {0x2204, 0x2206},
+ {0x2209, 0x220A}, {0x220C, 0x220E}, {0x2210, 0x2210},
+ {0x2212, 0x2214}, {0x2216, 0x2219}, {0x221B, 0x221C},
+ {0x2221, 0x2222}, {0x2224, 0x2224}, {0x2226, 0x2226},
+ {0x222D, 0x222D}, {0x222F, 0x2233}, {0x2238, 0x223B},
+ {0x223E, 0x2247}, {0x2249, 0x224B}, {0x224D, 0x2251},
+ {0x2253, 0x225F}, {0x2262, 0x2263}, {0x2268, 0x2269},
+ {0x226C, 0x226D}, {0x2270, 0x2281}, {0x2284, 0x2285},
+ {0x2288, 0x2294}, {0x2296, 0x2298}, {0x229A, 0x22A4},
+ {0x22A6, 0x22BE}, {0x22C0, 0x2311}, {0x2313, 0x2319},
+ {0x231C, 0x2328}, {0x232B, 0x23E8}, {0x23ED, 0x23EF},
+ {0x23F1, 0x23F2}, {0x23F4, 0x23FE}, {0x2400, 0x2426},
+ {0x2440, 0x244A}, {0x24EA, 0x24EA}, {0x254C, 0x254F},
+ {0x2574, 0x257F}, {0x2590, 0x2591}, {0x2596, 0x259F},
+ {0x25A2, 0x25A2}, {0x25AA, 0x25B1}, {0x25B4, 0x25B5},
+ {0x25B8, 0x25BB}, {0x25BE, 0x25BF}, {0x25C2, 0x25C5},
+ {0x25C9, 0x25CA}, {0x25CC, 0x25CD}, {0x25D2, 0x25E1},
+ {0x25E6, 0x25EE}, {0x25F0, 0x25FC}, {0x25FF, 0x2604},
+ {0x2607, 0x2608}, {0x260A, 0x260D}, {0x2610, 0x2613},
+ {0x2616, 0x261B}, {0x261D, 0x261D}, {0x261F, 0x263F},
+ {0x2641, 0x2641}, {0x2643, 0x2647}, {0x2654, 0x265F},
+ {0x2662, 0x2662}, {0x2666, 0x2666}, {0x266B, 0x266B},
+ {0x266E, 0x266E}, {0x2670, 0x267E}, {0x2680, 0x2692},
+ {0x2694, 0x269D}, {0x26A0, 0x26A0}, {0x26A2, 0x26A9},
+ {0x26AC, 0x26BC}, {0x26C0, 0x26C3}, {0x26E2, 0x26E2},
+ {0x26E4, 0x26E7}, {0x2700, 0x2704}, {0x2706, 0x2709},
+ {0x270C, 0x2727}, {0x2729, 0x273C}, {0x273E, 0x274B},
+ {0x274D, 0x274D}, {0x274F, 0x2752}, {0x2756, 0x2756},
+ {0x2758, 0x2775}, {0x2780, 0x2794}, {0x2798, 0x27AF},
+ {0x27B1, 0x27BE}, {0x27C0, 0x27E5}, {0x27EE, 0x2984},
+ {0x2987, 0x2B1A}, {0x2B1D, 0x2B4F}, {0x2B51, 0x2B54},
+ {0x2B5A, 0x2B73}, {0x2B76, 0x2B95}, {0x2B98, 0x2BB9},
+ {0x2BBD, 0x2BC8}, {0x2BCA, 0x2BD1}, {0x2BEC, 0x2BEF},
+ {0x2C00, 0x2C2E}, {0x2C30, 0x2C5E}, {0x2C60, 0x2CF3},
+ {0x2CF9, 0x2D25}, {0x2D27, 0x2D27}, {0x2D2D, 0x2D2D},
+ {0x2D30, 0x2D67}, {0x2D6F, 0x2D70}, {0x2D7F, 0x2D96},
+ {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6},
+ {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE},
+ {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, {0x2DE0, 0x2E44},
+ {0x303F, 0x303F}, {0x4DC0, 0x4DFF}, {0xA4D0, 0xA62B},
+ {0xA640, 0xA6F7}, {0xA700, 0xA7AE}, {0xA7B0, 0xA7B7},
+ {0xA7F7, 0xA82B}, {0xA830, 0xA839}, {0xA840, 0xA877},
+ {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9}, {0xA8E0, 0xA8FD},
+ {0xA900, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD},
+ {0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36},
+ {0xAA40, 0xAA4D}, {0xAA50, 0xAA59}, {0xAA5C, 0xAAC2},
+ {0xAADB, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E},
+ {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E},
+ {0xAB30, 0xAB65}, {0xAB70, 0xABED}, {0xABF0, 0xABF9},
+ {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF},
+ {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36},
+ {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41},
+ {0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F},
+ {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD},
+ {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC},
+ {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B},
+ {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D},
+ {0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 0x100FA},
+ {0x10100, 0x10102}, {0x10107, 0x10133}, {0x10137, 0x1018E},
+ {0x10190, 0x1019B}, {0x101A0, 0x101A0}, {0x101D0, 0x101FD},
+ {0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x102E0, 0x102FB},
+ {0x10300, 0x10323}, {0x10330, 0x1034A}, {0x10350, 0x1037A},
+ {0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5},
+ {0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3},
+ {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563},
+ {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755},
+ {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808},
+ {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C},
+ {0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF},
+ {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B},
+ {0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7},
+ {0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06},
+ {0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A33},
+ {0x10A38, 0x10A3A}, {0x10A3F, 0x10A47}, {0x10A50, 0x10A58},
+ {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6},
+ {0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72},
+ {0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF},
+ {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2},
+ {0x10CFA, 0x10CFF}, {0x10E60, 0x10E7E}, {0x11000, 0x1104D},
+ {0x11052, 0x1106F}, {0x1107F, 0x110C1}, {0x110D0, 0x110E8},
+ {0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11143},
+ {0x11150, 0x11176}, {0x11180, 0x111CD}, {0x111D0, 0x111DF},
+ {0x111E1, 0x111F4}, {0x11200, 0x11211}, {0x11213, 0x1123E},
+ {0x11280, 0x11286}, {0x11288, 0x11288}, {0x1128A, 0x1128D},
+ {0x1128F, 0x1129D}, {0x1129F, 0x112A9}, {0x112B0, 0x112EA},
+ {0x112F0, 0x112F9}, {0x11300, 0x11303}, {0x11305, 0x1130C},
+ {0x1130F, 0x11310}, {0x11313, 0x11328}, {0x1132A, 0x11330},
+ {0x11332, 0x11333}, {0x11335, 0x11339}, {0x1133C, 0x11344},
+ {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11350, 0x11350},
+ {0x11357, 0x11357}, {0x1135D, 0x11363}, {0x11366, 0x1136C},
+ {0x11370, 0x11374}, {0x11400, 0x11459}, {0x1145B, 0x1145B},
+ {0x1145D, 0x1145D}, {0x11480, 0x114C7}, {0x114D0, 0x114D9},
+ {0x11580, 0x115B5}, {0x115B8, 0x115DD}, {0x11600, 0x11644},
+ {0x11650, 0x11659}, {0x11660, 0x1166C}, {0x11680, 0x116B7},
+ {0x116C0, 0x116C9}, {0x11700, 0x11719}, {0x1171D, 0x1172B},
+ {0x11730, 0x1173F}, {0x118A0, 0x118F2}, {0x118FF, 0x118FF},
+ {0x11AC0, 0x11AF8}, {0x11C00, 0x11C08}, {0x11C0A, 0x11C36},
+ {0x11C38, 0x11C45}, {0x11C50, 0x11C6C}, {0x11C70, 0x11C8F},
+ {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6}, {0x12000, 0x12399},
+ {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543},
+ {0x13000, 0x1342E}, {0x14400, 0x14646}, {0x16800, 0x16A38},
+ {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, {0x16A6E, 0x16A6F},
+ {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5}, {0x16B00, 0x16B45},
+ {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, {0x16B63, 0x16B77},
+ {0x16B7D, 0x16B8F}, {0x16F00, 0x16F44}, {0x16F50, 0x16F7E},
+ {0x16F8F, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C},
+ {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3},
+ {0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D1E8},
+ {0x1D200, 0x1D245}, {0x1D300, 0x1D356}, {0x1D360, 0x1D371},
+ {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F},
+ {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC},
+ {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3},
+ {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514},
+ {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E},
+ {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550},
+ {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B},
+ {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006},
+ {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024},
+ {0x1E026, 0x1E02A}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6},
+ {0x1E900, 0x1E94A}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F},
+ {0x1EE00, 0x1EE03}, {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22},
+ {0x1EE24, 0x1EE24}, {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32},
+ {0x1EE34, 0x1EE37}, {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B},
+ {0x1EE42, 0x1EE42}, {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49},
+ {0x1EE4B, 0x1EE4B}, {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52},
+ {0x1EE54, 0x1EE54}, {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59},
+ {0x1EE5B, 0x1EE5B}, {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F},
+ {0x1EE61, 0x1EE62}, {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A},
+ {0x1EE6C, 0x1EE72}, {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C},
+ {0x1EE7E, 0x1EE7E}, {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B},
+ {0x1EEA1, 0x1EEA3}, {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB},
+ {0x1EEF0, 0x1EEF1}, {0x1F000, 0x1F003}, {0x1F005, 0x1F02B},
+ {0x1F030, 0x1F093}, {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF},
+ {0x1F0C1, 0x1F0CE}, {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10C},
+ {0x1F12E, 0x1F12E}, {0x1F16A, 0x1F16B}, {0x1F1E6, 0x1F1FF},
+ {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D},
+ {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF},
+ {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F},
+ {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A},
+ {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594},
+ {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F},
+ {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6E0, 0x1F6EA},
+ {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, {0x1F780, 0x1F7D4},
+ {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, {0x1F850, 0x1F859},
+ {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, {0xE0001, 0xE0001},
+ {0xE0020, 0xE007F},
+}
+
+// Condition holds the EastAsianWidth flag indicating whether the current locale is CJK, plus the ZeroWidthJoiner flag for UTR#51 ZWJ handling.
+type Condition struct {
+ EastAsianWidth bool
+ ZeroWidthJoiner bool
+}
+
+// NewCondition returns a new Condition initialized from the current locale settings.
+func NewCondition() *Condition {
+ return &Condition{
+ EastAsianWidth: EastAsianWidth,
+ ZeroWidthJoiner: ZeroWidthJoiner,
+ }
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func (c *Condition) RuneWidth(r rune) int {
+ switch {
+ case r < 0 || r > 0x10FFFF ||
+ inTables(r, nonprint, combining, notassigned):
+ return 0
+ case (c.EastAsianWidth && IsAmbiguousWidth(r)) ||
+ inTables(r, doublewidth, emoji):
+ return 2
+ default:
+ return 1
+ }
+}
+
+func (c *Condition) stringWidth(s string) (width int) {
+ for _, r := range []rune(s) {
+ width += c.RuneWidth(r)
+ }
+ return width
+}
+
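+// stringWidthZeroJoiner measures emoji sequences: variation selectors
+// (U+FE0E/U+FE0F) are skipped, and an emoji joined to a preceding emoji
+// by ZERO WIDTH JOINER (U+200D) contributes no extra width.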
+func (c *Condition) stringWidthZeroJoiner(s string) (width int) {
+ r1, r2 := rune(0), rune(0)
+ for _, r := range []rune(s) {
+ if r == 0xFE0E || r == 0xFE0F {
+ continue
+ }
+ w := c.RuneWidth(r)
+ if r2 == 0x200D && inTables(r, emoji) && inTables(r1, emoji) {
+ w = 0
+ }
+ width += w
+ r1, r2 = r2, r
+ }
+ return width
+}
+
+// StringWidth returns the display width of s in cells.
+func (c *Condition) StringWidth(s string) (width int) {
+ if c.ZeroWidthJoiner {
+ return c.stringWidthZeroJoiner(s)
+ }
+ return c.stringWidth(s)
+}
+
+// Truncate returns s truncated to at most w cells, appending tail when it truncates.
+func (c *Condition) Truncate(s string, w int, tail string) string {
+ if c.StringWidth(s) <= w {
+ return s
+ }
+ r := []rune(s)
+ tw := c.StringWidth(tail)
+ w -= tw
+ width := 0
+ i := 0
+ for ; i < len(r); i++ {
+ cw := c.RuneWidth(r[i])
+ if width+cw > w {
+ break
+ }
+ width += cw
+ }
+ return string(r[0:i]) + tail
+}
+
+// Wrap returns s hard-wrapped so that no line exceeds w cells.
+func (c *Condition) Wrap(s string, w int) string {
+ width := 0
+ out := ""
+ for _, r := range []rune(s) {
+		cw := c.RuneWidth(r) // use this Condition's settings, not the package default
+ if r == '\n' {
+ out += string(r)
+ width = 0
+ continue
+ } else if width+cw > w {
+ out += "\n"
+ width = 0
+ out += string(r)
+ width += cw
+ continue
+ }
+ out += string(r)
+ width += cw
+ }
+ return out
+}
+
+// FillLeft pads s on the left with spaces until it is w cells wide.
+func (c *Condition) FillLeft(s string, w int) string {
+ width := c.StringWidth(s)
+ count := w - width
+ if count > 0 {
+ b := make([]byte, count)
+ for i := range b {
+ b[i] = ' '
+ }
+ return string(b) + s
+ }
+ return s
+}
+
+// FillRight pads s on the right with spaces until it is w cells wide.
+func (c *Condition) FillRight(s string, w int) string {
+ width := c.StringWidth(s)
+ count := w - width
+ if count > 0 {
+ b := make([]byte, count)
+ for i := range b {
+ b[i] = ' '
+ }
+ return s + string(b)
+ }
+ return s
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func RuneWidth(r rune) int {
+ return DefaultCondition.RuneWidth(r)
+}
+
+// IsAmbiguousWidth reports whether r has ambiguous width.
+func IsAmbiguousWidth(r rune) bool {
+ return inTables(r, private, ambiguous)
+}
+
+// IsNeutralWidth reports whether r has neutral width.
+func IsNeutralWidth(r rune) bool {
+ return inTable(r, neutral)
+}
+
+// StringWidth returns the display width of s in cells, as seen on screen.
+func StringWidth(s string) (width int) {
+ return DefaultCondition.StringWidth(s)
+}
+
+// Truncate returns s truncated to at most w cells, with tail appended.
+func Truncate(s string, w int, tail string) string {
+ return DefaultCondition.Truncate(s, w, tail)
+}
+
+// Wrap returns s wrapped so that each line is at most w cells wide.
+func Wrap(s string, w int) string {
+ return DefaultCondition.Wrap(s, w)
+}
+
+// FillLeft returns s left-padded with spaces to a total width of w cells.
+func FillLeft(s string, w int) string {
+ return DefaultCondition.FillLeft(s, w)
+}
+
+// FillRight returns s right-padded with spaces to a total width of w cells.
+func FillRight(s string, w int) string {
+ return DefaultCondition.FillRight(s, w)
+}
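The exported package-level helpers above delegate to DefaultCondition, so cell widths follow the process locale. A minimal usage sketch of this vendored API (the output comments assume the default non-CJK locale, i.e. EastAsianWidth disabled):

```go
package main

import (
	"fmt"

	"github.com/mattn/go-runewidth"
)

func main() {
	// ASCII runes occupy one terminal cell, CJK runes two.
	fmt.Println(runewidth.StringWidth("harbor")) // 6
	fmt.Println(runewidth.StringWidth("ハーバー"))   // 8

	// Truncate to at most 6 cells, appending "..." as the tail.
	fmt.Println(runewidth.Truncate("ハーバー", 6, "...")) // "ハ..."

	// Pad to a 10-cell column: FillLeft right-aligns, FillRight left-aligns.
	fmt.Printf("[%s]\n", runewidth.FillLeft("abc", 10))  // [       abc]
	fmt.Printf("[%s]\n", runewidth.FillRight("abc", 10)) // [abc       ]
}
```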
diff --git a/src/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go b/src/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
new file mode 100644
index 000000000..7d99f6e52
--- /dev/null
+++ b/src/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package runewidth
+
+// IsEastAsian returns true if the current locale is CJK.
+func IsEastAsian() bool {
+ return false
+}
diff --git a/src/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/src/vendor/github.com/mattn/go-runewidth/runewidth_js.go
new file mode 100644
index 000000000..c5fdf40ba
--- /dev/null
+++ b/src/vendor/github.com/mattn/go-runewidth/runewidth_js.go
@@ -0,0 +1,9 @@
+// +build js
+// +build !appengine
+
+package runewidth
+
+func IsEastAsian() bool {
+ // TODO: Implement this for the web. Detect East Asian locales in a compatible way, and return true.
+ return false
+}
diff --git a/src/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/src/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
new file mode 100644
index 000000000..66a58b5d8
--- /dev/null
+++ b/src/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
@@ -0,0 +1,79 @@
+// +build !windows
+// +build !js
+// +build !appengine
+
+package runewidth
+
+import (
+ "os"
+ "regexp"
+ "strings"
+)
+
+var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
+
+var mblenTable = map[string]int{
+ "utf-8": 6,
+ "utf8": 6,
+ "jis": 8,
+ "eucjp": 3,
+ "euckr": 2,
+ "euccn": 2,
+ "sjis": 2,
+ "cp932": 2,
+ "cp51932": 2,
+ "cp936": 2,
+ "cp949": 2,
+ "cp950": 2,
+ "big5": 2,
+ "gbk": 2,
+ "gb2312": 2,
+}
+
+func isEastAsian(locale string) bool {
+ charset := strings.ToLower(locale)
+ r := reLoc.FindStringSubmatch(locale)
+ if len(r) == 2 {
+ charset = strings.ToLower(r[1])
+ }
+
+ if strings.HasSuffix(charset, "@cjk_narrow") {
+ return false
+ }
+
+ for pos, b := range []byte(charset) {
+ if b == '@' {
+ charset = charset[:pos]
+ break
+ }
+ }
+ max := 1
+ if m, ok := mblenTable[charset]; ok {
+ max = m
+ }
+ if max > 1 && (charset[0] != 'u' ||
+ strings.HasPrefix(locale, "ja") ||
+ strings.HasPrefix(locale, "ko") ||
+ strings.HasPrefix(locale, "zh")) {
+ return true
+ }
+ return false
+}
+
+// IsEastAsian returns true if the current locale is CJK.
+func IsEastAsian() bool {
+ locale := os.Getenv("LC_CTYPE")
+ if locale == "" {
+ locale = os.Getenv("LANG")
+ }
+
+ // ignore C locale
+ if locale == "POSIX" || locale == "C" {
+ return false
+ }
+ if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
+ return false
+ }
+
+ return isEastAsian(locale)
+}
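On POSIX builds the CJK decision is derived entirely from LC_CTYPE/LANG, as the code above shows. A small sketch of the observable behavior (the locale values are examples; this applies only to the non-Windows, non-js, non-appengine build):

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-runewidth"
)

func main() {
	// IsEastAsian consults LC_CTYPE first, then falls back to LANG.
	os.Setenv("LC_CTYPE", "ja_JP.UTF-8")
	fmt.Println(runewidth.IsEastAsian()) // true: "ja" prefix with a multibyte charset

	os.Setenv("LC_CTYPE", "en_US.UTF-8")
	fmt.Println(runewidth.IsEastAsian()) // false: UTF-8 but not ja/ko/zh

	os.Setenv("LC_CTYPE", "POSIX")
	fmt.Println(runewidth.IsEastAsian()) // false: the C/POSIX locale is ignored
}
```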
diff --git a/src/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/src/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
new file mode 100644
index 000000000..d6a61777d
--- /dev/null
+++ b/src/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
@@ -0,0 +1,28 @@
+// +build windows
+// +build !appengine
+
+package runewidth
+
+import (
+ "syscall"
+)
+
+var (
+ kernel32 = syscall.NewLazyDLL("kernel32")
+ procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
+)
+
+// IsEastAsian returns true if the current locale is CJK.
+func IsEastAsian() bool {
+ r1, _, _ := procGetConsoleOutputCP.Call()
+ if r1 == 0 {
+ return false
+ }
+
+ switch int(r1) {
+ case 932, 51932, 936, 949, 950:
+ return true
+ }
+
+ return false
+}
diff --git a/src/vendor/github.com/modern-go/concurrent/test.sh b/src/vendor/github.com/modern-go/concurrent/test.sh
old mode 100755
new mode 100644
diff --git a/src/vendor/github.com/modern-go/reflect2/test.sh b/src/vendor/github.com/modern-go/reflect2/test.sh
old mode 100755
new mode 100644
diff --git a/src/vendor/github.com/modern-go/reflect2/type_map.go b/src/vendor/github.com/modern-go/reflect2/type_map.go
index 6d489112f..3acfb5580 100644
--- a/src/vendor/github.com/modern-go/reflect2/type_map.go
+++ b/src/vendor/github.com/modern-go/reflect2/type_map.go
@@ -4,6 +4,7 @@ import (
"reflect"
"runtime"
"strings"
+ "sync"
"unsafe"
)
@@ -15,10 +16,17 @@ func typelinks1() [][]unsafe.Pointer
//go:linkname typelinks2 reflect.typelinks
func typelinks2() (sections []unsafe.Pointer, offset [][]int32)
-var types = map[string]reflect.Type{}
-var packages = map[string]map[string]reflect.Type{}
+// initOnce guards initialization of types and packages
+var initOnce sync.Once
+
+var types map[string]reflect.Type
+var packages map[string]map[string]reflect.Type
+
+// discoverTypes initializes types and packages
+func discoverTypes() {
+ types = make(map[string]reflect.Type)
+ packages = make(map[string]map[string]reflect.Type)
-func init() {
ver := runtime.Version()
if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
loadGo15Types()
@@ -90,11 +98,13 @@ type emptyInterface struct {
// TypeByName return the type by its name, just like Class.forName in java
func TypeByName(typeName string) Type {
+ initOnce.Do(discoverTypes)
return Type2(types[typeName])
}
// TypeByPackageName return the type by its package and name
func TypeByPackageName(pkgPath string, name string) Type {
+ initOnce.Do(discoverTypes)
pkgTypes := packages[pkgPath]
if pkgTypes == nil {
return nil
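The reflect2 patch above replaces an eager init() scan of the runtime's type links with lazy, sync.Once-guarded initialization, so the relatively expensive discovery only runs if TypeByName or TypeByPackageName is actually called. Stripped of reflect2 specifics, the pattern looks roughly like this (illustrative names, not part of reflect2):

```go
package main

import (
	"fmt"
	"sync"
)

var (
	initOnce sync.Once
	registry map[string]int
)

// discover is the expensive setup we want to defer until first use.
func discover() {
	registry = map[string]int{"alpha": 1, "beta": 2}
}

// lookup triggers discovery at most once, even under concurrent callers.
func lookup(name string) int {
	initOnce.Do(discover)
	return registry[name]
}

func main() {
	fmt.Println(lookup("alpha")) // 1
	fmt.Println(lookup("beta"))  // 2
}
```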
diff --git a/src/vendor/github.com/olekukonko/tablewriter/.gitignore b/src/vendor/github.com/olekukonko/tablewriter/.gitignore
new file mode 100644
index 000000000..b66cec635
--- /dev/null
+++ b/src/vendor/github.com/olekukonko/tablewriter/.gitignore
@@ -0,0 +1,15 @@
+# Created by .ignore support plugin (hsz.mobi)
+### Go template
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
diff --git a/src/vendor/github.com/olekukonko/tablewriter/.travis.yml b/src/vendor/github.com/olekukonko/tablewriter/.travis.yml
new file mode 100644
index 000000000..9c64270e2
--- /dev/null
+++ b/src/vendor/github.com/olekukonko/tablewriter/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.1
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+ - 1.6
+ - 1.7
+ - 1.8
+ - 1.9
+ - "1.10"
+ - tip
diff --git a/src/vendor/github.com/astaxie/beego/utils/captcha/LICENSE b/src/vendor/github.com/olekukonko/tablewriter/LICENSE.md
similarity index 93%
rename from src/vendor/github.com/astaxie/beego/utils/captcha/LICENSE
rename to src/vendor/github.com/olekukonko/tablewriter/LICENSE.md
index 0ad73ae0e..a0769b5c1 100644
--- a/src/vendor/github.com/astaxie/beego/utils/captcha/LICENSE
+++ b/src/vendor/github.com/olekukonko/tablewriter/LICENSE.md
@@ -1,4 +1,4 @@
-Copyright (c) 2011-2014 Dmitry Chestnykh
+Copyright (C) 2014 by Oleku Konko
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/src/vendor/github.com/olekukonko/tablewriter/README.md b/src/vendor/github.com/olekukonko/tablewriter/README.md
new file mode 100644
index 000000000..9c2b139b2
--- /dev/null
+++ b/src/vendor/github.com/olekukonko/tablewriter/README.md
@@ -0,0 +1,277 @@
+ASCII Table Writer
+=========
+
+[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter)
+[![Total views](https://img.shields.io/sourcegraph/rrc/github.com/olekukonko/tablewriter.svg)](https://sourcegraph.com/github.com/olekukonko/tablewriter)
+[![Godoc](https://godoc.org/github.com/olekukonko/tablewriter?status.svg)](https://godoc.org/github.com/olekukonko/tablewriter)
+
+Generate ASCII tables on the fly ... Installation is as simple as
+
+ go get github.com/olekukonko/tablewriter
+
+
+#### Features
+- Automatic Padding
+- Support Multiple Lines
+- Supports Alignment
+- Support Custom Separators
+- Automatic Alignment of numbers & percentage
+- Write directly to http, file etc via `io.Writer`
+- Read directly from CSV file
+- Optional row line via `SetRowLine`
+- Normalise table header
+- Make CSV Headers optional
+- Enable or disable table border
+- Set custom footer support
+- Optional identical cells merging
+- Set custom caption
+- Optional reflowing of paragraphs in multi-line cells.
+
+#### Example 1 - Basic
+```go
+data := [][]string{
+ []string{"A", "The Good", "500"},
+ []string{"B", "The Very very Bad Man", "288"},
+ []string{"C", "The Ugly", "120"},
+ []string{"D", "The Gopher", "800"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Name", "Sign", "Rating"})
+
+for _, v := range data {
+ table.Append(v)
+}
+table.Render() // Send output
+```
+
+##### Output 1
+```
++------+-----------------------+--------+
+| NAME | SIGN | RATING |
++------+-----------------------+--------+
+| A | The Good | 500 |
+| B | The Very very Bad Man | 288 |
+| C | The Ugly | 120 |
+| D | The Gopher | 800 |
++------+-----------------------+--------+
+```
+
+#### Example 2 - Without Border / Footer / Bulk Append
+```go
+data := [][]string{
+ []string{"1/1/2014", "Domain name", "2233", "$10.98"},
+ []string{"1/1/2014", "January Hosting", "2233", "$54.95"},
+ []string{"1/4/2014", "February Hosting", "2233", "$51.00"},
+ []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
+table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer
+table.SetBorder(false) // Set Border to false
+table.AppendBulk(data) // Add Bulk Data
+table.Render()
+```
+
+##### Output 2
+```
+
+ DATE | DESCRIPTION | CV2 | AMOUNT
++----------+--------------------------+-------+---------+
+ 1/1/2014 | Domain name | 2233 | $10.98
+ 1/1/2014 | January Hosting | 2233 | $54.95
+ 1/4/2014 | February Hosting | 2233 | $51.00
+ 1/4/2014 | February Extra Bandwidth | 2233 | $30.00
++----------+--------------------------+-------+---------+
+ TOTAL | $146 93
+ +-------+---------+
+
+```
+
+
+#### Example 3 - CSV
+```go
+table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test_info.csv", true)
+table.SetAlignment(tablewriter.ALIGN_LEFT) // Set Alignment
+table.Render()
+```
+
+##### Output 3
+```
++----------+--------------+------+-----+---------+----------------+
+| FIELD | TYPE | NULL | KEY | DEFAULT | EXTRA |
++----------+--------------+------+-----+---------+----------------+
+| user_id | smallint(5) | NO | PRI | NULL | auto_increment |
+| username | varchar(10) | NO | | NULL | |
+| password | varchar(100) | NO | | NULL | |
++----------+--------------+------+-----+---------+----------------+
+```
+
+#### Example 4 - Custom Separator
+```go
+table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test.csv", true)
+table.SetRowLine(true) // Enable row line
+
+// Change table lines
+table.SetCenterSeparator("*")
+table.SetColumnSeparator("╪")
+table.SetRowSeparator("-")
+
+table.SetAlignment(tablewriter.ALIGN_LEFT)
+table.Render()
+```
+
+##### Output 4
+```
+*------------*-----------*---------*
+╪ FIRST NAME ╪ LAST NAME ╪ SSN ╪
+*------------*-----------*---------*
+╪ John ╪ Barry ╪ 123456 ╪
+*------------*-----------*---------*
+╪ Kathy ╪ Smith ╪ 687987 ╪
+*------------*-----------*---------*
+╪ Bob ╪ McCornick ╪ 3979870 ╪
+*------------*-----------*---------*
+```
+
+#### Example 5 - Markdown Format
+```go
+data := [][]string{
+ []string{"1/1/2014", "Domain name", "2233", "$10.98"},
+ []string{"1/1/2014", "January Hosting", "2233", "$54.95"},
+ []string{"1/4/2014", "February Hosting", "2233", "$51.00"},
+ []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
+table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
+table.SetCenterSeparator("|")
+table.AppendBulk(data) // Add Bulk Data
+table.Render()
+```
+
+##### Output 5
+```
+| DATE | DESCRIPTION | CV2 | AMOUNT |
+|----------|--------------------------|------|--------|
+| 1/1/2014 | Domain name | 2233 | $10.98 |
+| 1/1/2014 | January Hosting | 2233 | $54.95 |
+| 1/4/2014 | February Hosting | 2233 | $51.00 |
+| 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 |
+```
+
+#### Example 6 - Identical cells merging
+```go
+data := [][]string{
+ []string{"1/1/2014", "Domain name", "1234", "$10.98"},
+ []string{"1/1/2014", "January Hosting", "2345", "$54.95"},
+ []string{"1/4/2014", "February Hosting", "3456", "$51.00"},
+ []string{"1/4/2014", "February Extra Bandwidth", "4567", "$30.00"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
+table.SetFooter([]string{"", "", "Total", "$146.93"})
+table.SetAutoMergeCells(true)
+table.SetRowLine(true)
+table.AppendBulk(data)
+table.Render()
+```
+
+##### Output 6
+```
++----------+--------------------------+-------+---------+
+| DATE | DESCRIPTION | CV2 | AMOUNT |
++----------+--------------------------+-------+---------+
+| 1/1/2014 | Domain name | 1234 | $10.98 |
++ +--------------------------+-------+---------+
+| | January Hosting | 2345 | $54.95 |
++----------+--------------------------+-------+---------+
+| 1/4/2014 | February Hosting | 3456 | $51.00 |
++ +--------------------------+-------+---------+
+| | February Extra Bandwidth | 4567 | $30.00 |
++----------+--------------------------+-------+---------+
+| TOTAL | $146 93 |
++----------+--------------------------+-------+---------+
+```
+
+
+#### Table with color
+```go
+data := [][]string{
+ []string{"1/1/2014", "Domain name", "2233", "$10.98"},
+ []string{"1/1/2014", "January Hosting", "2233", "$54.95"},
+ []string{"1/4/2014", "February Hosting", "2233", "$51.00"},
+ []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
+table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer
+table.SetBorder(false) // Set Border to false
+
+table.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgGreenColor},
+ tablewriter.Colors{tablewriter.FgHiRedColor, tablewriter.Bold, tablewriter.BgBlackColor},
+ tablewriter.Colors{tablewriter.BgRedColor, tablewriter.FgWhiteColor},
+ tablewriter.Colors{tablewriter.BgCyanColor, tablewriter.FgWhiteColor})
+
+table.SetColumnColor(tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor},
+ tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiRedColor},
+ tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor},
+ tablewriter.Colors{tablewriter.Bold, tablewriter.FgBlackColor})
+
+table.SetFooterColor(tablewriter.Colors{}, tablewriter.Colors{},
+ tablewriter.Colors{tablewriter.Bold},
+ tablewriter.Colors{tablewriter.FgHiRedColor})
+
+table.AppendBulk(data)
+table.Render()
+```
+
+#### Table with color Output
+![Table with Color](https://cloud.githubusercontent.com/assets/6460392/21101956/bbc7b356-c0a1-11e6-9f36-dba694746efc.png)
+
+#### Example 7 - Set table caption
+```go
+data := [][]string{
+ []string{"A", "The Good", "500"},
+ []string{"B", "The Very very Bad Man", "288"},
+ []string{"C", "The Ugly", "120"},
+ []string{"D", "The Gopher", "800"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Name", "Sign", "Rating"})
+table.SetCaption(true, "Movie ratings.")
+
+for _, v := range data {
+ table.Append(v)
+}
+table.Render() // Send output
+```
+
+Note: Caption text will wrap to the total width of the rendered table.
+
+##### Output 7
+```
++------+-----------------------+--------+
+| NAME | SIGN | RATING |
++------+-----------------------+--------+
+| A | The Good | 500 |
+| B | The Very very Bad Man | 288 |
+| C | The Ugly | 120 |
+| D | The Gopher | 800 |
++------+-----------------------+--------+
+Movie ratings.
+```
+
+#### TODO
+- ~~Import Directly from CSV~~ - `done`
+- ~~Support for `SetFooter`~~ - `done`
+- ~~Support for `SetBorder`~~ - `done`
+- ~~Support table with uneven rows~~ - `done`
+- ~~Support custom alignment~~
+- General Improvement & Optimisation
+- `NewHTML` Parse table from HTML
diff --git a/src/vendor/github.com/olekukonko/tablewriter/csv.go b/src/vendor/github.com/olekukonko/tablewriter/csv.go
new file mode 100644
index 000000000..98878303b
--- /dev/null
+++ b/src/vendor/github.com/olekukonko/tablewriter/csv.go
@@ -0,0 +1,52 @@
+// Copyright 2014 Oleku Konko All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+// This module is a Table Writer API for the Go Programming Language.
+// The protocols were written in pure Go and work on Windows and Unix systems.
+
+package tablewriter
+
+import (
+ "encoding/csv"
+ "io"
+ "os"
+)
+
+// NewCSV starts a new table by importing from a CSV file.
+// It takes an io.Writer and a CSV file name.
+func NewCSV(writer io.Writer, fileName string, hasHeader bool) (*Table, error) {
+ file, err := os.Open(fileName)
+ if err != nil {
+ return &Table{}, err
+ }
+ defer file.Close()
+ csvReader := csv.NewReader(file)
+ t, err := NewCSVReader(writer, csvReader, hasHeader)
+ return t, err
+}
+
+// NewCSVReader starts a new table writer from a csv.Reader.
+// This enables customisation such as reader.Comma = ';'
+// See http://golang.org/src/pkg/encoding/csv/reader.go?s=3213:3671#L94
+func NewCSVReader(writer io.Writer, csvReader *csv.Reader, hasHeader bool) (*Table, error) {
+ t := NewWriter(writer)
+ if hasHeader {
+ // Read the first row
+ headers, err := csvReader.Read()
+ if err != nil {
+ return &Table{}, err
+ }
+ t.SetHeader(headers)
+ }
+ for {
+ record, err := csvReader.Read()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return &Table{}, err
+ }
+ t.Append(record)
+ }
+ return t, nil
+}
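As the comment on NewCSVReader says, taking a *csv.Reader lets callers tune parsing before the table consumes it. A hedged usage sketch (the input path is hypothetical):

```go
package main

import (
	"encoding/csv"
	"log"
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	f, err := os.Open("testdata/semicolons.csv") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r := csv.NewReader(f)
	r.Comma = ';' // customisation that plain NewCSV does not expose

	table, err := tablewriter.NewCSVReader(os.Stdout, r, true)
	if err != nil {
		log.Fatal(err)
	}
	table.Render()
}
```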
diff --git a/src/vendor/github.com/olekukonko/tablewriter/table.go b/src/vendor/github.com/olekukonko/tablewriter/table.go
new file mode 100644
index 000000000..dec0385f5
--- /dev/null
+++ b/src/vendor/github.com/olekukonko/tablewriter/table.go
@@ -0,0 +1,839 @@
+// Copyright 2014 Oleku Konko All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+// This module is a Table Writer API for the Go Programming Language.
+// The protocols were written in pure Go and work on Windows and Unix systems.
+
+// Package tablewriter creates and generates text-based tables.
+package tablewriter
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "regexp"
+ "strings"
+)
+
+const (
+ MAX_ROW_WIDTH = 30
+)
+
+const (
+ CENTER = "+"
+ ROW = "-"
+ COLUMN = "|"
+ SPACE = " "
+ NEWLINE = "\n"
+)
+
+const (
+ ALIGN_DEFAULT = iota
+ ALIGN_CENTER
+ ALIGN_RIGHT
+ ALIGN_LEFT
+)
+
+var (
+ decimal = regexp.MustCompile(`^-?(?:\d{1,3}(?:,\d{3})*|\d+)(?:\.\d+)?$`)
+ percent = regexp.MustCompile(`^-?\d+\.?\d*%$`)
+)
+
+type Border struct {
+ Left bool
+ Right bool
+ Top bool
+ Bottom bool
+}
+
+type Table struct {
+ out io.Writer
+ rows [][]string
+ lines [][][]string
+ cs map[int]int
+ rs map[int]int
+ headers [][]string
+ footers [][]string
+ caption bool
+ captionText string
+ autoFmt bool
+ autoWrap bool
+ reflowText bool
+ mW int
+ pCenter string
+ pRow string
+ pColumn string
+ tColumn int
+ tRow int
+ hAlign int
+ fAlign int
+ align int
+ newLine string
+ rowLine bool
+ autoMergeCells bool
+ hdrLine bool
+ borders Border
+ colSize int
+ headerParams []string
+ columnsParams []string
+ footerParams []string
+ columnsAlign []int
+}
+
+// NewWriter starts a new table.
+// It takes an io.Writer directly.
+func NewWriter(writer io.Writer) *Table {
+ t := &Table{
+ out: writer,
+ rows: [][]string{},
+ lines: [][][]string{},
+ cs: make(map[int]int),
+ rs: make(map[int]int),
+ headers: [][]string{},
+ footers: [][]string{},
+ caption: false,
+ captionText: "Table caption.",
+ autoFmt: true,
+ autoWrap: true,
+ reflowText: true,
+ mW: MAX_ROW_WIDTH,
+ pCenter: CENTER,
+ pRow: ROW,
+ pColumn: COLUMN,
+ tColumn: -1,
+ tRow: -1,
+ hAlign: ALIGN_DEFAULT,
+ fAlign: ALIGN_DEFAULT,
+ align: ALIGN_DEFAULT,
+ newLine: NEWLINE,
+ rowLine: false,
+ hdrLine: true,
+ borders: Border{Left: true, Right: true, Bottom: true, Top: true},
+ colSize: -1,
+ headerParams: []string{},
+ columnsParams: []string{},
+ footerParams: []string{},
+ columnsAlign: []int{}}
+ return t
+}
+
+// Render table output
+func (t *Table) Render() {
+ if t.borders.Top {
+ t.printLine(true)
+ }
+ t.printHeading()
+ if t.autoMergeCells {
+ t.printRowsMergeCells()
+ } else {
+ t.printRows()
+ }
+ if !t.rowLine && t.borders.Bottom {
+ t.printLine(true)
+ }
+ t.printFooter()
+
+ if t.caption {
+ t.printCaption()
+ }
+}
+
+const (
+ headerRowIdx = -1
+ footerRowIdx = -2
+)
+
+// Set table header
+func (t *Table) SetHeader(keys []string) {
+ t.colSize = len(keys)
+ for i, v := range keys {
+ lines := t.parseDimension(v, i, headerRowIdx)
+ t.headers = append(t.headers, lines)
+ }
+}
+
+// Set table Footer
+func (t *Table) SetFooter(keys []string) {
+ //t.colSize = len(keys)
+ for i, v := range keys {
+ lines := t.parseDimension(v, i, footerRowIdx)
+ t.footers = append(t.footers, lines)
+ }
+}
+
+// Set table Caption
+func (t *Table) SetCaption(caption bool, captionText ...string) {
+ t.caption = caption
+ if len(captionText) == 1 {
+ t.captionText = captionText[0]
+ }
+}
+
+// Turn header autoformatting on/off. Default is on (true).
+func (t *Table) SetAutoFormatHeaders(auto bool) {
+ t.autoFmt = auto
+}
+
+// Turn automatic multiline text adjustment on/off. Default is on (true).
+func (t *Table) SetAutoWrapText(auto bool) {
+ t.autoWrap = auto
+}
+
+// Turn automatic reflowing of multiline text when wrapping on/off. Default is on (true).
+func (t *Table) SetReflowDuringAutoWrap(auto bool) {
+ t.reflowText = auto
+}
+
+// Set the Default column width
+func (t *Table) SetColWidth(width int) {
+ t.mW = width
+}
+
+// Set the minimal width for a column
+func (t *Table) SetColMinWidth(column int, width int) {
+ t.cs[column] = width
+}
+
+// Set the Column Separator
+func (t *Table) SetColumnSeparator(sep string) {
+ t.pColumn = sep
+}
+
+// Set the Row Separator
+func (t *Table) SetRowSeparator(sep string) {
+ t.pRow = sep
+}
+
+// Set the center Separator
+func (t *Table) SetCenterSeparator(sep string) {
+ t.pCenter = sep
+}
+
+// Set Header Alignment
+func (t *Table) SetHeaderAlignment(hAlign int) {
+ t.hAlign = hAlign
+}
+
+// Set Footer Alignment
+func (t *Table) SetFooterAlignment(fAlign int) {
+ t.fAlign = fAlign
+}
+
+// Set Table Alignment
+func (t *Table) SetAlignment(align int) {
+ t.align = align
+}
+
+func (t *Table) SetColumnAlignment(keys []int) {
+ for _, v := range keys {
+ switch v {
+ case ALIGN_CENTER:
+ break
+ case ALIGN_LEFT:
+ break
+ case ALIGN_RIGHT:
+ break
+ default:
+ v = ALIGN_DEFAULT
+ }
+ t.columnsAlign = append(t.columnsAlign, v)
+ }
+}
+
+// Set New Line
+func (t *Table) SetNewLine(nl string) {
+ t.newLine = nl
+}
+
+// Set Header Line
+// This would enable / disable a line after the header
+func (t *Table) SetHeaderLine(line bool) {
+ t.hdrLine = line
+}
+
+// Set Row Line
+// This would enable / disable a line on each row of the table
+func (t *Table) SetRowLine(line bool) {
+ t.rowLine = line
+}
+
+// Set Auto Merge Cells
+// This would enable / disable the merge of cells with identical values
+func (t *Table) SetAutoMergeCells(auto bool) {
+ t.autoMergeCells = auto
+}
+
+// Set Table Border
+// This would enable / disable line around the table
+func (t *Table) SetBorder(border bool) {
+ t.SetBorders(Border{border, border, border, border})
+}
+
+func (t *Table) SetBorders(border Border) {
+ t.borders = border
+}
+
+// Append row to table
+func (t *Table) Append(row []string) {
+ rowSize := len(t.headers)
+ if rowSize > t.colSize {
+ t.colSize = rowSize
+ }
+
+ n := len(t.lines)
+ line := [][]string{}
+ for i, v := range row {
+
+ // Detect string width
+ // Detect String height
+ // Break strings into words
+ out := t.parseDimension(v, i, n)
+
+ // Append broken words
+ line = append(line, out)
+ }
+ t.lines = append(t.lines, line)
+}
+
+// Allow Support for Bulk Append
+// Eliminates repeated for loops
+func (t *Table) AppendBulk(rows [][]string) {
+ for _, row := range rows {
+ t.Append(row)
+ }
+}
+
+// NumLines returns the number of rows appended so far.
+func (t *Table) NumLines() int {
+ return len(t.lines)
+}
+
+// Clear rows
+func (t *Table) ClearRows() {
+ t.lines = [][][]string{}
+}
+
+// Clear footer
+func (t *Table) ClearFooter() {
+ t.footers = [][]string{}
+}
+
+// Print line based on row width
+func (t *Table) printLine(nl bool) {
+ fmt.Fprint(t.out, t.pCenter)
+ for i := 0; i < len(t.cs); i++ {
+ v := t.cs[i]
+ fmt.Fprintf(t.out, "%s%s%s%s",
+ t.pRow,
+ strings.Repeat(string(t.pRow), v),
+ t.pRow,
+ t.pCenter)
+ }
+ if nl {
+ fmt.Fprint(t.out, t.newLine)
+ }
+}
+
+// Print line based on row width with or without cell separator
+func (t *Table) printLineOptionalCellSeparators(nl bool, displayCellSeparator []bool) {
+ fmt.Fprint(t.out, t.pCenter)
+ for i := 0; i < len(t.cs); i++ {
+ v := t.cs[i]
+ if i >= len(displayCellSeparator) || displayCellSeparator[i] {
+ // Display the cell separator
+ fmt.Fprintf(t.out, "%s%s%s%s",
+ t.pRow,
+ strings.Repeat(string(t.pRow), v),
+ t.pRow,
+ t.pCenter)
+ } else {
+ // Don't display the cell separator for this cell
+ fmt.Fprintf(t.out, "%s%s",
+ strings.Repeat(" ", v+2),
+ t.pCenter)
+ }
+ }
+ if nl {
+ fmt.Fprint(t.out, t.newLine)
+ }
+}
+
+// Return the PadRight function if align is left, PadLeft if align is right,
+// and Pad by default
+func pad(align int) func(string, string, int) string {
+ padFunc := Pad
+ switch align {
+ case ALIGN_LEFT:
+ padFunc = PadRight
+ case ALIGN_RIGHT:
+ padFunc = PadLeft
+ }
+ return padFunc
+}
+
+// Print heading information
+func (t *Table) printHeading() {
+ // Check if headers are available
+ if len(t.headers) < 1 {
+ return
+ }
+
+ // Identify last column
+ end := len(t.cs) - 1
+
+ // Get pad function
+ padFunc := pad(t.hAlign)
+
+ // Checking for ANSI escape sequences for header
+ is_esc_seq := false
+ if len(t.headerParams) > 0 {
+ is_esc_seq = true
+ }
+
+ // Maximum height.
+ max := t.rs[headerRowIdx]
+
+ // Print Heading
+ for x := 0; x < max; x++ {
+ // Check if border is set
+ // Replace with space if not set
+ fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE))
+
+ for y := 0; y <= end; y++ {
+ v := t.cs[y]
+ h := ""
+ if y < len(t.headers) && x < len(t.headers[y]) {
+ h = t.headers[y][x]
+ }
+ if t.autoFmt {
+ h = Title(h)
+ }
+ pad := ConditionString((y == end && !t.borders.Left), SPACE, t.pColumn)
+
+ if is_esc_seq {
+ fmt.Fprintf(t.out, " %s %s",
+ format(padFunc(h, SPACE, v),
+ t.headerParams[y]), pad)
+ } else {
+ fmt.Fprintf(t.out, " %s %s",
+ padFunc(h, SPACE, v),
+ pad)
+ }
+ }
+ // Next line
+ fmt.Fprint(t.out, t.newLine)
+ }
+ if t.hdrLine {
+ t.printLine(true)
+ }
+}
+
+// Print footer information
+func (t *Table) printFooter() {
+ // Check if footers are available
+ if len(t.footers) < 1 {
+ return
+ }
+
+ // Only print line if border is not set
+ if !t.borders.Bottom {
+ t.printLine(true)
+ }
+
+ // Identify last column
+ end := len(t.cs) - 1
+
+ // Get pad function
+ padFunc := pad(t.fAlign)
+
+ // Checking for ANSI escape sequences for footer
+ is_esc_seq := false
+ if len(t.footerParams) > 0 {
+ is_esc_seq = true
+ }
+
+ // Maximum height.
+ max := t.rs[footerRowIdx]
+
+ // Print Footer
+ erasePad := make([]bool, len(t.footers))
+ for x := 0; x < max; x++ {
+ // Check if border is set
+ // Replace with space if not set
+ fmt.Fprint(t.out, ConditionString(t.borders.Bottom, t.pColumn, SPACE))
+
+ for y := 0; y <= end; y++ {
+ v := t.cs[y]
+ f := ""
+ if y < len(t.footers) && x < len(t.footers[y]) {
+ f = t.footers[y][x]
+ }
+ if t.autoFmt {
+ f = Title(f)
+ }
+ pad := ConditionString((y == end && !t.borders.Top), SPACE, t.pColumn)
+
+ if erasePad[y] || (x == 0 && len(f) == 0) {
+ pad = SPACE
+ erasePad[y] = true
+ }
+
+ if is_esc_seq {
+ fmt.Fprintf(t.out, " %s %s",
+ format(padFunc(f, SPACE, v),
+ t.footerParams[y]), pad)
+ } else {
+ fmt.Fprintf(t.out, " %s %s",
+ padFunc(f, SPACE, v),
+ pad)
+ }
+
+ //fmt.Fprintf(t.out, " %s %s",
+ // padFunc(f, SPACE, v),
+ // pad)
+ }
+ // Next line
+ fmt.Fprint(t.out, t.newLine)
+ //t.printLine(true)
+ }
+
+ hasPrinted := false
+
+ for i := 0; i <= end; i++ {
+ v := t.cs[i]
+ pad := t.pRow
+ center := t.pCenter
+ length := len(t.footers[i][0])
+
+ if length > 0 {
+ hasPrinted = true
+ }
+
+ // Set center to be space if length is 0
+ if length == 0 && !t.borders.Right {
+ center = SPACE
+ }
+
+ // Print first junction
+ if i == 0 {
+ fmt.Fprint(t.out, center)
+ }
+
+ // Pad with space if length is 0
+ if length == 0 {
+ pad = SPACE
+ }
+ // Ignore left space if it has printed before
+ if hasPrinted || t.borders.Left {
+ pad = t.pRow
+ center = t.pCenter
+ }
+
+ // Change Center start position
+ if center == SPACE {
+ if i < end && len(t.footers[i+1][0]) != 0 {
+ center = t.pCenter
+ }
+ }
+
+ // Print the footer
+ fmt.Fprintf(t.out, "%s%s%s%s",
+ pad,
+ strings.Repeat(string(pad), v),
+ pad,
+ center)
+
+ }
+
+ fmt.Fprint(t.out, t.newLine)
+}
+
+// Print caption text
+func (t Table) printCaption() {
+ width := t.getTableWidth()
+ paragraph, _ := WrapString(t.captionText, width)
+ for linecount := 0; linecount < len(paragraph); linecount++ {
+ fmt.Fprintln(t.out, paragraph[linecount])
+ }
+}
+
+// Calculate the total number of characters in a row
+func (t Table) getTableWidth() int {
+ var chars int
+ for _, v := range t.cs {
+ chars += v
+ }
+
+ // Add chars, spaces, separators to calculate the total width of the table.
+ // ncols := t.colSize
+ // spaces := ncols * 2
+ // seps := ncols + 1
+
+ return (chars + (3 * t.colSize) + 2)
+}
+
+func (t Table) printRows() {
+ for i, lines := range t.lines {
+ t.printRow(lines, i)
+ }
+}
+
+func (t *Table) fillAlignment(num int) {
+ if len(t.columnsAlign) < num {
+ t.columnsAlign = make([]int, num)
+ for i := range t.columnsAlign {
+ t.columnsAlign[i] = t.align
+ }
+ }
+}
+
+// Print Row Information
+// Adjust column alignment based on type
+
+func (t *Table) printRow(columns [][]string, rowIdx int) {
+ // Get Maximum Height
+ max := t.rs[rowIdx]
+ total := len(columns)
+
+ // TODO Fix uneven col size
+ // if total < t.colSize {
+ // for n := t.colSize - total; n < t.colSize ; n++ {
+ // columns = append(columns, []string{SPACE})
+ // t.cs[n] = t.mW
+ // }
+ //}
+
+ // Pad Each Height
+ pads := []int{}
+
+ // Checking for ANSI escape sequences for columns
+ is_esc_seq := false
+ if len(t.columnsParams) > 0 {
+ is_esc_seq = true
+ }
+ t.fillAlignment(total)
+
+ for i, line := range columns {
+ length := len(line)
+ pad := max - length
+ pads = append(pads, pad)
+ for n := 0; n < pad; n++ {
+ columns[i] = append(columns[i], " ")
+ }
+ }
+ //fmt.Println(max, "\n")
+ for x := 0; x < max; x++ {
+ for y := 0; y < total; y++ {
+
+ // Check if border is set
+ fmt.Fprint(t.out, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn))
+
+ fmt.Fprintf(t.out, SPACE)
+ str := columns[y][x]
+
+ // Embedding escape sequence with column value
+ if is_esc_seq {
+ str = format(str, t.columnsParams[y])
+ }
+
+ // This would print alignment
+ // Default alignment would use multiple configuration
+ switch t.columnsAlign[y] {
+ case ALIGN_CENTER: //
+ fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y]))
+ case ALIGN_RIGHT:
+ fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y]))
+ case ALIGN_LEFT:
+ fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y]))
+ default:
+ if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) {
+ fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y]))
+ } else {
+ fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y]))
+
+ // TODO Custom alignment per column
+ //if max == 1 || pads[y] > 0 {
+ // fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y]))
+ //} else {
+ // fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y]))
+ //}
+
+ }
+ }
+ fmt.Fprintf(t.out, SPACE)
+ }
+ // Check if border is set
+ // Replace with space if not set
+ fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE))
+ fmt.Fprint(t.out, t.newLine)
+ }
+
+ if t.rowLine {
+ t.printLine(true)
+ }
+}
+
+// Print the rows of the table and merge the cells that are identical
+func (t *Table) printRowsMergeCells() {
+ var previousLine []string
+ var displayCellBorder []bool
+ var tmpWriter bytes.Buffer
+ for i, lines := range t.lines {
+ // We store the display of the current line in a tmp writer, as we need to know which border needs to be printed above
+ previousLine, displayCellBorder = t.printRowMergeCells(&tmpWriter, lines, i, previousLine)
+ if i > 0 { //We don't need to print borders above first line
+ if t.rowLine {
+ t.printLineOptionalCellSeparators(true, displayCellBorder)
+ }
+ }
+ tmpWriter.WriteTo(t.out)
+ }
+ //Print the end of the table
+ if t.rowLine {
+ t.printLine(true)
+ }
+}
+
+// Print Row Information to a writer and merge identical cells.
+// Adjust column alignment based on type
+
+func (t *Table) printRowMergeCells(writer io.Writer, columns [][]string, rowIdx int, previousLine []string) ([]string, []bool) {
+ // Get Maximum Height
+ max := t.rs[rowIdx]
+ total := len(columns)
+
+ // Pad Each Height
+ pads := []int{}
+
+ for i, line := range columns {
+ length := len(line)
+ pad := max - length
+ pads = append(pads, pad)
+ for n := 0; n < pad; n++ {
+ columns[i] = append(columns[i], " ")
+ }
+ }
+
+ var displayCellBorder []bool
+ t.fillAlignment(total)
+ for x := 0; x < max; x++ {
+ for y := 0; y < total; y++ {
+
+ // Check if border is set
+ fmt.Fprint(writer, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn))
+
+ fmt.Fprintf(writer, SPACE)
+
+ str := columns[y][x]
+
+ if t.autoMergeCells {
+ //Store the full line to merge multi-line cells
+ fullLine := strings.Join(columns[y], " ")
+ if len(previousLine) > y && fullLine == previousLine[y] && fullLine != "" {
+ // If this cell is identical to the one above but not empty, we don't display the border and keep the cell empty.
+ displayCellBorder = append(displayCellBorder, false)
+ str = ""
+ } else {
+ // First line or different content, keep the content and print the cell border
+ displayCellBorder = append(displayCellBorder, true)
+ }
+ }
+
+ // This would print alignment
+ // Default alignment would use multiple configuration
+ switch t.columnsAlign[y] {
+ case ALIGN_CENTER: //
+ fmt.Fprintf(writer, "%s", Pad(str, SPACE, t.cs[y]))
+ case ALIGN_RIGHT:
+ fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y]))
+ case ALIGN_LEFT:
+ fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y]))
+ default:
+ if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) {
+ fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y]))
+ } else {
+ fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y]))
+ }
+ }
+ fmt.Fprintf(writer, SPACE)
+ }
+ // Check if border is set
+ // Replace with space if not set
+ fmt.Fprint(writer, ConditionString(t.borders.Left, t.pColumn, SPACE))
+ fmt.Fprint(writer, t.newLine)
+ }
+
+ //The new previous line is the current one
+ previousLine = make([]string, total)
+ for y := 0; y < total; y++ {
+ previousLine[y] = strings.Join(columns[y], " ") //Store the full line for multi-line cells
+ }
+ //Returns the newly added line and whether or not a border should be displayed above.
+ return previousLine, displayCellBorder
+}
+
+func (t *Table) parseDimension(str string, colKey, rowKey int) []string {
+ var (
+ raw []string
+ maxWidth int
+ )
+
+ raw = getLines(str)
+ maxWidth = 0
+ for _, line := range raw {
+ if w := DisplayWidth(line); w > maxWidth {
+ maxWidth = w
+ }
+ }
+
+ // If wrapping, ensure that all paragraphs in the cell fit in the
+ // specified width.
+ if t.autoWrap {
+ // If there's a maximum allowed width for wrapping, use that.
+ if maxWidth > t.mW {
+ maxWidth = t.mW
+ }
+
+ // In the process of doing so, we need to recompute maxWidth. This
+ // is because perhaps a word in the cell is longer than the
+ // allowed maximum width in t.mW.
+ newMaxWidth := maxWidth
+ newRaw := make([]string, 0, len(raw))
+
+ if t.reflowText {
+ // Make a single paragraph of everything.
+ raw = []string{strings.Join(raw, " ")}
+ }
+ for i, para := range raw {
+ paraLines, _ := WrapString(para, maxWidth)
+ for _, line := range paraLines {
+ if w := DisplayWidth(line); w > newMaxWidth {
+ newMaxWidth = w
+ }
+ }
+ if i > 0 {
+ newRaw = append(newRaw, " ")
+ }
+ newRaw = append(newRaw, paraLines...)
+ }
+ raw = newRaw
+ maxWidth = newMaxWidth
+ }
+
+ // Store the new known maximum width.
+ v, ok := t.cs[colKey]
+ if !ok || v < maxWidth || v == 0 {
+ t.cs[colKey] = maxWidth
+ }
+
+ // Remember the number of lines for the row printer.
+ h := len(raw)
+ v, ok = t.rs[rowKey]
+
+ if !ok || v < h || v == 0 {
+ t.rs[rowKey] = h
+ }
+ //fmt.Printf("Raw %+v %d\n", raw, len(raw))
+ return raw
+}
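parseDimension is the function that feeds the column-width map (t.cs) and row-height map (t.rs), so SetColWidth and SetAutoWrapText directly shape its result. A short sketch of how those knobs interact for a long cell (the header and cell text are made up):

```go
package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Key", "Description"})

	// Cells wider than the column width (default 30 cells) are
	// wrapped across multiple lines inside the cell.
	table.SetColWidth(20)
	table.SetAutoWrapText(true)

	table.Append([]string{"retention", "How long artifacts are kept before the garbage collector removes them"})
	table.Render()
}
```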
diff --git a/src/vendor/github.com/olekukonko/tablewriter/table_with_color.go b/src/vendor/github.com/olekukonko/tablewriter/table_with_color.go
new file mode 100644
index 000000000..5a4a53ec2
--- /dev/null
+++ b/src/vendor/github.com/olekukonko/tablewriter/table_with_color.go
@@ -0,0 +1,134 @@
+package tablewriter
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+const ESC = "\033"
+const SEP = ";"
+
+const (
+ BgBlackColor int = iota + 40
+ BgRedColor
+ BgGreenColor
+ BgYellowColor
+ BgBlueColor
+ BgMagentaColor
+ BgCyanColor
+ BgWhiteColor
+)
+
+const (
+ FgBlackColor int = iota + 30
+ FgRedColor
+ FgGreenColor
+ FgYellowColor
+ FgBlueColor
+ FgMagentaColor
+ FgCyanColor
+ FgWhiteColor
+)
+
+const (
+ BgHiBlackColor int = iota + 100
+ BgHiRedColor
+ BgHiGreenColor
+ BgHiYellowColor
+ BgHiBlueColor
+ BgHiMagentaColor
+ BgHiCyanColor
+ BgHiWhiteColor
+)
+
+const (
+ FgHiBlackColor int = iota + 90
+ FgHiRedColor
+ FgHiGreenColor
+ FgHiYellowColor
+ FgHiBlueColor
+ FgHiMagentaColor
+ FgHiCyanColor
+ FgHiWhiteColor
+)
+
+const (
+ Normal = 0
+ Bold = 1
+ UnderlineSingle = 4
+ Italic // no explicit value: repeats the previous expression, so Italic == 4 (same as UnderlineSingle)
+)
+
+type Colors []int
+
+func startFormat(seq string) string {
+ return fmt.Sprintf("%s[%sm", ESC, seq)
+}
+
+func stopFormat() string {
+ return fmt.Sprintf("%s[%dm", ESC, Normal)
+}
+
+// Making the SGR (Select Graphic Rendition) sequence.
+func makeSequence(codes []int) string {
+ codesInString := []string{}
+ for _, code := range codes {
+ codesInString = append(codesInString, strconv.Itoa(code))
+ }
+ return strings.Join(codesInString, SEP)
+}
+
+// Adding ANSI escape sequences before and after string
+func format(s string, codes interface{}) string {
+ var seq string
+
+ switch v := codes.(type) {
+
+ case string:
+ seq = v
+ case []int:
+ seq = makeSequence(v)
+ default:
+ return s
+ }
+
+ if len(seq) == 0 {
+ return s
+ }
+ return startFormat(seq) + s + stopFormat()
+}
+
+// Adding header colors (ANSI codes)
+func (t *Table) SetHeaderColor(colors ...Colors) {
+ if t.colSize != len(colors) {
+ panic("Number of header colors must be equal to number of headers.")
+ }
+ for i := 0; i < len(colors); i++ {
+ t.headerParams = append(t.headerParams, makeSequence(colors[i]))
+ }
+}
+
+// Adding column colors (ANSI codes)
+func (t *Table) SetColumnColor(colors ...Colors) {
+ if t.colSize != len(colors) {
+ panic("Number of column colors must be equal to number of headers.")
+ }
+ for i := 0; i < len(colors); i++ {
+ t.columnsParams = append(t.columnsParams, makeSequence(colors[i]))
+ }
+}
+
+// Adding footer colors (ANSI codes)
+func (t *Table) SetFooterColor(colors ...Colors) {
+ if len(t.footers) != len(colors) {
+ panic("Number of footer colors must be equal to number of footers.")
+ }
+ for i := 0; i < len(colors); i++ {
+ t.footerParams = append(t.footerParams, makeSequence(colors[i]))
+ }
+}
+
+func Color(colors ...int) []int {
+ return colors
+}
diff --git a/src/vendor/github.com/olekukonko/tablewriter/util.go b/src/vendor/github.com/olekukonko/tablewriter/util.go
new file mode 100644
index 000000000..9e8f0cbb6
--- /dev/null
+++ b/src/vendor/github.com/olekukonko/tablewriter/util.go
@@ -0,0 +1,93 @@
+// Copyright 2014 Oleku Konko All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+// This module is a Table Writer API for the Go Programming Language.
+// The protocols were written in pure Go and work on Windows and Unix systems.
+
+package tablewriter
+
+import (
+ "math"
+ "regexp"
+ "strings"
+
+ "github.com/mattn/go-runewidth"
+)
+
+var ansi = regexp.MustCompile("\033\\[(?:[0-9]{1,3}(?:;[0-9]{1,3})*)?[m|K]")
+
+func DisplayWidth(str string) int {
+ return runewidth.StringWidth(ansi.ReplaceAllLiteralString(str, ""))
+}
+
+// Simple Condition for string
+// Returns value based on condition
+func ConditionString(cond bool, valid, inValid string) string {
+ if cond {
+ return valid
+ }
+ return inValid
+}
+
+func isNumOrSpace(r rune) bool {
+ return ('0' <= r && r <= '9') || r == ' '
+}
+
+// Format Table Header
+// Replace _ , . and spaces
+func Title(name string) string {
+ origLen := len(name)
+ rs := []rune(name)
+ for i, r := range rs {
+ switch r {
+ case '_':
+ rs[i] = ' '
+ case '.':
+ // ignore floating number 0.0
+ if (i != 0 && !isNumOrSpace(rs[i-1])) || (i != len(rs)-1 && !isNumOrSpace(rs[i+1])) {
+ rs[i] = ' '
+ }
+ }
+ }
+ name = string(rs)
+ name = strings.TrimSpace(name)
+ if len(name) == 0 && origLen > 0 {
+ // Keep at least one character. This is important to preserve
+ // empty lines in multi-line headers/footers.
+ name = " "
+ }
+ return strings.ToUpper(name)
+}
+
+// Pad String
+// Attempts to place the string in the center
+func Pad(s, pad string, width int) string {
+ gap := width - DisplayWidth(s)
+ if gap > 0 {
+ gapLeft := int(math.Ceil(float64(gap) / 2)) // float division so Ceil can actually round up
+ gapRight := gap - gapLeft
+ return strings.Repeat(string(pad), gapLeft) + s + strings.Repeat(string(pad), gapRight)
+ }
+ return s
+}
+
+// Pad String Right position
+// This would place the string at the left side of the screen
+func PadRight(s, pad string, width int) string {
+ gap := width - DisplayWidth(s)
+ if gap > 0 {
+ return s + strings.Repeat(string(pad), gap)
+ }
+ return s
+}
+
+// Pad String Left position
+// This would place the string at the right side of the screen
+func PadLeft(s, pad string, width int) string {
+ gap := width - DisplayWidth(s)
+ if gap > 0 {
+ return strings.Repeat(string(pad), gap) + s
+ }
+ return s
+}
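The three padding helpers measure display cells via DisplayWidth rather than byte length, which keeps alignment correct for wide runes. A quick illustration with ASCII input:

```go
package main

import (
	"fmt"

	"github.com/olekukonko/tablewriter"
)

func main() {
	// Width is measured in display cells; padding fills the gap.
	fmt.Printf("[%s]\n", tablewriter.Pad("hi", " ", 6))      // [  hi  ]  centered
	fmt.Printf("[%s]\n", tablewriter.PadRight("hi", " ", 6)) // [hi    ]  left-aligned
	fmt.Printf("[%s]\n", tablewriter.PadLeft("hi", " ", 6))  // [    hi]  right-aligned
}
```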
diff --git a/src/vendor/github.com/olekukonko/tablewriter/wrap.go b/src/vendor/github.com/olekukonko/tablewriter/wrap.go
new file mode 100644
index 000000000..a092ee1f7
--- /dev/null
+++ b/src/vendor/github.com/olekukonko/tablewriter/wrap.go
@@ -0,0 +1,99 @@
+// Copyright 2014 Oleku Konko All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+// This module is a Table Writer API for the Go Programming Language.
+// The protocols were written in pure Go and work on Windows and Unix systems.
+
+package tablewriter
+
+import (
+ "math"
+ "strings"
+
+ "github.com/mattn/go-runewidth"
+)
+
+var (
+ nl = "\n"
+ sp = " "
+)
+
+const defaultPenalty = 1e5
+
+// WrapString wraps s into a paragraph of lines of length lim, with minimal
+// raggedness.
+func WrapString(s string, lim int) ([]string, int) {
+ words := strings.Split(strings.Replace(s, nl, sp, -1), sp)
+ var lines []string
+ max := 0
+ for _, v := range words {
+ max = runewidth.StringWidth(v)
+ if max > lim {
+ lim = max
+ }
+ }
+ for _, line := range WrapWords(words, 1, lim, defaultPenalty) {
+ lines = append(lines, strings.Join(line, sp))
+ }
+ return lines, lim
+}
+
+// WrapWords is the low-level line-breaking algorithm, useful if you need more
+// control over the details of the text wrapping process. For most uses,
+// WrapString will be sufficient and more convenient.
+//
+// WrapWords splits a list of words into lines with minimal "raggedness",
+// treating each rune as one unit, accounting for spc units between adjacent
+// words on each line, and attempting to limit lines to lim units. Raggedness
+// is the total error over all lines, where error is the square of the
+// difference of the length of the line and lim. Too-long lines (which only
+// happen when a single word is longer than lim units) have pen penalty units
+// added to the error.
+func WrapWords(words []string, spc, lim, pen int) [][]string {
+ n := len(words)
+
+ length := make([][]int, n)
+ for i := 0; i < n; i++ {
+ length[i] = make([]int, n)
+ length[i][i] = runewidth.StringWidth(words[i])
+ for j := i + 1; j < n; j++ {
+ length[i][j] = length[i][j-1] + spc + runewidth.StringWidth(words[j])
+ }
+ }
+ nbrk := make([]int, n)
+ cost := make([]int, n)
+ for i := range cost {
+ cost[i] = math.MaxInt32
+ }
+ for i := n - 1; i >= 0; i-- {
+ if length[i][n-1] <= lim {
+ cost[i] = 0
+ nbrk[i] = n
+ } else {
+ for j := i + 1; j < n; j++ {
+ d := lim - length[i][j-1]
+ c := d*d + cost[j]
+ if length[i][j-1] > lim {
+ c += pen // too-long lines get a worse penalty
+ }
+ if c < cost[i] {
+ cost[i] = c
+ nbrk[i] = j
+ }
+ }
+ }
+ }
+ var lines [][]string
+ i := 0
+ for i < n {
+ lines = append(lines, words[i:nbrk[i]])
+ i = nbrk[i]
+ }
+ return lines
+}
+
+// getLines decomposes a multiline string into a slice of strings.
+func getLines(s string) []string {
+ return strings.Split(s, nl)
+}
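WrapWords is a classic minimum-raggedness dynamic program: cost[i] holds the best total squared slack for words i..n-1, and nbrk[i] records where the next line break goes. WrapString is the usual entry point; a small sketch:

```go
package main

import (
	"fmt"

	"github.com/olekukonko/tablewriter"
)

func main() {
	lines, width := tablewriter.WrapString("the quick brown fox jumps over the lazy dog", 12)
	fmt.Println("effective limit:", width) // >= 12; raised if a single word is longer
	for _, line := range lines {
		fmt.Println(line)
	}
}
```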
diff --git a/src/vendor/github.com/opentracing/opentracing-go/.gitignore b/src/vendor/github.com/opentracing/opentracing-go/.gitignore
new file mode 100644
index 000000000..c57100a59
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/.gitignore
@@ -0,0 +1 @@
+coverage.txt
diff --git a/src/vendor/github.com/opentracing/opentracing-go/.travis.yml b/src/vendor/github.com/opentracing/opentracing-go/.travis.yml
new file mode 100644
index 000000000..8d5b75e41
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/.travis.yml
@@ -0,0 +1,20 @@
+language: go
+
+matrix:
+ include:
+ - go: "1.11.x"
+ - go: "1.12.x"
+ - go: "tip"
+ env:
+ - LINT=true
+ - COVERAGE=true
+
+install:
+ - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi
+ - go get -u github.com/stretchr/testify/...
+
+script:
+ - make test
+ - go build ./...
+ - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi
+ - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
diff --git a/src/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/src/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
new file mode 100644
index 000000000..7c14febe1
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
@@ -0,0 +1,46 @@
+Changes by Version
+==================
+
+1.1.0 (2019-03-23)
+-------------------
+
+Notable changes:
+- The library is now released under Apache 2.0 license
+- Using Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159))
+- 'golang.org/x/net/context' is replaced with 'context' from the standard library
+
+List of all changes:
+
+- Export StartSpanFromContextWithTracer (#214)
+- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201)
+- Use Set() instead of Add() in HTTPHeadersCarrier (#191)
+- Update license to Apache 2.0 (#181)
+- Replace 'golang.org/x/net/context' with 'context' (#176)
+- Port of Python opentracing/harness/api_check.py to Go (#146)
+- Fix race condition in MockSpan.Context() (#170)
+- Add PeerHostIPv4.SetString() (#155)
+- Add a Noop log field type to log to allow for optional fields (#150)
+
+
+1.0.2 (2017-04-26)
+-------------------
+
+- Add more semantic tags (#139)
+
+
+1.0.1 (2017-02-06)
+-------------------
+
+- Correct spelling in comments
+- Address race in nextMockID() (#123)
+- log: avoid panic marshaling nil error (#131)
+- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128)
+- Drop Go 1.5 that fails in Travis (#129)
+- Add convenience methods Key() and Value() to log.Field
+
+1.0.0 (2016-09-26)
+-------------------
+
+- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec)
+
diff --git a/src/vendor/github.com/opentracing/opentracing-go/LICENSE b/src/vendor/github.com/opentracing/opentracing-go/LICENSE
new file mode 100644
index 000000000..f0027349e
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016 The OpenTracing Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/src/vendor/github.com/opentracing/opentracing-go/Makefile b/src/vendor/github.com/opentracing/opentracing-go/Makefile
new file mode 100644
index 000000000..62abb63f5
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/Makefile
@@ -0,0 +1,20 @@
+.DEFAULT_GOAL := test-and-lint
+
+.PHONY: test-and-lint
+test-and-lint: test lint
+
+.PHONY: test
+test:
+ go test -v -cover -race ./...
+
+.PHONY: cover
+cover:
+ go test -v -coverprofile=coverage.txt -covermode=atomic -race ./...
+
+.PHONY: lint
+lint:
+ go fmt ./...
+ golint ./...
+ @# Run again with magic to exit non-zero if golint outputs anything.
+ @! (golint ./... | read dummy)
+ go vet ./...
diff --git a/src/vendor/github.com/opentracing/opentracing-go/README.md b/src/vendor/github.com/opentracing/opentracing-go/README.md
new file mode 100644
index 000000000..6ef1d7c9d
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/README.md
@@ -0,0 +1,171 @@
+[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge)
+
+# OpenTracing API for Go
+
+This package is a Go platform API for OpenTracing.
+
+## Required Reading
+
+In order to understand the Go platform API, one must first be familiar with the
+[OpenTracing project](https://opentracing.io) and, more specifically, its
+[terminology](https://opentracing.io/specification/).
+
+## API overview for those adding instrumentation
+
+Everyday consumers of this `opentracing` package really only need to worry
+about a couple of key abstractions: the `StartSpan` function, the `Span`
+interface, and binding a `Tracer` at `main()`-time. Here are code snippets
+demonstrating some important use cases.
+
+#### Singleton initialization
+
+The simplest starting point is `./globaltracer.go`. As early as possible, call
+
+```go
+ import "github.com/opentracing/opentracing-go"
+ import ".../some_tracing_impl"
+
+ func main() {
+ opentracing.SetGlobalTracer(
+ // tracing impl specific:
+ some_tracing_impl.New(...),
+ )
+ ...
+ }
+```
+
+#### Non-Singleton initialization
+
+If you prefer direct control to singletons, manage ownership of the
+`opentracing.Tracer` implementation explicitly.
+
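+A minimal sketch of what that can look like (`some_tracing_impl` is the same
+placeholder as above; `NewService` is a hypothetical constructor):
+
+```go
+    func main() {
+        tracer := some_tracing_impl.New(...)
+        // Hand the tracer to the components that need it rather than
+        // registering it globally.
+        svc := NewService(tracer)
+        ...
+    }
+```
+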
+#### Creating a Span given an existing Go `context.Context`
+
+If you use `context.Context` in your application, OpenTracing's Go library will
+happily rely on it for `Span` propagation. To start a new (blocking child)
+`Span`, you can use `StartSpanFromContext`.
+
+```go
+ func xyz(ctx context.Context, ...) {
+ ...
+ span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name")
+ defer span.Finish()
+ span.LogFields(
+ log.String("event", "soft error"),
+ log.String("type", "cache timeout"),
+ log.Int("waited.millis", 1500))
+ ...
+ }
+```
+
+#### Starting an empty trace by creating a "root span"
+
+It's always possible to create a "root" `Span` with no parent or other causal
+reference.
+
+```go
+ func xyz() {
+ ...
+ sp := opentracing.StartSpan("operation_name")
+ defer sp.Finish()
+ ...
+ }
+```
+
+#### Creating a (child) Span given an existing (parent) Span
+
+```go
+ func xyz(parentSpan opentracing.Span, ...) {
+ ...
+ sp := opentracing.StartSpan(
+ "operation_name",
+ opentracing.ChildOf(parentSpan.Context()))
+ defer sp.Finish()
+ ...
+ }
+```
+
+#### Serializing to the wire
+
+```go
+ func makeSomeRequest(ctx context.Context) ... {
+ if span := opentracing.SpanFromContext(ctx); span != nil {
+ httpClient := &http.Client{}
+ httpReq, _ := http.NewRequest("GET", "http://myservice/", nil)
+
+ // Transmit the span's TraceContext as HTTP headers on our
+ // outbound request.
+ opentracing.GlobalTracer().Inject(
+ span.Context(),
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(httpReq.Header))
+
+ resp, err := httpClient.Do(httpReq)
+ ...
+ }
+ ...
+ }
+```
+
+#### Deserializing from the wire
+
+```go
+ http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+ var serverSpan opentracing.Span
+ appSpecificOperationName := ...
+ wireContext, err := opentracing.GlobalTracer().Extract(
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(req.Header))
+ if err != nil {
+ // Optionally record something about err here
+ }
+
+ // Create the span referring to the RPC client if available.
+ // If wireContext == nil, a root span will be created.
+ serverSpan = opentracing.StartSpan(
+ appSpecificOperationName,
+ ext.RPCServerOption(wireContext))
+
+ defer serverSpan.Finish()
+
+ ctx := opentracing.ContextWithSpan(context.Background(), serverSpan)
+ ...
+ }
+```
+
+#### Conditionally capture a field using `log.Noop`
+
+In some situations, you may want to dynamically decide whether or not
+to log a field. For example, you may want to capture additional data,
+such as a customer ID, in non-production environments:
+
+```go
+ func Customer(order *Order) log.Field {
+ if os.Getenv("ENVIRONMENT") == "dev" {
+ return log.String("customer", order.Customer.ID)
+ }
+ return log.Noop()
+ }
+```
+
+#### Goroutine-safety
+
+The entire public API is goroutine-safe and does not require external
+synchronization.
+
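+For example, logging fields on one `Span` from several goroutines requires no
+extra locking (a minimal sketch, assuming the `log` and `sync` imports):
+
+```go
+    sp := opentracing.StartSpan("parallel_work")
+    var wg sync.WaitGroup
+    for i := 0; i < 4; i++ {
+        wg.Add(1)
+        go func(worker int) {
+            defer wg.Done()
+            sp.LogFields(log.Int("worker", worker))
+        }(i)
+    }
+    wg.Wait()
+    sp.Finish()
+```
+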
+## API pointers for those implementing a tracing system
+
+Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`.
+
+## API compatibility
+
+For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority.
+
+## Tracer test suite
+
+A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can help Tracer implementors verify that their Tracer is working correctly.
+
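+A minimal sketch of wiring a `Tracer` into the suite (assuming the harness
+package exposes `RunAPIChecks` as its godoc describes; `myTracer` is a
+hypothetical Tracer under test):
+
+```go
+    func TestTracerAPIChecks(t *testing.T) {
+        harness.RunAPIChecks(t, func() (opentracing.Tracer, func()) {
+            // return the tracer under test plus a cleanup func
+            return myTracer, func() {}
+        })
+    }
+```
+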
+## Licensing
+
+[Apache 2.0 License](./LICENSE).
diff --git a/src/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/src/vendor/github.com/opentracing/opentracing-go/globaltracer.go
new file mode 100644
index 000000000..4f7066a92
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/globaltracer.go
@@ -0,0 +1,42 @@
+package opentracing
+
+type registeredTracer struct {
+ tracer Tracer
+ isRegistered bool
+}
+
+var (
+ globalTracer = registeredTracer{NoopTracer{}, false}
+)
+
+// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by
+// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an
+// opentracing.Tracer instance) should call SetGlobalTracer as early as
+// possible in main(), prior to calling the `StartSpan` global func below.
+// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan`
+// (etc) globals are noops.
+func SetGlobalTracer(tracer Tracer) {
+ globalTracer = registeredTracer{tracer, true}
+}
+
+// GlobalTracer returns the global singleton `Tracer` implementation.
+// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop
+// implementation that drops all data handed to it.
+func GlobalTracer() Tracer {
+ return globalTracer.tracer
+}
+
+// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`.
+func StartSpan(operationName string, opts ...StartSpanOption) Span {
+ return globalTracer.tracer.StartSpan(operationName, opts...)
+}
+
+// InitGlobalTracer is deprecated. Please use SetGlobalTracer.
+func InitGlobalTracer(tracer Tracer) {
+ SetGlobalTracer(tracer)
+}
+
+// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered
+func IsGlobalTracerRegistered() bool {
+ return globalTracer.isRegistered
+}
diff --git a/src/vendor/github.com/opentracing/opentracing-go/gocontext.go b/src/vendor/github.com/opentracing/opentracing-go/gocontext.go
new file mode 100644
index 000000000..08c00c04e
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/gocontext.go
@@ -0,0 +1,60 @@
+package opentracing
+
+import "context"
+
+type contextKey struct{}
+
+var activeSpanKey = contextKey{}
+
+// ContextWithSpan returns a new `context.Context` that holds a reference to
+// `span`'s SpanContext.
+func ContextWithSpan(ctx context.Context, span Span) context.Context {
+ return context.WithValue(ctx, activeSpanKey, span)
+}
+
+// SpanFromContext returns the `Span` previously associated with `ctx`, or
+// `nil` if no such `Span` could be found.
+//
+// NOTE: context.Context != SpanContext: the former is Go's intra-process
+// context propagation mechanism, and the latter houses OpenTracing's per-Span
+// identity and baggage information.
+func SpanFromContext(ctx context.Context) Span {
+ val := ctx.Value(activeSpanKey)
+ if sp, ok := val.(Span); ok {
+ return sp
+ }
+ return nil
+}
+
+// StartSpanFromContext starts and returns a Span with `operationName`, using
+// any Span found within `ctx` as a ChildOfRef. If no such parent could be
+// found, StartSpanFromContext creates a root (parentless) Span.
+//
+// The second return value is a context.Context object built around the
+// returned Span.
+//
+// Example usage:
+//
+// SomeFunction(ctx context.Context, ...) {
+// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
+// defer sp.Finish()
+// ...
+// }
+func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+ return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
+}
+
+// StartSpanFromContextWithTracer starts and returns a span with `operationName`
+// using a span found within the context as a ChildOfRef. If no such span
+// exists, it creates a root span. It also returns a context.Context object built
+// around the returned span.
+//
+// Its behavior is identical to StartSpanFromContext except that it takes an explicit
+// tracer as opposed to using the global tracer.
+func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+ if parentSpan := SpanFromContext(ctx); parentSpan != nil {
+ opts = append(opts, ChildOf(parentSpan.Context()))
+ }
+ span := tracer.StartSpan(operationName, opts...)
+ return span, ContextWithSpan(ctx, span)
+}
diff --git a/src/vendor/github.com/opentracing/opentracing-go/log/field.go b/src/vendor/github.com/opentracing/opentracing-go/log/field.go
new file mode 100644
index 000000000..50feea341
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/log/field.go
@@ -0,0 +1,269 @@
+package log
+
+import (
+ "fmt"
+ "math"
+)
+
+type fieldType int
+
+const (
+ stringType fieldType = iota
+ boolType
+ intType
+ int32Type
+ uint32Type
+ int64Type
+ uint64Type
+ float32Type
+ float64Type
+ errorType
+ objectType
+ lazyLoggerType
+ noopType
+)
+
+// Field instances are constructed via Bool, String, and so on.
+// Tracing implementations may then handle them via the Field.Marshal
+// method.
+//
+// "heavily influenced by" (i.e., partially stolen from)
+// https://github.com/uber-go/zap
+type Field struct {
+ key string
+ fieldType fieldType
+ numericVal int64
+ stringVal string
+ interfaceVal interface{}
+}
+
+// String adds a string-valued key:value pair to a Span.LogFields() record
+func String(key, val string) Field {
+ return Field{
+ key: key,
+ fieldType: stringType,
+ stringVal: val,
+ }
+}
+
+// Bool adds a bool-valued key:value pair to a Span.LogFields() record
+func Bool(key string, val bool) Field {
+ var numericVal int64
+ if val {
+ numericVal = 1
+ }
+ return Field{
+ key: key,
+ fieldType: boolType,
+ numericVal: numericVal,
+ }
+}
+
+// Int adds an int-valued key:value pair to a Span.LogFields() record
+func Int(key string, val int) Field {
+ return Field{
+ key: key,
+ fieldType: intType,
+ numericVal: int64(val),
+ }
+}
+
+// Int32 adds an int32-valued key:value pair to a Span.LogFields() record
+func Int32(key string, val int32) Field {
+ return Field{
+ key: key,
+ fieldType: int32Type,
+ numericVal: int64(val),
+ }
+}
+
+// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
+func Int64(key string, val int64) Field {
+ return Field{
+ key: key,
+ fieldType: int64Type,
+ numericVal: val,
+ }
+}
+
+// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record
+func Uint32(key string, val uint32) Field {
+ return Field{
+ key: key,
+ fieldType: uint32Type,
+ numericVal: int64(val),
+ }
+}
+
+// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
+func Uint64(key string, val uint64) Field {
+ return Field{
+ key: key,
+ fieldType: uint64Type,
+ numericVal: int64(val),
+ }
+}
+
+// Float32 adds a float32-valued key:value pair to a Span.LogFields() record
+func Float32(key string, val float32) Field {
+ return Field{
+ key: key,
+ fieldType: float32Type,
+ numericVal: int64(math.Float32bits(val)),
+ }
+}
+
+// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
+func Float64(key string, val float64) Field {
+ return Field{
+ key: key,
+ fieldType: float64Type,
+ numericVal: int64(math.Float64bits(val)),
+ }
+}
+
+// Error adds an error with the key "error" to a Span.LogFields() record
+func Error(err error) Field {
+ return Field{
+ key: "error",
+ fieldType: errorType,
+ interfaceVal: err,
+ }
+}
+
+// Object adds an object-valued key:value pair to a Span.LogFields() record
+func Object(key string, obj interface{}) Field {
+ return Field{
+ key: key,
+ fieldType: objectType,
+ interfaceVal: obj,
+ }
+}
+
+// LazyLogger allows for user-defined, late-bound logging of arbitrary data
+type LazyLogger func(fv Encoder)
+
+// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing
+// implementation will call the LazyLogger function at an indefinite time in
+// the future (after Lazy() returns).
+func Lazy(ll LazyLogger) Field {
+ return Field{
+ fieldType: lazyLoggerType,
+ interfaceVal: ll,
+ }
+}
+
+// Noop creates a no-op log field that should be ignored by the tracer.
+// It can be used to capture optional fields, for example those that should
+// only be logged in non-production environment:
+//
+// func customerField(order *Order) log.Field {
+// if os.Getenv("ENVIRONMENT") == "dev" {
+// return log.String("customer", order.Customer.ID)
+// }
+// return log.Noop()
+// }
+//
+// span.LogFields(log.String("event", "purchase"), customerField(order))
+//
+func Noop() Field {
+ return Field{
+ fieldType: noopType,
+ }
+}
+
+// Encoder allows access to the contents of a Field (via a call to
+// Field.Marshal).
+//
+// Tracer implementations typically provide an implementation of Encoder;
+// OpenTracing callers typically do not need to concern themselves with it.
+type Encoder interface {
+ EmitString(key, value string)
+ EmitBool(key string, value bool)
+ EmitInt(key string, value int)
+ EmitInt32(key string, value int32)
+ EmitInt64(key string, value int64)
+ EmitUint32(key string, value uint32)
+ EmitUint64(key string, value uint64)
+ EmitFloat32(key string, value float32)
+ EmitFloat64(key string, value float64)
+ EmitObject(key string, value interface{})
+ EmitLazyLogger(value LazyLogger)
+}
+
+// Marshal passes a Field instance through to the appropriate
+// field-type-specific method of an Encoder.
+func (lf Field) Marshal(visitor Encoder) {
+ switch lf.fieldType {
+ case stringType:
+ visitor.EmitString(lf.key, lf.stringVal)
+ case boolType:
+ visitor.EmitBool(lf.key, lf.numericVal != 0)
+ case intType:
+ visitor.EmitInt(lf.key, int(lf.numericVal))
+ case int32Type:
+ visitor.EmitInt32(lf.key, int32(lf.numericVal))
+ case int64Type:
+ visitor.EmitInt64(lf.key, int64(lf.numericVal))
+ case uint32Type:
+ visitor.EmitUint32(lf.key, uint32(lf.numericVal))
+ case uint64Type:
+ visitor.EmitUint64(lf.key, uint64(lf.numericVal))
+ case float32Type:
+ visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
+ case float64Type:
+ visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
+ case errorType:
+ if err, ok := lf.interfaceVal.(error); ok {
+ visitor.EmitString(lf.key, err.Error())
+ } else {
+ visitor.EmitString(lf.key, "")
+ }
+ case objectType:
+ visitor.EmitObject(lf.key, lf.interfaceVal)
+ case lazyLoggerType:
+ visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
+ case noopType:
+ // intentionally left blank
+ }
+}
+
+// Key returns the field's key.
+func (lf Field) Key() string {
+ return lf.key
+}
+
+// Value returns the field's value as interface{}.
+func (lf Field) Value() interface{} {
+ switch lf.fieldType {
+ case stringType:
+ return lf.stringVal
+ case boolType:
+ return lf.numericVal != 0
+ case intType:
+ return int(lf.numericVal)
+ case int32Type:
+ return int32(lf.numericVal)
+ case int64Type:
+ return int64(lf.numericVal)
+ case uint32Type:
+ return uint32(lf.numericVal)
+ case uint64Type:
+ return uint64(lf.numericVal)
+ case float32Type:
+ return math.Float32frombits(uint32(lf.numericVal))
+ case float64Type:
+ return math.Float64frombits(uint64(lf.numericVal))
+ case errorType, objectType, lazyLoggerType:
+ return lf.interfaceVal
+ case noopType:
+ return nil
+ default:
+ return nil
+ }
+}
+
+// String returns a string representation of the key and value.
+func (lf Field) String() string {
+ return fmt.Sprint(lf.key, ":", lf.Value())
+}
diff --git a/src/vendor/github.com/opentracing/opentracing-go/log/util.go b/src/vendor/github.com/opentracing/opentracing-go/log/util.go
new file mode 100644
index 000000000..3832feb5c
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/log/util.go
@@ -0,0 +1,54 @@
+package log
+
+import "fmt"
+
+// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice
+// a la Span.LogFields().
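+//
+// A hypothetical example of the conversion:
+//
+//	fields, err := InterleavedKVToFields("event", "purchase", "waited.millis", 1500)
+//	// on success, fields is equivalent to
+//	// []Field{String("event", "purchase"), Int("waited.millis", 1500)}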
+func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) {
+ if len(keyValues)%2 != 0 {
+ return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues))
+ }
+ fields := make([]Field, len(keyValues)/2)
+ for i := 0; i*2 < len(keyValues); i++ {
+ key, ok := keyValues[i*2].(string)
+ if !ok {
+ return nil, fmt.Errorf(
+ "non-string key (pair #%d): %T",
+ i, keyValues[i*2])
+ }
+ switch typedVal := keyValues[i*2+1].(type) {
+ case bool:
+ fields[i] = Bool(key, typedVal)
+ case string:
+ fields[i] = String(key, typedVal)
+ case int:
+ fields[i] = Int(key, typedVal)
+ case int8:
+ fields[i] = Int32(key, int32(typedVal))
+ case int16:
+ fields[i] = Int32(key, int32(typedVal))
+ case int32:
+ fields[i] = Int32(key, typedVal)
+ case int64:
+ fields[i] = Int64(key, typedVal)
+ case uint:
+ fields[i] = Uint64(key, uint64(typedVal))
+ case uint64:
+ fields[i] = Uint64(key, typedVal)
+ case uint8:
+ fields[i] = Uint32(key, uint32(typedVal))
+ case uint16:
+ fields[i] = Uint32(key, uint32(typedVal))
+ case uint32:
+ fields[i] = Uint32(key, typedVal)
+ case float32:
+ fields[i] = Float32(key, typedVal)
+ case float64:
+ fields[i] = Float64(key, typedVal)
+ default:
+ // When in doubt, coerce to a string
+ fields[i] = String(key, fmt.Sprint(typedVal))
+ }
+ }
+ return fields, nil
+}
diff --git a/src/vendor/github.com/opentracing/opentracing-go/noop.go b/src/vendor/github.com/opentracing/opentracing-go/noop.go
new file mode 100644
index 000000000..0d32f692c
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/noop.go
@@ -0,0 +1,64 @@
+package opentracing
+
+import "github.com/opentracing/opentracing-go/log"
+
+// A NoopTracer is a trivial, minimum overhead implementation of Tracer
+// for which all operations are no-ops.
+//
+// The primary use of this implementation is in libraries, such as RPC
+// frameworks, that make tracing an optional feature controlled by the
+// end user. A no-op implementation allows said libraries to use it
+// as the default Tracer and to write instrumentation that does
+// not need to keep checking if the tracer instance is nil.
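+//
+// A minimal sketch of that pattern (the Client type is hypothetical):
+//
+//	type Client struct{ tracer Tracer }
+//
+//	func NewClient() *Client {
+//		// default to a no-op tracer; callers may install a real one later
+//		return &Client{tracer: NoopTracer{}}
+//	}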
+//
+// For the same reason, the NoopTracer is the default "global" tracer
+// (see GlobalTracer and SetGlobalTracer functions).
+//
+// WARNING: NoopTracer does not support baggage propagation.
+type NoopTracer struct{}
+
+type noopSpan struct{}
+type noopSpanContext struct{}
+
+var (
+ defaultNoopSpanContext = noopSpanContext{}
+ defaultNoopSpan = noopSpan{}
+ defaultNoopTracer = NoopTracer{}
+)
+
+const (
+ emptyString = ""
+)
+
+// noopSpanContext:
+func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
+
+// noopSpan:
+func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext }
+func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan }
+func (n noopSpan) BaggageItem(key string) string { return emptyString }
+func (n noopSpan) SetTag(key string, value interface{}) Span { return n }
+func (n noopSpan) LogFields(fields ...log.Field) {}
+func (n noopSpan) LogKV(keyVals ...interface{}) {}
+func (n noopSpan) Finish() {}
+func (n noopSpan) FinishWithOptions(opts FinishOptions) {}
+func (n noopSpan) SetOperationName(operationName string) Span { return n }
+func (n noopSpan) Tracer() Tracer { return defaultNoopTracer }
+func (n noopSpan) LogEvent(event string) {}
+func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
+func (n noopSpan) Log(data LogData) {}
+
+// StartSpan belongs to the Tracer interface.
+func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span {
+ return defaultNoopSpan
+}
+
+// Inject belongs to the Tracer interface.
+func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error {
+ return nil
+}
+
+// Extract belongs to the Tracer interface.
+func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) {
+ return nil, ErrSpanContextNotFound
+}
diff --git a/src/vendor/github.com/opentracing/opentracing-go/propagation.go b/src/vendor/github.com/opentracing/opentracing-go/propagation.go
new file mode 100644
index 000000000..b0c275eb0
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/propagation.go
@@ -0,0 +1,176 @@
+package opentracing
+
+import (
+ "errors"
+ "net/http"
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// CORE PROPAGATION INTERFACES:
+///////////////////////////////////////////////////////////////////////////////
+
+var (
+ // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or
+ // Tracer.Extract() is not recognized by the Tracer implementation.
+ ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format")
+
+ // ErrSpanContextNotFound occurs when the `carrier` passed to
+ // Tracer.Extract() is valid and uncorrupted but has insufficient
+ // information to extract a SpanContext.
+ ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier")
+
+ // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to
+ // operate on a SpanContext which it is not prepared to handle (for
+ // example, since it was created by a different tracer implementation).
+ ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer")
+
+ // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract()
+ // implementations expect a different type of `carrier` than they are
+ // given.
+ ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier")
+
+ // ErrSpanContextCorrupted occurs when the `carrier` passed to
+ // Tracer.Extract() is of the expected type but is corrupted.
+ ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier")
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// BUILTIN PROPAGATION FORMATS:
+///////////////////////////////////////////////////////////////////////////////
+
+// BuiltinFormat is used to demarcate the values within package `opentracing`
+// that are intended for use with the Tracer.Inject() and Tracer.Extract()
+// methods.
+type BuiltinFormat byte
+
+const (
+ // Binary represents SpanContexts as opaque binary data.
+ //
+ // For Tracer.Inject(): the carrier must be an `io.Writer`.
+ //
+ // For Tracer.Extract(): the carrier must be an `io.Reader`.
+ Binary BuiltinFormat = iota
+
+ // TextMap represents SpanContexts as key:value string pairs.
+ //
+ // Unlike HTTPHeaders, the TextMap format does not restrict the key or
+ // value character sets in any way.
+ //
+ // For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+ //
+ // For Tracer.Extract(): the carrier must be a `TextMapReader`.
+ TextMap
+
+ // HTTPHeaders represents SpanContexts as HTTP header string pairs.
+ //
+ // Unlike TextMap, the HTTPHeaders format requires that the keys and values
+ // be valid as HTTP headers as-is (i.e., character casing may be unstable
+ // and special characters are disallowed in keys, values should be
+ // URL-escaped, etc).
+ //
+ // For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+ //
+ // For Tracer.Extract(): the carrier must be a `TextMapReader`.
+ //
+ // See HTTPHeadersCarrier for an implementation of both TextMapWriter
+ // and TextMapReader that defers to an http.Header instance for storage.
+ // For example, Inject():
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // err := span.Tracer().Inject(
+ // span.Context(), opentracing.HTTPHeaders, carrier)
+ //
+ // Or Extract():
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // clientContext, err := tracer.Extract(
+ // opentracing.HTTPHeaders, carrier)
+ //
+ HTTPHeaders
+)
+
+// TextMapWriter is the Inject() carrier for the TextMap builtin format. With
+// it, the caller can encode a SpanContext for propagation as entries in a map
+// of unicode strings.
+type TextMapWriter interface {
+ // Set a key:value pair to the carrier. Multiple calls to Set() for the
+ // same key leads to undefined behavior.
+ //
+ // NOTE: The backing store for the TextMapWriter may contain data unrelated
+ // to SpanContext. As such, Inject() and Extract() implementations that
+ // call the TextMapWriter and TextMapReader interfaces must agree on a
+ // prefix or other convention to distinguish their own key:value pairs.
+ Set(key, val string)
+}
+
+// TextMapReader is the Extract() carrier for the TextMap builtin format. With it,
+// the caller can decode a propagated SpanContext as entries in a map of
+// unicode strings.
+type TextMapReader interface {
+ // ForeachKey returns TextMap contents via repeated calls to the `handler`
+ // function. If any call to `handler` returns a non-nil error, ForeachKey
+ // terminates and returns that error.
+ //
+ // NOTE: The backing store for the TextMapReader may contain data unrelated
+ // to SpanContext. As such, Inject() and Extract() implementations that
+ // call the TextMapWriter and TextMapReader interfaces must agree on a
+ // prefix or other convention to distinguish their own key:value pairs.
+ //
+ // The "foreach" callback pattern reduces unnecessary copying in some cases
+ // and also allows implementations to hold locks while the map is read.
+ ForeachKey(handler func(key, val string) error) error
+}
+
+// TextMapCarrier allows the use of regular map[string]string
+// as both TextMapWriter and TextMapReader.
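+//
+// For example (a minimal sketch, error handling elided):
+//
+//	carrier := opentracing.TextMapCarrier{}
+//	_ = tracer.Inject(span.Context(), opentracing.TextMap, carrier)
+//	// carrier now holds the SpanContext as plain key:value strings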
+type TextMapCarrier map[string]string
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
+ for k, v := range c {
+ if err := handler(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Set implements Set() of opentracing.TextMapWriter
+func (c TextMapCarrier) Set(key, val string) {
+ c[key] = val
+}
+
+// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader.
+//
+// Example usage for server side:
+//
+// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+//
+// Example usage for client side:
+//
+// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+// err := tracer.Inject(
+// span.Context(),
+// opentracing.HTTPHeaders,
+// carrier)
+//
+type HTTPHeadersCarrier http.Header
+
+// Set conforms to the TextMapWriter interface.
+func (c HTTPHeadersCarrier) Set(key, val string) {
+ h := http.Header(c)
+ h.Set(key, val)
+}
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
+ for k, vals := range c {
+ for _, v := range vals {
+ if err := handler(k, v); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/src/vendor/github.com/opentracing/opentracing-go/span.go b/src/vendor/github.com/opentracing/opentracing-go/span.go
new file mode 100644
index 000000000..0d3fb5341
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/span.go
@@ -0,0 +1,189 @@
+package opentracing
+
+import (
+ "time"
+
+ "github.com/opentracing/opentracing-go/log"
+)
+
+// SpanContext represents Span state that must propagate to descendant Spans and across process
+// boundaries (e.g., a <trace_id, span_id, baggage> tuple).
+type SpanContext interface {
+ // ForeachBaggageItem grants access to all baggage items stored in the
+ // SpanContext.
+ // The handler function will be called for each baggage key/value pair.
+ // The ordering of items is not guaranteed.
+ //
+ // The bool return value indicates if the handler wants to continue iterating
+ // through the rest of the baggage items; for example if the handler is trying to
+ // find some baggage item by pattern matching the name, it can return false
+ // as soon as the item is found to stop further iterations.
+ ForeachBaggageItem(handler func(k, v string) bool)
+}
+
+// Span represents an active, un-finished span in the OpenTracing system.
+//
+// Spans are created by the Tracer interface.
+type Span interface {
+ // Sets the end timestamp and finalizes Span state.
+ //
+ // With the exception of calls to Context() (which are always allowed),
+ // Finish() must be the last call made to any span instance, and to do
+ // otherwise leads to undefined behavior.
+ Finish()
+ // FinishWithOptions is like Finish() but with explicit control over
+ // timestamps and log data.
+ FinishWithOptions(opts FinishOptions)
+
+ // Context() yields the SpanContext for this Span. Note that the return
+ // value of Context() is still valid after a call to Span.Finish(), as is
+ // a call to Span.Context() after a call to Span.Finish().
+ Context() SpanContext
+
+ // Sets or changes the operation name.
+ //
+ // Returns a reference to this Span for chaining.
+ SetOperationName(operationName string) Span
+
+ // Adds a tag to the span.
+ //
+ // If there is a pre-existing tag set for `key`, it is overwritten.
+ //
+ // Tag values can be numeric types, strings, or bools. The behavior of
+ // other tag value types is undefined at the OpenTracing level. If a
+ // tracing system does not know how to handle a particular value type, it
+ // may ignore the tag, but shall not panic.
+ //
+ // Returns a reference to this Span for chaining.
+ SetTag(key string, value interface{}) Span
+
+ // LogFields is an efficient and type-checked way to record key:value
+ // logging data about a Span, though the programming interface is a little
+ // more verbose than LogKV(). Here's an example:
+ //
+ // span.LogFields(
+ // log.String("event", "soft error"),
+ // log.String("type", "cache timeout"),
+ // log.Int("waited.millis", 1500))
+ //
+ // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData.
+ LogFields(fields ...log.Field)
+
+ // LogKV is a concise, readable way to record key:value logging data about
+ // a Span, though unfortunately this also makes it less efficient and less
+ // type-safe than LogFields(). Here's an example:
+ //
+ // span.LogKV(
+ // "event", "soft error",
+ // "type", "cache timeout",
+ // "waited.millis", 1500)
+ //
+ // For LogKV (as opposed to LogFields()), the parameters must appear as
+ // key-value pairs, like
+ //
+ // span.LogKV(key1, val1, key2, val2, key3, val3, ...)
+ //
+ // The keys must all be strings. The values may be strings, numeric types,
+ // bools, Go error instances, or arbitrary structs.
+ //
+ // (Note to implementors: consider the log.InterleavedKVToFields() helper)
+ LogKV(alternatingKeyValues ...interface{})
+
+ // SetBaggageItem sets a key:value pair on this Span and its SpanContext
+ // that also propagates to descendants of this Span.
+ //
+ // SetBaggageItem() enables powerful functionality given a full-stack
+ // opentracing integration (e.g., arbitrary application data from a mobile
+ // app can make it, transparently, all the way into the depths of a storage
+ // system), and with it some powerful costs: use this feature with care.
+ //
+ // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to
+ // *future* causal descendants of the associated Span.
+ //
+ // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and
+ // value is copied into every local *and remote* child of the associated
+ // Span, and that can add up to a lot of network and cpu overhead.
+ //
+ // Returns a reference to this Span for chaining.
+ SetBaggageItem(restrictedKey, value string) Span
+
+ // Gets the value for a baggage item given its key. Returns the empty string
+ // if the value isn't found in this Span.
+ BaggageItem(restrictedKey string) string
+
+ // Provides access to the Tracer that created this Span.
+ Tracer() Tracer
+
+ // Deprecated: use LogFields or LogKV
+ LogEvent(event string)
+ // Deprecated: use LogFields or LogKV
+ LogEventWithPayload(event string, payload interface{})
+ // Deprecated: use LogFields or LogKV
+ Log(data LogData)
+}
+
+// LogRecord is data associated with a single Span log. Every LogRecord
+// instance must specify at least one Field.
+type LogRecord struct {
+ Timestamp time.Time
+ Fields []log.Field
+}
+
+// FinishOptions allows Span.FinishWithOptions callers to override the finish
+// timestamp and provide log data via a bulk interface.
+type FinishOptions struct {
+ // FinishTime overrides the Span's finish time, or implicitly becomes
+ // time.Now() if FinishTime.IsZero().
+ //
+ // FinishTime must resolve to a timestamp that's >= the Span's StartTime
+ // (per StartSpanOptions).
+ FinishTime time.Time
+
+ // LogRecords allows the caller to specify the contents of many LogFields()
+ // calls with a single slice. May be nil.
+ //
+ // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must
+ // be set explicitly). Also, they must be >= the Span's start timestamp and
+ // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the
+ // behavior of FinishWithOptions() is undefined.
+ //
+ // If specified, the caller hands off ownership of LogRecords at
+ // FinishWithOptions() invocation time.
+ //
+ // If specified, the (deprecated) BulkLogData must be nil or empty.
+ LogRecords []LogRecord
+
+ // BulkLogData is DEPRECATED.
+ BulkLogData []LogData
+}
+
+// LogData is DEPRECATED
+type LogData struct {
+ Timestamp time.Time
+ Event string
+ Payload interface{}
+}
+
+// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord
+func (ld *LogData) ToLogRecord() LogRecord {
+ var literalTimestamp time.Time
+ if ld.Timestamp.IsZero() {
+ literalTimestamp = time.Now()
+ } else {
+ literalTimestamp = ld.Timestamp
+ }
+ rval := LogRecord{
+ Timestamp: literalTimestamp,
+ }
+ if ld.Payload == nil {
+ rval.Fields = []log.Field{
+ log.String("event", ld.Event),
+ }
+ } else {
+ rval.Fields = []log.Field{
+ log.String("event", ld.Event),
+ log.Object("payload", ld.Payload),
+ }
+ }
+ return rval
+}
diff --git a/src/vendor/github.com/opentracing/opentracing-go/tracer.go b/src/vendor/github.com/opentracing/opentracing-go/tracer.go
new file mode 100644
index 000000000..715f0cedf
--- /dev/null
+++ b/src/vendor/github.com/opentracing/opentracing-go/tracer.go
@@ -0,0 +1,304 @@
+package opentracing
+
+import "time"
+
+// Tracer is a simple, thin interface for Span creation and SpanContext
+// propagation.
+type Tracer interface {
+
+ // Create, start, and return a new Span with the given `operationName` and
+ // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows
+ // from the "functional options" pattern, per
+ // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)
+ //
+ // A Span with no SpanReference options (e.g., opentracing.ChildOf() or
+ // opentracing.FollowsFrom()) becomes the root of its own trace.
+ //
+ // Examples:
+ //
+ // var tracer opentracing.Tracer = ...
+ //
+ // // The root-span case:
+ // sp := tracer.StartSpan("GetFeed")
+ //
+ // // The vanilla child span case:
+ // sp := tracer.StartSpan(
+ // "GetFeed",
+ // opentracing.ChildOf(parentSpan.Context()))
+ //
+ // // All the bells and whistles:
+ // sp := tracer.StartSpan(
+ // "GetFeed",
+ // opentracing.ChildOf(parentSpan.Context()),
+ // opentracing.Tag{"user_agent", loggedReq.UserAgent},
+ // opentracing.StartTime(loggedReq.Timestamp),
+ // )
+ //
+ StartSpan(operationName string, opts ...StartSpanOption) Span
+
+ // Inject() takes the `sm` SpanContext instance and injects it for
+ // propagation within `carrier`. The actual type of `carrier` depends on
+ // the value of `format`.
+ //
+ // OpenTracing defines a common set of `format` values (see BuiltinFormat),
+ // and each has an expected carrier type.
+ //
+ // Other packages may declare their own `format` values, much like the keys
+ // used by `context.Context` (see https://godoc.org/context#WithValue).
+ //
+ // Example usage (sans error handling):
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // err := tracer.Inject(
+ // span.Context(),
+ // opentracing.HTTPHeaders,
+ // carrier)
+ //
+ // NOTE: All opentracing.Tracer implementations MUST support all
+ // BuiltinFormats.
+ //
+ // Implementations may return opentracing.ErrUnsupportedFormat if `format`
+ // is not supported by (or not known by) the implementation.
+ //
+ // Implementations may return opentracing.ErrInvalidCarrier or any other
+ // implementation-specific error if the format is supported but injection
+ // fails anyway.
+ //
+ // See Tracer.Extract().
+ Inject(sm SpanContext, format interface{}, carrier interface{}) error
+
+ // Extract() returns a SpanContext instance given `format` and `carrier`.
+ //
+ // OpenTracing defines a common set of `format` values (see BuiltinFormat),
+ // and each has an expected carrier type.
+ //
+ // Other packages may declare their own `format` values, much like the keys
+ // used by `context.Context` (see
+ // https://godoc.org/golang.org/x/net/context#WithValue).
+ //
+ // Example usage (with StartSpan):
+ //
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+ //
+ // // ... assuming the ultimate goal here is to resume the trace with a
+ // // server-side Span:
+ // var serverSpan opentracing.Span
+ // if err == nil {
+ // span = tracer.StartSpan(
+ // rpcMethodName, ext.RPCServerOption(clientContext))
+ // } else {
+ // span = tracer.StartSpan(rpcMethodName)
+ // }
+ //
+ //
+ // NOTE: All opentracing.Tracer implementations MUST support all
+ // BuiltinFormats.
+ //
+ // Return values:
+ // - A successful Extract returns a SpanContext instance and a nil error
+ // - If there was simply no SpanContext to extract in `carrier`, Extract()
+ // returns (nil, opentracing.ErrSpanContextNotFound)
+ // - If `format` is unsupported or unrecognized, Extract() returns (nil,
+ // opentracing.ErrUnsupportedFormat)
+ // - If there are more fundamental problems with the `carrier` object,
+ // Extract() may return opentracing.ErrInvalidCarrier,
+ // opentracing.ErrSpanContextCorrupted, or implementation-specific
+ // errors.
+ //
+ // See Tracer.Inject().
+ Extract(format interface{}, carrier interface{}) (SpanContext, error)
+}
+
+// StartSpanOptions gives Tracer.StartSpan() callers and implementors a
+// mechanism to override the start timestamp, specify Span References, and make
+// a single Tag or multiple Tags available at Span start time.
+//
+// StartSpan() callers should look at the StartSpanOption interface and
+// implementations available in this package.
+//
+// Tracer implementations can convert a slice of `StartSpanOption` instances
+// into a `StartSpanOptions` struct like so:
+//
+// func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+// sso := opentracing.StartSpanOptions{}
+// for _, o := range opts {
+// o.Apply(&sso)
+// }
+// ...
+// }
+//
+type StartSpanOptions struct {
+ // Zero or more causal references to other Spans (via their SpanContext).
+ // If empty, start a "root" Span (i.e., start a new trace).
+ References []SpanReference
+
+ // StartTime overrides the Span's start time, or implicitly becomes
+ // time.Now() if StartTime.IsZero().
+ StartTime time.Time
+
+ // Tags may have zero or more entries; the restrictions on map values are
+ // identical to those for Span.SetTag(). May be nil.
+ //
+ // If specified, the caller hands off ownership of Tags at
+ // StartSpan() invocation time.
+ Tags map[string]interface{}
+}
+
+// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
+//
+// StartSpanOption borrows from the "functional options" pattern, per
+// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type StartSpanOption interface {
+ Apply(*StartSpanOptions)
+}
+
+// SpanReferenceType is an enum type describing different categories of
+// relationships between two Spans. If Span-2 refers to Span-1, the
+// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
+// ChildOfRef means that Span-1 created Span-2.
+//
+// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
+// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
+// or Span-2 may be sitting in a distributed queue behind Span-1.
+type SpanReferenceType int
+
+const (
+ // ChildOfRef refers to a parent Span that caused *and* somehow depends
+ // upon the new child Span. Often (but not always), the parent Span cannot
+ // finish until the child Span does.
+ //
+	// A timing diagram for a ChildOfRef that's blocked on the new Span:
+ //
+ // [-Parent Span---------]
+ // [-Child Span----]
+ //
+ // See http://opentracing.io/spec/
+ //
+ // See opentracing.ChildOf()
+ ChildOfRef SpanReferenceType = iota
+
+ // FollowsFromRef refers to a parent Span that does not depend in any way
+ // on the result of the new child Span. For instance, one might use
+ // FollowsFromRefs to describe pipeline stages separated by queues,
+ // or a fire-and-forget cache insert at the tail end of a web request.
+ //
+ // A FollowsFromRef Span is part of the same logical trace as the new Span:
+ // i.e., the new Span is somehow caused by the work of its FollowsFromRef.
+ //
+ // All of the following could be valid timing diagrams for children that
+ // "FollowFrom" a parent.
+ //
+ // [-Parent Span-] [-Child Span-]
+ //
+ //
+ // [-Parent Span--]
+ // [-Child Span-]
+ //
+ //
+ // [-Parent Span-]
+ // [-Child Span-]
+ //
+ // See http://opentracing.io/spec/
+ //
+ // See opentracing.FollowsFrom()
+ FollowsFromRef
+)
+
+// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
+// referenced SpanContext. See the SpanReferenceType documentation for
+// supported relationships. If SpanReference is created with
+// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
+// syntax for starting spans:
+//
+// sc, _ := tracer.Extract(someFormat, someCarrier)
+// span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
+//
+// The `ChildOf(sc)` option above will not panic if sc == nil, it will just
+// not add the parent span reference to the options.
+type SpanReference struct {
+ Type SpanReferenceType
+ ReferencedContext SpanContext
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (r SpanReference) Apply(o *StartSpanOptions) {
+ if r.ReferencedContext != nil {
+ o.References = append(o.References, r)
+ }
+}
+
+// ChildOf returns a StartSpanOption pointing to a dependent parent span.
+// If sc == nil, the option has no effect.
+//
+// See ChildOfRef, SpanReference
+func ChildOf(sc SpanContext) SpanReference {
+ return SpanReference{
+ Type: ChildOfRef,
+ ReferencedContext: sc,
+ }
+}
+
+// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
+// the child Span but does not directly depend on its result in any way.
+// If sc == nil, the option has no effect.
+//
+// See FollowsFromRef, SpanReference
+func FollowsFrom(sc SpanContext) SpanReference {
+ return SpanReference{
+ Type: FollowsFromRef,
+ ReferencedContext: sc,
+ }
+}
+
+// StartTime is a StartSpanOption that sets an explicit start timestamp for the
+// new Span.
+type StartTime time.Time
+
+// Apply satisfies the StartSpanOption interface.
+func (t StartTime) Apply(o *StartSpanOptions) {
+ o.StartTime = time.Time(t)
+}
+
+// Tags are a generic map from an arbitrary string key to an opaque value type.
+// The underlying tracing system is responsible for interpreting and
+// serializing the values.
+type Tags map[string]interface{}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tags) Apply(o *StartSpanOptions) {
+ if o.Tags == nil {
+ o.Tags = make(map[string]interface{})
+ }
+ for k, v := range t {
+ o.Tags[k] = v
+ }
+}
+
+// Tag may be passed as a StartSpanOption to add a tag to new spans,
+// or its Set method may be used to apply the tag to an existing Span,
+// for example:
+//
+// tracer.StartSpan("opName", Tag{"Key", value})
+//
+// or
+//
+// Tag{"key", value}.Set(span)
+type Tag struct {
+ Key string
+ Value interface{}
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tag) Apply(o *StartSpanOptions) {
+ if o.Tags == nil {
+ o.Tags = make(map[string]interface{})
+ }
+ o.Tags[t.Key] = t.Value
+}
+
+// Set applies the tag to an existing Span.
+func (t Tag) Set(s Span) {
+ s.SetTag(t.Key, t.Value)
+}
diff --git a/src/vendor/github.com/spf13/pflag/bytes.go b/src/vendor/github.com/spf13/pflag/bytes.go
index 12c58db9f..67d530457 100644
--- a/src/vendor/github.com/spf13/pflag/bytes.go
+++ b/src/vendor/github.com/spf13/pflag/bytes.go
@@ -1,6 +1,7 @@
package pflag
import (
+ "encoding/base64"
"encoding/hex"
"fmt"
"strings"
@@ -9,10 +10,12 @@ import (
// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
type bytesHexValue []byte
+// String implements pflag.Value.String.
func (bytesHex bytesHexValue) String() string {
return fmt.Sprintf("%X", []byte(bytesHex))
}
+// Set implements pflag.Value.Set.
func (bytesHex *bytesHexValue) Set(value string) error {
bin, err := hex.DecodeString(strings.TrimSpace(value))
@@ -25,6 +28,7 @@ func (bytesHex *bytesHexValue) Set(value string) error {
return nil
}
+// Type implements pflag.Value.Type.
func (*bytesHexValue) Type() string {
return "bytesHex"
}
@@ -103,3 +107,103 @@ func BytesHex(name string, value []byte, usage string) *[]byte {
func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
return CommandLine.BytesHexP(name, shorthand, value, usage)
}
+
+// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
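+//
+// For example (a sketch; "flags" is a hypothetical *FlagSet and "key" a
+// hypothetical flag name):
+//
+//	var key []byte
+//	flags.BytesBase64Var(&key, "key", nil, "base64-encoded key")
+//	// --key aGVsbG8= sets key to []byte("hello")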
+type bytesBase64Value []byte
+
+// String implements pflag.Value.String.
+func (bytesBase64 bytesBase64Value) String() string {
+ return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesBase64 *bytesBase64Value) Set(value string) error {
+ bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesBase64 = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesBase64Value) Type() string {
+ return "bytesBase64"
+}
+
+func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
+ *p = val
+ return (*bytesBase64Value)(p)
+}
+
+func bytesBase64ValueConv(sval string) (interface{}, error) {
+
+ bin, err := base64.StdEncoding.DecodeString(sval)
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesBase64 return the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesBase64Var defines a []byte flag with specified name, default value, and usage string.
+// The argument p points to a []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64Var defines a []byte flag with specified name, default value, and usage string.
+// The argument p points to a []byte variable in which to store the value of the flag.
+func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64 defines a []byte flag with specified name, default value, and usage string.
+// The return value is the address of a []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesBase64 defines a []byte flag with specified name, default value, and usage string.
+// The return value is the address of a []byte variable that stores the value of the flag.
+func BytesBase64(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, "", value, usage)
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, shorthand, value, usage)
+}
diff --git a/src/vendor/github.com/spf13/pflag/flag.go b/src/vendor/github.com/spf13/pflag/flag.go
index 5eadc84e3..9beeda8ec 100644
--- a/src/vendor/github.com/spf13/pflag/flag.go
+++ b/src/vendor/github.com/spf13/pflag/flag.go
@@ -925,13 +925,16 @@ func stripUnknownFlagValue(args []string) []string {
}
first := args[0]
- if first[0] == '-' {
+ if len(first) > 0 && first[0] == '-' {
//--unknown --next-flag ...
return args
}
//--unknown arg ... (args will be arg ...)
- return args[1:]
+ if len(args) > 1 {
+ return args[1:]
+ }
+ return nil
}
func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
@@ -990,11 +993,12 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
}
func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
+ outArgs = args
+
if strings.HasPrefix(shorthands, "test.") {
return
}
- outArgs = args
outShorts = shorthands[1:]
c := shorthands[0]
diff --git a/src/vendor/github.com/spf13/pflag/string_to_int.go b/src/vendor/github.com/spf13/pflag/string_to_int.go
new file mode 100644
index 000000000..5ceda3965
--- /dev/null
+++ b/src/vendor/github.com/spf13/pflag/string_to_int.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt Value
+type stringToIntValue struct {
+ value *map[string]int
+ changed bool
+}
+
+func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
+ ssv := new(stringToIntValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
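+// For example, --limits a=1,b=2 yields map[string]int{"a": 1, "b": 2}
+// (the flag name "limits" is hypothetical); repeated occurrences of the
+// flag merge their pairs into the same map.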
+func (s *stringToIntValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToIntValue) Type() string {
+ return "stringToInt"
+}
+
+func (s *stringToIntValue) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.Itoa(v))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToIntConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt returns the map[string]int value of a flag with the given name
+func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
+ val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
+ if err != nil {
+ return map[string]int{}, err
+ }
+ return val.(map[string]int), nil
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the values of the multiple flags.
+// The flag value accepts comma-separated key=value pairs; repeated uses of the flag merge entries into the map.
+func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the value of the flag.
+// The flag value accepts comma-separated key=value pairs; repeated uses of the flag merge entries into the map.
+func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// The flag value accepts comma-separated key=value pairs; repeated uses of the flag merge entries into the map.
+func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// The flag value accepts comma-separated key=value pairs; repeated uses of the flag merge entries into the map.
+func StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, "", value, usage)
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, shorthand, value, usage)
+}
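
A quick usage sketch for the new map-valued flag; the flag name and values are illustrative:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// Accepts comma-separated key=value pairs, e.g. --weight core=3,registry=2;
	// repeating the flag merges entries into the same map.
	weights := flag.StringToInt("weight", map[string]int{"db": 1}, "per-component weights")
	flag.Parse()

	for k, v := range *weights {
		fmt.Printf("%s=%d\n", k, v)
	}
}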
diff --git a/src/vendor/github.com/spf13/pflag/string_to_string.go b/src/vendor/github.com/spf13/pflag/string_to_string.go
new file mode 100644
index 000000000..890a01afc
--- /dev/null
+++ b/src/vendor/github.com/spf13/pflag/string_to_string.go
@@ -0,0 +1,160 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "fmt"
+ "strings"
+)
+
+// -- stringToString Value
+type stringToStringValue struct {
+ value *map[string]string
+ changed bool
+}
+
+func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
+ ssv := new(stringToStringValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToStringValue) Set(val string) error {
+ var ss []string
+ n := strings.Count(val, "=")
+ switch n {
+ case 0:
+ return fmt.Errorf("%s must be formatted as key=value", val)
+ case 1:
+ ss = append(ss, strings.Trim(val, `"`))
+ default:
+ r := csv.NewReader(strings.NewReader(val))
+ var err error
+ ss, err = r.Read()
+ if err != nil {
+ return err
+ }
+ }
+
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToStringValue) Type() string {
+ return "stringToString"
+}
+
+func (s *stringToStringValue) String() string {
+ records := make([]string, 0, len(*s.value)>>1)
+ for k, v := range *s.value {
+ records = append(records, k+"="+v)
+ }
+
+ var buf bytes.Buffer
+ w := csv.NewWriter(&buf)
+ if err := w.Write(records); err != nil {
+ panic(err)
+ }
+ w.Flush()
+ return "[" + strings.TrimSpace(buf.String()) + "]"
+}
+
+func stringToStringConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]string{}, nil
+ }
+ r := csv.NewReader(strings.NewReader(val))
+ ss, err := r.Read()
+ if err != nil {
+ return nil, err
+ }
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ return out, nil
+}
+
+// GetStringToString returns the map[string]string value of a flag with the given name
+func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
+ val, err := f.getFlagType(name, "stringToString", stringToStringConv)
+ if err != nil {
+ return map[string]string{}, err
+ }
+ return val.(map[string]string), nil
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the values of the multiple flags.
+// The flag value accepts comma-separated key=value pairs; values containing commas can be CSV-quoted, and repeated uses of the flag merge entries into the map.
+func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the value of the flag.
+// The flag value accepts comma-separated key=value pairs; values containing commas can be CSV-quoted, and repeated uses of the flag merge entries into the map.
+func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// The flag value accepts comma-separated key=value pairs; values containing commas can be CSV-quoted, and repeated uses of the flag merge entries into the map.
+func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// The flag value accepts comma-separated key=value pairs; values containing commas can be CSV-quoted, and repeated uses of the flag merge entries into the map.
+func StringToString(name string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, "", value, usage)
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, shorthand, value, usage)
+}
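
The string-to-string variant routes multi-pair values through encoding/csv, so values containing commas can be quoted. A sketch (flag name illustrative):

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// --label env=prod
	// --label '"note=a,b",team=infra'   (CSV quoting protects the comma)
	labels := flag.StringToString("label", map[string]string{}, "labels as key=value")
	flag.Parse()
	fmt.Println(*labels)
}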
diff --git a/src/vendor/golang.org/x/crypto/AUTHORS b/src/vendor/golang.org/x/crypto/AUTHORS
index 15167cd74..2b00ddba0 100644
--- a/src/vendor/golang.org/x/crypto/AUTHORS
+++ b/src/vendor/golang.org/x/crypto/AUTHORS
@@ -1,3 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
+# visible at https://tip.golang.org/AUTHORS.
diff --git a/src/vendor/golang.org/x/crypto/CONTRIBUTORS b/src/vendor/golang.org/x/crypto/CONTRIBUTORS
index 1c4577e96..1fbd3e976 100644
--- a/src/vendor/golang.org/x/crypto/CONTRIBUTORS
+++ b/src/vendor/golang.org/x/crypto/CONTRIBUTORS
@@ -1,3 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
+# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/src/vendor/golang.org/x/crypto/cast5/cast5.go b/src/vendor/golang.org/x/crypto/cast5/cast5.go
index 0b4af37bd..ddcbeb6f2 100644
--- a/src/vendor/golang.org/x/crypto/cast5/cast5.go
+++ b/src/vendor/golang.org/x/crypto/cast5/cast5.go
@@ -2,8 +2,15 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common
-// OpenPGP cipher.
+// Package cast5 implements CAST5, as defined in RFC 2144.
+//
+// CAST5 is a legacy cipher and its short block size makes it vulnerable to
+// birthday bound attacks (see https://sweet32.info). It should only be used
+// where compatibility with legacy systems, not security, is the goal.
+//
+// Deprecated: any new system should use AES (from crypto/aes, if necessary in
+// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from
+// golang.org/x/crypto/chacha20poly1305).
package cast5 // import "golang.org/x/crypto/cast5"
import "errors"
diff --git a/src/vendor/golang.org/x/crypto/ed25519/ed25519.go b/src/vendor/golang.org/x/crypto/ed25519/ed25519.go
index f1d95674a..d6f683ba3 100644
--- a/src/vendor/golang.org/x/crypto/ed25519/ed25519.go
+++ b/src/vendor/golang.org/x/crypto/ed25519/ed25519.go
@@ -3,20 +3,23 @@
// license that can be found in the LICENSE file.
// Package ed25519 implements the Ed25519 signature algorithm. See
-// http://ed25519.cr.yp.to/.
+// https://ed25519.cr.yp.to/.
//
// These functions are also compatible with the “Ed25519” function defined in
-// https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05.
+// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
+// representation includes a public key suffix to make multiple signing
+// operations with the same key more efficient. This package refers to the RFC
+// 8032 private key as the “seed”.
package ed25519
// This code is a port of the public domain, “ref10” implementation of ed25519
// from SUPERCOP.
import (
+ "bytes"
"crypto"
cryptorand "crypto/rand"
"crypto/sha512"
- "crypto/subtle"
"errors"
"io"
"strconv"
@@ -31,6 +34,8 @@ const (
PrivateKeySize = 64
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
SignatureSize = 64
+ // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
+ SeedSize = 32
)
// PublicKey is the type of Ed25519 public keys.
@@ -46,6 +51,15 @@ func (priv PrivateKey) Public() crypto.PublicKey {
return PublicKey(publicKey)
}
+// Seed returns the private key seed corresponding to priv. It is provided for
+// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
+// in this package.
+func (priv PrivateKey) Seed() []byte {
+ seed := make([]byte, SeedSize)
+ copy(seed, priv[:32])
+ return seed
+}
+
// Sign signs the given message with priv.
// Ed25519 performs two passes over messages to be signed and therefore cannot
// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
@@ -61,19 +75,33 @@ func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOp
// GenerateKey generates a public/private key pair using entropy from rand.
// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) {
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
if rand == nil {
rand = cryptorand.Reader
}
- privateKey = make([]byte, PrivateKeySize)
- publicKey = make([]byte, PublicKeySize)
- _, err = io.ReadFull(rand, privateKey[:32])
- if err != nil {
+ seed := make([]byte, SeedSize)
+ if _, err := io.ReadFull(rand, seed); err != nil {
return nil, nil, err
}
- digest := sha512.Sum512(privateKey[:32])
+ privateKey := NewKeyFromSeed(seed)
+ publicKey := make([]byte, PublicKeySize)
+ copy(publicKey, privateKey[32:])
+
+ return publicKey, privateKey, nil
+}
+
+// NewKeyFromSeed calculates a private key from a seed. It will panic if
+// len(seed) is not SeedSize. This function is provided for interoperability
+// with RFC 8032. RFC 8032's private keys correspond to seeds in this
+// package.
+func NewKeyFromSeed(seed []byte) PrivateKey {
+ if l := len(seed); l != SeedSize {
+ panic("ed25519: bad seed length: " + strconv.Itoa(l))
+ }
+
+ digest := sha512.Sum512(seed)
digest[0] &= 248
digest[31] &= 127
digest[31] |= 64
@@ -85,10 +113,11 @@ func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, er
var publicKeyBytes [32]byte
A.ToBytes(&publicKeyBytes)
+ privateKey := make([]byte, PrivateKeySize)
+ copy(privateKey, seed)
copy(privateKey[32:], publicKeyBytes[:])
- copy(publicKey, publicKeyBytes[:])
- return publicKey, privateKey, nil
+ return privateKey
}
// Sign signs the message with privateKey and returns a signature. It will
@@ -171,11 +200,18 @@ func Verify(publicKey PublicKey, message, sig []byte) bool {
edwards25519.ScReduce(&hReduced, &digest)
var R edwards25519.ProjectiveGroupElement
- var b [32]byte
- copy(b[:], sig[32:])
- edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)
+ var s [32]byte
+ copy(s[:], sig[32:])
+
+ // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
+ // the range [0, order) in order to prevent signature malleability.
+ if !edwards25519.ScMinimal(&s) {
+ return false
+ }
+
+ edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
var checkR [32]byte
R.ToBytes(&checkR)
- return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1
+ return bytes.Equal(sig[:32], checkR[:])
}
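
Taken together, the ed25519 changes add a seed-based API and tighten Verify. A round-trip sketch of the new functions:

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// The RFC 8032 "private key" is the 32-byte seed; the full key is
	// deterministically reconstructible from it.
	again := ed25519.NewKeyFromSeed(priv.Seed())
	fmt.Println(bytes.Equal(again, priv)) // true

	msg := []byte("hello")
	sig := ed25519.Sign(priv, msg)
	// Verify now also rejects non-canonical s values (via ScMinimal,
	// below), preventing signature malleability.
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}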
diff --git a/src/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/src/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
index 5f8b99478..fd03c252a 100644
--- a/src/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
+++ b/src/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
@@ -4,6 +4,8 @@
package edwards25519
+import "encoding/binary"
+
// This code is a port of the public domain, “ref10” implementation of ed25519
// from SUPERCOP.
@@ -1769,3 +1771,23 @@ func ScReduce(out *[32]byte, s *[64]byte) {
out[30] = byte(s11 >> 9)
out[31] = byte(s11 >> 17)
}
+
+// order is the order of Curve25519 in little-endian form.
+var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000}
+
+// ScMinimal returns true if the given scalar is less than the order of the
+// curve.
+func ScMinimal(scalar *[32]byte) bool {
+ for i := 3; ; i-- {
+ v := binary.LittleEndian.Uint64(scalar[i*8:])
+ if v > order[i] {
+ return false
+ } else if v < order[i] {
+ break
+ } else if i == 0 {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/src/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go b/src/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go
index def4cabaf..a9437dc16 100644
--- a/src/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go
+++ b/src/vendor/golang.org/x/crypto/openpgp/clearsign/clearsign.go
@@ -13,6 +13,7 @@ import (
"bufio"
"bytes"
"crypto"
+ "fmt"
"hash"
"io"
"net/textproto"
@@ -177,8 +178,9 @@ func Decode(data []byte) (b *Block, rest []byte) {
// message.
type dashEscaper struct {
buffered *bufio.Writer
- h hash.Hash
+ hashers []hash.Hash // one per key in privateKeys
hashType crypto.Hash
+ toHash io.Writer // writes to all the hashes in hashers
atBeginningOfLine bool
isFirstLine bool
@@ -186,8 +188,8 @@ type dashEscaper struct {
whitespace []byte
byteBuf []byte // a one byte buffer to save allocations
- privateKey *packet.PrivateKey
- config *packet.Config
+ privateKeys []*packet.PrivateKey
+ config *packet.Config
}
func (d *dashEscaper) Write(data []byte) (n int, err error) {
@@ -198,7 +200,7 @@ func (d *dashEscaper) Write(data []byte) (n int, err error) {
// The final CRLF isn't included in the hash so we have to wait
// until this point (the start of the next line) before writing it.
if !d.isFirstLine {
- d.h.Write(crlf)
+ d.toHash.Write(crlf)
}
d.isFirstLine = false
}
@@ -219,12 +221,12 @@ func (d *dashEscaper) Write(data []byte) (n int, err error) {
if _, err = d.buffered.Write(dashEscape); err != nil {
return
}
- d.h.Write(d.byteBuf)
+ d.toHash.Write(d.byteBuf)
d.atBeginningOfLine = false
} else if b == '\n' {
// Nothing to do because we delay writing CRLF to the hash.
} else {
- d.h.Write(d.byteBuf)
+ d.toHash.Write(d.byteBuf)
d.atBeginningOfLine = false
}
if err = d.buffered.WriteByte(b); err != nil {
@@ -245,13 +247,13 @@ func (d *dashEscaper) Write(data []byte) (n int, err error) {
// Any buffered whitespace wasn't at the end of the line so
// we need to write it out.
if len(d.whitespace) > 0 {
- d.h.Write(d.whitespace)
+ d.toHash.Write(d.whitespace)
if _, err = d.buffered.Write(d.whitespace); err != nil {
return
}
d.whitespace = d.whitespace[:0]
}
- d.h.Write(d.byteBuf)
+ d.toHash.Write(d.byteBuf)
if err = d.buffered.WriteByte(b); err != nil {
return
}
@@ -269,25 +271,29 @@ func (d *dashEscaper) Close() (err error) {
return
}
}
- sig := new(packet.Signature)
- sig.SigType = packet.SigTypeText
- sig.PubKeyAlgo = d.privateKey.PubKeyAlgo
- sig.Hash = d.hashType
- sig.CreationTime = d.config.Now()
- sig.IssuerKeyId = &d.privateKey.KeyId
-
- if err = sig.Sign(d.h, d.privateKey, d.config); err != nil {
- return
- }
out, err := armor.Encode(d.buffered, "PGP SIGNATURE", nil)
if err != nil {
return
}
- if err = sig.Serialize(out); err != nil {
- return
+ t := d.config.Now()
+ for i, k := range d.privateKeys {
+ sig := new(packet.Signature)
+ sig.SigType = packet.SigTypeText
+ sig.PubKeyAlgo = k.PubKeyAlgo
+ sig.Hash = d.hashType
+ sig.CreationTime = t
+ sig.IssuerKeyId = &k.KeyId
+
+ if err = sig.Sign(d.hashers[i], k, d.config); err != nil {
+ return
+ }
+ if err = sig.Serialize(out); err != nil {
+ return
+ }
}
+
if err = out.Close(); err != nil {
return
}
@@ -300,8 +306,17 @@ func (d *dashEscaper) Close() (err error) {
// Encode returns a WriteCloser which will clear-sign a message with privateKey
// and write it to w. If config is nil, sensible defaults are used.
func Encode(w io.Writer, privateKey *packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) {
- if privateKey.Encrypted {
- return nil, errors.InvalidArgumentError("signing key is encrypted")
+ return EncodeMulti(w, []*packet.PrivateKey{privateKey}, config)
+}
+
+// EncodeMulti returns a WriteCloser which will clear-sign a message with all the
+// private keys indicated and write it to w. If config is nil, sensible defaults
+// are used.
+func EncodeMulti(w io.Writer, privateKeys []*packet.PrivateKey, config *packet.Config) (plaintext io.WriteCloser, err error) {
+ for _, k := range privateKeys {
+ if k.Encrypted {
+ return nil, errors.InvalidArgumentError(fmt.Sprintf("signing key %s is encrypted", k.KeyIdString()))
+ }
}
hashType := config.Hash()
@@ -313,7 +328,14 @@ func Encode(w io.Writer, privateKey *packet.PrivateKey, config *packet.Config) (
if !hashType.Available() {
return nil, errors.UnsupportedError("unsupported hash type: " + strconv.Itoa(int(hashType)))
}
- h := hashType.New()
+ var hashers []hash.Hash
+ var ws []io.Writer
+ for range privateKeys {
+ h := hashType.New()
+ hashers = append(hashers, h)
+ ws = append(ws, h)
+ }
+ toHash := io.MultiWriter(ws...)
buffered := bufio.NewWriter(w)
// start has a \n at the beginning that we don't want here.
@@ -338,16 +360,17 @@ func Encode(w io.Writer, privateKey *packet.PrivateKey, config *packet.Config) (
plaintext = &dashEscaper{
buffered: buffered,
- h: h,
+ hashers: hashers,
hashType: hashType,
+ toHash: toHash,
atBeginningOfLine: true,
isFirstLine: true,
byteBuf: make([]byte, 1),
- privateKey: privateKey,
- config: config,
+ privateKeys: privateKeys,
+ config: config,
}
return
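
A sketch of the new multi-key entry point; the entities here are freshly generated for illustration, where real callers would load them from a keyring:

package main

import (
	_ "crypto/sha256" // register the default signing hash
	"os"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/clearsign"
	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	e1, err := openpgp.NewEntity("alice", "", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}
	e2, err := openpgp.NewEntity("bob", "", "bob@example.com", nil)
	if err != nil {
		panic(err)
	}

	keys := []*packet.PrivateKey{e1.PrivateKey, e2.PrivateKey}
	w, err := clearsign.EncodeMulti(os.Stdout, keys, nil)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("signed by two keys\n"))
	w.Close() // the PGP SIGNATURE block now carries one signature per key
}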
diff --git a/src/vendor/golang.org/x/crypto/openpgp/keys.go b/src/vendor/golang.org/x/crypto/openpgp/keys.go
index bfe326031..3e2518600 100644
--- a/src/vendor/golang.org/x/crypto/openpgp/keys.go
+++ b/src/vendor/golang.org/x/crypto/openpgp/keys.go
@@ -307,8 +307,6 @@ func readToNextPublicKey(packets *packet.Reader) (err error) {
return
}
}
-
- panic("unreachable")
}
// ReadEntity reads an entity (public key, identities, subkeys etc) from the
@@ -327,16 +325,14 @@ func ReadEntity(packets *packet.Reader) (*Entity, error) {
if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
packets.Unread(p)
return nil, errors.StructuralError("first packet was not a public/private key")
- } else {
- e.PrimaryKey = &e.PrivateKey.PublicKey
}
+ e.PrimaryKey = &e.PrivateKey.PublicKey
}
if !e.PrimaryKey.PubKeyAlgo.CanSign() {
return nil, errors.StructuralError("primary key cannot be used for signatures")
}
- var current *Identity
var revocations []*packet.Signature
EachPacket:
for {
@@ -349,32 +345,8 @@ EachPacket:
switch pkt := p.(type) {
case *packet.UserId:
- current = new(Identity)
- current.Name = pkt.Id
- current.UserId = pkt
- e.Identities[pkt.Id] = current
-
- for {
- p, err = packets.Next()
- if err == io.EOF {
- return nil, io.ErrUnexpectedEOF
- } else if err != nil {
- return nil, err
- }
-
- sig, ok := p.(*packet.Signature)
- if !ok {
- return nil, errors.StructuralError("user ID packet not followed by self-signature")
- }
-
- if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
- if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
- return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error())
- }
- current.SelfSignature = sig
- break
- }
- current.Signatures = append(current.Signatures, sig)
+ if err := addUserID(e, packets, pkt); err != nil {
+ return nil, err
}
case *packet.Signature:
if pkt.SigType == packet.SigTypeKeyRevocation {
@@ -383,11 +355,9 @@ EachPacket:
// TODO: RFC4880 5.2.1 permits signatures
// directly on keys (eg. to bind additional
// revocation keys).
- } else if current == nil {
- return nil, errors.StructuralError("signature packet found before user id packet")
- } else {
- current.Signatures = append(current.Signatures, pkt)
}
+ // Otherwise, ignore the signature, as it does not follow anything
+ // we would know to attach it to.
case *packet.PrivateKey:
if pkt.IsSubkey == false {
packets.Unread(p)
@@ -428,33 +398,105 @@ EachPacket:
return e, nil
}
+func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error {
+ // Make a new Identity object, that we might wind up throwing away.
+ // We'll only add it if we get a valid self-signature over this
+ // userID.
+ identity := new(Identity)
+ identity.Name = pkt.Id
+ identity.UserId = pkt
+
+ for {
+ p, err := packets.Next()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return err
+ }
+
+ sig, ok := p.(*packet.Signature)
+ if !ok {
+ packets.Unread(p)
+ break
+ }
+
+ if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId {
+ if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
+ return errors.StructuralError("user ID self-signature invalid: " + err.Error())
+ }
+ identity.SelfSignature = sig
+ e.Identities[pkt.Id] = identity
+ } else {
+ identity.Signatures = append(identity.Signatures, sig)
+ }
+ }
+
+ return nil
+}
+
func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
var subKey Subkey
subKey.PublicKey = pub
subKey.PrivateKey = priv
- p, err := packets.Next()
- if err == io.EOF {
- return io.ErrUnexpectedEOF
+
+ for {
+ p, err := packets.Next()
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ return errors.StructuralError("subkey signature invalid: " + err.Error())
+ }
+
+ sig, ok := p.(*packet.Signature)
+ if !ok {
+ packets.Unread(p)
+ break
+ }
+
+ if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation {
+ return errors.StructuralError("subkey signature with wrong type")
+ }
+
+ if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil {
+ return errors.StructuralError("subkey signature invalid: " + err.Error())
+ }
+
+ switch sig.SigType {
+ case packet.SigTypeSubkeyRevocation:
+ subKey.Sig = sig
+ case packet.SigTypeSubkeyBinding:
+
+ if shouldReplaceSubkeySig(subKey.Sig, sig) {
+ subKey.Sig = sig
+ }
+ }
}
- if err != nil {
- return errors.StructuralError("subkey signature invalid: " + err.Error())
- }
- var ok bool
- subKey.Sig, ok = p.(*packet.Signature)
- if !ok {
+
+ if subKey.Sig == nil {
return errors.StructuralError("subkey packet not followed by signature")
}
- if subKey.Sig.SigType != packet.SigTypeSubkeyBinding && subKey.Sig.SigType != packet.SigTypeSubkeyRevocation {
- return errors.StructuralError("subkey signature with wrong type")
- }
- err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig)
- if err != nil {
- return errors.StructuralError("subkey signature invalid: " + err.Error())
- }
+
e.Subkeys = append(e.Subkeys, subKey)
+
return nil
}
+func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool {
+ if potentialNewSig == nil {
+ return false
+ }
+
+ if existingSig == nil {
+ return true
+ }
+
+ if existingSig.SigType == packet.SigTypeSubkeyRevocation {
+ return false // never override a revocation signature
+ }
+
+ return potentialNewSig.CreationTime.After(existingSig.CreationTime)
+}
+
const defaultRSAKeyBits = 2048
// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
@@ -489,7 +531,7 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
}
isPrimaryId := true
e.Identities[uid.Id] = &Identity{
- Name: uid.Name,
+ Name: uid.Id,
UserId: uid,
SelfSignature: &packet.Signature{
CreationTime: currentTime,
@@ -503,6 +545,21 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
IssuerKeyId: &e.PrimaryKey.KeyId,
},
}
+ err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config)
+ if err != nil {
+ return nil, err
+ }
+
+ // If the user passes in a DefaultHash via packet.Config,
+ // set the PreferredHash for the SelfSignature.
+ if config != nil && config.DefaultHash != 0 {
+ e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)}
+ }
+
+ // Likewise for DefaultCipher.
+ if config != nil && config.DefaultCipher != 0 {
+ e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)}
+ }
e.Subkeys = make([]Subkey, 1)
e.Subkeys[0] = Subkey{
@@ -521,13 +578,16 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
}
e.Subkeys[0].PublicKey.IsSubkey = true
e.Subkeys[0].PrivateKey.IsSubkey = true
-
+ err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config)
+ if err != nil {
+ return nil, err
+ }
return e, nil
}
-// SerializePrivate serializes an Entity, including private key material, to
-// the given Writer. For now, it must only be used on an Entity returned from
-// NewEntity.
+// SerializePrivate serializes an Entity, including private key material, but
+// excluding signatures from other entities, to the given Writer.
+// Identities and subkeys are re-signed in case they changed since NewEntity.
// If config is nil, sensible defaults will be used.
func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
err = e.PrivateKey.Serialize(w)
@@ -565,8 +625,8 @@ func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error
return nil
}
-// Serialize writes the public part of the given Entity to w. (No private
-// key material will be output).
+// Serialize writes the public part of the given Entity to w, including
+// signatures from other entities. No private key material will be output.
func (e *Entity) Serialize(w io.Writer) error {
err := e.PrimaryKey.Serialize(w)
if err != nil {
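
With NewEntity now self-signing the identity and subkey up front and honoring config preferences, a sketch of the resulting flow (names illustrative):

package main

import (
	"crypto"
	_ "crypto/sha256" // register SHA-256 for the self-signatures
	"os"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	cfg := &packet.Config{
		DefaultHash:   crypto.SHA256,       // becomes the identity's PreferredHash
		DefaultCipher: packet.CipherAES256, // becomes PreferredSymmetric
	}
	e, err := openpgp.NewEntity("ci", "", "ci@example.com", cfg)
	if err != nil {
		panic(err)
	}
	// Identities and subkeys already carry self-signatures;
	// SerializePrivate re-signs them in case they changed after NewEntity.
	if err := e.SerializePrivate(os.Stdout, cfg); err != nil {
		panic(err)
	}
}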
diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/src/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
index 266840d05..02b372cf3 100644
--- a/src/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
+++ b/src/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go
@@ -42,12 +42,18 @@ func (e *EncryptedKey) parse(r io.Reader) (err error) {
switch e.Algo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
case PubKeyAlgoElGamal:
e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r)
if err != nil {
return
}
e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r)
+ if err != nil {
+ return
+ }
}
_, err = consumeAll(r)
return
@@ -72,7 +78,8 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
// padding oracle attacks.
switch priv.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- b, err = rsa.DecryptPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), e.encryptedMPI1.bytes)
+ k := priv.PrivateKey.(*rsa.PrivateKey)
+ b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes))
case PubKeyAlgoElGamal:
c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes)
c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes)
diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/src/vendor/golang.org/x/crypto/openpgp/packet/packet.go
index e2bde1111..5af64c542 100644
--- a/src/vendor/golang.org/x/crypto/openpgp/packet/packet.go
+++ b/src/vendor/golang.org/x/crypto/openpgp/packet/packet.go
@@ -11,10 +11,12 @@ import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
- "golang.org/x/crypto/cast5"
- "golang.org/x/crypto/openpgp/errors"
+ "crypto/rsa"
"io"
"math/big"
+
+ "golang.org/x/crypto/cast5"
+ "golang.org/x/crypto/openpgp/errors"
)
// readFull is the same as io.ReadFull except that reading zero bytes returns
@@ -273,8 +275,6 @@ func consumeAll(r io.Reader) (n int64, err error) {
return
}
}
-
- panic("unreachable")
}
// packetType represents the numeric ids of the different OpenPGP packet types. See
@@ -404,14 +404,16 @@ const (
type PublicKeyAlgorithm uint8
const (
- PubKeyAlgoRSA PublicKeyAlgorithm = 1
- PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
- PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
- PubKeyAlgoElGamal PublicKeyAlgorithm = 16
- PubKeyAlgoDSA PublicKeyAlgorithm = 17
+ PubKeyAlgoRSA PublicKeyAlgorithm = 1
+ PubKeyAlgoElGamal PublicKeyAlgorithm = 16
+ PubKeyAlgoDSA PublicKeyAlgorithm = 17
// RFC 6637, Section 5.
PubKeyAlgoECDH PublicKeyAlgorithm = 18
PubKeyAlgoECDSA PublicKeyAlgorithm = 19
+
+ // Deprecated in RFC 4880, Section 13.5. Use key flags instead.
+ PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
+ PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
)
// CanEncrypt returns true if it's possible to encrypt a message to a public
@@ -502,19 +504,17 @@ func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) {
numBytes := (int(bitLength) + 7) / 8
mpi = make([]byte, numBytes)
_, err = readFull(r, mpi)
- return
-}
-
-// mpiLength returns the length of the given *big.Int when serialized as an
-// MPI.
-func mpiLength(n *big.Int) (mpiLengthInBytes int) {
- mpiLengthInBytes = 2 /* MPI length */
- mpiLengthInBytes += (n.BitLen() + 7) / 8
+ // According to RFC 4880 3.2. we should check that the MPI has no leading
+ // zeroes (at least when not an encrypted MPI?), but this implementation
+ // does generate leading zeroes, so we keep accepting them.
return
}
// writeMPI serializes a big integer to w.
func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) {
+ // Note that we can produce leading zeroes, in violation of RFC 4880 3.2.
+ // Implementations seem to be tolerant of them, and stripping them would
+ // make it complex to guarantee matching re-serialization.
_, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)})
if err == nil {
_, err = w.Write(mpiBytes)
@@ -527,6 +527,18 @@ func writeBig(w io.Writer, i *big.Int) error {
return writeMPI(w, uint16(i.BitLen()), i.Bytes())
}
+// padToKeySize left-pads an MPI with zeroes to match the length of the
+// specified RSA public key.
+func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
+ k := (pub.N.BitLen() + 7) / 8
+ if len(b) >= k {
+ return b
+ }
+ bb := make([]byte, k)
+ copy(bb[len(bb)-len(b):], b)
+ return bb
+}
+
// CompressionAlgo Represents the different compression algorithms
// supported by OpenPGP (except for BZIP2, which is not currently
// supported). See Section 9.3 of RFC 4880.
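
padToKeySize exists because RFC 4880 MPIs drop leading zero bytes, while crypto/rsa expects signatures and ciphertexts exactly as long as the modulus. A standalone sketch of the same logic; leftPad is a stand-in name for the unexported helper:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

// leftPad mirrors padToKeySize above: restore the leading zeroes an MPI
// encoding stripped, so the slice matches the modulus length.
func leftPad(pub *rsa.PublicKey, b []byte) []byte {
	k := (pub.N.BitLen() + 7) / 8
	if len(b) >= k {
		return b
	}
	bb := make([]byte, k)
	copy(bb[len(bb)-len(b):], b)
	return bb
}

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	short := []byte{0x01, 0x02} // an MPI whose leading zeroes were stripped
	fmt.Println(len(leftPad(&key.PublicKey, short))) // 256
}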
diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/src/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
index 545846ba8..bd31cceac 100644
--- a/src/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
+++ b/src/vendor/golang.org/x/crypto/openpgp/packet/private_key.go
@@ -6,6 +6,7 @@ package packet
import (
"bytes"
+ "crypto"
"crypto/cipher"
"crypto/dsa"
"crypto/ecdsa"
@@ -30,7 +31,7 @@ type PrivateKey struct {
encryptedData []byte
cipher CipherFunction
s2k func(out, in []byte)
- PrivateKey interface{} // An *rsa.PrivateKey or *dsa.PrivateKey.
+ PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer.
sha1Checksum bool
iv []byte
}
@@ -63,6 +64,28 @@ func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateK
return pk
}
+// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
+// implements RSA or ECDSA.
+func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey {
+ pk := new(PrivateKey)
+ // In general, the public keys should be used as pointers. We still
+ // type-switch on the values, for backwards-compatibility.
+ switch pubkey := signer.Public().(type) {
+ case *rsa.PublicKey:
+ pk.PublicKey = *NewRSAPublicKey(currentTime, pubkey)
+ case rsa.PublicKey:
+ pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey)
+ case *ecdsa.PublicKey:
+ pk.PublicKey = *NewECDSAPublicKey(currentTime, pubkey)
+ case ecdsa.PublicKey:
+ pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey)
+ default:
+ panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey")
+ }
+ pk.PrivateKey = signer
+ return pk
+}
+
func (pk *PrivateKey) parse(r io.Reader) (err error) {
err = (&pk.PublicKey).parse(r)
if err != nil {
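
NewSignerPrivateKey lets signing be delegated to any crypto.Signer. A sketch with an in-memory ECDSA key standing in for what would typically be an HSM- or KMS-backed signer:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	ec, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// *ecdsa.PrivateKey satisfies crypto.Signer, so it can be wrapped
	// directly; opaque signers are handled the same way.
	pk := packet.NewSignerPrivateKey(time.Now(), ec)
	fmt.Println(pk.PubKeyAlgo == packet.PubKeyAlgoECDSA) // true
}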
diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/src/vendor/golang.org/x/crypto/openpgp/packet/public_key.go
index c769933ce..fcd5f5251 100644
--- a/src/vendor/golang.org/x/crypto/openpgp/packet/public_key.go
+++ b/src/vendor/golang.org/x/crypto/openpgp/packet/public_key.go
@@ -244,7 +244,12 @@ func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey
}
pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
- pk.ec.p.bitLength = uint16(8 * len(pk.ec.p.bytes))
+
+ // The bit length is 3 (for the 0x04 specifying an uncompressed key)
+ // plus two field elements (for x and y), which are rounded up to the
+ // nearest byte. See https://tools.ietf.org/html/rfc6637#section-6
+ fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7
+ pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes)
pk.setFingerPrintAndKeyId()
return pk
@@ -515,7 +520,7 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
- err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes)
+ err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes))
if err != nil {
return errors.SignatureError("RSA verification failure")
}
@@ -540,7 +545,6 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro
default:
return errors.SignatureError("Unsupported public key algorithm used in signature")
}
- panic("unreachable")
}
// VerifySignatureV3 returns nil iff sig is a valid signature, made by this
@@ -567,7 +571,7 @@ func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err
switch pk.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
rsaPublicKey := pk.PublicKey.(*rsa.PublicKey)
- if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil {
+ if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil {
return errors.SignatureError("RSA verification failure")
}
return
@@ -585,7 +589,6 @@ func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err
default:
panic("shouldn't happen")
}
- panic("unreachable")
}
// keySignatureHash returns a Hash of the message that needs to be signed for
diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/src/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go
index 26337f5aa..5daf7b6cf 100644
--- a/src/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go
+++ b/src/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go
@@ -216,7 +216,6 @@ func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (er
// V3 public keys only support RSA.
panic("shouldn't happen")
}
- panic("unreachable")
}
// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this
diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/src/vendor/golang.org/x/crypto/openpgp/packet/signature.go
index 4368f6b9e..b2a24a532 100644
--- a/src/vendor/golang.org/x/crypto/openpgp/packet/signature.go
+++ b/src/vendor/golang.org/x/crypto/openpgp/packet/signature.go
@@ -9,10 +9,11 @@ import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
- "crypto/rsa"
+ "encoding/asn1"
"encoding/binary"
"hash"
"io"
+ "math/big"
"strconv"
"time"
@@ -516,7 +517,8 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
switch priv.PubKeyAlgo {
case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- sig.RSASignature.bytes, err = rsa.SignPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), sig.Hash, digest)
+ // supports both *rsa.PrivateKey and crypto.Signer
+ sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes))
case PubKeyAlgoDSA:
dsaPriv := priv.PrivateKey.(*dsa.PrivateKey)
@@ -534,7 +536,17 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes))
}
case PubKeyAlgoECDSA:
- r, s, err := ecdsa.Sign(config.Random(), priv.PrivateKey.(*ecdsa.PrivateKey), digest)
+ var r, s *big.Int
+ if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok {
+ // direct support, avoid asn1 wrapping/unwrapping
+ r, s, err = ecdsa.Sign(config.Random(), pk, digest)
+ } else {
+ var b []byte
+ b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
+ if err == nil {
+ r, s, err = unwrapECDSASig(b)
+ }
+ }
if err == nil {
sig.ECDSASigR = fromBig(r)
sig.ECDSASigS = fromBig(s)
@@ -546,6 +558,19 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
return
}
+// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA
+// signature.
+func unwrapECDSASig(b []byte) (r, s *big.Int, err error) {
+ var ecdsaSig struct {
+ R, S *big.Int
+ }
+ _, err = asn1.Unmarshal(b, &ecdsaSig)
+ if err != nil {
+ return
+ }
+ return ecdsaSig.R, ecdsaSig.S, nil
+}
+
// SignUserId computes a signature from priv, asserting that pub is a valid
// key for the identity id. On success, the signature is stored in sig. Call
// Serialize to write it out.
@@ -553,7 +578,7 @@ func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err e
func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error {
h, err := userIdSignatureHash(id, pub, sig.Hash)
if err != nil {
- return nil
+ return err
}
return sig.Sign(h, priv, config)
}
diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/src/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
index 4b1105b6f..744c2d2c4 100644
--- a/src/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
+++ b/src/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
@@ -88,10 +88,10 @@ func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunc
return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc)))
}
plaintextKey = plaintextKey[1:]
- if l := len(plaintextKey); l == 0 || l%cipherFunc.blockSize() != 0 {
- return nil, cipherFunc, errors.StructuralError("length of decrypted key not a multiple of block size")
+ if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherKeySize {
+ return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " +
+ "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")")
}
-
return plaintextKey, cipherFunc, nil
}
diff --git a/src/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/src/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
index 96a2b382a..d19ffbc78 100644
--- a/src/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
+++ b/src/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go
@@ -80,7 +80,7 @@ func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
// ImageData returns zero or more byte slices, each containing
// JPEG File Interchange Format (JFIF), for each photo in the
-// the user attribute packet.
+// user attribute packet.
func (uat *UserAttribute) ImageData() (imageData [][]byte) {
for _, sp := range uat.Contents {
if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
diff --git a/src/vendor/golang.org/x/crypto/openpgp/read.go b/src/vendor/golang.org/x/crypto/openpgp/read.go
index a8bb3de95..6ec664f44 100644
--- a/src/vendor/golang.org/x/crypto/openpgp/read.go
+++ b/src/vendor/golang.org/x/crypto/openpgp/read.go
@@ -50,7 +50,7 @@ type MessageDetails struct {
// If IsSigned is true and SignedBy is non-zero then the signature will
// be verified as UnverifiedBody is read. The signature cannot be
// checked until the whole of UnverifiedBody is read so UnverifiedBody
- // must be consumed until EOF before the data can trusted. Even if a
+ // must be consumed until EOF before the data can be trusted. Even if a
// message isn't signed (or the signer is unknown) the data may contain
// an authentication code that is only checked once UnverifiedBody has
// been consumed. Once EOF has been seen, the following fields are
diff --git a/src/vendor/golang.org/x/crypto/openpgp/write.go b/src/vendor/golang.org/x/crypto/openpgp/write.go
index 65a304cc8..4ee71784e 100644
--- a/src/vendor/golang.org/x/crypto/openpgp/write.go
+++ b/src/vendor/golang.org/x/crypto/openpgp/write.go
@@ -164,12 +164,12 @@ func hashToHashId(h crypto.Hash) uint8 {
return v
}
-// Encrypt encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, that is also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
+// writeAndSign writes the data as a payload package and, optionally, signs
+// it. hints contains optional information, that is also encrypted,
+// that aids the recipients in processing the message. The resulting
+// WriteCloser must be closed after the contents of the file have been
+// written. If config is nil, sensible defaults will be used.
+func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
var signer *packet.PrivateKey
if signed != nil {
signKey, ok := signed.signingKey(config.Now())
@@ -185,6 +185,83 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
}
}
+ var hash crypto.Hash
+ for _, hashId := range candidateHashes {
+ if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
+ hash = h
+ break
+ }
+ }
+
+ // If the hash specified by config is a candidate, we'll use that.
+ if configuredHash := config.Hash(); configuredHash.Available() {
+ for _, hashId := range candidateHashes {
+ if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
+ hash = h
+ break
+ }
+ }
+ }
+
+ if hash == 0 {
+ hashId := candidateHashes[0]
+ name, ok := s2k.HashIdToString(hashId)
+ if !ok {
+ name = "#" + strconv.Itoa(int(hashId))
+ }
+ return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
+ }
+
+ if signer != nil {
+ ops := &packet.OnePassSignature{
+ SigType: packet.SigTypeBinary,
+ Hash: hash,
+ PubKeyAlgo: signer.PubKeyAlgo,
+ KeyId: signer.KeyId,
+ IsLast: true,
+ }
+ if err := ops.Serialize(payload); err != nil {
+ return nil, err
+ }
+ }
+
+ if hints == nil {
+ hints = &FileHints{}
+ }
+
+ w := payload
+ if signer != nil {
+ // If we need to write a signature packet after the literal
+ // data then we need to stop literalData from closing
+ // encryptedData.
+ w = noOpCloser{w}
+
+ }
+ var epochSeconds uint32
+ if !hints.ModTime.IsZero() {
+ epochSeconds = uint32(hints.ModTime.Unix())
+ }
+ literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
+ if err != nil {
+ return nil, err
+ }
+
+ if signer != nil {
+ return signatureWriter{payload, literalData, hash, hash.New(), signer, config}, nil
+ }
+ return literalData, nil
+}
+
+// Encrypt encrypts a message to a number of recipients and, optionally, signs
+// it. hints contains optional information, that is also encrypted, that aids
+// the recipients in processing the message. The resulting WriteCloser must
+// be closed after the contents of the file have been written.
+// If config is nil, sensible defaults will be used.
+func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
+ if len(to) == 0 {
+ return nil, errors.InvalidArgumentError("no encryption recipient provided")
+ }
+
// These are the possible ciphers that we'll use for the message.
candidateCiphers := []uint8{
uint8(packet.CipherAES128),
@@ -194,6 +271,7 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
// These are the possible hash functions that we'll use for the signature.
candidateHashes := []uint8{
hashToHashId(crypto.SHA256),
+ hashToHashId(crypto.SHA384),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA1),
hashToHashId(crypto.RIPEMD160),
@@ -241,33 +319,6 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
}
}
- var hash crypto.Hash
- for _, hashId := range candidateHashes {
- if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() {
- hash = h
- break
- }
- }
-
- // If the hash specified by config is a candidate, we'll use that.
- if configuredHash := config.Hash(); configuredHash.Available() {
- for _, hashId := range candidateHashes {
- if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash {
- hash = h
- break
- }
- }
- }
-
- if hash == 0 {
- hashId := candidateHashes[0]
- name, ok := s2k.HashIdToString(hashId)
- if !ok {
- name = "#" + strconv.Itoa(int(hashId))
- }
- return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
- }
-
symKey := make([]byte, cipher.KeySize())
if _, err := io.ReadFull(config.Random(), symKey); err != nil {
return nil, err
@@ -279,49 +330,38 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
}
}
- encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
+ payload, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config)
if err != nil {
return
}
- if signer != nil {
- ops := &packet.OnePassSignature{
- SigType: packet.SigTypeBinary,
- Hash: hash,
- PubKeyAlgo: signer.PubKeyAlgo,
- KeyId: signer.KeyId,
- IsLast: true,
- }
- if err := ops.Serialize(encryptedData); err != nil {
- return nil, err
- }
+ return writeAndSign(payload, candidateHashes, signed, hints, config)
+}
+
+// Sign signs a message. The resulting WriteCloser must be closed after the
+// contents of the file have been written. hints contains optional information
+// that aids the recipients in processing the message.
+// If config is nil, sensible defaults will be used.
+func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) {
+ if signed == nil {
+ return nil, errors.InvalidArgumentError("no signer provided")
}
- if hints == nil {
- hints = &FileHints{}
+ // These are the possible hash functions that we'll use for the signature.
+ candidateHashes := []uint8{
+ hashToHashId(crypto.SHA256),
+ hashToHashId(crypto.SHA384),
+ hashToHashId(crypto.SHA512),
+ hashToHashId(crypto.SHA1),
+ hashToHashId(crypto.RIPEMD160),
}
-
- w := encryptedData
- if signer != nil {
- // If we need to write a signature packet after the literal
- // data then we need to stop literalData from closing
- // encryptedData.
- w = noOpCloser{encryptedData}
-
+ defaultHashes := candidateHashes[len(candidateHashes)-1:]
+ preferredHashes := signed.primaryIdentity().SelfSignature.PreferredHash
+ if len(preferredHashes) == 0 {
+ preferredHashes = defaultHashes
}
- var epochSeconds uint32
- if !hints.ModTime.IsZero() {
- epochSeconds = uint32(hints.ModTime.Unix())
- }
- literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
- if err != nil {
- return nil, err
- }
-
- if signer != nil {
- return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil
- }
- return literalData, nil
+ candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
+ return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, config)
}
// signatureWriter hashes the contents of a message while passing it along to
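
The refactor extracts writeAndSign so the new Sign entry point can share it with Encrypt. A sketch of sign-only output (entity generation is illustrative):

package main

import (
	_ "crypto/sha256" // register the default signing hash
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	signer, err := openpgp.NewEntity("release", "", "release@example.com", nil)
	if err != nil {
		panic(err)
	}
	// Unlike Encrypt, there are no recipients and no session key: the
	// output is a one-pass-signed literal data stream.
	w, err := openpgp.Sign(os.Stdout, signer, nil, nil)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("artifact checksum list\n"))
	w.Close()
}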
diff --git a/src/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/src/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
index 741eeb13f..2f04ee5b5 100644
--- a/src/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
+++ b/src/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
@@ -7,6 +7,7 @@ package terminal
import (
"bytes"
"io"
+ "strconv"
"sync"
"unicode/utf8"
)
@@ -132,8 +133,11 @@ const (
keyPasteEnd
)
-var pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'}
-var pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'}
+var (
+ crlf = []byte{'\r', '\n'}
+ pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'}
+ pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'}
+)
// bytesToKey tries to parse a key sequence from b. If successful, it returns
// the key and the remainder of the input. Otherwise it returns utf8.RuneError.
@@ -156,6 +160,10 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
return keyClearScreen, b[1:]
case 23: // ^W
return keyDeleteWord, b[1:]
+ case 14: // ^N
+ return keyDown, b[1:]
+ case 16: // ^P
+ return keyUp, b[1:]
}
}
@@ -264,34 +272,44 @@ func (t *Terminal) moveCursorToPos(pos int) {
}
func (t *Terminal) move(up, down, left, right int) {
- movement := make([]rune, 3*(up+down+left+right))
- m := movement
- for i := 0; i < up; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'A'
- m = m[3:]
- }
- for i := 0; i < down; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'B'
- m = m[3:]
- }
- for i := 0; i < left; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'D'
- m = m[3:]
- }
- for i := 0; i < right; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'C'
- m = m[3:]
+ m := []rune{}
+
+ // 1 unit up can be expressed as ^[[A or ^[A
+ // 5 units up can be expressed as ^[[5A
+
+ if up == 1 {
+ m = append(m, keyEscape, '[', 'A')
+ } else if up > 1 {
+ m = append(m, keyEscape, '[')
+ m = append(m, []rune(strconv.Itoa(up))...)
+ m = append(m, 'A')
}
- t.queue(movement)
+ if down == 1 {
+ m = append(m, keyEscape, '[', 'B')
+ } else if down > 1 {
+ m = append(m, keyEscape, '[')
+ m = append(m, []rune(strconv.Itoa(down))...)
+ m = append(m, 'B')
+ }
+
+ if right == 1 {
+ m = append(m, keyEscape, '[', 'C')
+ } else if right > 1 {
+ m = append(m, keyEscape, '[')
+ m = append(m, []rune(strconv.Itoa(right))...)
+ m = append(m, 'C')
+ }
+
+ if left == 1 {
+ m = append(m, keyEscape, '[', 'D')
+ } else if left > 1 {
+ m = append(m, keyEscape, '[')
+ m = append(m, []rune(strconv.Itoa(left))...)
+ m = append(m, 'D')
+ }
+
+ t.queue(m)
}
func (t *Terminal) clearLineToRight() {
@@ -333,7 +351,7 @@ func (t *Terminal) advanceCursor(places int) {
// So, if we are stopping at the end of a line, we
// need to write a newline so that our cursor can be
// advanced to the next line.
- t.outBuf = append(t.outBuf, '\n')
+ t.outBuf = append(t.outBuf, '\r', '\n')
}
}
@@ -593,6 +611,35 @@ func (t *Terminal) writeLine(line []rune) {
}
}
+// writeWithCRLF writes buf to w but replaces all occurrences of \n with \r\n.
+func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) {
+ for len(buf) > 0 {
+ i := bytes.IndexByte(buf, '\n')
+ todo := len(buf)
+ if i >= 0 {
+ todo = i
+ }
+
+ var nn int
+ nn, err = w.Write(buf[:todo])
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ buf = buf[todo:]
+
+ if i >= 0 {
+ if _, err = w.Write(crlf); err != nil {
+ return n, err
+ }
+ n++
+ buf = buf[1:]
+ }
+ }
+
+ return n, nil
+}
+
func (t *Terminal) Write(buf []byte) (n int, err error) {
t.lock.Lock()
defer t.lock.Unlock()
@@ -600,7 +647,7 @@ func (t *Terminal) Write(buf []byte) (n int, err error) {
if t.cursorX == 0 && t.cursorY == 0 {
// This is the easy case: there's nothing on the screen that we
// have to move out of the way.
- return t.c.Write(buf)
+ return writeWithCRLF(t.c, buf)
}
// We have a prompt and possibly user input on the screen. We
@@ -620,7 +667,7 @@ func (t *Terminal) Write(buf []byte) (n int, err error) {
}
t.outBuf = t.outBuf[:0]
- if n, err = t.c.Write(buf); err != nil {
+ if n, err = writeWithCRLF(t.c, buf); err != nil {
return
}
@@ -740,8 +787,6 @@ func (t *Terminal) readLine() (line string, err error) {
t.remainder = t.inBuf[:n+len(t.remainder)]
}
-
- panic("unreachable") // for Go 1.0.
}
// SetPrompt sets the prompt to be used when reading subsequent lines.
@@ -890,3 +935,32 @@ func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
}
return s.entries[index], true
}
+
+// readPasswordLine reads from reader until it finds \n or io.EOF.
+// The slice returned does not include the \n.
+// readPasswordLine also ignores any \r it finds.
+func readPasswordLine(reader io.Reader) ([]byte, error) {
+ var buf [1]byte
+ var ret []byte
+
+ for {
+ n, err := reader.Read(buf[:])
+ if n > 0 {
+ switch buf[0] {
+ case '\n':
+ return ret, nil
+ case '\r':
+ // remove \r from passwords on Windows
+ default:
+ ret = append(ret, buf[0])
+ }
+ continue
+ }
+ if err != nil {
+ if err == io.EOF && len(ret) > 0 {
+ return ret, nil
+ }
+ return ret, err
+ }
+ }
+}
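Note: the rewritten Terminal.move above batches each direction into a single CSI sequence, so moving five rows costs one escape sequence ("^[[5A") instead of five copies of "^[[A". A minimal standalone sketch of the same encoding, using a hypothetical csiMove helper that is not part of this diff:

    package main

    import (
        "fmt"
        "strconv"
    )

    // csiMove is a hypothetical helper mirroring the new Terminal.move logic:
    // a single unit is emitted as ESC [ <dir>, while n > 1 units collapse into
    // one ESC [ <n> <dir> sequence instead of n three-rune sequences.
    func csiMove(n int, dir rune) []rune {
        var m []rune
        if n == 1 {
            m = append(m, 0x1b, '[', dir)
        } else if n > 1 {
            m = append(m, 0x1b, '[')
            m = append(m, []rune(strconv.Itoa(n))...)
            m = append(m, dir)
        }
        return m
    }

    func main() {
        fmt.Printf("%q\n", string(csiMove(1, 'A'))) // "\x1b[A": one row up
        fmt.Printf("%q\n", string(csiMove(5, 'A'))) // "\x1b[5A": five rows up, one sequence
    }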
diff --git a/src/vendor/golang.org/x/crypto/ssh/terminal/util.go b/src/vendor/golang.org/x/crypto/ssh/terminal/util.go
index c869213ec..391104084 100644
--- a/src/vendor/golang.org/x/crypto/ssh/terminal/util.go
+++ b/src/vendor/golang.org/x/crypto/ssh/terminal/util.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd
+// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd
// Package terminal provides support functions for dealing with terminals, as
// commonly found on UNIX systems.
@@ -17,41 +17,41 @@
package terminal // import "golang.org/x/crypto/ssh/terminal"
import (
- "io"
- "syscall"
- "unsafe"
+ "golang.org/x/sys/unix"
)
// State contains the state of a terminal.
type State struct {
- termios syscall.Termios
+ termios unix.Termios
}
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
+ _, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+ return err == nil
}
// MakeRaw put the terminal connected to the given file descriptor into raw
// mode and returns the previous state of the terminal so that it can be
// restored.
func MakeRaw(fd int) (*State, error) {
- var oldState State
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
+ termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+ if err != nil {
return nil, err
}
- newState := oldState.termios
+ oldState := State{termios: *termios}
+
// This attempts to replicate the behaviour documented for cfmakeraw in
// the termios(3) manpage.
- newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
- newState.Oflag &^= syscall.OPOST
- newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
- newState.Cflag &^= syscall.CSIZE | syscall.PARENB
- newState.Cflag |= syscall.CS8
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
+ termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
+ termios.Oflag &^= unix.OPOST
+ termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
+ termios.Cflag &^= unix.CSIZE | unix.PARENB
+ termios.Cflag |= unix.CS8
+ termios.Cc[unix.VMIN] = 1
+ termios.Cc[unix.VTIME] = 0
+ if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil {
return nil, err
}
@@ -61,73 +61,54 @@ func MakeRaw(fd int) (*State, error) {
// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
- var oldState State
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
+ termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+ if err != nil {
return nil, err
}
- return &oldState, nil
+ return &State{termios: *termios}, nil
}
// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, state *State) error {
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)
- return err
+ return unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.termios)
}
// GetSize returns the dimensions of the given terminal.
func GetSize(fd int) (width, height int, err error) {
- var dimensions [4]uint16
-
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
+ ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
+ if err != nil {
return -1, -1, err
}
- return int(dimensions[1]), int(dimensions[0]), nil
+ return int(ws.Col), int(ws.Row), nil
+}
+
+// passwordReader is an io.Reader that reads from a specific file descriptor.
+type passwordReader int
+
+func (r passwordReader) Read(buf []byte) (int, error) {
+ return unix.Read(int(r), buf)
}
// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) {
- var oldState syscall.Termios
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 {
+ termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+ if err != nil {
return nil, err
}
- newState := oldState
- newState.Lflag &^= syscall.ECHO
- newState.Lflag |= syscall.ICANON | syscall.ISIG
- newState.Iflag |= syscall.ICRNL
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
+ newState := *termios
+ newState.Lflag &^= unix.ECHO
+ newState.Lflag |= unix.ICANON | unix.ISIG
+ newState.Iflag |= unix.ICRNL
+ if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newState); err != nil {
return nil, err
}
- defer func() {
- syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0)
- }()
+ defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios)
- var buf [16]byte
- var ret []byte
- for {
- n, err := syscall.Read(fd, buf[:])
- if err != nil {
- return nil, err
- }
- if n == 0 {
- if len(ret) == 0 {
- return nil, io.EOF
- }
- break
- }
- if buf[n-1] == '\n' {
- n--
- }
- ret = append(ret, buf[:n]...)
- if n < len(buf) {
- break
- }
- }
-
- return ret, nil
+ return readPasswordLine(passwordReader(fd))
}
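Note: although util.go now routes all termios traffic through golang.org/x/sys/unix instead of hand-rolled syscall.Syscall6 ioctls, the exported API is unchanged. A sketch of a typical caller, assuming stdin is a terminal:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/crypto/ssh/terminal"
    )

    func main() {
        fd := int(os.Stdin.Fd())
        if !terminal.IsTerminal(fd) {
            fmt.Fprintln(os.Stderr, "stdin is not a terminal")
            return
        }

        // Save the current termios state, switch to raw mode, restore on exit.
        oldState, err := terminal.MakeRaw(fd)
        if err != nil {
            panic(err)
        }
        defer terminal.Restore(fd, oldState)

        w, h, _ := terminal.GetSize(fd)
        // \r\n because OPOST is cleared in raw mode.
        fmt.Printf("terminal is %dx%d\r\n", w, h)
    }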
diff --git a/src/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go b/src/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go
new file mode 100644
index 000000000..dfcd62785
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go
@@ -0,0 +1,12 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix
+
+package terminal
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+const ioctlWriteTermios = unix.TCSETS
diff --git a/src/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/src/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
index 9c1ffd145..cb23a5904 100644
--- a/src/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
+++ b/src/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
@@ -6,7 +6,7 @@
package terminal
-import "syscall"
+import "golang.org/x/sys/unix"
-const ioctlReadTermios = syscall.TIOCGETA
-const ioctlWriteTermios = syscall.TIOCSETA
+const ioctlReadTermios = unix.TIOCGETA
+const ioctlWriteTermios = unix.TIOCSETA
diff --git a/src/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/src/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
index 5883b22d7..5fadfe8a1 100644
--- a/src/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
+++ b/src/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
@@ -4,8 +4,7 @@
package terminal
-// These constants are declared here, rather than importing
-// them from the syscall package as some syscall packages, even
-// on linux, for example gccgo, do not declare them.
-const ioctlReadTermios = 0x5401 // syscall.TCGETS
-const ioctlWriteTermios = 0x5402 // syscall.TCSETS
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+const ioctlWriteTermios = unix.TCSETS
diff --git a/src/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/src/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
index 799f049f0..9317ac7ed 100644
--- a/src/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
+++ b/src/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
@@ -21,7 +21,7 @@ import (
type State struct{}
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
return false
}
diff --git a/src/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/src/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
new file mode 100644
index 000000000..3d5f06a9f
--- /dev/null
+++ b/src/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
@@ -0,0 +1,124 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package terminal // import "golang.org/x/crypto/ssh/terminal"
+
+import (
+ "golang.org/x/sys/unix"
+ "io"
+ "syscall"
+)
+
+// State contains the state of a terminal.
+type State struct {
+ termios unix.Termios
+}
+
+// IsTerminal returns whether the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+ _, err := unix.IoctlGetTermio(fd, unix.TCGETA)
+ return err == nil
+}
+
+// ReadPassword reads a line of input from a terminal without local echo. This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+ // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c
+ val, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+ if err != nil {
+ return nil, err
+ }
+ oldState := *val
+
+ newState := oldState
+ newState.Lflag &^= syscall.ECHO
+ newState.Lflag |= syscall.ICANON | syscall.ISIG
+ newState.Iflag |= syscall.ICRNL
+ err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState)
+ if err != nil {
+ return nil, err
+ }
+
+ defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState)
+
+ var buf [16]byte
+ var ret []byte
+ for {
+ n, err := syscall.Read(fd, buf[:])
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ if len(ret) == 0 {
+ return nil, io.EOF
+ }
+ break
+ }
+ if buf[n-1] == '\n' {
+ n--
+ }
+ ret = append(ret, buf[:n]...)
+ if n < len(buf) {
+ break
+ }
+ }
+
+ return ret, nil
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+// see http://cr.illumos.org/~webrev/andy_js/1060/
+func MakeRaw(fd int) (*State, error) {
+ termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+ if err != nil {
+ return nil, err
+ }
+
+ oldState := State{termios: *termios}
+
+ termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
+ termios.Oflag &^= unix.OPOST
+ termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
+ termios.Cflag &^= unix.CSIZE | unix.PARENB
+ termios.Cflag |= unix.CS8
+ termios.Cc[unix.VMIN] = 1
+ termios.Cc[unix.VTIME] = 0
+
+ if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil {
+ return nil, err
+ }
+
+ return &oldState, nil
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func Restore(fd int, oldState *State) error {
+ return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios)
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+ termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+ if err != nil {
+ return nil, err
+ }
+
+ return &State{termios: *termios}, nil
+}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (width, height int, err error) {
+ ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
+ if err != nil {
+ return 0, 0, err
+ }
+ return int(ws.Col), int(ws.Row), nil
+}
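Note: the new Solaris port keeps its own byte-at-a-time read loop rather than the shared readPasswordLine helper, but callers see the same behaviour on every platform. A usage sketch:

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/crypto/ssh/terminal"
    )

    func main() {
        fmt.Print("Password: ")
        // Echo is disabled while reading; the returned slice excludes the \n.
        pw, err := terminal.ReadPassword(int(os.Stdin.Fd()))
        fmt.Println() // step past the unechoed newline
        if err != nil {
            panic(err)
        }
        fmt.Printf("read %d bytes\n", len(pw))
    }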
diff --git a/src/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/src/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
index ae9fa9ec1..5cfdf8f3f 100644
--- a/src/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
+++ b/src/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
@@ -17,65 +17,20 @@
package terminal
import (
- "io"
- "syscall"
- "unsafe"
-)
+ "os"
-const (
- enableLineInput = 2
- enableEchoInput = 4
- enableProcessedInput = 1
- enableWindowInput = 8
- enableMouseInput = 16
- enableInsertMode = 32
- enableQuickEditMode = 64
- enableExtendedFlags = 128
- enableAutoPosition = 256
- enableProcessedOutput = 1
- enableWrapAtEolOutput = 2
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
- procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
- procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
- procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
-)
-
-type (
- short int16
- word uint16
-
- coord struct {
- x short
- y short
- }
- smallRect struct {
- left short
- top short
- right short
- bottom short
- }
- consoleScreenBufferInfo struct {
- size coord
- cursorPosition coord
- attributes word
- window smallRect
- maximumWindowSize coord
- }
+ "golang.org/x/sys/windows"
)
type State struct {
mode uint32
}
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns whether the given file descriptor is a terminal.
func IsTerminal(fd int) bool {
var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
+ err := windows.GetConsoleMode(windows.Handle(fd), &st)
+ return err == nil
}
// MakeRaw put the terminal connected to the given file descriptor into raw
@@ -83,14 +38,12 @@ func IsTerminal(fd int) bool {
// restored.
func MakeRaw(fd int) (*State, error) {
var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
+ if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
+ return nil, err
}
- raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)
- _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0)
- if e != 0 {
- return nil, error(e)
+ raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+ if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil {
+ return nil, err
}
return &State{st}, nil
}
@@ -99,9 +52,8 @@ func MakeRaw(fd int) (*State, error) {
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
+ if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
+ return nil, err
}
return &State{st}, nil
}
@@ -109,18 +61,18 @@ func GetState(fd int) (*State, error) {
// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, state *State) error {
- _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)
- return err
+ return windows.SetConsoleMode(windows.Handle(fd), state.mode)
}
-// GetSize returns the dimensions of the given terminal.
+// GetSize returns the visible dimensions of the given terminal.
+//
+// These dimensions don't include any scrollback buffer height.
func GetSize(fd int) (width, height int, err error) {
- var info consoleScreenBufferInfo
- _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)
- if e != 0 {
- return 0, 0, error(e)
+ var info windows.ConsoleScreenBufferInfo
+ if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil {
+ return 0, 0, err
}
- return int(info.size.x), int(info.size.y), nil
+ return int(info.Window.Right - info.Window.Left + 1), int(info.Window.Bottom - info.Window.Top + 1), nil
}
// ReadPassword reads a line of input from a terminal without local echo. This
@@ -128,47 +80,26 @@ func GetSize(fd int) (width, height int, err error) {
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) {
var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
+ if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil {
+ return nil, err
}
old := st
- st &^= (enableEchoInput)
- st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
- _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
- if e != 0 {
- return nil, error(e)
+ st &^= (windows.ENABLE_ECHO_INPUT)
+ st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT)
+ if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil {
+ return nil, err
}
- defer func() {
- syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
- }()
+ defer windows.SetConsoleMode(windows.Handle(fd), old)
- var buf [16]byte
- var ret []byte
- for {
- n, err := syscall.Read(syscall.Handle(fd), buf[:])
- if err != nil {
- return nil, err
- }
- if n == 0 {
- if len(ret) == 0 {
- return nil, io.EOF
- }
- break
- }
- if buf[n-1] == '\n' {
- n--
- }
- if n > 0 && buf[n-1] == '\r' {
- n--
- }
- ret = append(ret, buf[:n]...)
- if n < len(buf) {
- break
- }
+ var h windows.Handle
+ p, _ := windows.GetCurrentProcess()
+ if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, windows.DUPLICATE_SAME_ACCESS); err != nil {
+ return nil, err
}
- return ret, nil
+ f := os.NewFile(uintptr(h), "stdin")
+ defer f.Close()
+ return readPasswordLine(f)
}
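Note: on Windows, ReadPassword now duplicates the console handle and wraps it in an *os.File so the shared readPasswordLine helper can drive it; duplicating first means the deferred Close releases only the copy, not the caller's descriptor. The helper itself is plain io.Reader code, as this standalone sketch (mirroring the version added to terminal.go) shows:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // readPasswordLine mirrors the helper added to terminal.go: read one byte
    // at a time until \n or EOF, silently dropping any \r (Windows consoles
    // emit \r\n line endings).
    func readPasswordLine(reader io.Reader) ([]byte, error) {
        var buf [1]byte
        var ret []byte
        for {
            n, err := reader.Read(buf[:])
            if n > 0 {
                switch buf[0] {
                case '\n':
                    return ret, nil
                case '\r':
                    // ignored
                default:
                    ret = append(ret, buf[0])
                }
                continue
            }
            if err != nil {
                if err == io.EOF && len(ret) > 0 {
                    return ret, nil
                }
                return ret, err
            }
        }
    }

    func main() {
        pw, _ := readPasswordLine(strings.NewReader("s3cret\r\n"))
        fmt.Printf("%q\n", pw) // "s3cret": \r stripped, \n consumed
    }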
diff --git a/src/vendor/golang.org/x/net/context/context.go b/src/vendor/golang.org/x/net/context/context.go
index 134654cf7..a3c021d3f 100644
--- a/src/vendor/golang.org/x/net/context/context.go
+++ b/src/vendor/golang.org/x/net/context/context.go
@@ -5,9 +5,11 @@
// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
+// As of Go 1.7 this package is available in the standard library under the
+// name context. https://golang.org/pkg/context.
//
// Incoming requests to a server should create a Context, and outgoing calls to
-// servers should accept a Context. The chain of function calls between must
+// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
@@ -16,14 +18,14 @@
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
-// explicitly to each function that needs it. The Context should be the first
+// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
-// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
@@ -36,112 +38,15 @@
// Contexts.
package context // import "golang.org/x/net/context"
-import "time"
-
-// A Context carries a deadline, a cancelation signal, and other values across
-// API boundaries.
-//
-// Context's methods may be called by multiple goroutines simultaneously.
-type Context interface {
- // Deadline returns the time when work done on behalf of this context
- // should be canceled. Deadline returns ok==false when no deadline is
- // set. Successive calls to Deadline return the same results.
- Deadline() (deadline time.Time, ok bool)
-
- // Done returns a channel that's closed when work done on behalf of this
- // context should be canceled. Done may return nil if this context can
- // never be canceled. Successive calls to Done return the same value.
- //
- // WithCancel arranges for Done to be closed when cancel is called;
- // WithDeadline arranges for Done to be closed when the deadline
- // expires; WithTimeout arranges for Done to be closed when the timeout
- // elapses.
- //
- // Done is provided for use in select statements:
- //
- // // Stream generates values with DoSomething and sends them to out
- // // until DoSomething returns an error or ctx.Done is closed.
- // func Stream(ctx context.Context, out chan<- Value) error {
- // for {
- // v, err := DoSomething(ctx)
- // if err != nil {
- // return err
- // }
- // select {
- // case <-ctx.Done():
- // return ctx.Err()
- // case out <- v:
- // }
- // }
- // }
- //
- // See http://blog.golang.org/pipelines for more examples of how to use
- // a Done channel for cancelation.
- Done() <-chan struct{}
-
- // Err returns a non-nil error value after Done is closed. Err returns
- // Canceled if the context was canceled or DeadlineExceeded if the
- // context's deadline passed. No other values for Err are defined.
- // After Done is closed, successive calls to Err return the same value.
- Err() error
-
- // Value returns the value associated with this context for key, or nil
- // if no value is associated with key. Successive calls to Value with
- // the same key returns the same result.
- //
- // Use context values only for request-scoped data that transits
- // processes and API boundaries, not for passing optional parameters to
- // functions.
- //
- // A key identifies a specific value in a Context. Functions that wish
- // to store values in Context typically allocate a key in a global
- // variable then use that key as the argument to context.WithValue and
- // Context.Value. A key can be any type that supports equality;
- // packages should define keys as an unexported type to avoid
- // collisions.
- //
- // Packages that define a Context key should provide type-safe accessors
- // for the values stores using that key:
- //
- // // Package user defines a User type that's stored in Contexts.
- // package user
- //
- // import "golang.org/x/net/context"
- //
- // // User is the type of value stored in the Contexts.
- // type User struct {...}
- //
- // // key is an unexported type for keys defined in this package.
- // // This prevents collisions with keys defined in other packages.
- // type key int
- //
- // // userKey is the key for user.User values in Contexts. It is
- // // unexported; clients use user.NewContext and user.FromContext
- // // instead of using this key directly.
- // var userKey key = 0
- //
- // // NewContext returns a new Context that carries value u.
- // func NewContext(ctx context.Context, u *User) context.Context {
- // return context.WithValue(ctx, userKey, u)
- // }
- //
- // // FromContext returns the User value stored in ctx, if any.
- // func FromContext(ctx context.Context) (*User, bool) {
- // u, ok := ctx.Value(userKey).(*User)
- // return u, ok
- // }
- Value(key interface{}) interface{}
-}
-
// Background returns a non-nil, empty Context. It is never canceled, has no
-// values, and has no deadline. It is typically used by the main function,
+// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
return background
}
-// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
@@ -149,8 +54,3 @@ func Background() Context {
func TODO() Context {
return todo
}
-
-// A CancelFunc tells an operation to abandon its work.
-// A CancelFunc does not wait for the work to stop.
-// After the first call, subsequent calls to a CancelFunc do nothing.
-type CancelFunc func()
diff --git a/src/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/src/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
index 606cf1f97..37dc0cfdb 100644
--- a/src/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
+++ b/src/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -2,18 +2,15 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build go1.7
-
// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
package ctxhttp // import "golang.org/x/net/context/ctxhttp"
import (
+ "context"
"io"
"net/http"
"net/url"
"strings"
-
- "golang.org/x/net/context"
)
// Do sends an HTTP request with the provided http.Client and returns
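Note: with the go1.7 build tag dropped and the import switched to the standard library's context package, the ctxhttp helpers compose directly with context.WithTimeout and friends. A usage sketch:

    package main

    import (
        "context"
        "fmt"
        "net/http"
        "time"

        "golang.org/x/net/context/ctxhttp"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
        defer cancel()

        // A nil client would fall back to http.DefaultClient inside Do.
        resp, err := ctxhttp.Get(ctx, http.DefaultClient, "https://example.com/")
        if err != nil {
            fmt.Println("request failed:", err) // reflects ctx.Err() on timeout
            return
        }
        defer resp.Body.Close()
        fmt.Println("status:", resp.Status)
    }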
diff --git a/src/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/src/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
deleted file mode 100644
index 926870cc2..000000000
--- a/src/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.7
-
-package ctxhttp // import "golang.org/x/net/context/ctxhttp"
-
-import (
- "io"
- "net/http"
- "net/url"
- "strings"
-
- "golang.org/x/net/context"
-)
-
-func nop() {}
-
-var (
- testHookContextDoneBeforeHeaders = nop
- testHookDoReturned = nop
- testHookDidBodyClose = nop
-)
-
-// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
-// If the client is nil, http.DefaultClient is used.
-// If the context is canceled or times out, ctx.Err() will be returned.
-func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
- if client == nil {
- client = http.DefaultClient
- }
-
- // TODO(djd): Respect any existing value of req.Cancel.
- cancel := make(chan struct{})
- req.Cancel = cancel
-
- type responseAndError struct {
- resp *http.Response
- err error
- }
- result := make(chan responseAndError, 1)
-
- // Make local copies of test hooks closed over by goroutines below.
- // Prevents data races in tests.
- testHookDoReturned := testHookDoReturned
- testHookDidBodyClose := testHookDidBodyClose
-
- go func() {
- resp, err := client.Do(req)
- testHookDoReturned()
- result <- responseAndError{resp, err}
- }()
-
- var resp *http.Response
-
- select {
- case <-ctx.Done():
- testHookContextDoneBeforeHeaders()
- close(cancel)
- // Clean up after the goroutine calling client.Do:
- go func() {
- if r := <-result; r.resp != nil {
- testHookDidBodyClose()
- r.resp.Body.Close()
- }
- }()
- return nil, ctx.Err()
- case r := <-result:
- var err error
- resp, err = r.resp, r.err
- if err != nil {
- return resp, err
- }
- }
-
- c := make(chan struct{})
- go func() {
- select {
- case <-ctx.Done():
- close(cancel)
- case <-c:
- // The response's Body is closed.
- }
- }()
- resp.Body = &notifyingReader{resp.Body, c}
-
- return resp, nil
-}
-
-// Get issues a GET request via the Do function.
-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
- req, err := http.NewRequest("GET", url, nil)
- if err != nil {
- return nil, err
- }
- return Do(ctx, client, req)
-}
-
-// Head issues a HEAD request via the Do function.
-func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
- req, err := http.NewRequest("HEAD", url, nil)
- if err != nil {
- return nil, err
- }
- return Do(ctx, client, req)
-}
-
-// Post issues a POST request via the Do function.
-func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
- req, err := http.NewRequest("POST", url, body)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", bodyType)
- return Do(ctx, client, req)
-}
-
-// PostForm issues a POST request via the Do function.
-func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
- return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
-}
-
-// notifyingReader is an io.ReadCloser that closes the notify channel after
-// Close is called or a Read fails on the underlying ReadCloser.
-type notifyingReader struct {
- io.ReadCloser
- notify chan<- struct{}
-}
-
-func (r *notifyingReader) Read(p []byte) (int, error) {
- n, err := r.ReadCloser.Read(p)
- if err != nil && r.notify != nil {
- close(r.notify)
- r.notify = nil
- }
- return n, err
-}
-
-func (r *notifyingReader) Close() error {
- err := r.ReadCloser.Close()
- if r.notify != nil {
- close(r.notify)
- r.notify = nil
- }
- return err
-}
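Note: the deleted pre-1.7 implementation emulated cancellation with req.Cancel plus a monitoring goroutine. On Go 1.7+, net/http honours the request's context natively, so the machinery above reduces to roughly this sketch:

    package sketch

    import (
        "context"
        "net/http"
    )

    // do shows roughly what the removed goroutine machinery becomes on Go 1.7+:
    // attach the context to the request and let the transport handle
    // cancellation and deadline propagation.
    func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
        if client == nil {
            client = http.DefaultClient
        }
        return client.Do(req.WithContext(ctx))
    }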
diff --git a/src/vendor/golang.org/x/net/context/go17.go b/src/vendor/golang.org/x/net/context/go17.go
index f8cda19ad..d20f52b7d 100644
--- a/src/vendor/golang.org/x/net/context/go17.go
+++ b/src/vendor/golang.org/x/net/context/go17.go
@@ -35,8 +35,8 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
-// to be no later than d. If the parent's deadline is already earlier than d,
-// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
diff --git a/src/vendor/golang.org/x/net/context/go19.go b/src/vendor/golang.org/x/net/context/go19.go
new file mode 100644
index 000000000..d88bd1db1
--- /dev/null
+++ b/src/vendor/golang.org/x/net/context/go19.go
@@ -0,0 +1,20 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.9
+
+package context
+
+import "context" // standard library's context, as of Go 1.7
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context = context.Context
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc = context.CancelFunc
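Note: because go19.go declares Context and CancelFunc as type aliases rather than new named types, values from golang.org/x/net/context and the standard library are interchangeable on Go 1.9+ with no conversion. A sketch:

    package main

    import (
        stdctx "context"
        "fmt"

        netctx "golang.org/x/net/context"
    )

    // takesStd accepts the standard library type; thanks to the alias, a
    // golang.org/x/net/context value satisfies it with no conversion.
    func takesStd(ctx stdctx.Context) {
        fmt.Println(ctx.Err())
    }

    func main() {
        ctx, cancel := netctx.WithCancel(netctx.Background())
        cancel()
        takesStd(ctx) // prints "context canceled"
    }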
diff --git a/src/vendor/golang.org/x/net/context/pre_go17.go b/src/vendor/golang.org/x/net/context/pre_go17.go
index 5a30acabd..0f35592df 100644
--- a/src/vendor/golang.org/x/net/context/pre_go17.go
+++ b/src/vendor/golang.org/x/net/context/pre_go17.go
@@ -13,7 +13,7 @@ import (
"time"
)
-// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int
@@ -104,7 +104,7 @@ func propagateCancel(parent Context, child canceler) {
}
// parentCancelCtx follows a chain of parent references until it finds a
-// *cancelCtx. This function understands how each of the concrete types in this
+// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
for {
@@ -134,14 +134,14 @@ func removeChild(parent Context, child canceler) {
p.mu.Unlock()
}
-// A canceler is a context type that can be canceled directly. The
+// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
cancel(removeFromParent bool, err error)
Done() <-chan struct{}
}
-// A cancelCtx can be canceled. When canceled, it also cancels any children
+// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
Context
@@ -193,8 +193,8 @@ func (c *cancelCtx) cancel(removeFromParent bool, err error) {
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
-// to be no later than d. If the parent's deadline is already earlier than d,
-// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
@@ -226,8 +226,8 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
return c, func() { c.cancel(true, Canceled) }
}
-// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
-// implement Done and Err. It implements cancel by stopping its timer then
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
*cancelCtx
@@ -281,7 +281,7 @@ func WithValue(parent Context, key interface{}, val interface{}) Context {
return &valueCtx{parent, key, val}
}
-// A valueCtx carries a key-value pair. It implements Value for that key and
+// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
Context
diff --git a/src/vendor/golang.org/x/net/context/pre_go19.go b/src/vendor/golang.org/x/net/context/pre_go19.go
new file mode 100644
index 000000000..b105f80be
--- /dev/null
+++ b/src/vendor/golang.org/x/net/context/pre_go19.go
@@ -0,0 +1,109 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.9
+
+package context
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ //
+ // WithCancel arranges for Done to be closed when cancel is called;
+ // WithDeadline arranges for Done to be closed when the deadline
+ // expires; WithTimeout arranges for Done to be closed when the timeout
+ // elapses.
+ //
+ // Done is provided for use in select statements:
+ //
+ // // Stream generates values with DoSomething and sends them to out
+ // // until DoSomething returns an error or ctx.Done is closed.
+ // func Stream(ctx context.Context, out chan<- Value) error {
+ // for {
+ // v, err := DoSomething(ctx)
+ // if err != nil {
+ // return err
+ // }
+ // select {
+ // case <-ctx.Done():
+ // return ctx.Err()
+ // case out <- v:
+ // }
+ // }
+ // }
+ //
+ // See http://blog.golang.org/pipelines for more examples of how to use
+ // a Done channel for cancelation.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+ // for the values stores using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "golang.org/x/net/context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key = 0
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key interface{}) interface{}
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
diff --git a/src/vendor/golang.org/x/net/http/httpguts/guts.go b/src/vendor/golang.org/x/net/http/httpguts/guts.go
new file mode 100644
index 000000000..e6cd0ced3
--- /dev/null
+++ b/src/vendor/golang.org/x/net/http/httpguts/guts.go
@@ -0,0 +1,50 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httpguts provides functions implementing various details
+// of the HTTP specification.
+//
+// This package is shared by the standard library (which vendors it)
+// and x/net/http2. It comes with no API stability promise.
+package httpguts
+
+import (
+ "net/textproto"
+ "strings"
+)
+
+// ValidTrailerHeader reports whether name is a valid header field name to appear
+// in trailers.
+// See RFC 7230, Section 4.1.2
+func ValidTrailerHeader(name string) bool {
+ name = textproto.CanonicalMIMEHeaderKey(name)
+ if strings.HasPrefix(name, "If-") || badTrailer[name] {
+ return false
+ }
+ return true
+}
+
+var badTrailer = map[string]bool{
+ "Authorization": true,
+ "Cache-Control": true,
+ "Connection": true,
+ "Content-Encoding": true,
+ "Content-Length": true,
+ "Content-Range": true,
+ "Content-Type": true,
+ "Expect": true,
+ "Host": true,
+ "Keep-Alive": true,
+ "Max-Forwards": true,
+ "Pragma": true,
+ "Proxy-Authenticate": true,
+ "Proxy-Authorization": true,
+ "Proxy-Connection": true,
+ "Range": true,
+ "Realm": true,
+ "Te": true,
+ "Trailer": true,
+ "Transfer-Encoding": true,
+ "Www-Authenticate": true,
+}
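Note: ValidTrailerHeader canonicalises the name before the lookup, so the check is case-insensitive, and the If- prefix test rejects all conditional headers. A usage sketch:

    package main

    import (
        "fmt"

        "golang.org/x/net/http/httpguts"
    )

    func main() {
        fmt.Println(httpguts.ValidTrailerHeader("X-Checksum"))     // true
        fmt.Println(httpguts.ValidTrailerHeader("content-length")) // false: canonicalised, then found in badTrailer
        fmt.Println(httpguts.ValidTrailerHeader("If-None-Match"))  // false: If- prefix
    }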
diff --git a/src/vendor/golang.org/x/net/lex/httplex/httplex.go b/src/vendor/golang.org/x/net/http/httpguts/httplex.go
similarity index 91%
rename from src/vendor/golang.org/x/net/lex/httplex/httplex.go
rename to src/vendor/golang.org/x/net/http/httpguts/httplex.go
index bd0ec24f4..e7de24ee6 100644
--- a/src/vendor/golang.org/x/net/lex/httplex/httplex.go
+++ b/src/vendor/golang.org/x/net/http/httpguts/httplex.go
@@ -2,16 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package httplex contains rules around lexical matters of various
-// HTTP-related specifications.
-//
-// This package is shared by the standard library (which vendors it)
-// and x/net/http2. It comes with no API stability promise.
-package httplex
+package httpguts
import (
+ "net"
"strings"
"unicode/utf8"
+
+ "golang.org/x/net/idna"
)
var isTokenTable = [127]bool{
@@ -310,3 +308,39 @@ func ValidHeaderFieldValue(v string) bool {
}
return true
}
+
+func isASCII(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
+
+// PunycodeHostPort returns the IDNA Punycode version
+// of the provided "host" or "host:port" string.
+func PunycodeHostPort(v string) (string, error) {
+ if isASCII(v) {
+ return v, nil
+ }
+
+ host, port, err := net.SplitHostPort(v)
+ if err != nil {
+ // The input 'v' argument was just a "host" argument,
+ // without a port. This error should not be returned
+ // to the caller.
+ host = v
+ port = ""
+ }
+ host, err = idna.ToASCII(host)
+ if err != nil {
+ // Non-UTF-8? Not representable in Punycode, in any
+ // case.
+ return "", err
+ }
+ if port == "" {
+ return host, nil
+ }
+ return net.JoinHostPort(host, port), nil
+}
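Note: PunycodeHostPort returns ASCII input verbatim (the fast path) and only splits off the port and runs idna.ToASCII when conversion is needed. A usage sketch; the expected outputs shown are the standard IDNA encodings:

    package main

    import (
        "fmt"

        "golang.org/x/net/http/httpguts"
    )

    func main() {
        // ASCII input takes the fast path and is returned verbatim.
        h, _ := httpguts.PunycodeHostPort("example.com:8080")
        fmt.Println(h) // example.com:8080

        // A non-ASCII host is converted to its Punycode form; the port survives.
        h, _ = httpguts.PunycodeHostPort("bücher.example:443")
        fmt.Println(h) // xn--bcher-kva.example:443
    }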
diff --git a/src/vendor/golang.org/x/net/http2/ciphers.go b/src/vendor/golang.org/x/net/http2/ciphers.go
new file mode 100644
index 000000000..c9a0cf3b4
--- /dev/null
+++ b/src/vendor/golang.org/x/net/http2/ciphers.go
@@ -0,0 +1,641 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+// A list of the possible cipher suite ids. Taken from
+// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
+
+const (
+ cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
+ cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
+ cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
+ cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
+ cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
+ cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
+ cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
+ cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
+ cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
+ cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
+ cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
+ cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
+ cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
+ cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
+ cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
+ cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
+ cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
+ cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
+ cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
+ cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
+ cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
+ cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
+ cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
+ cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
+ cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
+ cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
+ cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
+ cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
+ // Reserved uint16 = 0x001C-1D
+ cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
+ cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
+ cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
+ cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
+ cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
+ cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
+ cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
+ cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
+ // Reserved uint16 = 0x0047-4F
+ // Reserved uint16 = 0x0050-58
+ // Reserved uint16 = 0x0059-5C
+ // Unassigned uint16 = 0x005D-5F
+ // Reserved uint16 = 0x0060-66
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
+ // Unassigned uint16 = 0x006E-83
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
+ cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
+ cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
+ cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
+ cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
+ cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
+ cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
+ cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
+ cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
+ cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
+ cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
+ cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
+ cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
+ cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
+ cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
+ cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
+ cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
+ cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
+ cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
+ cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
+ cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
+ cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
+ cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
+ cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
+ cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
+ cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
+ cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
+ cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
+ cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
+ cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
+ cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
+ cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
+ cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
+ // Unassigned uint16 = 0x00C6-FE
+ cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
+ // Unassigned uint16 = 0x01-55,*
+ cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
+ // Unassigned uint16 = 0x5601 - 0xC000
+ cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
+ cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
+ cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
+ cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
+ cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
+ cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
+ cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
+ cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
+ cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
+ cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
+ cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
+ cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
+ cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
+ cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
+ cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
+ cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
+ cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
+ cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
+ cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
+ cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
+ cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
+ cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
+ cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
+ cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
+ cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
+ cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
+ cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
+ cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
+ cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
+ cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
+ cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
+ cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
+ cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
+ cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
+ cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
+ cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
+ cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
+ cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
+ cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
+ cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
+ cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
+ cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
+ cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
+ cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
+ cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
+ cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
+ cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
+ cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
+ cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
+ cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
+ cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
+ cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
+ cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
+ cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
+ cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
+ cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
+ cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
+ cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
+ cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
+ cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
+ cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
+ cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
+ cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
+ cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
+ cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
+ cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
+ cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
+ cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
+ cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
+ cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
+ cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
+ cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
+ cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
+ cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
+ cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
+ cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
+ cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
+ cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
+ cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
+ cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
+ // Unassigned uint16 = 0xC0B0-FF
+ // Unassigned uint16 = 0xC1-CB,*
+ // Unassigned uint16 = 0xCC00-A7
+ cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
+ cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
+ cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
+ cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
+ cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
+ cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
+ cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
+)
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+// References:
+// https://tools.ietf.org/html/rfc7540#appendix-A
+// Reject cipher suites from Appendix A.
+// "This list includes those cipher suites that do not
+// offer an ephemeral key exchange and those that are
+// based on the TLS null, stream or block cipher type"
+func isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case cipher_TLS_NULL_WITH_NULL_NULL,
+ cipher_TLS_RSA_WITH_NULL_MD5,
+ cipher_TLS_RSA_WITH_NULL_SHA,
+ cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
+ cipher_TLS_RSA_WITH_RC4_128_MD5,
+ cipher_TLS_RSA_WITH_RC4_128_SHA,
+ cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
+ cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
+ cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_RSA_WITH_DES_CBC_SHA,
+ cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
+ cipher_TLS_DH_anon_WITH_RC4_128_MD5,
+ cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_KRB5_WITH_DES_CBC_SHA,
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_KRB5_WITH_RC4_128_SHA,
+ cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
+ cipher_TLS_KRB5_WITH_DES_CBC_MD5,
+ cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
+ cipher_TLS_KRB5_WITH_RC4_128_MD5,
+ cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
+ cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
+ cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
+ cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
+ cipher_TLS_PSK_WITH_NULL_SHA,
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA,
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA,
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
+ cipher_TLS_RSA_WITH_NULL_SHA256,
+ cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
+ cipher_TLS_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_RSA_WITH_SEED_CBC_SHA,
+ cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
+ cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
+ cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
+ cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
+ cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
+ cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_PSK_WITH_NULL_SHA256,
+ cipher_TLS_PSK_WITH_NULL_SHA384,
+ cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
+ cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
+ cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
+ cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
+ cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
+ cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
+ cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
+ cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDH_anon_WITH_NULL_SHA,
+ cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
+ cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
+ cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
+ cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
+ cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
+ cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
+ cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
+ cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
+ cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
+ cipher_TLS_RSA_WITH_AES_128_CCM,
+ cipher_TLS_RSA_WITH_AES_256_CCM,
+ cipher_TLS_RSA_WITH_AES_128_CCM_8,
+ cipher_TLS_RSA_WITH_AES_256_CCM_8,
+ cipher_TLS_PSK_WITH_AES_128_CCM,
+ cipher_TLS_PSK_WITH_AES_256_CCM,
+ cipher_TLS_PSK_WITH_AES_128_CCM_8,
+ cipher_TLS_PSK_WITH_AES_256_CCM_8:
+ return true
+ default:
+ return false
+ }
+}
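
The table above implements RFC 7540 Appendix A: any suite that lacks an ephemeral key exchange or is built on the TLS null, stream, or block (CBC) cipher types is rejected for HTTP/2. A minimal sketch of a TLS config that stays off this blacklist, using only standard crypto/tls constants (the particular suites chosen here are illustrative, not a mandated list):

    package main

    import (
        "crypto/tls"
        "fmt"
    )

    func main() {
        // ECDHE (ephemeral) key exchange plus AEAD ciphers: none of these
        // trip isBadCipher, so an HTTP/2 peer should accept the handshake.
        cfg := &tls.Config{
            MinVersion: tls.VersionTLS12,
            CipherSuites: []uint16{
                tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
                tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
                tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
                tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
            },
            NextProtos: []string{"h2", "http/1.1"},
        }
        fmt.Printf("%d suites configured\n", len(cfg.CipherSuites))
    }
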
diff --git a/src/vendor/golang.org/x/net/http2/client_conn_pool.go b/src/vendor/golang.org/x/net/http2/client_conn_pool.go
index b13941258..f4d9b5ece 100644
--- a/src/vendor/golang.org/x/net/http2/client_conn_pool.go
+++ b/src/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -52,9 +52,31 @@ const (
noDialOnMiss = false
)
+// shouldTraceGetConn reports whether getClientConn should call any
+// ClientTrace.GetConn hook associated with the http.Request.
+//
+// This complexity is needed to avoid double calls of the GetConn hook
+// during the back-and-forth between net/http and x/net/http2 (when the
+// net/http.Transport is upgraded to also speak http2), as well as support
+// the case where x/net/http2 is being used directly.
+func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
+ // If our Transport wasn't made via ConfigureTransport, always
+ // trace the GetConn hook if provided, because that means the
+ // http2 package is being used directly and it's the one
+ // dialing, as opposed to net/http.
+ if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
+ return true
+ }
+ // Otherwise, only use the GetConn hook if this connection has
+ // been used previously for other requests. For fresh
+ // connections, the net/http package does the dialing.
+ return !st.freshConn
+}
+
func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
if isConnectionCloseRequest(req) && dialOnMiss {
// It gets its own connection.
+ traceGetConn(req, addr)
const singleUse = true
cc, err := p.t.dialClientConn(addr, singleUse)
if err != nil {
@@ -64,7 +86,10 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
}
p.mu.Lock()
for _, cc := range p.conns[addr] {
- if cc.CanTakeNewRequest() {
+ if st := cc.idleState(); st.canTakeNewRequest {
+ if p.shouldTraceGetConn(st) {
+ traceGetConn(req, addr)
+ }
p.mu.Unlock()
return cc, nil
}
@@ -73,6 +98,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
p.mu.Unlock()
return nil, ErrNoCachedConn
}
+ traceGetConn(req, addr)
call := p.getStartDialLocked(addr)
p.mu.Unlock()
<-call.done
@@ -247,7 +273,7 @@ func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
}
// noDialClientConnPool is an implementation of http2.ClientConnPool
-// which never dials. We let the HTTP/1.1 client dial and use its TLS
+// which never dials. We let the HTTP/1.1 client dial and use its TLS
// connection instead.
type noDialClientConnPool struct{ *clientConnPool }
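
shouldTraceGetConn exists so that httptrace's GetConn hook fires exactly once per connection attempt, whether http2 is reached through a net/http upgrade or used directly. A hedged sketch of observing the hook from caller code, using only the standard net/http/httptrace API (the URL is a placeholder):

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptrace"
    )

    func main() {
        trace := &httptrace.ClientTrace{
            // traceGetConn in the pool code above ends up invoking this.
            GetConn: func(hostPort string) { fmt.Println("GetConn:", hostPort) },
            GotConn: func(info httptrace.GotConnInfo) {
                fmt.Println("GotConn, reused:", info.Reused)
            },
        }
        req, err := http.NewRequest("GET", "https://example.com/", nil)
        if err != nil {
            panic(err)
        }
        req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            fmt.Println("request failed:", err)
            return
        }
        resp.Body.Close()
    }
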
diff --git a/src/vendor/golang.org/x/net/http2/configure_transport.go b/src/vendor/golang.org/x/net/http2/configure_transport.go
deleted file mode 100644
index 4f720f530..000000000
--- a/src/vendor/golang.org/x/net/http2/configure_transport.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.6
-
-package http2
-
-import (
- "crypto/tls"
- "fmt"
- "net/http"
-)
-
-func configureTransport(t1 *http.Transport) (*Transport, error) {
- connPool := new(clientConnPool)
- t2 := &Transport{
- ConnPool: noDialClientConnPool{connPool},
- t1: t1,
- }
- connPool.t = t2
- if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
- return nil, err
- }
- if t1.TLSClientConfig == nil {
- t1.TLSClientConfig = new(tls.Config)
- }
- if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
- t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
- }
- if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
- t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
- }
- upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
- addr := authorityAddr("https", authority)
- if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
- go c.Close()
- return erringRoundTripper{err}
- } else if !used {
- // Turns out we don't need this c.
- // For example, two goroutines made requests to the same host
- // at the same time, both kicking off TCP dials. (since protocol
- // was unknown)
- go c.Close()
- }
- return t2
- }
- if m := t1.TLSNextProto; len(m) == 0 {
- t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
- "h2": upgradeFn,
- }
- } else {
- m["h2"] = upgradeFn
- }
- return t2, nil
-}
-
-// registerHTTPSProtocol calls Transport.RegisterProtocol but
-// converting panics into errors.
-func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
- defer func() {
- if e := recover(); e != nil {
- err = fmt.Errorf("%v", e)
- }
- }()
- t.RegisterProtocol("https", rt)
- return nil
-}
-
-// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
-// if there's already a cached connection to the host.
-type noDialH2RoundTripper struct{ t *Transport }
-
-func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- res, err := rt.t.RoundTrip(req)
- if err == ErrNoCachedConn {
- return nil, http.ErrSkipAltProtocol
- }
- return res, err
-}
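
The deleted helper is not lost functionality: with go1.6 now the floor, the same ALPN upgrade wiring remains reachable through the package's exported entry point, and callers keep using it the same way. A sketch of typical use, as documented for golang.org/x/net/http2:

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        t1 := &http.Transport{}
        // Adds "h2" to t1's TLS NextProtos and registers the upgrade
        // hook, as the removed configureTransport did.
        if err := http2.ConfigureTransport(t1); err != nil {
            log.Fatal(err)
        }
        client := &http.Client{Transport: t1}
        _ = client // client now speaks HTTP/2 to servers that offer it
    }
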
diff --git a/src/vendor/golang.org/x/net/http2/databuffer.go b/src/vendor/golang.org/x/net/http2/databuffer.go
new file mode 100644
index 000000000..a3067f8de
--- /dev/null
+++ b/src/vendor/golang.org/x/net/http2/databuffer.go
@@ -0,0 +1,146 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+)
+
+// Buffer chunks are allocated from a pool to reduce pressure on GC.
+// The maximum wasted space per dataBuffer is 2x the largest size class,
+// which happens when the dataBuffer has multiple chunks and there is
+// one unread byte in both the first and last chunks. We use a few size
+// classes to minimize overheads for servers that typically receive very
+// small request bodies.
+//
+// TODO: Benchmark to determine if the pools are necessary. The GC may have
+// improved enough that we can instead allocate chunks like this:
+// make([]byte, max(16<<10, expectedBytesRemaining))
+var (
+ dataChunkSizeClasses = []int{
+ 1 << 10,
+ 2 << 10,
+ 4 << 10,
+ 8 << 10,
+ 16 << 10,
+ }
+ dataChunkPools = [...]sync.Pool{
+ {New: func() interface{} { return make([]byte, 1<<10) }},
+ {New: func() interface{} { return make([]byte, 2<<10) }},
+ {New: func() interface{} { return make([]byte, 4<<10) }},
+ {New: func() interface{} { return make([]byte, 8<<10) }},
+ {New: func() interface{} { return make([]byte, 16<<10) }},
+ }
+)
+
+func getDataBufferChunk(size int64) []byte {
+ i := 0
+ for ; i < len(dataChunkSizeClasses)-1; i++ {
+ if size <= int64(dataChunkSizeClasses[i]) {
+ break
+ }
+ }
+ return dataChunkPools[i].Get().([]byte)
+}
+
+func putDataBufferChunk(p []byte) {
+ for i, n := range dataChunkSizeClasses {
+ if len(p) == n {
+ dataChunkPools[i].Put(p)
+ return
+ }
+ }
+ panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
+}
+
+// dataBuffer is an io.ReadWriter backed by a list of data chunks.
+// Each dataBuffer is used to read DATA frames on a single stream.
+// The buffer is divided into chunks so the server can limit the
+// total memory used by a single connection without limiting the
+// request body size on any single stream.
+type dataBuffer struct {
+ chunks [][]byte
+ r int // next byte to read is chunks[0][r]
+ w int // next byte to write is chunks[len(chunks)-1][w]
+ size int // total buffered bytes
+ expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
+}
+
+var errReadEmpty = errors.New("read from empty dataBuffer")
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *dataBuffer) Read(p []byte) (int, error) {
+ if b.size == 0 {
+ return 0, errReadEmpty
+ }
+ var ntotal int
+ for len(p) > 0 && b.size > 0 {
+ readFrom := b.bytesFromFirstChunk()
+ n := copy(p, readFrom)
+ p = p[n:]
+ ntotal += n
+ b.r += n
+ b.size -= n
+ // If the first chunk has been consumed, advance to the next chunk.
+ if b.r == len(b.chunks[0]) {
+ putDataBufferChunk(b.chunks[0])
+ end := len(b.chunks) - 1
+ copy(b.chunks[:end], b.chunks[1:])
+ b.chunks[end] = nil
+ b.chunks = b.chunks[:end]
+ b.r = 0
+ }
+ }
+ return ntotal, nil
+}
+
+func (b *dataBuffer) bytesFromFirstChunk() []byte {
+ if len(b.chunks) == 1 {
+ return b.chunks[0][b.r:b.w]
+ }
+ return b.chunks[0][b.r:]
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *dataBuffer) Len() int {
+ return b.size
+}
+
+// Write appends p to the buffer.
+func (b *dataBuffer) Write(p []byte) (int, error) {
+ ntotal := len(p)
+ for len(p) > 0 {
+ // If the last chunk is empty, allocate a new chunk. Try to allocate
+ // enough to fully copy p plus any additional bytes we expect to
+ // receive. However, this may allocate less than len(p).
+ want := int64(len(p))
+ if b.expected > want {
+ want = b.expected
+ }
+ chunk := b.lastChunkOrAlloc(want)
+ n := copy(chunk[b.w:], p)
+ p = p[n:]
+ b.w += n
+ b.size += n
+ b.expected -= int64(n)
+ }
+ return ntotal, nil
+}
+
+func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
+ if len(b.chunks) != 0 {
+ last := b.chunks[len(b.chunks)-1]
+ if b.w < len(last) {
+ return last
+ }
+ }
+ chunk := getDataBufferChunk(want)
+ b.chunks = append(b.chunks, chunk)
+ b.w = 0
+ return chunk
+}
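
dataBuffer replaces the old fixed-size buffer with pooled, size-classed chunks, which is what lets a server cap per-connection memory without capping any single stream's body. The size-class pattern stands alone; a small sketch with hypothetical names (getChunk is not part of this package's API):

    package main

    import (
        "fmt"
        "sync"
    )

    var sizeClasses = []int{1 << 10, 4 << 10, 16 << 10}

    var pools = [...]sync.Pool{
        {New: func() interface{} { return make([]byte, 1<<10) }},
        {New: func() interface{} { return make([]byte, 4<<10) }},
        {New: func() interface{} { return make([]byte, 16<<10) }},
    }

    // getChunk picks the smallest class that fits size, falling back
    // to the largest class, mirroring getDataBufferChunk above.
    func getChunk(size int) []byte {
        i := 0
        for ; i < len(sizeClasses)-1; i++ {
            if size <= sizeClasses[i] {
                break
            }
        }
        return pools[i].Get().([]byte)
    }

    func main() {
        c := getChunk(3000)
        fmt.Println(len(c)) // 4096: the 4 KiB class
        pools[1].Put(c)     // return the chunk to its pool when done
    }
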
diff --git a/src/vendor/golang.org/x/net/http2/errors.go b/src/vendor/golang.org/x/net/http2/errors.go
index 20fd7626a..71f2c4631 100644
--- a/src/vendor/golang.org/x/net/http2/errors.go
+++ b/src/vendor/golang.org/x/net/http2/errors.go
@@ -87,13 +87,16 @@ type goAwayFlowError struct{}
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
-// connErrorReason wraps a ConnectionError with an informative error about why it occurs.
-
+// connError represents an HTTP/2 ConnectionError error code, along
+// with a string (for debugging) explaining why.
+//
// Errors of this type are only returned by the frame parser functions
-// and converted into ConnectionError(ErrCodeProtocol).
+// and converted into ConnectionError(Code), after stashing away
+// the Reason into the Framer's errDetail field, accessible via
+// the (*Framer).ErrorDetail method.
type connError struct {
- Code ErrCode
- Reason string
+ Code ErrCode // the ConnectionError error code
+ Reason string // additional reason
}
func (e connError) Error() string {
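
The clarified contract: frame parsers return connError, ReadFrame converts it to ConnectionError(Code) and stashes Reason where (*Framer).ErrorDetail can retrieve it. A runnable sketch of that path through the exported API; the raw bytes encode a DATA frame on stream 0, which is a protocol error:

    package main

    import (
        "bytes"
        "fmt"

        "golang.org/x/net/http2"
    )

    func main() {
        // 9-byte frame header: length 0, type 0 (DATA), flags 0, stream 0.
        raw := make([]byte, 9)
        fr := http2.NewFramer(nil, bytes.NewReader(raw)) // writer unused here
        if _, err := fr.ReadFrame(); err != nil {
            fmt.Println("error: ", err)               // a ConnectionError
            fmt.Println("detail:", fr.ErrorDetail())  // the stashed Reason
        }
    }
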
diff --git a/src/vendor/golang.org/x/net/http2/fixed_buffer.go b/src/vendor/golang.org/x/net/http2/fixed_buffer.go
deleted file mode 100644
index 47da0f0bf..000000000
--- a/src/vendor/golang.org/x/net/http2/fixed_buffer.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "errors"
-)
-
-// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
-// It never allocates, but moves old data as new data is written.
-type fixedBuffer struct {
- buf []byte
- r, w int
-}
-
-var (
- errReadEmpty = errors.New("read from empty fixedBuffer")
- errWriteFull = errors.New("write on full fixedBuffer")
-)
-
-// Read copies bytes from the buffer into p.
-// It is an error to read when no data is available.
-func (b *fixedBuffer) Read(p []byte) (n int, err error) {
- if b.r == b.w {
- return 0, errReadEmpty
- }
- n = copy(p, b.buf[b.r:b.w])
- b.r += n
- if b.r == b.w {
- b.r = 0
- b.w = 0
- }
- return n, nil
-}
-
-// Len returns the number of bytes of the unread portion of the buffer.
-func (b *fixedBuffer) Len() int {
- return b.w - b.r
-}
-
-// Write copies bytes from p into the buffer.
-// It is an error to write more data than the buffer can hold.
-func (b *fixedBuffer) Write(p []byte) (n int, err error) {
- // Slide existing data to beginning.
- if b.r > 0 && len(p) > len(b.buf)-b.w {
- copy(b.buf, b.buf[b.r:b.w])
- b.w -= b.r
- b.r = 0
- }
-
- // Write new data.
- n = copy(b.buf[b.w:], p)
- b.w += n
- if n < len(p) {
- err = errWriteFull
- }
- return n, err
-}
diff --git a/src/vendor/golang.org/x/net/http2/flow.go b/src/vendor/golang.org/x/net/http2/flow.go
index 957de2542..cea601fcd 100644
--- a/src/vendor/golang.org/x/net/http2/flow.go
+++ b/src/vendor/golang.org/x/net/http2/flow.go
@@ -41,10 +41,10 @@ func (f *flow) take(n int32) {
// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *flow) add(n int32) bool {
- remain := (1<<31 - 1) - f.n
- if n > remain {
- return false
+ sum := f.n + n
+ if (sum > n) == (f.n > 0) {
+ f.n = sum
+ return true
}
- f.n += n
- return true
+ return false
}
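
The rewritten add avoids widening to int64: with wraparound arithmetic, sum = f.n + n overflowed exactly when the comparison (sum > n) disagrees with (f.n > 0). A standalone check of that identity (add here is a hypothetical copy of flow.add, not an exported API):

    package main

    import (
        "fmt"
        "math"
    )

    // add reports the new window and whether cur+n stayed inside the
    // int32 range, relying on Go's defined signed wraparound.
    func add(cur, n int32) (int32, bool) {
        sum := cur + n
        if (sum > n) == (cur > 0) {
            return sum, true
        }
        return cur, false
    }

    func main() {
        fmt.Println(add(math.MaxInt32, 1))  // 2147483647 false: would overflow
        fmt.Println(add(math.MaxInt32, -1)) // 2147483646 true
        fmt.Println(add(-5, 3))             // -2 true
    }
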
diff --git a/src/vendor/golang.org/x/net/http2/frame.go b/src/vendor/golang.org/x/net/http2/frame.go
index c9b09bb67..514c126c5 100644
--- a/src/vendor/golang.org/x/net/http2/frame.go
+++ b/src/vendor/golang.org/x/net/http2/frame.go
@@ -14,8 +14,8 @@ import (
"strings"
"sync"
+ "golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
- "golang.org/x/net/lex/httplex"
)
const frameHeaderLen = 9
@@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{
// a frameParser parses a frame given its FrameHeader and payload
// bytes. The length of payload will always equal fh.Length (which
// might be 0).
-type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
+type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)
var frameParsers = map[FrameType]frameParser{
FrameData: parseDataFrame,
@@ -312,15 +312,19 @@ type Framer struct {
MaxHeaderListSize uint32
// TODO: track which type of frame & with which flags was sent
- // last. Then return an error (unless AllowIllegalWrites) if
+ // last. Then return an error (unless AllowIllegalWrites) if
// we're in the middle of a header block and a
// non-Continuation or Continuation on a different stream is
// attempted to be written.
- logReads bool
+ logReads, logWrites bool
- debugFramer *Framer // only use for logging written writes
- debugFramerBuf *bytes.Buffer
+ debugFramer *Framer // only use for logging written writes
+ debugFramerBuf *bytes.Buffer
+ debugReadLoggerf func(string, ...interface{})
+ debugWriteLoggerf func(string, ...interface{})
+
+ frameCache *frameCache // nil if frames aren't reused (default)
}
func (fr *Framer) maxHeaderListSize() uint32 {
@@ -355,7 +359,7 @@ func (f *Framer) endWrite() error {
byte(length>>16),
byte(length>>8),
byte(length))
- if logFrameWrites {
+ if f.logWrites {
f.logWrite()
}
@@ -378,10 +382,10 @@ func (f *Framer) logWrite() {
f.debugFramerBuf.Write(f.wbuf)
fr, err := f.debugFramer.ReadFrame()
if err != nil {
- log.Printf("http2: Framer %p: failed to decode just-written frame", f)
+ f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f)
return
}
- log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
+ f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
}
func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
@@ -396,12 +400,36 @@ const (
maxFrameSize = 1<<24 - 1
)
+// SetReuseFrames allows the Framer to reuse Frames.
+// If called on a Framer, Frames returned by calls to ReadFrame are only
+// valid until the next call to ReadFrame.
+func (fr *Framer) SetReuseFrames() {
+ if fr.frameCache != nil {
+ return
+ }
+ fr.frameCache = &frameCache{}
+}
+
+type frameCache struct {
+ dataFrame DataFrame
+}
+
+func (fc *frameCache) getDataFrame() *DataFrame {
+ if fc == nil {
+ return &DataFrame{}
+ }
+ return &fc.dataFrame
+}
+
// NewFramer returns a Framer that writes frames to w and reads them from r.
func NewFramer(w io.Writer, r io.Reader) *Framer {
fr := &Framer{
- w: w,
- r: r,
- logReads: logFrameReads,
+ w: w,
+ r: r,
+ logReads: logFrameReads,
+ logWrites: logFrameWrites,
+ debugReadLoggerf: log.Printf,
+ debugWriteLoggerf: log.Printf,
}
fr.getReadBuf = func(size uint32) []byte {
if cap(fr.readBuf) >= int(size) {
@@ -472,7 +500,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
if _, err := io.ReadFull(fr.r, payload); err != nil {
return nil, err
}
- f, err := typeFrameParser(fh.Type)(fh, payload)
+ f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
if err != nil {
if ce, ok := err.(connError); ok {
return nil, fr.connError(ce.Code, ce.Reason)
@@ -483,7 +511,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
return nil, err
}
if fr.logReads {
- log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f))
+ fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
}
if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
return fr.readMetaFrame(f.(*HeadersFrame))
@@ -560,7 +588,7 @@ func (f *DataFrame) Data() []byte {
return f.data
}
-func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
+func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if fh.StreamID == 0 {
// DATA frames MUST be associated with a stream. If a
// DATA frame is received whose stream identifier
@@ -569,9 +597,9 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
// PROTOCOL_ERROR.
return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
}
- f := &DataFrame{
- FrameHeader: fh,
- }
+ f := fc.getDataFrame()
+ f.FrameHeader = fh
+
var padSize byte
if fh.Flags.Has(FlagDataPadded) {
var err error
@@ -595,6 +623,7 @@ var (
errStreamID = errors.New("invalid stream ID")
errDepStreamID = errors.New("invalid dependent stream ID")
errPadLength = errors.New("pad length too large")
+ errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
)
func validStreamIDOrZero(streamID uint32) bool {
@@ -614,10 +643,11 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
return f.WriteDataPadded(streamID, endStream, data, nil)
}
-// WriteData writes a DATA frame with optional padding.
+// WriteDataPadded writes a DATA frame with optional padding.
//
// If pad is nil, the padding bit is not sent.
// The length of pad must not exceed 255 bytes.
+// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility not to violate the maximum frame size
@@ -626,8 +656,18 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by
if !validStreamID(streamID) && !f.AllowIllegalWrites {
return errStreamID
}
- if len(pad) > 255 {
- return errPadLength
+ if len(pad) > 0 {
+ if len(pad) > 255 {
+ return errPadLength
+ }
+ if !f.AllowIllegalWrites {
+ for _, b := range pad {
+ if b != 0 {
+ // "Padding octets MUST be set to zero when sending."
+ return errPadBytes
+ }
+ }
+ }
}
var flags Flags
if endStream {
@@ -655,10 +695,10 @@ type SettingsFrame struct {
p []byte
}
-func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
// When this (ACK 0x1) bit is set, the payload of the
- // SETTINGS frame MUST be empty. Receipt of a
+ // SETTINGS frame MUST be empty. Receipt of a
// SETTINGS frame with the ACK flag set and a length
// field value other than 0 MUST be treated as a
// connection error (Section 5.4.1) of type
@@ -667,7 +707,7 @@ func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
}
if fh.StreamID != 0 {
// SETTINGS frames always apply to a connection,
- // never a single stream. The stream identifier for a
+ // never a single stream. The stream identifier for a
// SETTINGS frame MUST be zero (0x0). If an endpoint
// receives a SETTINGS frame whose stream identifier
// field is anything other than 0x0, the endpoint MUST
@@ -693,32 +733,67 @@ func (f *SettingsFrame) IsAck() bool {
return f.FrameHeader.Flags.Has(FlagSettingsAck)
}
-func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
+func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) {
f.checkValid()
- buf := f.p
- for len(buf) > 0 {
- settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
- if settingID == s {
- return binary.BigEndian.Uint32(buf[2:6]), true
+ for i := 0; i < f.NumSettings(); i++ {
+ if s := f.Setting(i); s.ID == id {
+ return s.Val, true
}
- buf = buf[6:]
}
return 0, false
}
+// Setting returns the setting from the frame at the given 0-based index.
+// The index must be >= 0 and less than f.NumSettings().
+func (f *SettingsFrame) Setting(i int) Setting {
+ buf := f.p
+ return Setting{
+ ID: SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])),
+ Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]),
+ }
+}
+
+func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 }
+
+// HasDuplicates reports whether f contains any duplicate setting IDs.
+func (f *SettingsFrame) HasDuplicates() bool {
+ num := f.NumSettings()
+ if num == 0 {
+ return false
+ }
+ // If it's small enough (the common case), just do the n^2
+ // thing and avoid a map allocation.
+ if num < 10 {
+ for i := 0; i < num; i++ {
+ idi := f.Setting(i).ID
+ for j := i + 1; j < num; j++ {
+ idj := f.Setting(j).ID
+ if idi == idj {
+ return true
+ }
+ }
+ }
+ return false
+ }
+ seen := map[SettingID]bool{}
+ for i := 0; i < num; i++ {
+ id := f.Setting(i).ID
+ if seen[id] {
+ return true
+ }
+ seen[id] = true
+ }
+ return false
+}
+
// ForeachSetting runs fn for each setting.
// It stops and returns the first error.
func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
f.checkValid()
- buf := f.p
- for len(buf) > 0 {
- if err := fn(Setting{
- SettingID(binary.BigEndian.Uint16(buf[:2])),
- binary.BigEndian.Uint32(buf[2:6]),
- }); err != nil {
+ for i := 0; i < f.NumSettings(); i++ {
+ if err := fn(f.Setting(i)); err != nil {
return err
}
- buf = buf[6:]
}
return nil
}
@@ -737,7 +812,7 @@ func (f *Framer) WriteSettings(settings ...Setting) error {
return f.endWrite()
}
-// WriteSettings writes an empty SETTINGS frame with the ACK bit set.
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
//
// It will perform exactly one Write to the underlying Writer.
// It is the caller's responsibility to not call other Write methods concurrently.
@@ -757,7 +832,7 @@ type PingFrame struct {
func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
-func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
+func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if len(payload) != 8 {
return nil, ConnectionError(ErrCodeFrameSize)
}
@@ -797,7 +872,7 @@ func (f *GoAwayFrame) DebugData() []byte {
return f.debugData
}
-func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.StreamID != 0 {
return nil, ConnectionError(ErrCodeProtocol)
}
@@ -837,7 +912,7 @@ func (f *UnknownFrame) Payload() []byte {
return f.p
}
-func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
return &UnknownFrame{fh, p}, nil
}
@@ -848,7 +923,7 @@ type WindowUpdateFrame struct {
Increment uint32 // never read with high bit set
}
-func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if len(p) != 4 {
return nil, ConnectionError(ErrCodeFrameSize)
}
@@ -913,12 +988,12 @@ func (f *HeadersFrame) HasPriority() bool {
return f.FrameHeader.Flags.Has(FlagHeadersPriority)
}
-func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
+func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
hf := &HeadersFrame{
FrameHeader: fh,
}
if fh.StreamID == 0 {
- // HEADERS frames MUST be associated with a stream. If a HEADERS frame
+ // HEADERS frames MUST be associated with a stream. If a HEADERS frame
// is received whose stream identifier field is 0x0, the recipient MUST
// respond with a connection error (Section 5.4.1) of type
// PROTOCOL_ERROR.
@@ -1040,7 +1115,7 @@ type PriorityParam struct {
Exclusive bool
// Weight is the stream's zero-indexed weight. It should be
- // set together with StreamDep, or neither should be set. Per
+ // set together with StreamDep, or neither should be set. Per
// the spec, "Add one to the value to obtain a weight between
// 1 and 256."
Weight uint8
@@ -1050,7 +1125,7 @@ func (p PriorityParam) IsZero() bool {
return p == PriorityParam{}
}
-func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
+func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
if fh.StreamID == 0 {
return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
}
@@ -1097,7 +1172,7 @@ type RSTStreamFrame struct {
ErrCode ErrCode
}
-func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if len(p) != 4 {
return nil, ConnectionError(ErrCodeFrameSize)
}
@@ -1127,7 +1202,7 @@ type ContinuationFrame struct {
headerFragBuf []byte
}
-func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
+func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
if fh.StreamID == 0 {
return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
}
@@ -1177,7 +1252,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool {
return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
}
-func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
+func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
pp := &PushPromiseFrame{
FrameHeader: fh,
}
@@ -1402,7 +1477,7 @@ func (fr *Framer) maxHeaderStringLen() int {
}
// readMetaFrame returns 0 or more CONTINUATION frames from fr and
-// merge them into into the provided hf and returns a MetaHeadersFrame
+// merges them into the provided hf and returns a MetaHeadersFrame
// with the decoded hpack values.
func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
if fr.AllowIllegalReads {
@@ -1419,10 +1494,10 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
hdec.SetEmitEnabled(true)
hdec.SetMaxStringLength(fr.maxHeaderStringLen())
hdec.SetEmitFunc(func(hf hpack.HeaderField) {
- if VerboseLogs && logFrameReads {
- log.Printf("http2: decoded hpack field %+v", hf)
+ if VerboseLogs && fr.logReads {
+ fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
}
- if !httplex.ValidHeaderFieldValue(hf.Value) {
+ if !httpguts.ValidHeaderFieldValue(hf.Value) {
invalid = headerFieldValueError(hf.Value)
}
isPseudo := strings.HasPrefix(hf.Name, ":")
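
Two caller-visible changes in this file: SetReuseFrames makes frames returned by ReadFrame valid only until the next ReadFrame call, and WriteDataPadded now rejects nonzero padding octets unless AllowIllegalWrites is set. A short sketch exercising both through the exported Framer, against an in-memory buffer:

    package main

    import (
        "bytes"
        "fmt"

        "golang.org/x/net/http2"
    )

    func main() {
        var buf bytes.Buffer
        fr := http2.NewFramer(&buf, &buf)

        // Nonzero padding now fails: "Padding octets MUST be set to
        // zero when sending."
        fmt.Println(fr.WriteDataPadded(1, false, []byte("hi"), []byte{1, 2}))

        // All-zero padding is accepted and written to buf.
        if err := fr.WriteDataPadded(1, true, []byte("hi"), make([]byte, 2)); err != nil {
            fmt.Println("write:", err)
        }

        fr.SetReuseFrames() // frames now valid only until the next ReadFrame
        f, err := fr.ReadFrame()
        fmt.Println(f, err)
    }
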
diff --git a/src/vendor/golang.org/x/net/http2/go111.go b/src/vendor/golang.org/x/net/http2/go111.go
new file mode 100644
index 000000000..3a131016b
--- /dev/null
+++ b/src/vendor/golang.org/x/net/http2/go111.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.11
+
+package http2
+
+import (
+ "net/http/httptrace"
+ "net/textproto"
+)
+
+func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
+ return trace != nil && trace.WroteHeaderField != nil
+}
+
+func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField(k, []string{v})
+ }
+}
+
+func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
+ if trace != nil {
+ return trace.Got1xxResponse
+ }
+ return nil
+}
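
These shims only compile on Go 1.11+, where httptrace gained the WroteHeaderField and Got1xxResponse hooks that the transport forwards to. Registering them from user code is plain standard library, nothing package-specific:

    package main

    import (
        "fmt"
        "net/http/httptrace"
        "net/textproto"
    )

    func main() {
        trace := &httptrace.ClientTrace{
            // Forwarded to by traceWroteHeaderField above, once per field.
            WroteHeaderField: func(key string, value []string) {
                fmt.Println("wrote:", key, value)
            },
            // Returned by traceGot1xxResponseFunc for 1xx responses.
            Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
                fmt.Println("informational:", code)
                return nil
            },
        }
        _ = trace // attach via httptrace.WithClientTrace(ctx, trace)
    }
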
diff --git a/src/vendor/golang.org/x/net/http2/go16.go b/src/vendor/golang.org/x/net/http2/go16.go
deleted file mode 100644
index 2b72855f5..000000000
--- a/src/vendor/golang.org/x/net/http2/go16.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.6
-
-package http2
-
-import (
- "crypto/tls"
- "net/http"
- "time"
-)
-
-func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
- return t1.ExpectContinueTimeout
-}
-
-// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
-func isBadCipher(cipher uint16) bool {
- switch cipher {
- case tls.TLS_RSA_WITH_RC4_128_SHA,
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
- tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
- // Reject cipher suites from Appendix A.
- // "This list includes those cipher suites that do not
- // offer an ephemeral key exchange and those that are
- // based on the TLS null, stream or block cipher type"
- return true
- default:
- return false
- }
-}
diff --git a/src/vendor/golang.org/x/net/http2/go17.go b/src/vendor/golang.org/x/net/http2/go17.go
deleted file mode 100644
index 730319dd5..000000000
--- a/src/vendor/golang.org/x/net/http2/go17.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.7
-
-package http2
-
-import (
- "context"
- "net"
- "net/http"
- "net/http/httptrace"
- "time"
-)
-
-type contextContext interface {
- context.Context
-}
-
-func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
- ctx, cancel = context.WithCancel(context.Background())
- ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
- if hs := opts.baseConfig(); hs != nil {
- ctx = context.WithValue(ctx, http.ServerContextKey, hs)
- }
- return
-}
-
-func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
- return context.WithCancel(ctx)
-}
-
-func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
- return req.WithContext(ctx)
-}
-
-type clientTrace httptrace.ClientTrace
-
-func reqContext(r *http.Request) context.Context { return r.Context() }
-
-func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
-
-func traceGotConn(req *http.Request, cc *ClientConn) {
- trace := httptrace.ContextClientTrace(req.Context())
- if trace == nil || trace.GotConn == nil {
- return
- }
- ci := httptrace.GotConnInfo{Conn: cc.tconn}
- cc.mu.Lock()
- ci.Reused = cc.nextStreamID > 1
- ci.WasIdle = len(cc.streams) == 0 && ci.Reused
- if ci.WasIdle && !cc.lastActive.IsZero() {
- ci.IdleTime = time.Now().Sub(cc.lastActive)
- }
- cc.mu.Unlock()
-
- trace.GotConn(ci)
-}
-
-func traceWroteHeaders(trace *clientTrace) {
- if trace != nil && trace.WroteHeaders != nil {
- trace.WroteHeaders()
- }
-}
-
-func traceGot100Continue(trace *clientTrace) {
- if trace != nil && trace.Got100Continue != nil {
- trace.Got100Continue()
- }
-}
-
-func traceWait100Continue(trace *clientTrace) {
- if trace != nil && trace.Wait100Continue != nil {
- trace.Wait100Continue()
- }
-}
-
-func traceWroteRequest(trace *clientTrace, err error) {
- if trace != nil && trace.WroteRequest != nil {
- trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
- }
-}
-
-func traceFirstResponseByte(trace *clientTrace) {
- if trace != nil && trace.GotFirstResponseByte != nil {
- trace.GotFirstResponseByte()
- }
-}
-
-func requestTrace(req *http.Request) *clientTrace {
- trace := httptrace.ContextClientTrace(req.Context())
- return (*clientTrace)(trace)
-}
diff --git a/src/vendor/golang.org/x/net/http2/headermap.go b/src/vendor/golang.org/x/net/http2/headermap.go
index c2805f6ac..c3ff3fa1c 100644
--- a/src/vendor/golang.org/x/net/http2/headermap.go
+++ b/src/vendor/golang.org/x/net/http2/headermap.go
@@ -7,15 +7,21 @@ package http2
import (
"net/http"
"strings"
+ "sync"
)
var (
- commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
- commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
+ commonBuildOnce sync.Once
+ commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
+ commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
)
-func init() {
- for _, v := range []string{
+func buildCommonHeaderMapsOnce() {
+ commonBuildOnce.Do(buildCommonHeaderMaps)
+}
+
+func buildCommonHeaderMaps() {
+ common := []string{
"accept",
"accept-charset",
"accept-encoding",
@@ -63,7 +69,10 @@ func init() {
"vary",
"via",
"www-authenticate",
- } {
+ }
+ commonLowerHeader = make(map[string]string, len(common))
+ commonCanonHeader = make(map[string]string, len(common))
+ for _, v := range common {
chk := http.CanonicalHeaderKey(v)
commonLowerHeader[chk] = v
commonCanonHeader[v] = chk
@@ -71,6 +80,7 @@ func init() {
}
func lowerHeader(v string) string {
+ buildCommonHeaderMapsOnce()
if s, ok := commonLowerHeader[v]; ok {
return s
}
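
Replacing init with a sync.Once defers map construction until a header is actually processed and lets both maps be allocated at exactly the right size. The same lazy-build pattern in isolation (names here are hypothetical):

    package main

    import (
        "fmt"
        "strings"
        "sync"
    )

    var (
        buildOnce sync.Once
        lowerMap  map[string]string
    )

    func lookupLower(k string) string {
        buildOnce.Do(func() { // safe under concurrent first use
            keys := []string{"Accept", "Content-Type", "User-Agent"}
            lowerMap = make(map[string]string, len(keys)) // sized exactly
            for _, key := range keys {
                lowerMap[key] = strings.ToLower(key)
            }
        })
        if v, ok := lowerMap[k]; ok {
            return v
        }
        return strings.ToLower(k) // fall back for uncommon headers
    }

    func main() {
        fmt.Println(lookupLower("Content-Type"))
    }
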
diff --git a/src/vendor/golang.org/x/net/http2/hpack/encode.go b/src/vendor/golang.org/x/net/http2/hpack/encode.go
index f9bb03398..1565cf270 100644
--- a/src/vendor/golang.org/x/net/http2/hpack/encode.go
+++ b/src/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -39,13 +39,14 @@ func NewEncoder(w io.Writer) *Encoder {
tableSizeUpdate: false,
w: w,
}
+ e.dynTab.table.init()
e.dynTab.setMaxSize(initialHeaderTableSize)
return e
}
// WriteField encodes f into a single Write to e's underlying Writer.
// This function may also produce bytes for "Header Table Size Update"
-// if necessary. If produced, it is done before encoding f.
+// if necessary. If produced, it is done before encoding f.
func (e *Encoder) WriteField(f HeaderField) error {
e.buf = e.buf[:0]
@@ -88,29 +89,17 @@ func (e *Encoder) WriteField(f HeaderField) error {
// only name matches, i points to that index and nameValueMatch
// becomes false.
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
- for idx, hf := range staticTable {
- if !constantTimeStringCompare(hf.Name, f.Name) {
- continue
- }
- if i == 0 {
- i = uint64(idx + 1)
- }
- if f.Sensitive {
- continue
- }
- if !constantTimeStringCompare(hf.Value, f.Value) {
- continue
- }
- i = uint64(idx + 1)
- nameValueMatch = true
- return
+ i, nameValueMatch = staticTable.search(f)
+ if nameValueMatch {
+ return i, true
}
- j, nameValueMatch := e.dynTab.search(f)
+ j, nameValueMatch := e.dynTab.table.search(f)
if nameValueMatch || (i == 0 && j != 0) {
- i = j + uint64(len(staticTable))
+ return j + uint64(staticTable.len()), nameValueMatch
}
- return
+
+ return i, false
}
// SetMaxDynamicTableSize changes the dynamic header table size to v.
@@ -217,7 +206,7 @@ func appendVarInt(dst []byte, n byte, i uint64) []byte {
}
// appendHpackString appends s, as encoded in "String Literal"
-// representation, to dst and returns the the extended buffer.
+// representation, to dst and returns the extended buffer.
//
// s will be encoded in Huffman codes only when it produces strictly
// shorter byte string.
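
The encoder change routes both static- and dynamic-table lookups through a shared headerFieldTable search, with dynamic indices still offset by the static table's length. Nothing changes for callers of the exported API; a minimal round of field encoding:

    package main

    import (
        "bytes"
        "fmt"

        "golang.org/x/net/http2/hpack"
    )

    func main() {
        var buf bytes.Buffer
        enc := hpack.NewEncoder(&buf)
        // ":method: GET" matches the static table, so searchTable finds
        // it immediately and it encodes as one indexed byte (0x82).
        enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
        // A custom field is emitted literally and entered into the
        // dynamic table for later matches.
        enc.WriteField(hpack.HeaderField{Name: "x-demo", Value: "1"})
        fmt.Printf("%x\n", buf.Bytes())
    }
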
diff --git a/src/vendor/golang.org/x/net/http2/hpack/hpack.go b/src/vendor/golang.org/x/net/http2/hpack/hpack.go
index 8aa197ad6..85f18a2b0 100644
--- a/src/vendor/golang.org/x/net/http2/hpack/hpack.go
+++ b/src/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -57,11 +57,11 @@ func (hf HeaderField) String() string {
return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
}
-// Size returns the size of an entry per RFC 7540 section 5.2.
+// Size returns the size of an entry per RFC 7541 section 4.1.
func (hf HeaderField) Size() uint32 {
// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
// "The size of the dynamic table is the sum of the size of
- // its entries. The size of an entry is the sum of its name's
+ // its entries. The size of an entry is the sum of its name's
// length in octets (as defined in Section 5.2), its value's
// length in octets (see Section 5.2), plus 32. The size of
// an entry is calculated using the length of the name and
@@ -92,6 +92,8 @@ type Decoder struct {
// saveBuf is previous data passed to Write which we weren't able
// to fully parse before. Unlike buf, we own this data.
saveBuf bytes.Buffer
+
+ firstField bool // processing the first field of the header block
}
// NewDecoder returns a new decoder with the provided maximum dynamic
@@ -101,7 +103,9 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod
d := &Decoder{
emit: emitFunc,
emitEnabled: true,
+ firstField: true,
}
+ d.dynTab.table.init()
d.dynTab.allowedMaxSize = maxDynamicTableSize
d.dynTab.setMaxSize(maxDynamicTableSize)
return d
@@ -154,12 +158,9 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
}
type dynamicTable struct {
- // ents is the FIFO described at
// http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
- // The newest (low index) is append at the end, and items are
- // evicted from the front.
- ents []HeaderField
- size uint32
+ table headerFieldTable
+ size uint32 // in bytes
maxSize uint32 // current maxSize
allowedMaxSize uint32 // maxSize may go up to this, inclusive
}
@@ -169,95 +170,45 @@ func (dt *dynamicTable) setMaxSize(v uint32) {
dt.evict()
}
-// TODO: change dynamicTable to be a struct with a slice and a size int field,
-// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
-//
-//
-// Then make add increment the size. maybe the max size should move from Decoder to
-// dynamicTable and add should return an ok bool if there was enough space.
-//
-// Later we'll need a remove operation on dynamicTable.
-
func (dt *dynamicTable) add(f HeaderField) {
- dt.ents = append(dt.ents, f)
+ dt.table.addEntry(f)
dt.size += f.Size()
dt.evict()
}
-// If we're too big, evict old stuff (front of the slice)
+// If we're too big, evict old stuff.
func (dt *dynamicTable) evict() {
- base := dt.ents // keep base pointer of slice
- for dt.size > dt.maxSize {
- dt.size -= dt.ents[0].Size()
- dt.ents = dt.ents[1:]
+ var n int
+ for dt.size > dt.maxSize && n < dt.table.len() {
+ dt.size -= dt.table.ents[n].Size()
+ n++
}
-
- // Shift slice contents down if we evicted things.
- if len(dt.ents) != len(base) {
- copy(base, dt.ents)
- dt.ents = base[:len(dt.ents)]
- }
-}
-
-// constantTimeStringCompare compares string a and b in a constant
-// time manner.
-func constantTimeStringCompare(a, b string) bool {
- if len(a) != len(b) {
- return false
- }
-
- c := byte(0)
-
- for i := 0; i < len(a); i++ {
- c |= a[i] ^ b[i]
- }
-
- return c == 0
-}
-
-// Search searches f in the table. The return value i is 0 if there is
-// no name match. If there is name match or name/value match, i is the
-// index of that entry (1-based). If both name and value match,
-// nameValueMatch becomes true.
-func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
- l := len(dt.ents)
- for j := l - 1; j >= 0; j-- {
- ent := dt.ents[j]
- if !constantTimeStringCompare(ent.Name, f.Name) {
- continue
- }
- if i == 0 {
- i = uint64(l - j)
- }
- if f.Sensitive {
- continue
- }
- if !constantTimeStringCompare(ent.Value, f.Value) {
- continue
- }
- i = uint64(l - j)
- nameValueMatch = true
- return
- }
- return
+ dt.table.evictOldest(n)
}
func (d *Decoder) maxTableIndex() int {
- return len(d.dynTab.ents) + len(staticTable)
+ // This should never overflow. RFC 7540 Section 6.5.2 limits the size of
+ // the dynamic table to 2^32 bytes, where each entry will occupy more than
+ // one byte. Further, the staticTable has a fixed, small length.
+ return d.dynTab.table.len() + staticTable.len()
}
func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
- if i < 1 {
+ // See Section 2.3.3.
+ if i == 0 {
return
}
+ if i <= uint64(staticTable.len()) {
+ return staticTable.ents[i-1], true
+ }
if i > uint64(d.maxTableIndex()) {
return
}
- if i <= uint64(len(staticTable)) {
- return staticTable[i-1], true
- }
- dents := d.dynTab.ents
- return dents[len(dents)-(int(i)-len(staticTable))], true
+ // In the dynamic table, newer entries have lower indices.
+ // However, dt.ents[0] is the oldest entry. Hence, dt.ents is
+ // the reversed dynamic table.
+ dt := d.dynTab.table
+ return dt.ents[dt.len()-(int(i)-staticTable.len())], true
}
// Decode decodes an entire block.
@@ -278,11 +229,15 @@ func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
return hf, nil
}
+// Close declares that the decoding is complete and resets the Decoder
+// to be reused again for a new header block. If there is any remaining
+// data in the decoder's buffer, Close returns an error.
func (d *Decoder) Close() error {
if d.saveBuf.Len() > 0 {
d.saveBuf.Reset()
return DecodingError{errors.New("truncated headers")}
}
+ d.firstField = true
return nil
}
@@ -307,7 +262,7 @@ func (d *Decoder) Write(p []byte) (n int, err error) {
err = d.parseHeaderFieldRepr()
if err == errNeedMore {
// Extra paranoia, making sure saveBuf won't
- // get too large. All the varint and string
+ // get too large. All the varint and string
// reading code earlier should already catch
// overlong things and return ErrStringLength,
// but keep this as a last resort.
@@ -318,6 +273,7 @@ func (d *Decoder) Write(p []byte) (n int, err error) {
d.saveBuf.Write(d.buf)
return len(p), nil
}
+ d.firstField = false
if err != nil {
break
}
@@ -441,6 +397,12 @@ func (d *Decoder) callEmit(hf HeaderField) error {
// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseDynamicTableSizeUpdate() error {
+ // RFC 7541, sec 4.2: This dynamic table size update MUST occur at the
+ // beginning of the first header block following the change to the dynamic table size.
+ if !d.firstField && d.dynTab.size > 0 {
+ return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")}
+ }
+
buf := d.buf
size, buf, err := readVarInt(5, buf)
if err != nil {
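Note on the decoder changes above: the new firstField flag enforces RFC 7541, section 4.2 — a dynamic table size update is only legal at the start of a header block — and Close re-arms the check for the next block. A small usage sketch against the public hpack API (the header names and sizes are placeholders):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("%s: %s\n", f.Name, f.Value)
	})

	// Header block 1.
	enc.WriteField(hpack.HeaderField{Name: "x-demo", Value: "1"})
	dec.Write(buf.Bytes())
	dec.Close() // end of block; re-arms firstField for the next block
	buf.Reset()

	// The encoder buffers the size change and emits the update at the
	// start of the next header block, exactly where the decoder's
	// firstField check requires it to be.
	enc.SetMaxDynamicTableSize(256)
	enc.WriteField(hpack.HeaderField{Name: "x-demo", Value: "2"})
	dec.Write(buf.Bytes())
	if err := dec.Close(); err != nil {
		fmt.Println("close error:", err)
	}
}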
diff --git a/src/vendor/golang.org/x/net/http2/hpack/huffman.go b/src/vendor/golang.org/x/net/http2/hpack/huffman.go
index 8850e3946..b412a96c5 100644
--- a/src/vendor/golang.org/x/net/http2/hpack/huffman.go
+++ b/src/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -47,6 +47,7 @@ var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
// If maxLen is greater than 0, attempts to write more to buf than
// maxLen bytes will return ErrStringLength.
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
+ rootHuffmanNode := getRootHuffmanNode()
n := rootHuffmanNode
// cur is the bit buffer that has not been fed into n.
// cbits is the number of low order bits in cur that are valid.
@@ -106,7 +107,7 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
type node struct {
// children is non-nil for internal nodes
- children []*node
+ children *[256]*node
// The following are only valid if children is nil:
codeLen uint8 // number of bits that led to the output of sym
@@ -114,22 +115,31 @@ type node struct {
}
func newInternalNode() *node {
- return &node{children: make([]*node, 256)}
+ return &node{children: new([256]*node)}
}
-var rootHuffmanNode = newInternalNode()
+var (
+ buildRootOnce sync.Once
+ lazyRootHuffmanNode *node
+)
-func init() {
+func getRootHuffmanNode() *node {
+ buildRootOnce.Do(buildRootHuffmanNode)
+ return lazyRootHuffmanNode
+}
+
+func buildRootHuffmanNode() {
if len(huffmanCodes) != 256 {
panic("unexpected size")
}
+ lazyRootHuffmanNode = newInternalNode()
for i, code := range huffmanCodes {
addDecoderNode(byte(i), code, huffmanCodeLen[i])
}
}
func addDecoderNode(sym byte, code uint32, codeLen uint8) {
- cur := rootHuffmanNode
+ cur := lazyRootHuffmanNode
for codeLen > 8 {
codeLen -= 8
i := uint8(code >> codeLen)
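Note on the Huffman change above: it swaps an init-time build of the 256-way decoding tree for lazy construction behind sync.Once, so programs that link this package but never decode Huffman-coded strings avoid the allocation entirely. A generic sketch of the same pattern, with illustrative names:

package main

import (
	"fmt"
	"sync"
)

var (
	buildOnce sync.Once
	lazyTable []int // stand-in for the 256-node Huffman decoding tree
)

// getTable builds the table on first use; later calls hit the cheap
// fast path inside sync.Once, and the build is race-free even when
// many goroutines decode concurrently.
func getTable() []int {
	buildOnce.Do(func() {
		lazyTable = make([]int, 256)
		for i := range lazyTable {
			lazyTable[i] = i // placeholder for the addDecoderNode work
		}
	})
	return lazyTable
}

func main() {
	fmt.Println(len(getTable())) // 256; built exactly once
}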
diff --git a/src/vendor/golang.org/x/net/http2/hpack/tables.go b/src/vendor/golang.org/x/net/http2/hpack/tables.go
index b9283a023..a66cfbea6 100644
--- a/src/vendor/golang.org/x/net/http2/hpack/tables.go
+++ b/src/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -4,73 +4,200 @@
package hpack
-func pair(name, value string) HeaderField {
- return HeaderField{Name: name, Value: value}
+import (
+ "fmt"
+)
+
+// headerFieldTable implements a list of HeaderFields.
+// This is used to implement the static and dynamic tables.
+type headerFieldTable struct {
+ // For static tables, entries are never evicted.
+ //
+ // For dynamic tables, entries are evicted from ents[0] and added to the end.
+ // Each entry has a unique id that starts at one and increments for each
+ // entry that is added. This unique id is stable across evictions, meaning
+ // it can be used as a pointer to a specific entry. As in hpack, unique ids
+ // are 1-based. The unique id for ents[k] is k + evictCount + 1.
+ //
+ // Zero is not a valid unique id.
+ //
+ // evictCount should not overflow in any remotely practical situation. In
+ // practice, we will have one dynamic table per HTTP/2 connection. If we
+ // assume a very powerful server that handles 1M QPS per connection and each
+ // request adds (then evicts) 100 entries from the table, it would still take
+ // 2M years for evictCount to overflow.
+ ents []HeaderField
+ evictCount uint64
+
+ // byName maps a HeaderField name to the unique id of the newest entry with
+ // the same name. See above for a definition of "unique id".
+ byName map[string]uint64
+
+ // byNameValue maps a HeaderField name/value pair to the unique id of the newest
+ // entry with the same name and value. See above for a definition of "unique id".
+ byNameValue map[pairNameValue]uint64
+}
+
+type pairNameValue struct {
+ name, value string
+}
+
+func (t *headerFieldTable) init() {
+ t.byName = make(map[string]uint64)
+ t.byNameValue = make(map[pairNameValue]uint64)
+}
+
+// len reports the number of entries in the table.
+func (t *headerFieldTable) len() int {
+ return len(t.ents)
+}
+
+// addEntry adds a new entry.
+func (t *headerFieldTable) addEntry(f HeaderField) {
+ id := uint64(t.len()) + t.evictCount + 1
+ t.byName[f.Name] = id
+ t.byNameValue[pairNameValue{f.Name, f.Value}] = id
+ t.ents = append(t.ents, f)
+}
+
+// evictOldest evicts the n oldest entries in the table.
+func (t *headerFieldTable) evictOldest(n int) {
+ if n > t.len() {
+ panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
+ }
+ for k := 0; k < n; k++ {
+ f := t.ents[k]
+ id := t.evictCount + uint64(k) + 1
+ if t.byName[f.Name] == id {
+ delete(t.byName, f.Name)
+ }
+ if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
+ delete(t.byNameValue, p)
+ }
+ }
+ copy(t.ents, t.ents[n:])
+ for k := t.len() - n; k < t.len(); k++ {
+ t.ents[k] = HeaderField{} // so strings can be garbage collected
+ }
+ t.ents = t.ents[:t.len()-n]
+ if t.evictCount+uint64(n) < t.evictCount {
+ panic("evictCount overflow")
+ }
+ t.evictCount += uint64(n)
+}
+
+// search finds f in the table. If there is no match, i is 0.
+// If both name and value match, i is the matched index and nameValueMatch
+// becomes true. If only name matches, i points to that index and
+// nameValueMatch becomes false.
+//
+// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
+// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
+// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
+// table, the return value i actually refers to the entry t.ents[t.len()-i].
+//
+// All tables are assumed to be dynamic tables except for the global
+// staticTable pointer.
+//
+// See Section 2.3.3.
+func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+ if !f.Sensitive {
+ if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
+ return t.idToIndex(id), true
+ }
+ }
+ if id := t.byName[f.Name]; id != 0 {
+ return t.idToIndex(id), false
+ }
+ return 0, false
+}
+
+// idToIndex converts a unique id to an HPACK index.
+// See Section 2.3.3.
+func (t *headerFieldTable) idToIndex(id uint64) uint64 {
+ if id <= t.evictCount {
+ panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
+ }
+ k := id - t.evictCount - 1 // convert id to an index t.ents[k]
+ if t != staticTable {
+ return uint64(t.len()) - k // dynamic table
+ }
+ return k + 1
}
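The unique-id scheme above is what makes search O(1): byName and byNameValue store a stable id, and idToIndex converts it to an HPACK index only at lookup time, per the rule that ents[k] has id k + evictCount + 1. A small worked trace of that arithmetic (the counts are made up; for dynamic-table hits the encoder later adds staticTable.len(), as in the encode.go hunk above, to get the wire index):

package main

import "fmt"

// Worked trace: three entries were added (ids 1, 2, 3) and the oldest
// was evicted, so evictCount is 1 and ids 2 and 3 sit in ents[0..1].
func main() {
	evictCount := uint64(1)
	tableLen := uint64(2)

	for id := uint64(2); id <= 3; id++ {
		k := id - evictCount - 1 // slot in ents, per id = k + evictCount + 1
		index := tableLen - k    // dynamic-table HPACK index: newest entry is 1
		fmt.Printf("id=%d -> ents[%d] -> HPACK index %d\n", id, k, index)
	}
	// id=2 -> ents[0] -> HPACK index 2 (oldest surviving entry)
	// id=3 -> ents[1] -> HPACK index 1 (newest entry)
}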
// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
-var staticTable = [...]HeaderField{
- pair(":authority", ""), // index 1 (1-based)
- pair(":method", "GET"),
- pair(":method", "POST"),
- pair(":path", "/"),
- pair(":path", "/index.html"),
- pair(":scheme", "http"),
- pair(":scheme", "https"),
- pair(":status", "200"),
- pair(":status", "204"),
- pair(":status", "206"),
- pair(":status", "304"),
- pair(":status", "400"),
- pair(":status", "404"),
- pair(":status", "500"),
- pair("accept-charset", ""),
- pair("accept-encoding", "gzip, deflate"),
- pair("accept-language", ""),
- pair("accept-ranges", ""),
- pair("accept", ""),
- pair("access-control-allow-origin", ""),
- pair("age", ""),
- pair("allow", ""),
- pair("authorization", ""),
- pair("cache-control", ""),
- pair("content-disposition", ""),
- pair("content-encoding", ""),
- pair("content-language", ""),
- pair("content-length", ""),
- pair("content-location", ""),
- pair("content-range", ""),
- pair("content-type", ""),
- pair("cookie", ""),
- pair("date", ""),
- pair("etag", ""),
- pair("expect", ""),
- pair("expires", ""),
- pair("from", ""),
- pair("host", ""),
- pair("if-match", ""),
- pair("if-modified-since", ""),
- pair("if-none-match", ""),
- pair("if-range", ""),
- pair("if-unmodified-since", ""),
- pair("last-modified", ""),
- pair("link", ""),
- pair("location", ""),
- pair("max-forwards", ""),
- pair("proxy-authenticate", ""),
- pair("proxy-authorization", ""),
- pair("range", ""),
- pair("referer", ""),
- pair("refresh", ""),
- pair("retry-after", ""),
- pair("server", ""),
- pair("set-cookie", ""),
- pair("strict-transport-security", ""),
- pair("transfer-encoding", ""),
- pair("user-agent", ""),
- pair("vary", ""),
- pair("via", ""),
- pair("www-authenticate", ""),
+var staticTable = newStaticTable()
+var staticTableEntries = [...]HeaderField{
+ {Name: ":authority"},
+ {Name: ":method", Value: "GET"},
+ {Name: ":method", Value: "POST"},
+ {Name: ":path", Value: "/"},
+ {Name: ":path", Value: "/index.html"},
+ {Name: ":scheme", Value: "http"},
+ {Name: ":scheme", Value: "https"},
+ {Name: ":status", Value: "200"},
+ {Name: ":status", Value: "204"},
+ {Name: ":status", Value: "206"},
+ {Name: ":status", Value: "304"},
+ {Name: ":status", Value: "400"},
+ {Name: ":status", Value: "404"},
+ {Name: ":status", Value: "500"},
+ {Name: "accept-charset"},
+ {Name: "accept-encoding", Value: "gzip, deflate"},
+ {Name: "accept-language"},
+ {Name: "accept-ranges"},
+ {Name: "accept"},
+ {Name: "access-control-allow-origin"},
+ {Name: "age"},
+ {Name: "allow"},
+ {Name: "authorization"},
+ {Name: "cache-control"},
+ {Name: "content-disposition"},
+ {Name: "content-encoding"},
+ {Name: "content-language"},
+ {Name: "content-length"},
+ {Name: "content-location"},
+ {Name: "content-range"},
+ {Name: "content-type"},
+ {Name: "cookie"},
+ {Name: "date"},
+ {Name: "etag"},
+ {Name: "expect"},
+ {Name: "expires"},
+ {Name: "from"},
+ {Name: "host"},
+ {Name: "if-match"},
+ {Name: "if-modified-since"},
+ {Name: "if-none-match"},
+ {Name: "if-range"},
+ {Name: "if-unmodified-since"},
+ {Name: "last-modified"},
+ {Name: "link"},
+ {Name: "location"},
+ {Name: "max-forwards"},
+ {Name: "proxy-authenticate"},
+ {Name: "proxy-authorization"},
+ {Name: "range"},
+ {Name: "referer"},
+ {Name: "refresh"},
+ {Name: "retry-after"},
+ {Name: "server"},
+ {Name: "set-cookie"},
+ {Name: "strict-transport-security"},
+ {Name: "transfer-encoding"},
+ {Name: "user-agent"},
+ {Name: "vary"},
+ {Name: "via"},
+ {Name: "www-authenticate"},
+}
+
+func newStaticTable() *headerFieldTable {
+ t := &headerFieldTable{}
+ t.init()
+ for _, e := range staticTableEntries[:] {
+ t.addEntry(e)
+ }
+ return t
}
var huffmanCodes = [256]uint32{
diff --git a/src/vendor/golang.org/x/net/http2/http2.go b/src/vendor/golang.org/x/net/http2/http2.go
index f06e87b3e..bdaba1d46 100644
--- a/src/vendor/golang.org/x/net/http2/http2.go
+++ b/src/vendor/golang.org/x/net/http2/http2.go
@@ -29,13 +29,14 @@ import (
"strings"
"sync"
- "golang.org/x/net/lex/httplex"
+ "golang.org/x/net/http/httpguts"
)
var (
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
+ inTests bool
)
func init() {
@@ -77,13 +78,23 @@ var (
type streamState int
+// HTTP/2 stream states.
+//
+// See http://tools.ietf.org/html/rfc7540#section-5.1.
+//
+// For simplicity, the server code merges "reserved (local)" into
+// "half-closed (remote)". This is one less state transition to track.
+// The only downside is that we send PUSH_PROMISEs slightly less
+// liberally than allowable. More discussion here:
+// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
+//
+// "reserved (remote)" is omitted since the client code does not
+// support server push.
const (
stateIdle streamState = iota
stateOpen
stateHalfClosedLocal
stateHalfClosedRemote
- stateResvLocal
- stateResvRemote
stateClosed
)
@@ -92,8 +103,6 @@ var stateName = [...]string{
stateOpen: "Open",
stateHalfClosedLocal: "HalfClosedLocal",
stateHalfClosedRemote: "HalfClosedRemote",
- stateResvLocal: "ResvLocal",
- stateResvRemote: "ResvRemote",
stateClosed: "Closed",
}
@@ -170,7 +179,7 @@ var (
)
// validWireHeaderFieldName reports whether v is a valid header field
-// name (key). See httplex.ValidHeaderName for the base rules.
+// name (key). See httpguts.ValidHeaderName for the base rules.
//
// Further, http2 says:
// "Just as in HTTP/1.x, header field names are strings of ASCII
@@ -182,7 +191,7 @@ func validWireHeaderFieldName(v string) bool {
return false
}
for _, r := range v {
- if !httplex.IsTokenRune(r) {
+ if !httpguts.IsTokenRune(r) {
return false
}
if 'A' <= r && r <= 'Z' {
@@ -192,19 +201,12 @@ func validWireHeaderFieldName(v string) bool {
return true
}
-var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
-
-func init() {
- for i := 100; i <= 999; i++ {
- if v := http.StatusText(i); v != "" {
- httpCodeStringCommon[i] = strconv.Itoa(i)
- }
- }
-}
-
func httpCodeString(code int) string {
- if s, ok := httpCodeStringCommon[code]; ok {
- return s
+ switch code {
+ case 200:
+ return "200"
+ case 404:
+ return "404"
}
return strconv.Itoa(code)
}
@@ -253,14 +255,27 @@ func newBufferedWriter(w io.Writer) *bufferedWriter {
return &bufferedWriter{w: w}
}
+// bufWriterPoolBufferSize is the size of bufio.Writer's
+// buffers created using bufWriterPool.
+//
+// TODO: pick a less arbitrary value? this is a bit under
+// (3 x typical 1500 byte MTU) at least. Other than that,
+// not much thought went into it.
+const bufWriterPoolBufferSize = 4 << 10
+
var bufWriterPool = sync.Pool{
New: func() interface{} {
- // TODO: pick something better? this is a bit under
- // (3 x typical 1500 byte MTU) at least.
- return bufio.NewWriterSize(nil, 4<<10)
+ return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
},
}
+func (w *bufferedWriter) Available() int {
+ if w.bw == nil {
+ return bufWriterPoolBufferSize
+ }
+ return w.bw.Available()
+}
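Available is added so the frame scheduler can tell whether a pending frame fits in the buffered writer without flushing; startFrameWrite (later in this patch) uses that to write small frames synchronously on the serve goroutine rather than spawning writeFrameAsync. A minimal look at the bufio semantics involved:

package main

import (
	"bufio"
	"fmt"
	"os"
)

const poolBufferSize = 4 << 10

func main() {
	bw := bufio.NewWriterSize(os.Stdout, poolBufferSize)
	fmt.Println(bw.Available()) // 4096: a fresh (or pooled, unused) writer is all free space
	bw.WriteString("HEADERS frame bytes")
	fmt.Println(bw.Available()) // 4077: free space shrinks as frames are buffered
	bw.Flush()                  // the serve loop emits an explicit flush frame for this
}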
+
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
if w.bw == nil {
bw := bufWriterPool.Get().(*bufio.Writer)
@@ -290,7 +305,7 @@ func mustUint31(v int32) uint32 {
}
// bodyAllowedForStatus reports whether a given response status code
-// permits a body. See RFC 2616, section 4.4.
+// permits a body. See RFC 7230, section 3.3.
func bodyAllowedForStatus(status int) bool {
switch {
case status >= 100 && status <= 199:
@@ -343,10 +358,27 @@ func (s *sorter) Keys(h http.Header) []string {
}
func (s *sorter) SortStrings(ss []string) {
- // Our sorter works on s.v, which sorter owners, so
+ // Our sorter works on s.v, which sorter owns, so
// stash it away while we sort the user's buffer.
save := s.v
s.v = ss
sort.Sort(s)
s.v = save
}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+// *) a non-empty string starting with '/'
+// *) the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
+func validPseudoPath(v string) bool {
+ return (len(v) > 0 && v[0] == '/') || v == "*"
+}
diff --git a/src/vendor/golang.org/x/net/http2/not_go111.go b/src/vendor/golang.org/x/net/http2/not_go111.go
new file mode 100644
index 000000000..161bca7ce
--- /dev/null
+++ b/src/vendor/golang.org/x/net/http2/not_go111.go
@@ -0,0 +1,20 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.11
+
+package http2
+
+import (
+ "net/http/httptrace"
+ "net/textproto"
+)
+
+func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false }
+
+func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {}
+
+func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
+ return nil
+}
diff --git a/src/vendor/golang.org/x/net/http2/not_go16.go b/src/vendor/golang.org/x/net/http2/not_go16.go
deleted file mode 100644
index efd2e1282..000000000
--- a/src/vendor/golang.org/x/net/http2/not_go16.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.6
-
-package http2
-
-import (
- "crypto/tls"
- "net/http"
- "time"
-)
-
-func configureTransport(t1 *http.Transport) (*Transport, error) {
- return nil, errTransportVersion
-}
-
-func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
- return 0
-
-}
-
-// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
-func isBadCipher(cipher uint16) bool {
- switch cipher {
- case tls.TLS_RSA_WITH_RC4_128_SHA,
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
- // Reject cipher suites from Appendix A.
- // "This list includes those cipher suites that do not
- // offer an ephemeral key exchange and those that are
- // based on the TLS null, stream or block cipher type"
- return true
- default:
- return false
- }
-}
diff --git a/src/vendor/golang.org/x/net/http2/not_go17.go b/src/vendor/golang.org/x/net/http2/not_go17.go
deleted file mode 100644
index 28df0c16b..000000000
--- a/src/vendor/golang.org/x/net/http2/not_go17.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.7
-
-package http2
-
-import (
- "net"
- "net/http"
-)
-
-type contextContext interface{}
-
-type fakeContext struct{}
-
-func (fakeContext) Done() <-chan struct{} { return nil }
-func (fakeContext) Err() error { panic("should not be called") }
-
-func reqContext(r *http.Request) fakeContext {
- return fakeContext{}
-}
-
-func setResponseUncompressed(res *http.Response) {
- // Nothing.
-}
-
-type clientTrace struct{}
-
-func requestTrace(*http.Request) *clientTrace { return nil }
-func traceGotConn(*http.Request, *ClientConn) {}
-func traceFirstResponseByte(*clientTrace) {}
-func traceWroteHeaders(*clientTrace) {}
-func traceWroteRequest(*clientTrace, error) {}
-func traceGot100Continue(trace *clientTrace) {}
-func traceWait100Continue(trace *clientTrace) {}
-
-func nop() {}
-
-func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
- return nil, nop
-}
-
-func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
- return ctx, nop
-}
-
-func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
- return req
-}
diff --git a/src/vendor/golang.org/x/net/http2/pipe.go b/src/vendor/golang.org/x/net/http2/pipe.go
index 53b7a1daf..a6140099c 100644
--- a/src/vendor/golang.org/x/net/http2/pipe.go
+++ b/src/vendor/golang.org/x/net/http2/pipe.go
@@ -10,13 +10,13 @@ import (
"sync"
)
-// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
+// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
// underlying buffer is an interface. (io.Pipe is always unbuffered)
type pipe struct {
mu sync.Mutex
- c sync.Cond // c.L lazily initialized to &p.mu
- b pipeBuffer
+ c sync.Cond // c.L lazily initialized to &p.mu
+ b pipeBuffer // nil when done reading
err error // read error once empty. non-nil means closed.
breakErr error // immediate read error (caller doesn't see rest of b)
donec chan struct{} // closed on error
@@ -32,6 +32,9 @@ type pipeBuffer interface {
func (p *pipe) Len() int {
p.mu.Lock()
defer p.mu.Unlock()
+ if p.b == nil {
+ return 0
+ }
return p.b.Len()
}
@@ -47,7 +50,7 @@ func (p *pipe) Read(d []byte) (n int, err error) {
if p.breakErr != nil {
return 0, p.breakErr
}
- if p.b.Len() > 0 {
+ if p.b != nil && p.b.Len() > 0 {
return p.b.Read(d)
}
if p.err != nil {
@@ -55,6 +58,7 @@ func (p *pipe) Read(d []byte) (n int, err error) {
p.readFn() // e.g. copy trailers
p.readFn = nil // not sticky like p.err
}
+ p.b = nil
return 0, p.err
}
p.c.Wait()
@@ -75,6 +79,9 @@ func (p *pipe) Write(d []byte) (n int, err error) {
if p.err != nil {
return 0, errClosedPipeWrite
}
+ if p.breakErr != nil {
+ return len(d), nil // discard when there is no reader
+ }
return p.b.Write(d)
}
@@ -109,6 +116,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) {
return
}
p.readFn = fn
+ if dst == &p.breakErr {
+ p.b = nil
+ }
*dst = err
p.closeDoneLocked()
}
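With these pipe changes, the buffer is freed once reading is done (p.b = nil) and post-break writes become successful no-ops, so a handler writing to an aborted stream neither blocks nor pins memory. A toy single-threaded analogue (the real pipe is mutex-guarded; this type is illustrative only):

package main

import "fmt"

// toyPipe sketches the "discard after break" behavior: once broken,
// writes report success but the data goes nowhere, and the buffered
// bytes are released for garbage collection.
type toyPipe struct {
	buf      []byte
	breakErr error
}

func (p *toyPipe) Write(d []byte) (int, error) {
	if p.breakErr != nil {
		return len(d), nil // no reader anymore; pretend success, discard
	}
	p.buf = append(p.buf, d...)
	return len(d), nil
}

func (p *toyPipe) breakWith(err error) {
	p.breakErr = err
	p.buf = nil // drop buffered data immediately
}

func main() {
	p := &toyPipe{}
	p.Write([]byte("body"))
	p.breakWith(fmt.Errorf("stream reset"))
	n, err := p.Write([]byte("more")) // discarded
	fmt.Println(n, err, len(p.buf))   // 4 <nil> 0
}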
diff --git a/src/vendor/golang.org/x/net/http2/server.go b/src/vendor/golang.org/x/net/http2/server.go
index 8206fa79d..d4abeb2b9 100644
--- a/src/vendor/golang.org/x/net/http2/server.go
+++ b/src/vendor/golang.org/x/net/http2/server.go
@@ -2,17 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// TODO: replace all <-sc.doneServing with reads from the stream's cw
-// instead, and make sure that on close we close all open
-// streams. then remove doneServing?
-
-// TODO: re-audit GOAWAY support. Consider each incoming frame type and
-// whether it should be ignored during graceful shutdown.
-
-// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
-// configurable? or maximum number of idle clients and remove the
-// oldest?
-
// TODO: turn off the serve goroutine when idle, so
// an idle conn only has the readFrames goroutine active. (which could
// also be optimized probably to pin less memory in crypto/tls). This
@@ -39,11 +28,13 @@ package http2
import (
"bufio"
"bytes"
+ "context"
"crypto/tls"
"errors"
"fmt"
"io"
"log"
+ "math"
"net"
"net/http"
"net/textproto"
@@ -56,6 +47,7 @@ import (
"sync"
"time"
+ "golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
)
@@ -114,6 +106,47 @@ type Server struct {
// PermitProhibitedCipherSuites, if true, permits the use of
// cipher suites prohibited by the HTTP/2 spec.
PermitProhibitedCipherSuites bool
+
+ // IdleTimeout specifies how long until idle clients should be
+ // closed with a GOAWAY frame. PING frames are not considered
+ // activity for the purposes of IdleTimeout.
+ IdleTimeout time.Duration
+
+ // MaxUploadBufferPerConnection is the size of the initial flow
+ // control window for each connection. The HTTP/2 spec does not
+ // allow this to be smaller than 65535 or larger than 2^32-1.
+ // If the value is outside this range, a default value will be
+ // used instead.
+ MaxUploadBufferPerConnection int32
+
+ // MaxUploadBufferPerStream is the size of the initial flow control
+ // window for each stream. The HTTP/2 spec does not allow this to
+ // be larger than 2^32-1. If the value is zero or larger than the
+ // maximum, a default value will be used instead.
+ MaxUploadBufferPerStream int32
+
+ // NewWriteScheduler constructs a write scheduler for a connection.
+ // If nil, a default scheduler is chosen.
+ NewWriteScheduler func() WriteScheduler
+
+ // Internal state. This is a pointer (rather than embedded directly)
+ // so that we don't embed a Mutex in this struct, which will make the
+ // struct non-copyable, which might break some callers.
+ state *serverInternalState
+}
+
+func (s *Server) initialConnRecvWindowSize() int32 {
+ if s.MaxUploadBufferPerConnection > initialWindowSize {
+ return s.MaxUploadBufferPerConnection
+ }
+ return 1 << 20
+}
+
+func (s *Server) initialStreamRecvWindowSize() int32 {
+ if s.MaxUploadBufferPerStream > 0 {
+ return s.MaxUploadBufferPerStream
+ }
+ return 1 << 20
}
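The new Server fields make flow-control windows and idle handling tunable; zero or out-of-range values fall back to the defaults computed just above (1 MB windows). A hedged configuration sketch using the public API (addresses and cert paths are placeholders):

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	h2 := &http2.Server{
		IdleTimeout:                  5 * time.Minute, // GOAWAY idle clients
		MaxUploadBufferPerConnection: 1 << 20,         // connection-wide recv window
		MaxUploadBufferPerStream:     1 << 20,         // per-stream recv window
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}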
func (s *Server) maxReadFrameSize() uint32 {
@@ -130,27 +163,76 @@ func (s *Server) maxConcurrentStreams() uint32 {
return defaultMaxStreams
}
+type serverInternalState struct {
+ mu sync.Mutex
+ activeConns map[*serverConn]struct{}
+}
+
+func (s *serverInternalState) registerConn(sc *serverConn) {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ s.activeConns[sc] = struct{}{}
+ s.mu.Unlock()
+}
+
+func (s *serverInternalState) unregisterConn(sc *serverConn) {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ delete(s.activeConns, sc)
+ s.mu.Unlock()
+}
+
+func (s *serverInternalState) startGracefulShutdown() {
+ if s == nil {
+ return // if the Server was used without calling ConfigureServer
+ }
+ s.mu.Lock()
+ for sc := range s.activeConns {
+ sc.startGracefulShutdown()
+ }
+ s.mu.Unlock()
+}
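serverInternalState is a mutex-guarded registry of live connections with nil-receiver guards, so a Server that was never passed through ConfigureServer (state == nil) degrades to no-ops instead of panicking. The shape of that pattern, with illustrative names:

package main

import (
	"fmt"
	"sync"
)

type conn struct{ id int }

// registry tracks live connections; a nil *registry is valid and inert,
// mirroring serverInternalState's "used without ConfigureServer" case.
type registry struct {
	mu    sync.Mutex
	conns map[*conn]struct{}
}

func (r *registry) add(c *conn) {
	if r == nil {
		return
	}
	r.mu.Lock()
	r.conns[c] = struct{}{}
	r.mu.Unlock()
}

func (r *registry) each(f func(*conn)) {
	if r == nil {
		return
	}
	r.mu.Lock()
	for c := range r.conns {
		f(c)
	}
	r.mu.Unlock()
}

func main() {
	var nilReg *registry
	nilReg.add(&conn{id: 1}) // safe no-op on a nil receiver

	r := &registry{conns: make(map[*conn]struct{})}
	r.add(&conn{id: 2})
	r.each(func(c *conn) { fmt.Println("shutting down conn", c.id) })
}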
+
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
//
// ConfigureServer must be called before s begins serving.
func ConfigureServer(s *http.Server, conf *Server) error {
+ if s == nil {
+ panic("nil *http.Server")
+ }
if conf == nil {
conf = new(Server)
}
+ conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
+ if h1, h2 := s, conf; h2.IdleTimeout == 0 {
+ if h1.IdleTimeout != 0 {
+ h2.IdleTimeout = h1.IdleTimeout
+ } else {
+ h2.IdleTimeout = h1.ReadTimeout
+ }
+ }
+ s.RegisterOnShutdown(conf.state.startGracefulShutdown)
if s.TLSConfig == nil {
s.TLSConfig = new(tls.Config)
} else if s.TLSConfig.CipherSuites != nil {
// If they already provided a CipherSuite list, return
// an error if it has a bad order or is missing
- // ECDHE_RSA_WITH_AES_128_GCM_SHA256.
- const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256.
haveRequired := false
sawBad := false
for i, cs := range s.TLSConfig.CipherSuites {
- if cs == requiredCipher {
+ switch cs {
+ case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ // Alternative MTI cipher to not discourage ECDSA-only servers.
+ // See http://golang.org/cl/30721 for further information.
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
haveRequired = true
}
if isBadCipher(cs) {
@@ -160,7 +242,7 @@ func ConfigureServer(s *http.Server, conf *Server) error {
}
}
if !haveRequired {
- return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")
+ return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.")
}
}
@@ -183,9 +265,6 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if !haveNPN {
s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
}
- // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers
- // to switch to "h2".
- s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14")
if s.TLSNextProto == nil {
s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
@@ -200,7 +279,6 @@ func ConfigureServer(s *http.Server, conf *Server) error {
})
}
s.TLSNextProto[NextProtoTLS] = protoHandler
- s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
return nil
}
@@ -254,29 +332,50 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
defer cancel()
sc := &serverConn{
- srv: s,
- hs: opts.baseConfig(),
- conn: c,
- baseCtx: baseCtx,
- remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(c),
- handler: opts.handler(),
- streams: make(map[uint32]*stream),
- readFrameCh: make(chan readFrameResult),
- wantWriteFrameCh: make(chan frameWriteMsg, 8),
- wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
- bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
- doneServing: make(chan struct{}),
- advMaxStreams: s.maxConcurrentStreams(),
- writeSched: writeScheduler{
- maxFrameSize: initialMaxFrameSize,
- },
- initialWindowSize: initialWindowSize,
- headerTableSize: initialHeaderTableSize,
- serveG: newGoroutineLock(),
- pushEnabled: true,
+ srv: s,
+ hs: opts.baseConfig(),
+ conn: c,
+ baseCtx: baseCtx,
+ remoteAddrStr: c.RemoteAddr().String(),
+ bw: newBufferedWriter(c),
+ handler: opts.handler(),
+ streams: make(map[uint32]*stream),
+ readFrameCh: make(chan readFrameResult),
+ wantWriteFrameCh: make(chan FrameWriteRequest, 8),
+ serveMsgCh: make(chan interface{}, 8),
+ wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
+ bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
+ doneServing: make(chan struct{}),
+ clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
+ advMaxStreams: s.maxConcurrentStreams(),
+ initialStreamSendWindowSize: initialWindowSize,
+ maxFrameSize: initialMaxFrameSize,
+ headerTableSize: initialHeaderTableSize,
+ serveG: newGoroutineLock(),
+ pushEnabled: true,
}
+ s.state.registerConn(sc)
+ defer s.state.unregisterConn(sc)
+
+ // The net/http package sets the write deadline from the
+ // http.Server.WriteTimeout during the TLS handshake, but then
+ // passes the connection off to us with the deadline already set.
+ // Write deadlines are set per stream in serverConn.newStream.
+ // Disarm the net.Conn write deadline here.
+ if sc.hs.WriteTimeout != 0 {
+ sc.conn.SetWriteDeadline(time.Time{})
+ }
+
+ if s.NewWriteScheduler != nil {
+ sc.writeSched = s.NewWriteScheduler()
+ } else {
+ sc.writeSched = NewRandomWriteScheduler()
+ }
+
+ // These start at the RFC-specified defaults. If there is a higher
+ // configured value for inflow, that will be updated when we send a
+ // WINDOW_UPDATE shortly after sending SETTINGS.
sc.flow.add(initialWindowSize)
sc.inflow.add(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
@@ -311,7 +410,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
// addresses during development.
//
// TODO: optionally enforce? Or enforce at the time we receive
- // a new request, and verify the the ServerName matches the :authority?
+ // a new request, and verify the ServerName matches the :authority?
// But that precludes proxy situations, perhaps.
//
// So for now, do nothing here again.
@@ -339,6 +438,15 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
sc.serve()
}
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
+ ctx, cancel = context.WithCancel(context.Background())
+ ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
+ if hs := opts.baseConfig(); hs != nil {
+ ctx = context.WithValue(ctx, http.ServerContextKey, hs)
+ }
+ return
+}
+
func (sc *serverConn) rejectConn(err ErrCode, debug string) {
sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
// ignoring errors. hanging up anyway.
@@ -354,47 +462,54 @@ type serverConn struct {
conn net.Conn
bw *bufferedWriter // writing to conn
handler http.Handler
- baseCtx contextContext
+ baseCtx context.Context
framer *Framer
- doneServing chan struct{} // closed when serverConn.serve ends
- readFrameCh chan readFrameResult // written by serverConn.readFrames
- wantWriteFrameCh chan frameWriteMsg // from handlers -> serve
- wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
- bodyReadCh chan bodyReadMsg // from handlers -> serve
- testHookCh chan func(int) // code to run on the serve loop
- flow flow // conn-wide (not stream-specific) outbound flow control
- inflow flow // conn-wide inbound flow control
- tlsState *tls.ConnectionState // shared by all handlers, like net/http
+ doneServing chan struct{} // closed when serverConn.serve ends
+ readFrameCh chan readFrameResult // written by serverConn.readFrames
+ wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve
+ wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
+ bodyReadCh chan bodyReadMsg // from handlers -> serve
+ serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop
+ flow flow // conn-wide (not stream-specific) outbound flow control
+ inflow flow // conn-wide inbound flow control
+ tlsState *tls.ConnectionState // shared by all handlers, like net/http
remoteAddrStr string
+ writeSched WriteScheduler
// Everything following is owned by the serve loop; use serveG.check():
- serveG goroutineLock // used to verify funcs are on serve()
- pushEnabled bool
- sawFirstSettings bool // got the initial SETTINGS frame after the preface
- needToSendSettingsAck bool
- unackedSettings int // how many SETTINGS have we sent without ACKs?
- clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
- advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
- curOpenStreams uint32 // client's number of open streams
- maxStreamID uint32 // max ever seen
- streams map[uint32]*stream
- initialWindowSize int32
- headerTableSize uint32
- peerMaxHeaderListSize uint32 // zero means unknown (default)
- canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
- writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh
- needsFrameFlush bool // last frame write wasn't a flush
- writeSched writeScheduler
- inGoAway bool // we've started to or sent GOAWAY
- needToSendGoAway bool // we need to schedule a GOAWAY frame write
- goAwayCode ErrCode
- shutdownTimerCh <-chan time.Time // nil until used
- shutdownTimer *time.Timer // nil until used
- freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf
+ serveG goroutineLock // used to verify funcs are on serve()
+ pushEnabled bool
+ sawFirstSettings bool // got the initial SETTINGS frame after the preface
+ needToSendSettingsAck bool
+ unackedSettings int // how many SETTINGS have we sent without ACKs?
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+ advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+ curClientStreams uint32 // number of open streams initiated by the client
+ curPushedStreams uint32 // number of open streams initiated by server push
+ maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests
+ maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes
+ streams map[uint32]*stream
+ initialStreamSendWindowSize int32
+ maxFrameSize int32
+ headerTableSize uint32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
+ writingFrame bool // started writing a frame (on serve goroutine or separate)
+ writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+ inGoAway bool // we've started to or sent GOAWAY
+ inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
+ needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ goAwayCode ErrCode
+ shutdownTimer *time.Timer // nil until used
+ idleTimer *time.Timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
hpackEncoder *hpack.Encoder
+
+ // Used by startGracefulShutdown.
+ shutdownOnce sync.Once
}
func (sc *serverConn) maxHeaderListSize() uint32 {
@@ -409,6 +524,11 @@ func (sc *serverConn) maxHeaderListSize() uint32 {
return uint32(n + typicalHeaders*perFieldOverhead)
}
+func (sc *serverConn) curOpenStreams() uint32 {
+ sc.serveG.check()
+ return sc.curClientStreams + sc.curPushedStreams
+}
+
// stream represents a stream. This is the minimal metadata needed by
// the serve goroutine. Most of the actual stream state is owned by
// the http.Handler's goroutine in the responseWriter. Because the
@@ -422,7 +542,7 @@ type stream struct {
id uint32
body *pipe // non-nil if expecting DATA frames
cw closeWaiter // closed wait stream transitions to closed state
- ctx contextContext
+ ctx context.Context
cancelCtx func()
// owned by serverConn's serve loop:
@@ -434,11 +554,10 @@ type stream struct {
numTrailerValues int64
weight uint8
state streamState
- sentReset bool // only true once detached from streams map
- gotReset bool // only true once detacted from streams map
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- reqBuf []byte
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ writeDeadline *time.Timer // nil if unused
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
@@ -453,7 +572,7 @@ func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
sc.serveG.check()
- // http://http2.github.io/http2-spec/#rfc.section.5.1
+ // http://tools.ietf.org/html/rfc7540#section-5.1
if st, ok := sc.streams[streamID]; ok {
return st.state, st
}
@@ -463,8 +582,14 @@ func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
// a client sends a HEADERS frame on stream 7 without ever sending a
// frame on stream 5, then stream 5 transitions to the "closed"
// state when the first frame for stream 7 is sent or received."
- if streamID <= sc.maxStreamID {
- return stateClosed, nil
+ if streamID%2 == 1 {
+ if streamID <= sc.maxClientStreamID {
+ return stateClosed, nil
+ }
+ } else {
+ if streamID <= sc.maxPushPromiseID {
+ return stateClosed, nil
+ }
}
return stateIdle, nil
}
@@ -540,7 +665,7 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
if err == nil {
return
}
- if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {
+ if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout {
// Boring, expected errors.
sc.vlogf(format, args...)
} else {
@@ -550,6 +675,7 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
func (sc *serverConn) canonicalHeader(v string) string {
sc.serveG.check()
+ buildCommonHeaderMapsOnce()
cv, ok := commonCanonHeader[v]
if ok {
return cv
@@ -603,17 +729,17 @@ func (sc *serverConn) readFrames() {
// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
type frameWriteResult struct {
- wm frameWriteMsg // what was written (or attempted)
- err error // result of the writeFrame call
+ wr FrameWriteRequest // what was written (or attempted)
+ err error // result of the writeFrame call
}
// writeFrameAsync runs in its own goroutine and writes a single frame
// and then reports when it's done.
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
-func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {
- err := wm.write.writeFrame(sc)
- sc.wroteFrameCh <- frameWriteResult{wm, err}
+func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
+ err := wr.write.writeFrame(sc)
+ sc.wroteFrameCh <- frameWriteResult{wr, err}
}
func (sc *serverConn) closeAllStreamsOnConnClose() {
@@ -657,40 +783,53 @@ func (sc *serverConn) serve() {
sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
}
- sc.writeFrame(frameWriteMsg{
+ sc.writeFrame(FrameWriteRequest{
write: writeSettings{
{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
{SettingMaxConcurrentStreams, sc.advMaxStreams},
{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
-
- // TODO: more actual settings, notably
- // SettingInitialWindowSize, but then we also
- // want to bump up the conn window size the
- // same amount here right after the settings
+ {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
},
})
sc.unackedSettings++
+ // Each connection starts with initialWindowSize inflow tokens.
+ // If a higher value is configured, we add more tokens.
+ if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
+ sc.sendWindowUpdate(nil, int(diff))
+ }
+
if err := sc.readPreface(); err != nil {
sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
return
}
// Now that we've got the preface, get us out of the
- // "StateNew" state. We can't go directly to idle, though.
+ // "StateNew" state. We can't go directly to idle, though.
// Active means we read some data and anticipate a request. We'll
// do another Active when we get a HEADERS frame.
sc.setConnState(http.StateActive)
sc.setConnState(http.StateIdle)
+ if sc.srv.IdleTimeout != 0 {
+ sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+ defer sc.idleTimer.Stop()
+ }
+
go sc.readFrames() // closed by defer sc.conn.Close above
- settingsTimer := time.NewTimer(firstSettingsTimeout)
+ settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+ defer settingsTimer.Stop()
+
loopNum := 0
for {
loopNum++
select {
- case wm := <-sc.wantWriteFrameCh:
- sc.writeFrame(wm)
+ case wr := <-sc.wantWriteFrameCh:
+ if se, ok := wr.write.(StreamError); ok {
+ sc.resetStream(se)
+ break
+ }
+ sc.writeFrame(wr)
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
@@ -698,26 +837,85 @@ func (sc *serverConn) serve() {
return
}
res.readMore()
- if settingsTimer.C != nil {
+ if settingsTimer != nil {
settingsTimer.Stop()
- settingsTimer.C = nil
+ settingsTimer = nil
}
case m := <-sc.bodyReadCh:
sc.noteBodyRead(m.st, m.n)
- case <-settingsTimer.C:
- sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
- return
- case <-sc.shutdownTimerCh:
- sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
- return
- case fn := <-sc.testHookCh:
- fn(loopNum)
+ case msg := <-sc.serveMsgCh:
+ switch v := msg.(type) {
+ case func(int):
+ v(loopNum) // for testing
+ case *serverMessage:
+ switch v {
+ case settingsTimerMsg:
+ sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
+ return
+ case idleTimerMsg:
+ sc.vlogf("connection is idle")
+ sc.goAway(ErrCodeNo)
+ case shutdownTimerMsg:
+ sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
+ return
+ case gracefulShutdownMsg:
+ sc.startGracefulShutdownInternal()
+ default:
+ panic("unknown timer")
+ }
+ case *startPushRequest:
+ sc.startPush(v)
+ default:
+ panic(fmt.Sprintf("unexpected type %T", v))
+ }
+ }
+
+ // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
+ // with no error code (graceful shutdown), don't start the timer until
+ // all open streams have been completed.
+ sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame
+ gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0
+ if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) {
+ sc.shutDownIn(goAwayTimeout)
}
}
}
-// readPreface reads the ClientPreface greeting from the peer
-// or returns an error on timeout or an invalid greeting.
+func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) {
+ select {
+ case <-sc.doneServing:
+ case <-sharedCh:
+ close(privateCh)
+ }
+}
+
+type serverMessage int
+
+// Message values sent to serveMsgCh.
+var (
+ settingsTimerMsg = new(serverMessage)
+ idleTimerMsg = new(serverMessage)
+ shutdownTimerMsg = new(serverMessage)
+ gracefulShutdownMsg = new(serverMessage)
+)
+
+func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
+func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
+func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
+
+func (sc *serverConn) sendServeMsg(msg interface{}) {
+ sc.serveG.checkNotOn() // NOT
+ select {
+ case sc.serveMsgCh <- msg:
+ case <-sc.doneServing:
+ }
+}
+
+var errPrefaceTimeout = errors.New("timeout waiting for client preface")
+
+// readPreface reads the ClientPreface greeting from the peer or
+// returns errPrefaceTimeout on timeout, or an error if the greeting
+// is invalid.
func (sc *serverConn) readPreface() error {
errc := make(chan error, 1)
go func() {
@@ -735,7 +933,7 @@ func (sc *serverConn) readPreface() error {
defer timer.Stop()
select {
case <-timer.C:
- return errors.New("timeout waiting for client preface")
+ return errPrefaceTimeout
case err := <-errc:
if err == nil {
if VerboseLogs {
@@ -760,7 +958,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
ch := errChanPool.Get().(chan error)
writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream}
- err := sc.writeFrameFromHandler(frameWriteMsg{
+ err := sc.writeFrameFromHandler(FrameWriteRequest{
write: writeArg,
stream: stream,
done: ch,
@@ -796,17 +994,17 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
return err
}
-// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
+// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts
// if the connection has gone away.
//
// This must not be run from the serve goroutine itself, else it might
// deadlock writing to sc.wantWriteFrameCh (which is only mildly
// buffered and is read by serve itself). If you're on the serve
// goroutine, call writeFrame instead.
-func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
+func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error {
sc.serveG.checkNotOn() // NOT
select {
- case sc.wantWriteFrameCh <- wm:
+ case sc.wantWriteFrameCh <- wr:
return nil
case <-sc.doneServing:
// Serve loop is gone.
@@ -823,60 +1021,108 @@ func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
// make it onto the wire
//
// If you're not on the serve goroutine, use writeFrameFromHandler instead.
-func (sc *serverConn) writeFrame(wm frameWriteMsg) {
+func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
sc.serveG.check()
+ // If true, wr will not be written and wr.done will not be signaled.
var ignoreWrite bool
+ // We are not allowed to write frames on closed streams. RFC 7540 Section
+ // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on
+ // a closed stream." Our server never sends PRIORITY, so that exception
+ // does not apply.
+ //
+ // The serverConn might close an open stream while the stream's handler
+ // is still running. For example, the server might close a stream when it
+ // receives bad data from the client. If this happens, the handler might
+ // attempt to write a frame after the stream has been closed (since the
+ // handler hasn't yet been notified of the close). In this case, we simply
+ // ignore the frame. The handler will notice that the stream is closed when
+ // it waits for the frame to be written.
+ //
+ // As an exception to this rule, we allow sending RST_STREAM after close.
+ // This allows us to immediately reject new streams without tracking any
+ // state for those streams (except for the queued RST_STREAM frame). This
+ // may result in duplicate RST_STREAMs in some cases, but the client should
+ // ignore those.
+ if wr.StreamID() != 0 {
+ _, isReset := wr.write.(StreamError)
+ if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset {
+ ignoreWrite = true
+ }
+ }
+
// Don't send a 100-continue response if we've already sent headers.
// See golang.org/issue/14030.
- switch wm.write.(type) {
+ switch wr.write.(type) {
case *writeResHeaders:
- wm.stream.wroteHeaders = true
+ wr.stream.wroteHeaders = true
case write100ContinueHeadersFrame:
- if wm.stream.wroteHeaders {
+ if wr.stream.wroteHeaders {
+ // We do not need to notify wr.done because this frame is
+ // never written with wr.done != nil.
+ if wr.done != nil {
+ panic("wr.done != nil for write100ContinueHeadersFrame")
+ }
ignoreWrite = true
}
}
if !ignoreWrite {
- sc.writeSched.add(wm)
+ sc.writeSched.Push(wr)
}
sc.scheduleFrameWrite()
}
-// startFrameWrite starts a goroutine to write wm (in a separate
+// startFrameWrite starts a goroutine to write wr (in a separate
// goroutine since that might block on the network), and updates the
-// serve goroutine's state about the world, updated from info in wm.
-func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
+// serve goroutine's state about the world, updated from info in wr.
+func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
sc.serveG.check()
if sc.writingFrame {
panic("internal error: can only be writing one frame at a time")
}
- st := wm.stream
+ st := wr.stream
if st != nil {
switch st.state {
case stateHalfClosedLocal:
- panic("internal error: attempt to send frame on half-closed-local stream")
- case stateClosed:
- if st.sentReset || st.gotReset {
- // Skip this frame.
- sc.scheduleFrameWrite()
- return
+ switch wr.write.(type) {
+ case StreamError, handlerPanicRST, writeWindowUpdate:
+ // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE
+ // in this state. (We never send PRIORITY from the server, so that is not checked.)
+ default:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr))
}
- panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
+ case stateClosed:
+ panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr))
+ }
+ }
+ if wpp, ok := wr.write.(*writePushPromise); ok {
+ var err error
+ wpp.promisedID, err = wpp.allocatePromisedID()
+ if err != nil {
+ sc.writingFrameAsync = false
+ wr.replyToWriter(err)
+ return
}
}
sc.writingFrame = true
sc.needsFrameFlush = true
- go sc.writeFrameAsync(wm)
+ if wr.write.staysWithinBuffer(sc.bw.Available()) {
+ sc.writingFrameAsync = false
+ err := wr.write.writeFrame(sc)
+ sc.wroteFrame(frameWriteResult{wr, err})
+ } else {
+ sc.writingFrameAsync = true
+ go sc.writeFrameAsync(wr)
+ }
}
// errHandlerPanicked is the error given to any callers blocked in a read from
// Request.Body when the main goroutine panics. Since most handlers read in the
-// the main ServeHTTP goroutine, this will show up rarely.
+// main ServeHTTP goroutine, this will show up rarely.
var errHandlerPanicked = errors.New("http2: handler panicked")
// wroteFrame is called on the serve goroutine with the result of
@@ -887,27 +1133,12 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
panic("internal error: expected to be already writing a frame")
}
sc.writingFrame = false
+ sc.writingFrameAsync = false
- wm := res.wm
- st := wm.stream
+ wr := res.wr
- closeStream := endsStream(wm.write)
-
- if _, ok := wm.write.(handlerPanicRST); ok {
- sc.closeStream(st, errHandlerPanicked)
- }
-
- // Reply (if requested) to the blocked ServeHTTP goroutine.
- if ch := wm.done; ch != nil {
- select {
- case ch <- res.err:
- default:
- panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write))
- }
- }
- wm.write = nil // prevent use (assume it's tainted after wm.done send)
-
- if closeStream {
+ if writeEndsStream(wr.write) {
+ st := wr.stream
if st == nil {
panic("internal error: expecting non-nil stream")
}
@@ -916,19 +1147,37 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
// Here we would go to stateHalfClosedLocal in
// theory, but since our handler is done and
// the net/http package provides no mechanism
- // for finishing writing to a ResponseWriter
- // while still reading data (see possible TODO
- // at top of this file), we go into closed
- // state here anyway, after telling the peer
- // we're hanging up on them.
- st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
- errCancel := streamError(st.id, ErrCodeCancel)
- sc.resetStream(errCancel)
+ // for closing a ResponseWriter while still
+ // reading data (see possible TODO at top of
+ // this file), we go into closed state here
+ // anyway, after telling the peer we're
+ // hanging up on them. We'll transition to
+ // stateClosed after the RST_STREAM frame is
+ // written.
+ st.state = stateHalfClosedLocal
+ // Section 8.1: a server MAY request that the client abort
+ // transmission of a request without error by sending a
+ // RST_STREAM with an error code of NO_ERROR after sending
+ // a complete response.
+ sc.resetStream(streamError(st.id, ErrCodeNo))
case stateHalfClosedRemote:
sc.closeStream(st, errHandlerComplete)
}
+ } else {
+ switch v := wr.write.(type) {
+ case StreamError:
+ // st may be unknown if the RST_STREAM was generated to reject bad input.
+ if st, ok := sc.streams[v.StreamID]; ok {
+ sc.closeStream(st, v)
+ }
+ case handlerPanicRST:
+ sc.closeStream(wr.stream, errHandlerPanicked)
+ }
}
+ // Reply (if requested) to unblock the ServeHTTP goroutine.
+ wr.replyToWriter(res.err)
+
sc.scheduleFrameWrite()
}
@@ -946,35 +1195,72 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
// flush the write buffer.
func (sc *serverConn) scheduleFrameWrite() {
sc.serveG.check()
- if sc.writingFrame {
+ if sc.writingFrame || sc.inFrameScheduleLoop {
return
}
- if sc.needToSendGoAway {
- sc.needToSendGoAway = false
- sc.startFrameWrite(frameWriteMsg{
- write: &writeGoAway{
- maxStreamID: sc.maxStreamID,
- code: sc.goAwayCode,
- },
- })
- return
- }
- if sc.needToSendSettingsAck {
- sc.needToSendSettingsAck = false
- sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})
- return
- }
- if !sc.inGoAway {
- if wm, ok := sc.writeSched.take(); ok {
- sc.startFrameWrite(wm)
- return
+ sc.inFrameScheduleLoop = true
+ for !sc.writingFrameAsync {
+ if sc.needToSendGoAway {
+ sc.needToSendGoAway = false
+ sc.startFrameWrite(FrameWriteRequest{
+ write: &writeGoAway{
+ maxStreamID: sc.maxClientStreamID,
+ code: sc.goAwayCode,
+ },
+ })
+ continue
}
+ if sc.needToSendSettingsAck {
+ sc.needToSendSettingsAck = false
+ sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}})
+ continue
+ }
+ if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
+ if wr, ok := sc.writeSched.Pop(); ok {
+ sc.startFrameWrite(wr)
+ continue
+ }
+ }
+ if sc.needsFrameFlush {
+ sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}})
+ sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+ continue
+ }
+ break
}
- if sc.needsFrameFlush {
- sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})
- sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
- return
- }
+ sc.inFrameScheduleLoop = false
+}
+
+// startGracefulShutdown gracefully shuts down a connection. This
+// sends GOAWAY with ErrCodeNo to tell the client we're gracefully
+// shutting down. The connection isn't closed until all current
+// streams are done.
+//
+// startGracefulShutdown returns immediately; it does not wait until
+// the connection has shut down.
+func (sc *serverConn) startGracefulShutdown() {
+ sc.serveG.checkNotOn() // NOT
+ sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) })
+}
+
+// After sending GOAWAY, the connection will close after goAwayTimeout.
+// If we close the connection immediately after sending GOAWAY, there may
+// be unsent data in our kernel receive buffer, which will cause the kernel
+// to send a TCP RST on close() instead of a FIN. This RST will abort the
+// connection immediately, whether or not the client had received the GOAWAY.
+//
+// Ideally we should delay for at least 1 RTT + epsilon so the client has
+// a chance to read the GOAWAY and stop sending messages. Measuring RTT
+// is hard, so we approximate with 1 second. See golang.org/issue/18701.
+//
+// This is a var so it can be shorter in tests, where all requests use
+// the loopback interface, making the expected RTT very small.
+//
+// TODO: configurable?
+var goAwayTimeout = 1 * time.Second
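As the comment notes, this is a var precisely so tests can shorten it. A minimal package-internal sketch of that pattern (goAwayTimeout is unexported, so this only compiles inside the http2 package; the test name is hypothetical, and the usual testing and time imports are assumed):

    func TestShortGoAwayTimeout(t *testing.T) {
        old := goAwayTimeout
        goAwayTimeout = 50 * time.Millisecond // shrink the post-GOAWAY grace period
        defer func() { goAwayTimeout = old }()
        // ... start a server connection, trigger shutdown, and assert
        // that the underlying net.Conn closes shortly after the GOAWAY ...
    }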
+
+func (sc *serverConn) startGracefulShutdownInternal() {
+ sc.goAway(ErrCodeNo)
}
func (sc *serverConn) goAway(code ErrCode) {
@@ -982,12 +1268,6 @@ func (sc *serverConn) goAway(code ErrCode) {
if sc.inGoAway {
return
}
- if code != ErrCodeNo {
- sc.shutDownIn(250 * time.Millisecond)
- } else {
- // TODO: configurable
- sc.shutDownIn(1 * time.Second)
- }
sc.inGoAway = true
sc.needToSendGoAway = true
sc.goAwayCode = code
@@ -996,16 +1276,14 @@ func (sc *serverConn) goAway(code ErrCode) {
func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
- sc.shutdownTimer = time.NewTimer(d)
- sc.shutdownTimerCh = sc.shutdownTimer.C
+ sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
func (sc *serverConn) resetStream(se StreamError) {
sc.serveG.check()
- sc.writeFrame(frameWriteMsg{write: se})
+ sc.writeFrame(FrameWriteRequest{write: se})
if st, ok := sc.streams[se.StreamID]; ok {
- st.sentReset = true
- sc.closeStream(st, se)
+ st.resetQueued = true
}
}
@@ -1090,6 +1368,8 @@ func (sc *serverConn) processFrame(f Frame) error {
return sc.processResetStream(f)
case *PriorityFrame:
return sc.processPriority(f)
+ case *GoAwayFrame:
+ return sc.processGoAway(f)
case *PushPromiseFrame:
// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
@@ -1115,7 +1395,10 @@ func (sc *serverConn) processPing(f *PingFrame) error {
// PROTOCOL_ERROR."
return ConnectionError(ErrCodeProtocol)
}
- sc.writeFrame(frameWriteMsg{write: writePingAck{f}})
+ if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
+ return nil
+ }
+ sc.writeFrame(FrameWriteRequest{write: writePingAck{f}})
return nil
}
@@ -1123,7 +1406,14 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
sc.serveG.check()
switch {
case f.StreamID != 0: // stream-level flow control
- st := sc.streams[f.StreamID]
+ state, st := sc.state(f.StreamID)
+ if state == stateIdle {
+ // Section 5.1: "Receiving any frame other than HEADERS
+ // or PRIORITY on a stream in this state MUST be
+ // treated as a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+ }
if st == nil {
// "WINDOW_UPDATE can be sent by a peer that has sent a
// frame bearing the END_STREAM flag. This means that a
@@ -1157,7 +1447,6 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
return ConnectionError(ErrCodeProtocol)
}
if st != nil {
- st.gotReset = true
st.cancelCtx()
sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
}
@@ -1170,11 +1459,24 @@ func (sc *serverConn) closeStream(st *stream, err error) {
panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
}
st.state = stateClosed
- sc.curOpenStreams--
- if sc.curOpenStreams == 0 {
- sc.setConnState(http.StateIdle)
+ if st.writeDeadline != nil {
+ st.writeDeadline.Stop()
+ }
+ if st.isPushed() {
+ sc.curPushedStreams--
+ } else {
+ sc.curClientStreams--
}
delete(sc.streams, st.id)
+ if len(sc.streams) == 0 {
+ sc.setConnState(http.StateIdle)
+ if sc.srv.IdleTimeout != 0 {
+ sc.idleTimer.Reset(sc.srv.IdleTimeout)
+ }
+ if h1ServerKeepAlivesDisabled(sc.hs) {
+ sc.startGracefulShutdownInternal()
+ }
+ }
if p := st.body; p != nil {
// Return any buffered unread bytes worth of conn-level flow control.
// See golang.org/issue/16481
@@ -1183,19 +1485,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
p.CloseWithError(err)
}
st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
- sc.writeSched.forgetStream(st.id)
- if st.reqBuf != nil {
- // Stash this request body buffer (64k) away for reuse
- // by a future POST/PUT/etc.
- //
- // TODO(bradfitz): share on the server? sync.Pool?
- // Server requires locks and might hurt contention.
- // sync.Pool might work, or might be worse, depending
- // on goroutine CPU migrations. (get and put on
- // separate CPUs). Maybe a mix of strategies. But
- // this is an easy win for now.
- sc.freeRequestBodyBuf = st.reqBuf
- }
+ sc.writeSched.CloseStream(st.id)
}
func (sc *serverConn) processSettings(f *SettingsFrame) error {
@@ -1210,6 +1500,12 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
}
return nil
}
+ if f.NumSettings() > 100 || f.HasDuplicates() {
+ // This isn't actually in the spec, but hang up on
+ // suspiciously large settings frames or those with
+ // duplicate entries.
+ return ConnectionError(ErrCodeProtocol)
+ }
if err := f.ForeachSetting(sc.processSetting); err != nil {
return err
}
@@ -1237,7 +1533,7 @@ func (sc *serverConn) processSetting(s Setting) error {
case SettingInitialWindowSize:
return sc.processSettingInitialWindowSize(s.Val)
case SettingMaxFrameSize:
- sc.writeSched.maxFrameSize = s.Val
+ sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
case SettingMaxHeaderListSize:
sc.peerMaxHeaderListSize = s.Val
default:
@@ -1262,9 +1558,9 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
// adjust the size of all stream flow control windows that it
// maintains by the difference between the new value and the
// old value."
- old := sc.initialWindowSize
- sc.initialWindowSize = int32(val)
- growth := sc.initialWindowSize - old // may be negative
+ old := sc.initialStreamSendWindowSize
+ sc.initialStreamSendWindowSize = int32(val)
+ growth := int32(val) - old // may be negative
for _, st := range sc.streams {
if !st.flow.add(growth) {
// 6.9.2 Initial Flow Control Window Size
@@ -1281,14 +1577,24 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
func (sc *serverConn) processData(f *DataFrame) error {
sc.serveG.check()
+ if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
+ return nil
+ }
data := f.Data()
// "If a DATA frame is received whose stream is not in "open"
// or "half closed (local)" state, the recipient MUST respond
// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
id := f.Header().StreamID
- st, ok := sc.streams[id]
- if !ok || st.state != stateOpen || st.gotTrailerHeader {
+ state, st := sc.state(id)
+ if id == 0 || state == stateIdle {
+ // Section 5.1: "Receiving any frame other than HEADERS
+ // or PRIORITY on a stream in this state MUST be
+ // treated as a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
// This includes sending a RST_STREAM if the stream is
// in stateHalfClosedLocal (which currently means that
// the http.Handler returned, so it's done reading &
@@ -1308,6 +1614,10 @@ func (sc *serverConn) processData(f *DataFrame) error {
sc.inflow.take(int32(f.Length))
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+ if st != nil && st.resetQueued {
+ // Already have a stream error in flight. Don't send another.
+ return nil
+ }
return streamError(id, ErrCodeStreamClosed)
}
if st.body == nil {
@@ -1317,7 +1627,10 @@ func (sc *serverConn) processData(f *DataFrame) error {
// Sender sending more than they'd declared?
if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
- return streamError(id, ErrCodeStreamClosed)
+ // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
+ // value of a content-length header field does not equal the sum of the
+ // DATA frame payload lengths that form the body.
+ return streamError(id, ErrCodeProtocol)
}
if f.Length > 0 {
// Check whether the client has flow control quota.
@@ -1350,6 +1663,25 @@ func (sc *serverConn) processData(f *DataFrame) error {
return nil
}
+func (sc *serverConn) processGoAway(f *GoAwayFrame) error {
+ sc.serveG.check()
+ if f.ErrCode != ErrCodeNo {
+ sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+ } else {
+ sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f)
+ }
+ sc.startGracefulShutdownInternal()
+ // http://tools.ietf.org/html/rfc7540#section-6.8
+ // We should not create any new streams, which means we should disable push.
+ sc.pushEnabled = false
+ return nil
+}
+
+// isPushed reports whether the stream is server-initiated.
+func (st *stream) isPushed() bool {
+ return st.id%2 == 0
+}
+
// endStream closes a Request.Body's pipe. It is called when a DATA
// frame says a request body is over (or after trailers).
func (st *stream) endStream() {
@@ -1377,14 +1709,20 @@ func (st *stream) copyTrailersToHandlerRequest() {
}
}
+// onWriteTimeout is run on its own goroutine (from time.AfterFunc)
+// when the stream's WriteTimeout has fired.
+func (st *stream) onWriteTimeout() {
+ st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)})
+}
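onWriteTimeout is armed per stream from http.Server.WriteTimeout (see newStream below), so the familiar HTTP/1 server knobs now bound HTTP/2 streams as well. A minimal configuration sketch, with illustrative values and cert paths:

    srv := &http.Server{
        Addr:         ":8443",
        ReadTimeout:  5 * time.Second,  // disarmed once request headers are in (see processHeaders)
        WriteTimeout: 10 * time.Second, // per-stream write deadline via time.AfterFunc
    }
    log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))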
+
func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
sc.serveG.check()
- id := f.Header().StreamID
+ id := f.StreamID
if sc.inGoAway {
// Ignore.
return nil
}
- // http://http2.github.io/http2-spec/#rfc.section.5.1.1
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1
// Streams initiated by a client MUST use odd-numbered stream
// identifiers. [...] An endpoint that receives an unexpected
// stream identifier MUST respond with a connection error
@@ -1396,8 +1734,19 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// send a trailer for an open one. If we already have a stream
// open, let it process its own HEADERS frame (trailers at this
// point, if it's valid).
- st := sc.streams[f.Header().StreamID]
- if st != nil {
+ if st := sc.streams[f.StreamID]; st != nil {
+ if st.resetQueued {
+ // We're sending RST_STREAM to close the stream, so don't bother
+ // processing this frame.
+ return nil
+ }
+ // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
+ // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
+ // this state, it MUST respond with a stream error (Section 5.4.2) of
+ // type STREAM_CLOSED.
+ if st.state == stateHalfClosedRemote {
+ return streamError(id, ErrCodeStreamClosed)
+ }
return st.processTrailerHeaders(f)
}
@@ -1406,54 +1755,45 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// endpoint has opened or reserved. [...] An endpoint that
// receives an unexpected stream identifier MUST respond with
// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
- if id <= sc.maxStreamID {
+ if id <= sc.maxClientStreamID {
return ConnectionError(ErrCodeProtocol)
}
- sc.maxStreamID = id
+ sc.maxClientStreamID = id
- ctx, cancelCtx := contextWithCancel(sc.baseCtx)
- st = &stream{
- sc: sc,
- id: id,
- state: stateOpen,
- ctx: ctx,
- cancelCtx: cancelCtx,
+ if sc.idleTimer != nil {
+ sc.idleTimer.Stop()
}
- if f.StreamEnded() {
- st.state = stateHalfClosedRemote
- }
- st.cw.Init()
- st.flow.conn = &sc.flow // link to conn-level counter
- st.flow.add(sc.initialWindowSize)
- st.inflow.conn = &sc.inflow // link to conn-level counter
- st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
-
- sc.streams[id] = st
- if f.HasPriority() {
- adjustStreamPriority(sc.streams, st.id, f.Priority)
- }
- sc.curOpenStreams++
- if sc.curOpenStreams == 1 {
- sc.setConnState(http.StateActive)
- }
- if sc.curOpenStreams > sc.advMaxStreams {
- // "Endpoints MUST NOT exceed the limit set by their
- // peer. An endpoint that receives a HEADERS frame
- // that causes their advertised concurrent stream
- // limit to be exceeded MUST treat this as a stream
- // error (Section 5.4.2) of type PROTOCOL_ERROR or
- // REFUSED_STREAM."
+ // http://tools.ietf.org/html/rfc7540#section-5.1.2
+ // [...] Endpoints MUST NOT exceed the limit set by their peer. An
+ // endpoint that receives a HEADERS frame that causes their
+ // advertised concurrent stream limit to be exceeded MUST treat
+ // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR
+ // or REFUSED_STREAM.
+ if sc.curClientStreams+1 > sc.advMaxStreams {
if sc.unackedSettings == 0 {
// They should know better.
- return streamError(st.id, ErrCodeProtocol)
+ return streamError(id, ErrCodeProtocol)
}
// Assume it's a network race, where they just haven't
// received our last SETTINGS update. But actually
// this can't happen yet, because we don't yet provide
// a way for users to adjust server parameters at
// runtime.
- return streamError(st.id, ErrCodeRefusedStream)
+ return streamError(id, ErrCodeRefusedStream)
+ }
+
+ initialState := stateOpen
+ if f.StreamEnded() {
+ initialState = stateHalfClosedRemote
+ }
+ st := sc.newStream(id, 0, initialState)
+
+ if f.HasPriority() {
+ if err := checkPriority(f.StreamID, f.Priority); err != nil {
+ return err
+ }
+ sc.writeSched.AdjustStream(st.id, f.Priority)
}
rw, req, err := sc.newWriterAndRequest(st, f)
@@ -1471,10 +1811,21 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
if f.Truncated {
// Their header list was too long. Send a 431 error.
handler = handleHeaderListTooLong
- } else if err := checkValidHTTP2Request(req); err != nil {
+ } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil {
handler = new400Handler(err)
}
+ // The net/http package sets the read deadline from the
+ // http.Server.ReadTimeout during the TLS handshake, but then
+ // passes the connection off to us with the deadline already
+ // set. Disarm it here after the request headers are read,
+ // similar to how the http1 server works. Here it's
+ // technically more like the http1 Server's ReadHeaderTimeout
+ // (in Go 1.8), though. That's a more sane option anyway.
+ if sc.hs.ReadTimeout != 0 {
+ sc.conn.SetReadDeadline(time.Time{})
+ }
+
go sc.runHandler(rw, req, handler)
return nil
}
@@ -1496,7 +1847,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
if st.trailer != nil {
for _, hf := range f.RegularFields() {
key := sc.canonicalHeader(hf.Name)
- if !ValidTrailerHeader(key) {
+ if !httpguts.ValidTrailerHeader(key) {
// TODO: send more details to the peer somehow. But http2 has
// no way to send debug data at a stream level. Discuss with
// HTTP folk.
@@ -1509,62 +1860,81 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
return nil
}
-func (sc *serverConn) processPriority(f *PriorityFrame) error {
- adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
+func checkPriority(streamID uint32, p PriorityParam) error {
+ if streamID == p.StreamDep {
+ // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
+ // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
+ // Section 5.3.3 says that a stream can depend on one of its dependencies,
+ // so it's only self-dependencies that are forbidden.
+ return streamError(streamID, ErrCodeProtocol)
+ }
return nil
}
-func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {
- st, ok := streams[streamID]
- if !ok {
- // TODO: not quite correct (this streamID might
- // already exist in the dep tree, but be closed), but
- // close enough for now.
- return
+func (sc *serverConn) processPriority(f *PriorityFrame) error {
+ if sc.inGoAway {
+ return nil
}
- st.weight = priority.Weight
- parent := streams[priority.StreamDep] // might be nil
- if parent == st {
- // if client tries to set this stream to be the parent of itself
- // ignore and keep going
- return
+ if err := checkPriority(f.StreamID, f.PriorityParam); err != nil {
+ return err
+ }
+ sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
+ return nil
+}
+
+func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
+ sc.serveG.check()
+ if id == 0 {
+ panic("internal error: cannot create stream with id 0")
}
- // section 5.3.3: If a stream is made dependent on one of its
- // own dependencies, the formerly dependent stream is first
- // moved to be dependent on the reprioritized stream's previous
- // parent. The moved dependency retains its weight.
- for piter := parent; piter != nil; piter = piter.parent {
- if piter == st {
- parent.parent = st.parent
- break
- }
+ ctx, cancelCtx := context.WithCancel(sc.baseCtx)
+ st := &stream{
+ sc: sc,
+ id: id,
+ state: state,
+ ctx: ctx,
+ cancelCtx: cancelCtx,
}
- st.parent = parent
- if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
- for _, openStream := range streams {
- if openStream != st && openStream.parent == st.parent {
- openStream.parent = st
- }
- }
+ st.cw.Init()
+ st.flow.conn = &sc.flow // link to conn-level counter
+ st.flow.add(sc.initialStreamSendWindowSize)
+ st.inflow.conn = &sc.inflow // link to conn-level counter
+ st.inflow.add(sc.srv.initialStreamRecvWindowSize())
+ if sc.hs.WriteTimeout != 0 {
+ st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
+
+ sc.streams[id] = st
+ sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID})
+ if st.isPushed() {
+ sc.curPushedStreams++
+ } else {
+ sc.curClientStreams++
+ }
+ if sc.curOpenStreams() == 1 {
+ sc.setConnState(http.StateActive)
+ }
+
+ return st
}
func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
sc.serveG.check()
- method := f.PseudoValue("method")
- path := f.PseudoValue("path")
- scheme := f.PseudoValue("scheme")
- authority := f.PseudoValue("authority")
+ rp := requestParam{
+ method: f.PseudoValue("method"),
+ scheme: f.PseudoValue("scheme"),
+ authority: f.PseudoValue("authority"),
+ path: f.PseudoValue("path"),
+ }
- isConnect := method == "CONNECT"
+ isConnect := rp.method == "CONNECT"
if isConnect {
- if path != "" || scheme != "" || authority == "" {
+ if rp.path != "" || rp.scheme != "" || rp.authority == "" {
return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
}
- } else if method == "" || path == "" ||
- (scheme != "https" && scheme != "http") {
+ } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
// See 8.1.2.6 Malformed Requests and Responses:
//
// Malformed requests or responses that are detected
@@ -1579,36 +1949,62 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
}
bodyOpen := !f.StreamEnded()
- if method == "HEAD" && bodyOpen {
+ if rp.method == "HEAD" && bodyOpen {
// HEAD requests can't have bodies
return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
}
- var tlsState *tls.ConnectionState // nil if not scheme https
- if scheme == "https" {
+ rp.header = make(http.Header)
+ for _, hf := range f.RegularFields() {
+ rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+ }
+ if rp.authority == "" {
+ rp.authority = rp.header.Get("Host")
+ }
+
+ rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
+ if err != nil {
+ return nil, nil, err
+ }
+ if bodyOpen {
+ if vv, ok := rp.header["Content-Length"]; ok {
+ req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+ } else {
+ req.ContentLength = -1
+ }
+ req.Body.(*requestBody).pipe = &pipe{
+ b: &dataBuffer{expected: req.ContentLength},
+ }
+ }
+ return rw, req, nil
+}
+
+type requestParam struct {
+ method string
+ scheme, authority, path string
+ header http.Header
+}
+
+func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
+ sc.serveG.check()
+
+ var tlsState *tls.ConnectionState // nil if not scheme https
+ if rp.scheme == "https" {
tlsState = sc.tlsState
}
- header := make(http.Header)
- for _, hf := range f.RegularFields() {
- header.Add(sc.canonicalHeader(hf.Name), hf.Value)
- }
-
- if authority == "" {
- authority = header.Get("Host")
- }
- needsContinue := header.Get("Expect") == "100-continue"
+ needsContinue := rp.header.Get("Expect") == "100-continue"
if needsContinue {
- header.Del("Expect")
+ rp.header.Del("Expect")
}
// Merge Cookie headers into one "; "-delimited value.
- if cookies := header["Cookie"]; len(cookies) > 1 {
- header.Set("Cookie", strings.Join(cookies, "; "))
+ if cookies := rp.header["Cookie"]; len(cookies) > 1 {
+ rp.header.Set("Cookie", strings.Join(cookies, "; "))
}
// Setup Trailers
var trailer http.Header
- for _, v := range header["Trailer"] {
+ for _, v := range rp.header["Trailer"] {
for _, key := range strings.Split(v, ",") {
key = http.CanonicalHeaderKey(strings.TrimSpace(key))
switch key {
@@ -1623,57 +2019,42 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
}
}
}
- delete(header, "Trailer")
+ delete(rp.header, "Trailer")
+
+ var url_ *url.URL
+ var requestURI string
+ if rp.method == "CONNECT" {
+ url_ = &url.URL{Host: rp.authority}
+ requestURI = rp.authority // mimic HTTP/1 server behavior
+ } else {
+ var err error
+ url_, err = url.ParseRequestURI(rp.path)
+ if err != nil {
+ return nil, nil, streamError(st.id, ErrCodeProtocol)
+ }
+ requestURI = rp.path
+ }
body := &requestBody{
conn: sc,
stream: st,
needsContinue: needsContinue,
}
- var url_ *url.URL
- var requestURI string
- if isConnect {
- url_ = &url.URL{Host: authority}
- requestURI = authority // mimic HTTP/1 server behavior
- } else {
- var err error
- url_, err = url.ParseRequestURI(path)
- if err != nil {
- return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
- }
- requestURI = path
- }
req := &http.Request{
- Method: method,
+ Method: rp.method,
URL: url_,
RemoteAddr: sc.remoteAddrStr,
- Header: header,
+ Header: rp.header,
RequestURI: requestURI,
Proto: "HTTP/2.0",
ProtoMajor: 2,
ProtoMinor: 0,
TLS: tlsState,
- Host: authority,
+ Host: rp.authority,
Body: body,
Trailer: trailer,
}
- req = requestWithContext(req, st.ctx)
- if bodyOpen {
- // Disabled, per golang.org/issue/14960:
- // st.reqBuf = sc.getRequestBodyBuf()
- // TODO: remove this 64k of garbage per request (again, but without a data race):
- buf := make([]byte, initialWindowSize)
-
- body.pipe = &pipe{
- b: &fixedBuffer{buf: buf},
- }
-
- if vv, ok := header["Content-Length"]; ok {
- req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
- } else {
- req.ContentLength = -1
- }
- }
+ req = req.WithContext(st.ctx)
rws := responseWriterStatePool.Get().(*responseWriterState)
bwSave := rws.bw
@@ -1689,15 +2070,6 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
return rw, req, nil
}
-func (sc *serverConn) getRequestBodyBuf() []byte {
- sc.serveG.check()
- if buf := sc.freeRequestBodyBuf; buf != nil {
- sc.freeRequestBodyBuf = nil
- return buf
- }
- return make([]byte, initialWindowSize)
-}
-
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
didPanic := true
@@ -1705,15 +2077,17 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler
rw.rws.stream.cancelCtx()
if didPanic {
e := recover()
- // Same as net/http:
- const size = 64 << 10
- buf := make([]byte, size)
- buf = buf[:runtime.Stack(buf, false)]
- sc.writeFrameFromHandler(frameWriteMsg{
+ sc.writeFrameFromHandler(FrameWriteRequest{
write: handlerPanicRST{rw.rws.stream.id},
stream: rw.rws.stream,
})
- sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+ // Same as net/http:
+ if e != nil && e != http.ErrAbortHandler {
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+ }
return
}
rw.handlerDone()
@@ -1744,7 +2118,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
// mutates it.
errc = errChanPool.Get().(chan error)
}
- if err := sc.writeFrameFromHandler(frameWriteMsg{
+ if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData,
stream: st,
done: errc,
@@ -1767,7 +2141,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
// called from handler goroutines.
func (sc *serverConn) write100ContinueHeaders(st *stream) {
- sc.writeFrameFromHandler(frameWriteMsg{
+ sc.writeFrameFromHandler(FrameWriteRequest{
write: write100ContinueHeadersFrame{st.id},
stream: st,
})
@@ -1783,11 +2157,13 @@ type bodyReadMsg struct {
// called from handler goroutines.
// Notes that the handler for the given stream ID read n bytes of its body
// and schedules flow control tokens to be sent.
-func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {
+func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
sc.serveG.checkNotOn() // NOT on
- select {
- case sc.bodyReadCh <- bodyReadMsg{st, n}:
- case <-sc.doneServing:
+ if n > 0 {
+ select {
+ case sc.bodyReadCh <- bodyReadMsg{st, n}:
+ case <-sc.doneServing:
+ }
}
}
@@ -1830,7 +2206,7 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
if st != nil {
streamID = st.id
}
- sc.writeFrame(frameWriteMsg{
+ sc.writeFrame(FrameWriteRequest{
write: writeWindowUpdate{streamID: streamID, n: uint32(n)},
stream: st,
})
@@ -1845,16 +2221,19 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
}
}
+// requestBody is the Handler's Request.Body type.
+// Read and Close may be called concurrently.
type requestBody struct {
stream *stream
conn *serverConn
- closed bool
+ closed bool // for use by Close only
+ sawEOF bool // for use by Read only
pipe *pipe // non-nil if we have a HTTP entity message body
needsContinue bool // need to send a 100-continue
}
func (b *requestBody) Close() error {
- if b.pipe != nil {
+ if b.pipe != nil && !b.closed {
b.pipe.BreakWithError(errClosedBody)
}
b.closed = true
@@ -1866,18 +2245,22 @@ func (b *requestBody) Read(p []byte) (n int, err error) {
b.needsContinue = false
b.conn.write100ContinueHeaders(b.stream)
}
- if b.pipe == nil {
+ if b.pipe == nil || b.sawEOF {
return 0, io.EOF
}
n, err = b.pipe.Read(p)
- if n > 0 {
- b.conn.noteBodyReadFromHandler(b.stream, n)
+ if err == io.EOF {
+ b.sawEOF = true
}
+ if b.conn == nil && inTests {
+ return
+ }
+ b.conn.noteBodyReadFromHandler(b.stream, n, err)
return
}
-// responseWriter is the http.ResponseWriter implementation. It's
-// intentionally small (1 pointer wide) to minimize garbage. The
+// responseWriter is the http.ResponseWriter implementation. It's
+// intentionally small (1 pointer wide) to minimize garbage. The
// responseWriterState pointer inside is zeroed at the end of a
// request (in handlerDone) and calls on the responseWriter thereafter
// simply crash (caller's mistake), but the much larger responseWriterState
@@ -1911,6 +2294,7 @@ type responseWriterState struct {
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
sentHeader bool // have we sent the header frame?
handlerDone bool // handler has finished
+ dirty bool // a Write failed; don't reuse this responseWriterState
sentContentLen int64 // non-zero if handler set a Content-Length header
wroteBytes int64
@@ -1923,15 +2307,24 @@ type chunkWriter struct{ rws *responseWriterState }
func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
-func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
+func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 }
+
+func (rws *responseWriterState) hasNonemptyTrailers() bool {
+ for _, trailer := range rws.trailers {
+ if _, ok := rws.handlerHeader[trailer]; ok {
+ return true
+ }
+ }
+ return false
+}
// declareTrailer is called for each Trailer header when the
// response header is written. It notes that a header will need to be
// written in the trailers at the end of the response.
func (rws *responseWriterState) declareTrailer(k string) {
k = http.CanonicalHeaderKey(k)
- if !ValidTrailerHeader(k) {
- // Forbidden by RFC 2616 14.40.
+ if !httpguts.ValidTrailerHeader(k) {
+ // Forbidden by RFC 7230, section 4.1.2.
rws.conn.logf("ignoring invalid trailer %q", k)
return
}
@@ -1968,7 +2361,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
clen = strconv.Itoa(len(p))
}
_, hasContentType := rws.snapHeader["Content-Type"]
- if !hasContentType && bodyAllowedForStatus(rws.status) {
+ if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 {
ctype = http.DetectContentType(p)
}
var date string
@@ -1981,6 +2374,19 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
foreachHeaderElement(v, rws.declareTrailer)
}
+ // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
+ // but respect "Connection" == "close" to mean sending a GOAWAY and tearing
+ // down the TCP connection when idle, like we do for HTTP/1.
+ // TODO: remove more Connection-specific header fields here, in addition
+ // to "Connection".
+ if _, ok := rws.snapHeader["Connection"]; ok {
+ v := rws.snapHeader.Get("Connection")
+ delete(rws.snapHeader, "Connection")
+ if v == "close" {
+ rws.conn.startGracefulShutdown()
+ }
+ }
+
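As a usage sketch, a handler can now request connection teardown exactly as it would over HTTP/1; the header never reaches the wire, and a graceful GOAWAY is scheduled instead (assumes the usual io and net/http imports):

    func goodbye(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Connection", "close") // stripped here; triggers startGracefulShutdown
        io.WriteString(w, "closing when idle\n")
    }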
endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
streamID: rws.stream.id,
@@ -1992,6 +2398,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
date: date,
})
if err != nil {
+ rws.dirty = true
return 0, err
}
if endStream {
@@ -2009,21 +2416,28 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
rws.promoteUndeclaredTrailers()
}
- endStream := rws.handlerDone && !rws.hasTrailers()
+ // only send trailers if they have actually been defined by the
+ // server handler.
+ hasNonemptyTrailers := rws.hasNonemptyTrailers()
+ endStream := rws.handlerDone && !hasNonemptyTrailers
if len(p) > 0 || endStream {
// only send a 0 byte DATA frame if we're ending the stream.
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
+ rws.dirty = true
return 0, err
}
}
- if rws.handlerDone && rws.hasTrailers() {
+ if rws.handlerDone && hasNonemptyTrailers {
err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
streamID: rws.stream.id,
h: rws.handlerHeader,
trailers: rws.trailers,
endStream: true,
})
+ if err != nil {
+ rws.dirty = true
+ }
return len(p), err
}
return len(p), nil
@@ -2047,11 +2461,11 @@ const TrailerPrefix = "Trailer:"
// after the header has already been flushed. Because the Go
// ResponseWriter interface has no way to set Trailers (only the
// Header), and because we didn't want to expand the ResponseWriter
-// interface, and because nobody used trailers, and because RFC 2616
+// interface, and because nobody used trailers, and because RFC 7230
// says you SHOULD (but not must) predeclare any trailers in the
// header, the official ResponseWriter rules said trailers in Go must
// be predeclared, and then we reuse the same ResponseWriter.Header()
-// map to mean both Headers and Trailers. When it's time to write the
+// map to mean both Headers and Trailers. When it's time to write the
// Trailers, we pick out the fields of Headers that were declared as
// trailers. That worked for a while, until we found the first major
// user of Trailers in the wild: gRPC (using them only over http2),
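For context, the mechanism described above lets a handler emit trailers it never predeclared, by prefixing header keys with TrailerPrefix after the response has started. A sketch (X-Checksum is an illustrative name):

    func handler(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(http.StatusOK)
        io.WriteString(w, "body")
        // Keys set with the "Trailer:" prefix after the first write are
        // promoted to HTTP trailers even without predeclaration.
        w.Header().Set(http2.TrailerPrefix+"X-Checksum", "abc123")
    }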
@@ -2110,8 +2524,9 @@ func (w *responseWriter) CloseNotify() <-chan bool {
if ch == nil {
ch = make(chan bool, 1)
rws.closeNotifierCh = ch
+ cw := rws.stream.cw
go func() {
- rws.stream.cw.Wait() // wait for close
+ cw.Wait() // wait for close
ch <- true
}()
}
@@ -2130,6 +2545,24 @@ func (w *responseWriter) Header() http.Header {
return rws.handlerHeader
}
+// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
+func checkWriteHeaderCode(code int) {
+ // Issue 22880: require valid WriteHeader status codes.
+ // For now we only enforce that it's three digits.
+ // In the future we might block things over 599 (600 and above aren't defined
+ // at http://httpwg.org/specs/rfc7231.html#status.codes)
+ // and we might block under 200 (once we have more mature 1xx support).
+ // But for now any three digits.
+ //
+ // We used to send "HTTP/1.1 000 0" on the wire in responses but there's
+ // no equivalent bogus thing we can realistically send in HTTP/2,
+ // so we'll consistently panic instead and help people find their bugs
+ // early. (We can't return an error from WriteHeader even if we wanted to.)
+ if code < 100 || code > 999 {
+ panic(fmt.Sprintf("invalid WriteHeader code %v", code))
+ }
+}
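Concretely, a handler bug like the one below now panics at the WriteHeader call rather than emitting a bogus status on the wire:

    func handler(w http.ResponseWriter, r *http.Request) {
        w.WriteHeader(42) // panics under this check: "invalid WriteHeader code 42"
    }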
+
func (w *responseWriter) WriteHeader(code int) {
rws := w.rws
if rws == nil {
@@ -2140,6 +2573,7 @@ func (w *responseWriter) WriteHeader(code int) {
func (rws *responseWriterState) writeHeader(code int) {
if !rws.wroteHeader {
+ checkWriteHeaderCode(code)
rws.wroteHeader = true
rws.status = code
if len(rws.handlerHeader) > 0 {
@@ -2162,7 +2596,7 @@ func cloneHeader(h http.Header) http.Header {
//
// * Handler calls w.Write or w.WriteString ->
// * -> rws.bw (*bufio.Writer) ->
-// * (Handler migth call Flush)
+// * (Handler might call Flush)
// * -> chunkWriter{rws}
// * -> responseWriterState.writeChunk(p []byte)
// * -> responseWriterState.writeChunk (most of the magic; see comment there)
@@ -2201,14 +2635,216 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int,
func (w *responseWriter) handlerDone() {
rws := w.rws
+ dirty := rws.dirty
rws.handlerDone = true
w.Flush()
w.rws = nil
- responseWriterStatePool.Put(rws)
+ if !dirty {
+ // Only recycle the pool if all prior Write calls to
+ // the serverConn goroutine completed successfully. If
+ // they returned earlier due to resets from the peer
+ // there might still be write goroutines outstanding
+ // from the serverConn referencing the rws memory. See
+ // issue 20704.
+ responseWriterStatePool.Put(rws)
+ }
+}
+
+// Push errors.
+var (
+ ErrRecursivePush = errors.New("http2: recursive push not allowed")
+ ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS")
+)
+
+var _ http.Pusher = (*responseWriter)(nil)
+
+func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
+ st := w.rws.stream
+ sc := st.sc
+ sc.serveG.checkNotOn()
+
+ // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream."
+ // http://tools.ietf.org/html/rfc7540#section-6.6
+ if st.isPushed() {
+ return ErrRecursivePush
+ }
+
+ if opts == nil {
+ opts = new(http.PushOptions)
+ }
+
+ // Default options.
+ if opts.Method == "" {
+ opts.Method = "GET"
+ }
+ if opts.Header == nil {
+ opts.Header = http.Header{}
+ }
+ wantScheme := "http"
+ if w.rws.req.TLS != nil {
+ wantScheme = "https"
+ }
+
+ // Validate the request.
+ u, err := url.Parse(target)
+ if err != nil {
+ return err
+ }
+ if u.Scheme == "" {
+ if !strings.HasPrefix(target, "/") {
+ return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
+ }
+ u.Scheme = wantScheme
+ u.Host = w.rws.req.Host
+ } else {
+ if u.Scheme != wantScheme {
+ return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
+ }
+ if u.Host == "" {
+ return errors.New("URL must have a host")
+ }
+ }
+ for k := range opts.Header {
+ if strings.HasPrefix(k, ":") {
+ return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
+ }
+ // These headers are meaningful only if the request has a body,
+ // but PUSH_PROMISE requests cannot have a body.
+ // http://tools.ietf.org/html/rfc7540#section-8.2
+ // Also disallow Host, since the promised URL must be absolute.
+ switch strings.ToLower(k) {
+ case "content-length", "content-encoding", "trailer", "te", "expect", "host":
+ return fmt.Errorf("promised request headers cannot include %q", k)
+ }
+ }
+ if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
+ return err
+ }
+
+ // The RFC effectively limits promised requests to GET and HEAD:
+ // "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+ // http://tools.ietf.org/html/rfc7540#section-8.2
+ if opts.Method != "GET" && opts.Method != "HEAD" {
+ return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
+ }
+
+ msg := &startPushRequest{
+ parent: st,
+ method: opts.Method,
+ url: u,
+ header: cloneHeader(opts.Header),
+ done: errChanPool.Get().(chan error),
+ }
+
+ select {
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-st.cw:
+ return errStreamClosed
+ case sc.serveMsgCh <- msg:
+ }
+
+ select {
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-st.cw:
+ return errStreamClosed
+ case err := <-msg.done:
+ errChanPool.Put(msg.done)
+ return err
+ }
+}
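With responseWriter implementing http.Pusher, handlers can initiate pushes. A usage sketch in which the target path is illustrative; over HTTP/1, or when the peer disabled push, the type assertion or the Push call simply fails:

    // assumes: import ("io"; "log"; "net/http")
    func index(w http.ResponseWriter, r *http.Request) {
        if pusher, ok := w.(http.Pusher); ok {
            if err := pusher.Push("/static/app.css", nil); err != nil {
                log.Printf("push failed: %v", err)
            }
        }
        io.WriteString(w, "<html>...</html>")
    }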
+
+type startPushRequest struct {
+ parent *stream
+ method string
+ url *url.URL
+ header http.Header
+ done chan error
+}
+
+func (sc *serverConn) startPush(msg *startPushRequest) {
+ sc.serveG.check()
+
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
+ // PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+ // is in either the "open" or "half-closed (remote)" state.
+ if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
+ // responseWriter.Push checks that the stream is peer-initiated.
+ msg.done <- errStreamClosed
+ return
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-6.6.
+ if !sc.pushEnabled {
+ msg.done <- http.ErrNotSupported
+ return
+ }
+
+ // PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+ // we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+ // is written. Once the ID is allocated, we start the request handler.
+ allocatePromisedID := func() (uint32, error) {
+ sc.serveG.check()
+
+ // Check this again, just in case. Technically, we might have received
+ // an updated SETTINGS by the time we got around to writing this frame.
+ if !sc.pushEnabled {
+ return 0, http.ErrNotSupported
+ }
+ // http://tools.ietf.org/html/rfc7540#section-6.5.2.
+ if sc.curPushedStreams+1 > sc.clientMaxStreams {
+ return 0, ErrPushLimitReached
+ }
+
+ // http://tools.ietf.org/html/rfc7540#section-5.1.1.
+ // Streams initiated by the server MUST use even-numbered identifiers.
+ // A server that is unable to establish a new stream identifier can send a GOAWAY
+ // frame so that the client is forced to open a new connection for new streams.
+ if sc.maxPushPromiseID+2 >= 1<<31 {
+ sc.startGracefulShutdownInternal()
+ return 0, ErrPushLimitReached
+ }
+ sc.maxPushPromiseID += 2
+ promisedID := sc.maxPushPromiseID
+
+ // http://tools.ietf.org/html/rfc7540#section-8.2.
+ // Strictly speaking, the new stream should start in "reserved (local)", then
+ // transition to "half closed (remote)" after sending the initial HEADERS, but
+ // we start in "half closed (remote)" for simplicity.
+ // See further comments at the definition of stateHalfClosedRemote.
+ promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
+ rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
+ method: msg.method,
+ scheme: msg.url.Scheme,
+ authority: msg.url.Host,
+ path: msg.url.RequestURI(),
+ header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
+ })
+ if err != nil {
+ // Should not happen, since we've already validated msg.url.
+ panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err))
+ }
+
+ go sc.runHandler(rw, req, sc.handler.ServeHTTP)
+ return promisedID, nil
+ }
+
+ sc.writeFrame(FrameWriteRequest{
+ write: &writePushPromise{
+ streamID: msg.parent.id,
+ method: msg.method,
+ url: msg.url,
+ h: msg.header,
+ allocatePromisedID: allocatePromisedID,
+ },
+ stream: msg.parent,
+ done: msg.done,
+ })
}
// foreachHeaderElement splits v according to the "#rule" construction
-// in RFC 2616 section 2.1 and calls fn for each non-empty element.
+// in RFC 7230 section 7 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
v = textproto.TrimString(v)
if v == "" {
@@ -2234,16 +2870,16 @@ var connHeaders = []string{
"Upgrade",
}
-// checkValidHTTP2Request checks whether req is a valid HTTP/2 request,
+// checkValidHTTP2RequestHeaders checks whether h contains valid HTTP/2 request headers,
// per RFC 7540 Section 8.1.2.2.
// The returned error is reported to users.
-func checkValidHTTP2Request(req *http.Request) error {
- for _, h := range connHeaders {
- if _, ok := req.Header[h]; ok {
- return fmt.Errorf("request header %q is not valid in HTTP/2", h)
+func checkValidHTTP2RequestHeaders(h http.Header) error {
+ for _, k := range connHeaders {
+ if _, ok := h[k]; ok {
+ return fmt.Errorf("request header %q is not valid in HTTP/2", k)
}
}
- te := req.Header["Te"]
+ te := h["Te"]
if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
}
@@ -2256,37 +2892,16 @@ func new400Handler(err error) http.HandlerFunc {
}
}
-// ValidTrailerHeader reports whether name is a valid header field name to appear
-// in trailers.
-// See: http://tools.ietf.org/html/rfc7230#section-4.1.2
-func ValidTrailerHeader(name string) bool {
- name = http.CanonicalHeaderKey(name)
- if strings.HasPrefix(name, "If-") || badTrailer[name] {
- return false
+// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
+// disabled. See comments on h1ServerShutdownChan above for why
+// the code is written this way.
+func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
+ var x interface{} = hs
+ type I interface {
+ doKeepAlives() bool
}
- return true
-}
-
-var badTrailer = map[string]bool{
- "Authorization": true,
- "Cache-Control": true,
- "Connection": true,
- "Content-Encoding": true,
- "Content-Length": true,
- "Content-Range": true,
- "Content-Type": true,
- "Expect": true,
- "Host": true,
- "Keep-Alive": true,
- "Max-Forwards": true,
- "Pragma": true,
- "Proxy-Authenticate": true,
- "Proxy-Authorization": true,
- "Proxy-Connection": true,
- "Range": true,
- "Realm": true,
- "Te": true,
- "Trailer": true,
- "Transfer-Encoding": true,
- "Www-Authenticate": true,
+ if hs, ok := x.(I); ok {
+ return !hs.doKeepAlives()
+ }
+ return false
}
diff --git a/src/vendor/golang.org/x/net/http2/transport.go b/src/vendor/golang.org/x/net/http2/transport.go
index 3cefc22a6..c0c80d893 100644
--- a/src/vendor/golang.org/x/net/http2/transport.go
+++ b/src/vendor/golang.org/x/net/http2/transport.go
@@ -10,6 +10,8 @@ import (
"bufio"
"bytes"
"compress/gzip"
+ "context"
+ "crypto/rand"
"crypto/tls"
"errors"
"fmt"
@@ -17,16 +19,21 @@ import (
"io/ioutil"
"log"
"math"
+ mathrand "math/rand"
"net"
"net/http"
+ "net/http/httptrace"
+ "net/textproto"
"sort"
"strconv"
"strings"
"sync"
+ "sync/atomic"
"time"
+ "golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
- "golang.org/x/net/lex/httplex"
+ "golang.org/x/net/idna"
)
const (
@@ -84,13 +91,23 @@ type Transport struct {
// MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
// send in the initial settings frame. It is how many bytes
- // of response headers are allow. Unlike the http2 spec, zero here
+ // of response headers are allowed. Unlike the http2 spec, zero here
// means to use a default limit (currently 10MB). If you actually
// want to advertise an unlimited value to the peer, Transport
// interprets the highest possible value here (0xffffffff or 1<<32-1)
// to mean no limit.
MaxHeaderListSize uint32
+ // StrictMaxConcurrentStreams controls whether the server's
+ // SETTINGS_MAX_CONCURRENT_STREAMS should be respected
+ // globally. If false, new TCP connections are created to the
+ // server as needed to keep each under the per-connection
+ // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the
+ // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as
+ // a global limit and callers of RoundTrip block when needed,
+ // waiting for their turn.
+ StrictMaxConcurrentStreams bool
+
// t1, if non-nil, is the standard library Transport using
// this transport. Its settings are used (but not its
// RoundTrip method, etc).
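A configuration sketch for the new StrictMaxConcurrentStreams knob above, for callers constructing the http2 Transport directly:

    t2 := &http2.Transport{
        // Treat the server's SETTINGS_MAX_CONCURRENT_STREAMS as a global
        // cap: RoundTrip blocks waiting for a free slot instead of dialing
        // additional TCP connections.
        StrictMaxConcurrentStreams: true,
    }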
@@ -114,16 +131,56 @@ func (t *Transport) disableCompression() bool {
return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
}
-var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6")
-
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
-// It requires Go 1.6 or later and returns an error if the net/http package is too old
-// or if t1 has already been HTTP/2-enabled.
+// It returns an error if t1 has already been HTTP/2-enabled.
func ConfigureTransport(t1 *http.Transport) error {
- _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
+ _, err := configureTransport(t1)
return err
}
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+ connPool := new(clientConnPool)
+ t2 := &Transport{
+ ConnPool: noDialClientConnPool{connPool},
+ t1: t1,
+ }
+ connPool.t = t2
+ if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
+ return nil, err
+ }
+ if t1.TLSClientConfig == nil {
+ t1.TLSClientConfig = new(tls.Config)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
+ t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
+ t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
+ }
+ upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
+ addr := authorityAddr("https", authority)
+ if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
+ go c.Close()
+ return erringRoundTripper{err}
+ } else if !used {
+ // Turns out we don't need this c.
+ // For example, two goroutines made requests to the same host
+ // at the same time, both kicking off TCP dials. (since protocol
+ // was unknown)
+ go c.Close()
+ }
+ return t2
+ }
+ if m := t1.TLSNextProto; len(m) == 0 {
+ t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
+ "h2": upgradeFn,
+ }
+ } else {
+ m["h2"] = upgradeFn
+ }
+ return t2, nil
+}
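A usage sketch of the simplified ConfigureTransport path (the URL is illustrative; assumes the usual crypto/tls, log, and net/http imports):

    t1 := &http.Transport{TLSClientConfig: &tls.Config{}}
    if err := http2.ConfigureTransport(t1); err != nil {
        log.Fatal(err) // only fails if t1 was already HTTP/2-enabled
    }
    client := &http.Client{Transport: t1} // NextProtos now carries "h2" and "http/1.1"
    resp, err := client.Get("https://example.com/")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()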
+
func (t *Transport) connPool() ClientConnPool {
t.connPoolOnce.Do(t.initConnPool)
return t.connPoolOrDef
@@ -143,30 +200,38 @@ type ClientConn struct {
t *Transport
tconn net.Conn // usually *tls.Conn, except specialized impls
tlsState *tls.ConnectionState // nil only for specialized impls
+ reused uint32 // whether conn is being reused; atomic
singleUse bool // whether being used for a single http.Request
// readLoop goroutine fields:
readerDone chan struct{} // closed on error
readerErr error // set before readerDone is closed
+ idleTimeout time.Duration // or 0 for never
+ idleTimer *time.Timer
+
mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes
flow flow // our conn-level flow control quota (cs.flow is per stream)
inflow flow // peer's conn-level flow control
+ closing bool
closed bool
wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
goAwayDebug string // goAway frame's debug data, retained as a string
streams map[uint32]*clientStream // client-initiated
nextStreamID uint32
+ pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
+ pings map[[8]byte]chan struct{} // in flight ping data to notification channel
bw *bufio.Writer
br *bufio.Reader
fr *Framer
lastActive time.Time
// Settings from peer: (also guarded by mu)
- maxFrameSize uint32
- maxConcurrentStreams uint32
- initialWindowSize uint32
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ peerMaxHeaderListSize uint64
+ initialWindowSize uint32
hbuf bytes.Buffer // HPACK encoder writes into this
henc *hpack.Encoder
@@ -181,10 +246,11 @@ type ClientConn struct {
type clientStream struct {
cc *ClientConn
req *http.Request
- trace *clientTrace // or nil
+ trace *httptrace.ClientTrace // or nil
ID uint32
resc chan resAndError
bufPipe pipe // buffered pipe with the flow-controlled response payload
+ startedWrite bool // started request body write; guarded by cc.mu
requestedGzip bool
on100 func() // optional code to run if get a 100 continue response
@@ -193,6 +259,7 @@ type clientStream struct {
bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
readErr error // sticky read error; owned by transportResponseBody.Read
stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
+ didReset bool // whether we sent a RST_STREAM to the server; guarded by cc.mu
peerReset chan struct{} // closed on peer reset
resetErr error // populated before peerReset is closed
@@ -200,32 +267,65 @@ type clientStream struct {
done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu
// owned by clientConnReadLoop:
- firstByte bool // got the first response byte
- pastHeaders bool // got first MetaHeadersFrame (actual headers)
- pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+ firstByte bool // got the first response byte
+ pastHeaders bool // got first MetaHeadersFrame (actual headers)
+ pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+ num1xx uint8 // number of 1xx responses seen
trailer http.Header // accumulated trailers
resTrailer *http.Header // client's Response.Trailer
}
-// awaitRequestCancel runs in its own goroutine and waits for the user
-// to cancel a RoundTrip request, its context to expire, or for the
-// request to be done (any way it might be removed from the cc.streams
-// map: peer reset, successful completion, TCP connection breakage,
-// etc)
-func (cs *clientStream) awaitRequestCancel(req *http.Request) {
- ctx := reqContext(req)
+// awaitRequestCancel waits for the user to cancel a request or for the done
+// channel to be signaled. A non-nil error is returned only if the request was
+// canceled.
+func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
+ ctx := req.Context()
if req.Cancel == nil && ctx.Done() == nil {
- return
+ return nil
}
select {
case <-req.Cancel:
- cs.bufPipe.CloseWithError(errRequestCanceled)
- cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ return errRequestCanceled
case <-ctx.Done():
- cs.bufPipe.CloseWithError(ctx.Err())
- cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
- case <-cs.done:
+ return ctx.Err()
+ case <-done:
+ return nil
+ }
+}
+
+var got1xxFuncForTests func(int, textproto.MIMEHeader) error
+
+// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func,
+// if any. It returns nil if not set or if the Go version is too old.
+func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error {
+ if fn := got1xxFuncForTests; fn != nil {
+ return fn
+ }
+ return traceGot1xxResponseFunc(cs.trace)
+}
+
+// awaitRequestCancel waits for the user to cancel a request, its context to
+// expire, or for the request to be done (any way it might be removed from the
+// cc.streams map: peer reset, successful completion, TCP connection breakage,
+// etc). If the request is canceled, then cs will be canceled and closed.
+func (cs *clientStream) awaitRequestCancel(req *http.Request) {
+ if err := awaitRequestCancel(req, cs.done); err != nil {
+ cs.cancelStream()
+ cs.bufPipe.CloseWithError(err)
+ }
+}
+
+func (cs *clientStream) cancelStream() {
+ cc := cs.cc
+ cc.mu.Lock()
+ didReset := cs.didReset
+ cs.didReset = true
+ cc.mu.Unlock()
+
+ if !didReset {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ cc.forgetStreamID(cs.ID)
}
}
@@ -242,6 +342,13 @@ func (cs *clientStream) checkResetOrDone() error {
}
}
+func (cs *clientStream) getStartedWrite() bool {
+ cc := cs.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cs.startedWrite
+}
+
func (cs *clientStream) abortRequestBodyWrite(err error) {
if err == nil {
panic("nil error")
@@ -267,7 +374,26 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
return
}
-var ErrNoCachedConn = errors.New("http2: no cached connection was available")
+// noCachedConnError is the concrete type of ErrNoCachedConn, which
+// needs to be detected by net/http regardless of whether it's its
+// bundled version (in h2_bundle.go with a rewritten type name) or
+// from a user's x/net/http2. As such, it has a unique method name
+// (IsHTTP2NoCachedConnError) that net/http sniffs for via func
+// isNoCachedConnError.
+type noCachedConnError struct{}
+
+func (noCachedConnError) IsHTTP2NoCachedConnError() {}
+func (noCachedConnError) Error() string { return "http2: no cached connection was available" }
+
+// isNoCachedConnError reports whether err is of type noCachedConnError
+// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
+// may coexist in the same running program.
+func isNoCachedConnError(err error) bool {
+ _, ok := err.(interface{ IsHTTP2NoCachedConnError() })
+ return ok
+}
+
+var ErrNoCachedConn error = noCachedConnError{}
// RoundTripOpt are options for the Transport.RoundTripOpt method.
type RoundTripOpt struct {
@@ -285,14 +411,22 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
// authorityAddr returns a given authority (a host/IP, or host:port / ip:port)
// and returns a host:port. The port 443 is added if needed.
func authorityAddr(scheme string, authority string) (addr string) {
- if _, _, err := net.SplitHostPort(authority); err == nil {
- return authority
+ host, port, err := net.SplitHostPort(authority)
+ if err != nil { // authority didn't have a port
+ port = "443"
+ if scheme == "http" {
+ port = "80"
+ }
+ host = authority
}
- port := "443"
- if scheme == "http" {
- port = "80"
+ if a, err := idna.ToASCII(host); err == nil {
+ host = a
}
- return net.JoinHostPort(authority, port)
+ // IPv6 address literal, without a port:
+ if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+ return host + ":" + port
+ }
+ return net.JoinHostPort(host, port)
}
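Illustrative inputs and outputs for the rewritten normalization (a package-internal sketch, since authorityAddr is unexported):

    fmt.Println(authorityAddr("https", "example.com"))      // example.com:443
    fmt.Println(authorityAddr("http", "example.com"))       // example.com:80
    fmt.Println(authorityAddr("https", "example.com:8443")) // example.com:8443 (already has a port)
    fmt.Println(authorityAddr("https", "[::1]"))            // [::1]:443 (IPv6 literal handling)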
// RoundTripOpt is like RoundTrip, but takes options.
@@ -302,16 +436,30 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
}
addr := authorityAddr(req.URL.Scheme, req.URL.Host)
- for {
+ for retry := 0; ; retry++ {
cc, err := t.connPool().GetClientConn(req, addr)
if err != nil {
t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
return nil, err
}
- traceGotConn(req, cc)
- res, err := cc.RoundTrip(req)
- if shouldRetryRequest(req, err) {
- continue
+ reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
+ traceGotConn(req, cc, reused)
+ res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req)
+ if err != nil && retry <= 6 {
+ if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil {
+ // After the first retry, do exponential backoff with 10% jitter.
+ if retry == 0 {
+ continue
+ }
+ backoff := float64(uint(1) << (uint(retry) - 1))
+ backoff += backoff * (0.1 * mathrand.Float64())
+ select {
+ case <-time.After(time.Second * time.Duration(backoff)):
+ continue
+ case <-req.Context().Done():
+ return nil, req.Context().Err()
+ }
+ }
}
if err != nil {
t.vlogf("RoundTrip failure: %v", err)
@@ -331,14 +479,58 @@ func (t *Transport) CloseIdleConnections() {
}
var (
- errClientConnClosed = errors.New("http2: client conn is closed")
- errClientConnUnusable = errors.New("http2: client conn not usable")
+ errClientConnClosed = errors.New("http2: client conn is closed")
+ errClientConnUnusable = errors.New("http2: client conn not usable")
+ errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
)
-func shouldRetryRequest(req *http.Request, err error) bool {
- // TODO: retry GET requests (no bodies) more aggressively, if shutdown
- // before response.
- return err == errClientConnUnusable
+// shouldRetryRequest is called by RoundTrip when a request fails to get
+// response headers. It is always called with a non-nil error.
+// It returns either a request to retry (the same request, or a modified
+// clone), or an error if the request can't be replayed.
+func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) {
+ if !canRetryError(err) {
+ return nil, err
+ }
+ // If the Body is nil (or http.NoBody), it's safe to reuse
+ // this request and its Body.
+ if req.Body == nil || req.Body == http.NoBody {
+ return req, nil
+ }
+
+ // If the request body can be reset back to its original
+ // state via the optional req.GetBody, do that.
+ if req.GetBody != nil {
+ // TODO: consider a req.Body.Close here? or audit that all caller paths do?
+ body, err := req.GetBody()
+ if err != nil {
+ return nil, err
+ }
+ newReq := *req
+ newReq.Body = body
+ return &newReq, nil
+ }
+
+ // The Request.Body can't reset back to the beginning, but we
+ // don't seem to have started to read from it yet, so reuse
+	// the request directly. The "afterBodyWrite" flag reports
+	// whether the bodyWrite process has started; it becomes true
+	// before the first Read.
+ if !afterBodyWrite {
+ return req, nil
+ }
+
+ return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
+}
+
+func canRetryError(err error) bool {
+ if err == errClientConnUnusable || err == errClientConnGotGoAway {
+ return true
+ }
+ if se, ok := err.(StreamError); ok {
+ return se.Code == ErrCodeRefusedStream
+ }
+ return false
}
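// A minimal sketch of the retry contract above: a request with a non-nil
// Body is only replayed when Request.GetBody can recreate that body. The
// helper below is hypothetical; http.NewRequest already sets GetBody for
// *bytes.Reader bodies, so the explicit assignment is shown purely for
// illustration. The backoff in RoundTripOpt then grows as 2^(retry-1)
// seconds plus up to 10% jitter (~1s, ~2s, ~4s, ...).
package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"net/http"
)

func replayableRequest(url string, payload []byte) (*http.Request, error) {
	req, err := http.NewRequest("POST", url, bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	// Recreate the body from the original payload on each retry.
	req.GetBody = func() (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewReader(payload)), nil
	}
	return req, nil
}

func main() {
	if _, err := replayableRequest("https://example.com/upload", []byte("data")); err != nil {
		panic(err)
	}
}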
func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
@@ -356,7 +548,7 @@ func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, er
func (t *Transport) newTLSConfig(host string) *tls.Config {
cfg := new(tls.Config)
if t.TLSClientConfig != nil {
- *cfg = *t.TLSClientConfig
+ *cfg = *t.TLSClientConfig.Clone()
}
if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
@@ -407,7 +599,7 @@ func (t *Transport) expectContinueTimeout() time.Duration {
if t.t1 == nil {
return 0
}
- return transportExpectContinueTimeout(t.t1)
+ return t.t1.ExpectContinueTimeout
}
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
@@ -416,16 +608,22 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
cc := &ClientConn{
- t: t,
- tconn: c,
- readerDone: make(chan struct{}),
- nextStreamID: 1,
- maxFrameSize: 16 << 10, // spec default
- initialWindowSize: 65535, // spec default
- maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
- streams: make(map[uint32]*clientStream),
- singleUse: singleUse,
- wantSettingsAck: true,
+ t: t,
+ tconn: c,
+ readerDone: make(chan struct{}),
+ nextStreamID: 1,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ streams: make(map[uint32]*clientStream),
+ singleUse: singleUse,
+ wantSettingsAck: true,
+ pings: make(map[[8]byte]chan struct{}),
+ }
+ if d := t.idleConnTimeout(); d != 0 {
+ cc.idleTimeout = d
+ cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -446,6 +644,10 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// henc in response to SETTINGS frames?
cc.henc = hpack.NewEncoder(&cc.hbuf)
+ if t.AllowHTTP {
+ cc.nextStreamID = 3
+ }
+
if cs, ok := c.(connectionStater); ok {
state := cs.ConnectionState()
cc.tlsState = &state
@@ -486,21 +688,72 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
if old != nil && old.ErrCode != ErrCodeNo {
cc.goAway.ErrCode = old.ErrCode
}
+ last := f.LastStreamID
+ for streamID, cs := range cc.streams {
+ if streamID > last {
+ select {
+ case cs.resc <- resAndError{err: errClientConnGotGoAway}:
+ default:
+ }
+ }
+ }
}
+// CanTakeNewRequest reports whether the connection can take a new request,
+// meaning it has not been closed or received or sent a GOAWAY.
func (cc *ClientConn) CanTakeNewRequest() bool {
cc.mu.Lock()
defer cc.mu.Unlock()
return cc.canTakeNewRequestLocked()
}
-func (cc *ClientConn) canTakeNewRequestLocked() bool {
+// clientConnIdleState describes the suitability of a client
+// connection to initiate a new RoundTrip request.
+type clientConnIdleState struct {
+ canTakeNewRequest bool
+ freshConn bool // whether it's unused by any previous request
+}
+
+func (cc *ClientConn) idleState() clientConnIdleState {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.idleStateLocked()
+}
+
+func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
if cc.singleUse && cc.nextStreamID > 1 {
- return false
+ return
}
- return cc.goAway == nil && !cc.closed &&
- int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
- cc.nextStreamID < math.MaxInt32
+ var maxConcurrentOkay bool
+ if cc.t.StrictMaxConcurrentStreams {
+ // We'll tell the caller we can take a new request to
+ // prevent the caller from dialing a new TCP
+ // connection, but then we'll block later before
+ // writing it.
+ maxConcurrentOkay = true
+ } else {
+ maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams)
+ }
+
+ st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
+ int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32
+ st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
+ return
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+ st := cc.idleStateLocked()
+ return st.canTakeNewRequest
+}
+
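// A small configuration sketch for the StrictMaxConcurrentStreams branch
// above: when set, the Transport keeps reporting the connection as usable
// and queues new streams behind SETTINGS_MAX_CONCURRENT_STREAMS rather than
// dialing additional TCP connections.
package main

import (
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	client := &http.Client{
		Transport: &http2.Transport{
			StrictMaxConcurrentStreams: true,
		},
	}
	_ = client // issue requests as usual; excess streams wait for a slot
}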
+// onIdleTimeout is called from a time.AfterFunc goroutine. It will
+// only be called when we're idle, but because we're coming from a new
+// goroutine, there could be a new request coming in at the same time,
+// so this simply calls the synchronized closeIfIdle to shut down this
+// connection. The timer could just call closeIfIdle, but this is
+// clearer.
+func (cc *ClientConn) onIdleTimeout() {
+ cc.closeIfIdle()
}
func (cc *ClientConn) closeIfIdle() {
@@ -520,6 +773,87 @@ func (cc *ClientConn) closeIfIdle() {
cc.tconn.Close()
}
+var shutdownEnterWaitStateHook = func() {}
+
+// Shutdown gracefully closes the client connection, waiting for running streams to complete.
+func (cc *ClientConn) Shutdown(ctx context.Context) error {
+ if err := cc.sendGoAway(); err != nil {
+ return err
+ }
+ // Wait for all in-flight streams to complete or connection to close
+ done := make(chan error, 1)
+ cancelled := false // guarded by cc.mu
+ go func() {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ for {
+ if len(cc.streams) == 0 || cc.closed {
+ cc.closed = true
+ done <- cc.tconn.Close()
+ break
+ }
+ if cancelled {
+ break
+ }
+ cc.cond.Wait()
+ }
+ }()
+ shutdownEnterWaitStateHook()
+ select {
+ case err := <-done:
+ return err
+ case <-ctx.Done():
+ cc.mu.Lock()
+ // Free the goroutine above
+ cancelled = true
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+ return ctx.Err()
+ }
+}
+
+func (cc *ClientConn) sendGoAway() error {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if cc.closing {
+ // GOAWAY sent already
+ return nil
+ }
+ // Send a graceful shutdown frame to server
+ maxStreamID := cc.nextStreamID
+ if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
+ return err
+ }
+ if err := cc.bw.Flush(); err != nil {
+ return err
+ }
+ // Prevent new requests
+ cc.closing = true
+ return nil
+}
+
+// Close closes the client connection immediately.
+//
+// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
+func (cc *ClientConn) Close() error {
+ cc.mu.Lock()
+ defer cc.cond.Broadcast()
+ defer cc.mu.Unlock()
+ err := errors.New("http2: client connection force closed via ClientConn.Close")
+ for id, cs := range cc.streams {
+ select {
+ case cs.resc <- resAndError{err: err}:
+ default:
+ }
+ cs.bufPipe.CloseWithError(err)
+ delete(cc.streams, id)
+ }
+ cc.closed = true
+ return cc.tconn.Close()
+}
+
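// A minimal sketch of driving the graceful-shutdown path added above,
// assuming cc came from (*http2.Transport).NewClientConn on an established
// net.Conn:
package main

import (
	"context"
	"log"
	"time"

	"golang.org/x/net/http2"
)

func shutdownOrClose(cc *http2.ClientConn) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// Shutdown sends GOAWAY and waits for in-flight streams; fall back to
	// an immediate Close if the deadline expires first.
	if err := cc.Shutdown(ctx); err != nil {
		log.Printf("graceful shutdown failed (%v); forcing close", err)
		cc.Close()
	}
}

func main() {}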
const maxAllocFrameSize = 512 << 10
// frameBuffer returns a scratch buffer suitable for writing DATA frames.
@@ -576,8 +910,6 @@ func commaSeparatedTrailers(req *http.Request) (string, error) {
}
if len(keys) > 0 {
sort.Strings(keys)
- // TODO: could do better allocation-wise here, but trailers are rare,
- // so being lazy for now.
return strings.Join(keys, ","), nil
}
return "", nil
@@ -599,65 +931,59 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
// Certain headers are special-cased as okay but not transmitted later.
func checkConnHeaders(req *http.Request) error {
if v := req.Header.Get("Upgrade"); v != "" {
- return errors.New("http2: invalid Upgrade request header")
+ return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
}
- if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 {
- return errors.New("http2: invalid Transfer-Encoding request header")
+ if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+ return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
}
- if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 {
- return errors.New("http2: invalid Connection request header")
+ if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) {
+ return fmt.Errorf("http2: invalid Connection request header: %q", vv)
}
return nil
}
-func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) {
- body = req.Body
- if body == nil {
- return nil, 0
+// actualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
+func actualContentLength(req *http.Request) int64 {
+ if req.Body == nil || req.Body == http.NoBody {
+ return 0
}
if req.ContentLength != 0 {
- return req.Body, req.ContentLength
+ return req.ContentLength
}
-
- // We have a body but a zero content length. Test to see if
- // it's actually zero or just unset.
- var buf [1]byte
- n, rerr := io.ReadFull(body, buf[:])
- if rerr != nil && rerr != io.EOF {
- return errorReader{rerr}, -1
- }
- if n == 1 {
- // Oh, guess there is data in this Body Reader after all.
- // The ContentLength field just wasn't set.
- // Stich the Body back together again, re-attaching our
- // consumed byte.
- return io.MultiReader(bytes.NewReader(buf[:]), body), -1
- }
- // Body is actually zero bytes.
- return nil, 0
+ return -1
}
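// Worked examples of the sanitized length above (note that the old
// one-byte probing read is gone):
//
//	Body == nil or http.NoBody        -> 0  (definitely empty)
//	Body != nil, ContentLength == 10  -> 10 (known length)
//	Body != nil, ContentLength == 0   -> -1 (unknown length)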
func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ resp, _, err := cc.roundTrip(req)
+ return resp, err
+}
+
+func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) {
if err := checkConnHeaders(req); err != nil {
- return nil, err
+ return nil, false, err
+ }
+ if cc.idleTimer != nil {
+ cc.idleTimer.Stop()
}
trailers, err := commaSeparatedTrailers(req)
if err != nil {
- return nil, err
+ return nil, false, err
}
hasTrailers := trailers != ""
- body, contentLen := bodyAndLength(req)
- hasBody := body != nil
-
cc.mu.Lock()
- cc.lastActive = time.Now()
- if cc.closed || !cc.canTakeNewRequestLocked() {
+ if err := cc.awaitOpenSlotForRequest(req); err != nil {
cc.mu.Unlock()
- return nil, errClientConnUnusable
+ return nil, false, err
}
+ body := req.Body
+ contentLen := actualContentLength(req)
+ hasBody := contentLen != 0
+
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
var requestedGzip bool
if !cc.t.disableCompression() &&
@@ -685,19 +1011,19 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
if err != nil {
cc.mu.Unlock()
- return nil, err
+ return nil, false, err
}
cs := cc.newStream()
cs.req = req
- cs.trace = requestTrace(req)
+ cs.trace = httptrace.ContextClientTrace(req.Context())
cs.requestedGzip = requestedGzip
bodyWriter := cc.t.getBodyWriterState(cs, body)
cs.on100 = bodyWriter.on100
cc.wmu.Lock()
endStream := !hasBody && !hasTrailers
- werr := cc.writeHeaders(cs.ID, endStream, hdrs)
+ werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
cc.wmu.Unlock()
traceWroteHeaders(cs.trace)
cc.mu.Unlock()
@@ -711,7 +1037,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
// Don't bother sending a RST_STREAM (our write already failed;
// no need to keep writing)
traceWroteRequest(cs.trace, werr)
- return nil, werr
+ return nil, false, werr
}
var respHeaderTimer <-chan time.Time
@@ -728,9 +1054,9 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
readLoopResCh := cs.resc
bodyWritten := false
- ctx := reqContext(req)
+ ctx := req.Context()
- handleReadLoopResponse := func(re resAndError) (*http.Response, error) {
+ handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) {
res := re.res
if re.err != nil || res.StatusCode > 299 {
// On error or status code 3xx, 4xx, 5xx, etc abort any
@@ -739,19 +1065,19 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
// 2xx, however, then assume the server DOES potentially
// want our body (e.g. full-duplex streaming:
// golang.org/issue/13444). If it turns out the server
- // doesn't, they'll RST_STREAM us soon enough. This is a
- // heuristic to avoid adding knobs to Transport. Hopefully
+ // doesn't, they'll RST_STREAM us soon enough. This is a
+ // heuristic to avoid adding knobs to Transport. Hopefully
// we can keep it.
bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWrite)
}
if re.err != nil {
cc.forgetStreamID(cs.ID)
- return nil, re.err
+ return nil, cs.getStartedWrite(), re.err
}
res.Request = req
res.TLS = cc.tlsState
- return res, nil
+ return res, false, nil
}
for {
@@ -759,37 +1085,37 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
case re := <-readLoopResCh:
return handleReadLoopResponse(re)
case <-respHeaderTimer:
- cc.forgetStreamID(cs.ID)
if !hasBody || bodyWritten {
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
} else {
bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
}
- return nil, errTimeout
+ cc.forgetStreamID(cs.ID)
+ return nil, cs.getStartedWrite(), errTimeout
case <-ctx.Done():
- cc.forgetStreamID(cs.ID)
if !hasBody || bodyWritten {
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
} else {
bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
}
- return nil, ctx.Err()
+ cc.forgetStreamID(cs.ID)
+ return nil, cs.getStartedWrite(), ctx.Err()
case <-req.Cancel:
- cc.forgetStreamID(cs.ID)
if !hasBody || bodyWritten {
cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
} else {
bodyWriter.cancel()
cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
}
- return nil, errRequestCanceled
+ cc.forgetStreamID(cs.ID)
+ return nil, cs.getStartedWrite(), errRequestCanceled
case <-cs.peerReset:
// processResetStream already removed the
// stream from the streams map; no need for
// forgetStreamID.
- return nil, cs.resetErr
+ return nil, cs.getStartedWrite(), cs.resetErr
case err := <-bodyWriter.resc:
// Prefer the read loop's response, if available. Issue 16102.
select {
@@ -798,7 +1124,8 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
default:
}
if err != nil {
- return nil, err
+ cc.forgetStreamID(cs.ID)
+ return nil, cs.getStartedWrite(), err
}
bodyWritten = true
if d := cc.responseHeaderTimeout(); d != 0 {
@@ -810,14 +1137,55 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
}
}
+// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams.
+// Must hold cc.mu.
+func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
+ var waitingForConn chan struct{}
+ var waitingForConnErr error // guarded by cc.mu
+ for {
+ cc.lastActive = time.Now()
+ if cc.closed || !cc.canTakeNewRequestLocked() {
+ if waitingForConn != nil {
+ close(waitingForConn)
+ }
+ return errClientConnUnusable
+ }
+ if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
+ if waitingForConn != nil {
+ close(waitingForConn)
+ }
+ return nil
+ }
+ // Unfortunately, we cannot wait on a condition variable and channel at
+ // the same time, so instead, we spin up a goroutine to check if the
+ // request is canceled while we wait for a slot to open in the connection.
+ if waitingForConn == nil {
+ waitingForConn = make(chan struct{})
+ go func() {
+ if err := awaitRequestCancel(req, waitingForConn); err != nil {
+ cc.mu.Lock()
+ waitingForConnErr = err
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+ }
+ }()
+ }
+ cc.pendingRequests++
+ cc.cond.Wait()
+ cc.pendingRequests--
+ if waitingForConnErr != nil {
+ return waitingForConnErr
+ }
+ }
+}
+
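// A distilled, self-contained sketch of the pattern used above for waiting
// on a sync.Cond while also honoring cancellation: since select cannot wait
// on a condition variable, a helper goroutine converts the channel event
// into a Broadcast plus a flag checked under the same mutex. All names are
// illustrative.
package main

import (
	"context"
	"fmt"
	"sync"
)

type slotWaiter struct {
	mu    sync.Mutex
	cond  *sync.Cond
	slots int
}

func (w *slotWaiter) acquire(ctx context.Context) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	var cancelled error // guarded by w.mu
	done := make(chan struct{})
	defer close(done) // frees the helper goroutine on any return path
	go func() {
		select {
		case <-ctx.Done():
			w.mu.Lock()
			cancelled = ctx.Err()
			w.cond.Broadcast() // wake the Wait below
			w.mu.Unlock()
		case <-done:
		}
	}()
	for w.slots == 0 {
		w.cond.Wait()
		if cancelled != nil {
			return cancelled
		}
	}
	w.slots--
	return nil
}

func main() {
	w := &slotWaiter{slots: 1}
	w.cond = sync.NewCond(&w.mu)
	fmt.Println(w.acquire(context.Background())) // <nil>: a slot was free
}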
// requires cc.wmu be held
-func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
+func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error {
first := true // first frame written (HEADERS is first, then CONTINUATION)
- frameSize := int(cc.maxFrameSize)
for len(hdrs) > 0 && cc.werr == nil {
chunk := hdrs
- if len(chunk) > frameSize {
- chunk = chunk[:frameSize]
+ if len(chunk) > maxFrameSize {
+ chunk = chunk[:maxFrameSize]
}
hdrs = hdrs[len(chunk):]
endHeaders := len(hdrs) == 0
@@ -878,6 +1246,7 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
sawEOF = true
err = nil
} else if err != nil {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
return err
}
@@ -901,10 +1270,11 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
err = cc.fr.WriteData(cs.ID, sentEnd, data)
if err == nil {
// TODO(bradfitz): this flush is for latency, not bandwidth.
- // Most requests won't need this. Make this opt-in or opt-out?
- // Use some heuristic on the body type? Nagel-like timers?
- // Based on 'n'? Only last chunk of this for loop, unless flow control
- // tokens are low? For now, always:
+ // Most requests won't need this. Make this opt-in or
+		// opt-out? Use some heuristic on the body type? Nagle-like
+ // timers? Based on 'n'? Only last chunk of this for loop,
+ // unless flow control tokens are low? For now, always.
+ // If we change this, see comment below.
err = cc.bw.Flush()
}
cc.wmu.Unlock()
@@ -914,20 +1284,36 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
}
}
- var trls []byte
- if !sentEnd && hasTrailers {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- trls = cc.encodeTrailers(req)
+ if sentEnd {
+ // Already sent END_STREAM (which implies we have no
+ // trailers) and flushed, because currently all
+ // WriteData frames above get a flush. So we're done.
+ return nil
}
+ var trls []byte
+ if hasTrailers {
+ cc.mu.Lock()
+ trls, err = cc.encodeTrailers(req)
+ cc.mu.Unlock()
+ if err != nil {
+ cc.writeStreamReset(cs.ID, ErrCodeInternal, err)
+ cc.forgetStreamID(cs.ID)
+ return err
+ }
+ }
+
+ cc.mu.Lock()
+ maxFrameSize := int(cc.maxFrameSize)
+ cc.mu.Unlock()
+
cc.wmu.Lock()
defer cc.wmu.Unlock()
- // Avoid forgetting to send an END_STREAM if the encoded
- // trailers are 0 bytes. Both results produce and END_STREAM.
+ // Two ways to send END_STREAM: either with trailers, or
+ // with an empty DATA frame.
if len(trls) > 0 {
- err = cc.writeHeaders(cs.ID, true, trls)
+ err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls)
} else {
err = cc.fr.WriteData(cs.ID, true, nil)
}
@@ -986,77 +1372,132 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
if host == "" {
host = req.URL.Host
}
+ host, err := httpguts.PunycodeHostPort(host)
+ if err != nil {
+ return nil, err
+ }
+
+ var path string
+ if req.Method != "CONNECT" {
+ path = req.URL.RequestURI()
+ if !validPseudoPath(path) {
+ orig := path
+ path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
+ if !validPseudoPath(path) {
+ if req.URL.Opaque != "" {
+ return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
+ } else {
+ return nil, fmt.Errorf("invalid request :path %q", orig)
+ }
+ }
+ }
+ }
// Check for any invalid headers and return an error before we
// potentially pollute our hpack state. (We want to be able to
// continue to reuse the hpack encoder for future requests)
for k, vv := range req.Header {
- if !httplex.ValidHeaderFieldName(k) {
+ if !httpguts.ValidHeaderFieldName(k) {
return nil, fmt.Errorf("invalid HTTP header name %q", k)
}
for _, v := range vv {
- if !httplex.ValidHeaderFieldValue(v) {
+ if !httpguts.ValidHeaderFieldValue(v) {
return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
}
}
}
- // 8.1.2.3 Request Pseudo-Header Fields
- // The :path pseudo-header field includes the path and query parts of the
- // target URI (the path-absolute production and optionally a '?' character
- // followed by the query production (see Sections 3.3 and 3.4 of
- // [RFC3986]).
- cc.writeHeader(":authority", host)
- cc.writeHeader(":method", req.Method)
- if req.Method != "CONNECT" {
- cc.writeHeader(":path", req.URL.RequestURI())
- cc.writeHeader(":scheme", "https")
- }
- if trailers != "" {
- cc.writeHeader("trailer", trailers)
+ enumerateHeaders := func(f func(name, value string)) {
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // The :path pseudo-header field includes the path and query parts of the
+ // target URI (the path-absolute production and optionally a '?' character
+ // followed by the query production (see Sections 3.3 and 3.4 of
+ // [RFC3986]).
+ f(":authority", host)
+ m := req.Method
+ if m == "" {
+ m = http.MethodGet
+ }
+ f(":method", m)
+ if req.Method != "CONNECT" {
+ f(":path", path)
+ f(":scheme", req.URL.Scheme)
+ }
+ if trailers != "" {
+ f("trailer", trailers)
+ }
+
+ var didUA bool
+ for k, vv := range req.Header {
+ if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") {
+ // Host is :authority, already sent.
+ // Content-Length is automatic, set below.
+ continue
+ } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") ||
+ strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") ||
+ strings.EqualFold(k, "keep-alive") {
+ // Per 8.1.2.2 Connection-Specific Header
+ // Fields, don't send connection-specific
+ // fields. We have already checked if any
+ // are error-worthy so just ignore the rest.
+ continue
+ } else if strings.EqualFold(k, "user-agent") {
+ // Match Go's http1 behavior: at most one
+ // User-Agent. If set to nil or empty string,
+ // then omit it. Otherwise if not mentioned,
+ // include the default (below).
+ didUA = true
+ if len(vv) < 1 {
+ continue
+ }
+ vv = vv[:1]
+ if vv[0] == "" {
+ continue
+ }
+
+ }
+
+ for _, v := range vv {
+ f(k, v)
+ }
+ }
+ if shouldSendReqContentLength(req.Method, contentLength) {
+ f("content-length", strconv.FormatInt(contentLength, 10))
+ }
+ if addGzipHeader {
+ f("accept-encoding", "gzip")
+ }
+ if !didUA {
+ f("user-agent", defaultUserAgent)
+ }
}
- var didUA bool
- for k, vv := range req.Header {
- lowKey := strings.ToLower(k)
- switch lowKey {
- case "host", "content-length":
- // Host is :authority, already sent.
- // Content-Length is automatic, set below.
- continue
- case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive":
- // Per 8.1.2.2 Connection-Specific Header
- // Fields, don't send connection-specific
- // fields. We have already checked if any
- // are error-worthy so just ignore the rest.
- continue
- case "user-agent":
- // Match Go's http1 behavior: at most one
- // User-Agent. If set to nil or empty string,
- // then omit it. Otherwise if not mentioned,
- // include the default (below).
- didUA = true
- if len(vv) < 1 {
- continue
- }
- vv = vv[:1]
- if vv[0] == "" {
- continue
- }
+ // Do a first pass over the headers counting bytes to ensure
+ // we don't exceed cc.peerMaxHeaderListSize. This is done as a
+ // separate pass before encoding the headers to prevent
+ // modifying the hpack state.
+ hlSize := uint64(0)
+ enumerateHeaders(func(name, value string) {
+ hf := hpack.HeaderField{Name: name, Value: value}
+ hlSize += uint64(hf.Size())
+ })
+
+ if hlSize > cc.peerMaxHeaderListSize {
+ return nil, errRequestHeaderListSize
+ }
+
+ trace := httptrace.ContextClientTrace(req.Context())
+ traceHeaders := traceHasWroteHeaderField(trace)
+
+ // Header list size is ok. Write the headers.
+ enumerateHeaders(func(name, value string) {
+ name = strings.ToLower(name)
+ cc.writeHeader(name, value)
+ if traceHeaders {
+ traceWroteHeaderField(trace, name, value)
}
- for _, v := range vv {
- cc.writeHeader(lowKey, v)
- }
- }
- if shouldSendReqContentLength(req.Method, contentLength) {
- cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10))
- }
- if addGzipHeader {
- cc.writeHeader("accept-encoding", "gzip")
- }
- if !didUA {
- cc.writeHeader("user-agent", defaultUserAgent)
- }
+ })
+
return cc.hbuf.Bytes(), nil
}
@@ -1083,17 +1524,29 @@ func shouldSendReqContentLength(method string, contentLength int64) bool {
}
// requires cc.mu be held.
-func (cc *ClientConn) encodeTrailers(req *http.Request) []byte {
+func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) {
cc.hbuf.Reset()
+
+ hlSize := uint64(0)
for k, vv := range req.Trailer {
- // Transfer-Encoding, etc.. have already been filter at the
+ for _, v := range vv {
+ hf := hpack.HeaderField{Name: k, Value: v}
+ hlSize += uint64(hf.Size())
+ }
+ }
+ if hlSize > cc.peerMaxHeaderListSize {
+ return nil, errRequestHeaderListSize
+ }
+
+ for k, vv := range req.Trailer {
+ // Transfer-Encoding, etc.. have already been filtered at the
// start of RoundTrip
lowKey := strings.ToLower(k)
for _, v := range vv {
cc.writeHeader(lowKey, v)
}
}
- return cc.hbuf.Bytes()
+ return cc.hbuf.Bytes(), nil
}
func (cc *ClientConn) writeHeader(name, value string) {
@@ -1137,8 +1590,13 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
if andRemove && cs != nil && !cc.closed {
cc.lastActive = time.Now()
delete(cc.streams, id)
+ if len(cc.streams) == 0 && cc.idleTimer != nil {
+ cc.idleTimer.Reset(cc.idleTimeout)
+ }
close(cs.done)
- cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+ // Wake up checkResetOrDone via clientStream.awaitFlowControl and
+ // wake up RoundTrip if there is a pending request.
+ cc.cond.Broadcast()
}
return cs
}
@@ -1146,17 +1604,12 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type clientConnReadLoop struct {
cc *ClientConn
- activeRes map[uint32]*clientStream // keyed by streamID
closeWhenIdle bool
}
// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() {
- rl := &clientConnReadLoop{
- cc: cc,
- activeRes: make(map[uint32]*clientStream),
- }
-
+ rl := &clientConnReadLoop{cc: cc}
defer rl.cleanup()
cc.readerErr = rl.run()
if ce, ok := cc.readerErr.(ConnectionError); ok {
@@ -1193,6 +1646,10 @@ func (rl *clientConnReadLoop) cleanup() {
defer cc.t.connPool().MarkDead(cc)
defer close(cc.readerDone)
+ if cc.idleTimer != nil {
+ cc.idleTimer.Stop()
+ }
+
// Close any response bodies if the server closes prematurely.
// TODO: also do this if we've written the headers but not
// gotten a response yet.
@@ -1207,10 +1664,8 @@ func (rl *clientConnReadLoop) cleanup() {
} else if err == io.EOF {
err = io.ErrUnexpectedEOF
}
- for _, cs := range rl.activeRes {
- cs.bufPipe.CloseWithError(err)
- }
for _, cs := range cc.streams {
+ cs.bufPipe.CloseWithError(err) // no-op if already closed
select {
case cs.resc <- resAndError{err: err}:
default:
@@ -1233,8 +1688,9 @@ func (rl *clientConnReadLoop) run() error {
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
}
if se, ok := err.(StreamError); ok {
- if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
+ if cs := cc.streamByID(se.StreamID, false); cs != nil {
cs.cc.writeStreamReset(cs.ID, se.Code, err)
+ cs.cc.forgetStreamID(cs.ID)
if se.Cause == nil {
se.Cause = cc.fr.errDetail
}
@@ -1287,7 +1743,7 @@ func (rl *clientConnReadLoop) run() error {
}
return err
}
- if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
+ if rl.closeWhenIdle && gotReply && maybeIdle {
cc.closeIfIdle()
}
}
@@ -1295,13 +1751,31 @@ func (rl *clientConnReadLoop) run() error {
func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
cc := rl.cc
- cs := cc.streamByID(f.StreamID, f.StreamEnded())
+ cs := cc.streamByID(f.StreamID, false)
if cs == nil {
// We'd get here if we canceled a request while the
// server had its response still in flight. So if this
// was just something we canceled, ignore it.
return nil
}
+ if f.StreamEnded() {
+ // Issue 20521: If the stream has ended, streamByID() causes
+ // clientStream.done to be closed, which causes the request's bodyWriter
+ // to be closed with an errStreamClosed, which may be received by
+ // clientConn.RoundTrip before the result of processing these headers.
+ // Deferring stream closure allows the header processing to occur first.
+ // clientConn.RoundTrip may still receive the bodyWriter error first, but
+ // the fix for issue 16102 prioritises any response.
+ //
+ // Issue 22413: If there is no request body, we should close the
+ // stream before writing to cs.resc so that the stream is closed
+ // immediately once RoundTrip returns.
+ if cs.req.Body != nil {
+ defer cc.forgetStreamID(f.StreamID)
+ } else {
+ cc.forgetStreamID(f.StreamID)
+ }
+ }
if !cs.firstByte {
if cs.trace != nil {
// TODO(bradfitz): move first response byte earlier,
@@ -1325,6 +1799,7 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
}
// Any other error type is a stream error.
cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
+ cc.forgetStreamID(cs.ID)
cs.resc <- resAndError{err: err}
return nil // return nil from process* funcs to keep conn alive
}
@@ -1332,9 +1807,6 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
// (nil, nil) special case. See handleResponse docs.
return nil
}
- if res.Body != noBody {
- rl.activeRes[cs.ID] = cs
- }
cs.resTrailer = &res.Trailer
cs.resc <- resAndError{res: res}
return nil
@@ -1345,8 +1817,7 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
// is the detail.
//
// As a special case, handleResponse may return (nil, nil) to skip the
-// frame (currently only used for 100 expect continue). This special
-// case is going away after Issue 13851 is fixed.
+// frame (currently only used for 1xx responses).
func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
if f.Truncated {
return nil, errResponseHeaderListSize
@@ -1354,20 +1825,11 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
status := f.PseudoValue("status")
if status == "" {
- return nil, errors.New("missing status pseudo header")
+ return nil, errors.New("malformed response from server: missing status pseudo header")
}
statusCode, err := strconv.Atoi(status)
if err != nil {
- return nil, errors.New("malformed non-numeric status pseudo header")
- }
-
- if statusCode == 100 {
- traceGot100Continue(cs.trace)
- if cs.on100 != nil {
- cs.on100() // forces any write delay timer to fire
- }
- cs.pastHeaders = false // do it all again
- return nil, nil
+ return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
}
header := make(http.Header)
@@ -1394,6 +1856,27 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
}
}
+ if statusCode >= 100 && statusCode <= 199 {
+ cs.num1xx++
+ const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
+ if cs.num1xx > max1xxResponses {
+ return nil, errors.New("http2: too many 1xx informational responses")
+ }
+ if fn := cs.get1xxTraceFunc(); fn != nil {
+ if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
+ return nil, err
+ }
+ }
+ if statusCode == 100 {
+ traceGot100Continue(cs.trace)
+ if cs.on100 != nil {
+ cs.on100() // forces any write delay timer to fire
+ }
+ }
+ cs.pastHeaders = false // do it all again
+ return nil, nil
+ }
+
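// A minimal sketch of observing the 1xx responses counted above through
// net/http/httptrace (Go 1.11+); the printing hook is illustrative:
package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
)

func main() {
	trace := &httptrace.ClientTrace{
		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
			fmt.Println("informational response:", code, header)
			return nil // a non-nil error would abort the request
		},
	}
	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	resp, err := http.DefaultClient.Do(req)
	if err == nil {
		resp.Body.Close()
	}
}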
streamEnded := f.StreamEnded()
isHead := cs.req.Method == "HEAD"
if !streamEnded || isHead {
@@ -1416,8 +1899,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
return res, nil
}
- buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
- cs.bufPipe = pipe{b: buf}
+ cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}}
cs.bytesRemain = res.ContentLength
res.Body = transportResponseBody{cs}
go cs.awaitRequestCancel(cs.req)
@@ -1427,7 +1909,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
res.Header.Del("Content-Length")
res.ContentLength = -1
res.Body = &gzipReader{body: res.Body}
- setResponseUncompressed(res)
+ res.Uncompressed = true
}
return res, nil
}
@@ -1544,6 +2026,7 @@ func (b transportResponseBody) Close() error {
cc.wmu.Lock()
if !serverSentStreamEnd {
cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
+ cs.didReset = true
}
// Return connection-level flow control.
if unread > 0 {
@@ -1556,6 +2039,7 @@ func (b transportResponseBody) Close() error {
}
cs.bufPipe.BreakWithError(errClosedResponseBody)
+ cc.forgetStreamID(cs.ID)
return nil
}
@@ -1590,13 +2074,23 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
}
return nil
}
+ if !cs.firstByte {
+ cc.logf("protocol error: received DATA before a HEADERS frame")
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeProtocol,
+ })
+ return nil
+ }
if f.Length > 0 {
- if len(data) > 0 && cs.bufPipe.b == nil {
- // Data frame after it's already closed?
- cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
- return ConnectionError(ErrCodeProtocol)
+ if cs.req.Method == "HEAD" && len(data) > 0 {
+ cc.logf("protocol error: received DATA on a HEAD request")
+ rl.endStreamError(cs, StreamError{
+ StreamID: f.StreamID,
+ Code: ErrCodeProtocol,
+ })
+ return nil
}
-
// Check connection-level flow control.
cc.mu.Lock()
if cs.inflow.available() >= int32(f.Length) {
@@ -1607,18 +2101,30 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
}
// Return any padded flow control now, since we won't
// refund it later on body reads.
- if pad := int32(f.Length) - int32(len(data)); pad > 0 {
- cs.inflow.add(pad)
- cc.inflow.add(pad)
+ var refund int
+ if pad := int(f.Length) - len(data); pad > 0 {
+ refund += pad
+ }
+ // Return len(data) now if the stream is already closed,
+ // since data will never be read.
+ didReset := cs.didReset
+ if didReset {
+ refund += len(data)
+ }
+ if refund > 0 {
+ cc.inflow.add(int32(refund))
cc.wmu.Lock()
- cc.fr.WriteWindowUpdate(0, uint32(pad))
- cc.fr.WriteWindowUpdate(cs.ID, uint32(pad))
+ cc.fr.WriteWindowUpdate(0, uint32(refund))
+ if !didReset {
+ cs.inflow.add(int32(refund))
+ cc.fr.WriteWindowUpdate(cs.ID, uint32(refund))
+ }
cc.bw.Flush()
cc.wmu.Unlock()
}
cc.mu.Unlock()
- if len(data) > 0 {
+ if len(data) > 0 && !didReset {
if _, err := cs.bufPipe.Write(data); err != nil {
rl.endStreamError(cs, err)
return err
@@ -1646,11 +2152,10 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
err = io.EOF
code = cs.copyTrailers
}
- cs.bufPipe.closeWithErrorAndCode(err, code)
- delete(rl.activeRes, cs.ID)
if isConnectionCloseRequest(cs.req) {
rl.closeWhenIdle = true
}
+ cs.bufPipe.closeWithErrorAndCode(err, code)
select {
case cs.resc <- resAndError{err: err}:
@@ -1698,6 +2203,8 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
cc.maxFrameSize = s.Val
case SettingMaxConcurrentStreams:
cc.maxConcurrentStreams = s.Val
+ case SettingMaxHeaderListSize:
+ cc.peerMaxHeaderListSize = uint64(s.Val)
case SettingInitialWindowSize:
// Values above the maximum flow-control
// window size of 2^31-1 MUST be treated as a
@@ -1775,14 +2282,58 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
cs.bufPipe.CloseWithError(err)
cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
}
- delete(rl.activeRes, cs.ID)
return nil
}
+// Ping sends a PING frame to the server and waits for the ack.
+func (cc *ClientConn) Ping(ctx context.Context) error {
+ c := make(chan struct{})
+ // Generate a random payload
+ var p [8]byte
+ for {
+ if _, err := rand.Read(p[:]); err != nil {
+ return err
+ }
+ cc.mu.Lock()
+ // check for dup before insert
+ if _, found := cc.pings[p]; !found {
+ cc.pings[p] = c
+ cc.mu.Unlock()
+ break
+ }
+ cc.mu.Unlock()
+ }
+ cc.wmu.Lock()
+ if err := cc.fr.WritePing(false, p); err != nil {
+ cc.wmu.Unlock()
+ return err
+ }
+ if err := cc.bw.Flush(); err != nil {
+ cc.wmu.Unlock()
+ return err
+ }
+ cc.wmu.Unlock()
+ select {
+ case <-c:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cc.readerDone:
+ // connection closed
+ return cc.readerErr
+ }
+}
+
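// A minimal health-check sketch using the Ping method added above, assuming
// cc is a live *http2.ClientConn:
package main

import (
	"context"
	"time"

	"golang.org/x/net/http2"
)

// healthy reports whether the server acked a PING within the timeout.
func healthy(cc *http2.ClientConn) bool {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	return cc.Ping(ctx) == nil
}

func main() {}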
func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
if f.IsAck() {
- // 6.7 PING: " An endpoint MUST NOT respond to PING frames
- // containing this flag."
+ cc := rl.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ // If ack, notify listener if any
+ if c, ok := cc.pings[f.Data]; ok {
+ close(c)
+ delete(cc.pings, f.Data)
+ }
return nil
}
cc := rl.cc
@@ -1818,6 +2369,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error)
var (
errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
+ errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers")
)
@@ -1904,11 +2456,14 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body
resc := make(chan error, 1)
s.resc = resc
s.fn = func() {
+ cs.cc.mu.Lock()
+ cs.startedWrite = true
+ cs.cc.mu.Unlock()
resc <- cs.writeRequestBody(body, cs.req.Body)
}
s.delay = t.expectContinueTimeout()
if s.delay == 0 ||
- !httplex.HeaderValuesContainsToken(
+ !httpguts.HeaderValuesContainsToken(
cs.req.Header["Expect"],
"100-continue") {
return
@@ -1963,5 +2518,93 @@ func (s bodyWriterState) scheduleBodyWrite() {
// isConnectionCloseRequest reports whether req should use its own
// connection for a single request and then close the connection.
func isConnectionCloseRequest(req *http.Request) bool {
- return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close")
+ return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close")
+}
+
+// registerHTTPSProtocol calls Transport.RegisterProtocol but
+// converting panics into errors.
+func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("%v", e)
+ }
+ }()
+ t.RegisterProtocol("https", rt)
+ return nil
+}
+
+// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
+// if there's already a cached connection to the host.
+// (The field is exported so it can be accessed via reflect from net/http; tested
+// by TestNoDialH2RoundTripperType)
+type noDialH2RoundTripper struct{ *Transport }
+
+func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ res, err := rt.Transport.RoundTrip(req)
+ if isNoCachedConnError(err) {
+ return nil, http.ErrSkipAltProtocol
+ }
+ return res, err
+}
+
+func (t *Transport) idleConnTimeout() time.Duration {
+ if t.t1 != nil {
+ return t.t1.IdleConnTimeout
+ }
+ return 0
+}
+
+func traceGetConn(req *http.Request, hostPort string) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GetConn == nil {
+ return
+ }
+ trace.GetConn(hostPort)
+}
+
+func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GotConn == nil {
+ return
+ }
+ ci := httptrace.GotConnInfo{Conn: cc.tconn}
+ ci.Reused = reused
+ cc.mu.Lock()
+ ci.WasIdle = len(cc.streams) == 0 && reused
+ if ci.WasIdle && !cc.lastActive.IsZero() {
+ ci.IdleTime = time.Now().Sub(cc.lastActive)
+ }
+ cc.mu.Unlock()
+
+ trace.GotConn(ci)
+}
+
+func traceWroteHeaders(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.WroteHeaders != nil {
+ trace.WroteHeaders()
+ }
+}
+
+func traceGot100Continue(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.Got100Continue != nil {
+ trace.Got100Continue()
+ }
+}
+
+func traceWait100Continue(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.Wait100Continue != nil {
+ trace.Wait100Continue()
+ }
+}
+
+func traceWroteRequest(trace *httptrace.ClientTrace, err error) {
+ if trace != nil && trace.WroteRequest != nil {
+ trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
+ }
+}
+
+func traceFirstResponseByte(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ trace.GotFirstResponseByte()
+ }
}
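// A minimal sketch of observing the connection events wired up by the trace
// helpers above via net/http/httptrace:
package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
)

func main() {
	trace := &httptrace.ClientTrace{
		GetConn: func(hostPort string) { fmt.Println("get conn:", hostPort) },
		GotConn: func(info httptrace.GotConnInfo) {
			fmt.Println("got conn; reused:", info.Reused, "was idle:", info.WasIdle, info.IdleTime)
		},
	}
	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	resp, err := http.DefaultClient.Do(req)
	if err == nil {
		resp.Body.Close()
	}
}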
diff --git a/src/vendor/golang.org/x/net/http2/write.go b/src/vendor/golang.org/x/net/http2/write.go
index 27ef0dd4d..3849bc263 100644
--- a/src/vendor/golang.org/x/net/http2/write.go
+++ b/src/vendor/golang.org/x/net/http2/write.go
@@ -9,15 +9,20 @@ import (
"fmt"
"log"
"net/http"
- "time"
+ "net/url"
+ "golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
- "golang.org/x/net/lex/httplex"
)
// writeFramer is implemented by any type that is used to write frames.
type writeFramer interface {
writeFrame(writeContext) error
+
+ // staysWithinBuffer reports whether this writer promises that
+ // it will only write less than or equal to size bytes, and it
+ // won't Flush the write context.
+ staysWithinBuffer(size int) bool
}
// writeContext is the interface needed by the various frame writer
@@ -39,9 +44,10 @@ type writeContext interface {
HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
}
-// endsStream reports whether the given frame writer w will locally
-// close the stream.
-func endsStream(w writeFramer) bool {
+// writeEndsStream reports whether w writes a frame that will transition
+// the stream to a half-closed local state. This returns false for RST_STREAM,
+// which closes the entire stream (not just the local half).
+func writeEndsStream(w writeFramer) bool {
switch v := w.(type) {
case *writeData:
return v.endStream
@@ -51,7 +57,7 @@ func endsStream(w writeFramer) bool {
// This can only happen if the caller reuses w after it's
// been intentionally nil'ed out to prevent use. Keep this
// here to catch future refactoring breaking it.
- panic("endsStream called on nil writeFramer")
+ panic("writeEndsStream called on nil writeFramer")
}
return false
}
@@ -62,8 +68,16 @@ func (flushFrameWriter) writeFrame(ctx writeContext) error {
return ctx.Flush()
}
+func (flushFrameWriter) staysWithinBuffer(max int) bool { return false }
+
type writeSettings []Setting
+func (s writeSettings) staysWithinBuffer(max int) bool {
+ const settingSize = 6 // uint16 + uint32
+ return frameHeaderLen+settingSize*len(s) <= max
+}
+
func (s writeSettings) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteSettings([]Setting(s)...)
}
@@ -75,14 +89,12 @@ type writeGoAway struct {
func (p *writeGoAway) writeFrame(ctx writeContext) error {
err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
- if p.code != 0 {
- ctx.Flush() // ignore error: we're hanging up on them anyway
- time.Sleep(50 * time.Millisecond)
- ctx.CloseConn()
- }
+ ctx.Flush() // ignore error: we're hanging up on them anyway
return err
}
+func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes
+
type writeData struct {
streamID uint32
p []byte
@@ -97,6 +109,10 @@ func (w *writeData) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
}
+func (w *writeData) staysWithinBuffer(max int) bool {
+ return frameHeaderLen+len(w.p) <= max
+}
+
// handlerPanicRST is the message sent from handler goroutines when
// the handler panics.
type handlerPanicRST struct {
@@ -107,22 +123,57 @@ func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
}
+func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
func (se StreamError) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
}
+func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
type writePingAck struct{ pf *PingFrame }
func (w writePingAck) writeFrame(ctx writeContext) error {
return ctx.Framer().WritePing(true, w.pf.Data)
}
+func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max }
+
type writeSettingsAck struct{}
func (writeSettingsAck) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteSettingsAck()
}
+func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max }
+
+// splitHeaderBlock splits headerBlock into fragments so that each fragment fits
+// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true
+// for the first/last fragment, respectively.
+func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error {
+ // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+ // that all peers must support (16KB). Later we could care
+ // more and send larger frames if the peer advertised it, but
+ // there's little point. Most headers are small anyway (so we
+ // generally won't have CONTINUATION frames), and extra frames
+ // only waste 9 bytes anyway.
+ const maxFrameSize = 16384
+
+ first := true
+ for len(headerBlock) > 0 {
+ frag := headerBlock
+ if len(frag) > maxFrameSize {
+ frag = frag[:maxFrameSize]
+ }
+ headerBlock = headerBlock[len(frag):]
+ if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil {
+ return err
+ }
+ first = false
+ }
+ return nil
+}
+
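// For example, a 40000-byte header block run through the helper above yields
// three calls to fn: (16384 bytes, firstFrag=true, lastFrag=false), then
// (16384, false, false), then (7232, false, true): one HEADERS frame
// followed by two CONTINUATION frames.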
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
// for HTTP response headers or trailers from a server handler.
type writeResHeaders struct {
@@ -144,6 +195,17 @@ func encKV(enc *hpack.Encoder, k, v string) {
enc.WriteField(hpack.HeaderField{Name: k, Value: v})
}
+func (w *writeResHeaders) staysWithinBuffer(max int) bool {
+ // TODO: this is a common one. It'd be nice to return true
+ // here and get into the fast path if we could be clever and
+ // calculate the size fast enough, or at least a conservative
+ // upper bound that usually fires. (Maybe if w.h and
+ // w.trailers are nil, so we don't need to enumerate it.)
+ // Otherwise I'm afraid that just calculating the length to
+ // answer this question would be slower than the ~2µs benefit.
+ return false
+}
+
func (w *writeResHeaders) writeFrame(ctx writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
@@ -169,39 +231,69 @@ func (w *writeResHeaders) writeFrame(ctx writeContext) error {
panic("unexpected empty hpack")
}
- // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
- // that all peers must support (16KB). Later we could care
- // more and send larger frames if the peer advertised it, but
- // there's little point. Most headers are small anyway (so we
- // generally won't have CONTINUATION frames), and extra frames
- // only waste 9 bytes anyway.
- const maxFrameSize = 16384
+ return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
- first := true
- for len(headerBlock) > 0 {
- frag := headerBlock
- if len(frag) > maxFrameSize {
- frag = frag[:maxFrameSize]
- }
- headerBlock = headerBlock[len(frag):]
- endHeaders := len(headerBlock) == 0
- var err error
- if first {
- first = false
- err = ctx.Framer().WriteHeaders(HeadersFrameParam{
- StreamID: w.streamID,
- BlockFragment: frag,
- EndStream: w.endStream,
- EndHeaders: endHeaders,
- })
- } else {
- err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
- }
- if err != nil {
- return err
- }
+func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+ if firstFrag {
+ return ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: frag,
+ EndStream: w.endStream,
+ EndHeaders: lastFrag,
+ })
+ } else {
+ return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
+ }
+}
+
+// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames.
+type writePushPromise struct {
+ streamID uint32 // pusher stream
+ method string // for :method
+ url *url.URL // for :scheme, :authority, :path
+ h http.Header
+
+ // Creates an ID for a pushed stream. This runs on serveG just before
+ // the frame is written. The returned ID is copied to promisedID.
+ allocatePromisedID func() (uint32, error)
+ promisedID uint32
+}
+
+func (w *writePushPromise) staysWithinBuffer(max int) bool {
+ // TODO: see writeResHeaders.staysWithinBuffer
+ return false
+}
+
+func (w *writePushPromise) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ encKV(enc, ":method", w.method)
+ encKV(enc, ":scheme", w.url.Scheme)
+ encKV(enc, ":authority", w.url.Host)
+ encKV(enc, ":path", w.url.RequestURI())
+ encodeHeaders(enc, w.h, nil)
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 {
+ panic("unexpected empty hpack")
+ }
+
+ return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock)
+}
+
+func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error {
+ if firstFrag {
+ return ctx.Framer().WritePushPromise(PushPromiseParam{
+ StreamID: w.streamID,
+ PromiseID: w.promisedID,
+ BlockFragment: frag,
+ EndHeaders: lastFrag,
+ })
+ } else {
+ return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag)
}
- return nil
}
type write100ContinueHeadersFrame struct {
@@ -220,15 +312,24 @@ func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
})
}
+func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool {
+ // Sloppy but conservative:
+ return 9+2*(len(":status")+len("100")) <= max
+}
+
type writeWindowUpdate struct {
streamID uint32 // or 0 for conn-level
n uint32
}
+func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
+
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
}
+// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k])
+// is encoded only if k is in keys.
func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
if keys == nil {
sorter := sorterPool.Get().(*sorter)
@@ -249,7 +350,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
}
isTE := k == "transfer-encoding"
for _, v := range vv {
- if !httplex.ValidHeaderFieldValue(v) {
+ if !httpguts.ValidHeaderFieldValue(v) {
// TODO: return an error? golang.org/issue/14048
// For now just omit it.
continue
diff --git a/src/vendor/golang.org/x/net/http2/writesched.go b/src/vendor/golang.org/x/net/http2/writesched.go
index c24316ce7..4fe307307 100644
--- a/src/vendor/golang.org/x/net/http2/writesched.go
+++ b/src/vendor/golang.org/x/net/http2/writesched.go
@@ -6,14 +6,53 @@ package http2
import "fmt"
-// frameWriteMsg is a request to write a frame.
-type frameWriteMsg struct {
+// WriteScheduler is the interface implemented by HTTP/2 write schedulers.
+// Methods are never called concurrently.
+type WriteScheduler interface {
+ // OpenStream opens a new stream in the write scheduler.
+ // It is illegal to call this with streamID=0 or with a streamID that is
+ // already open -- the call may panic.
+ OpenStream(streamID uint32, options OpenStreamOptions)
+
+ // CloseStream closes a stream in the write scheduler. Any frames queued on
+ // this stream should be discarded. It is illegal to call this on a stream
+ // that is not open -- the call may panic.
+ CloseStream(streamID uint32)
+
+ // AdjustStream adjusts the priority of the given stream. This may be called
+ // on a stream that has not yet been opened or has been closed. Note that
+ // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See:
+ // https://tools.ietf.org/html/rfc7540#section-5.1
+ AdjustStream(streamID uint32, priority PriorityParam)
+
+ // Push queues a frame in the scheduler. In most cases, this will not be
+ // called with wr.StreamID()!=0 unless that stream is currently open. The one
+ // exception is RST_STREAM frames, which may be sent on idle or closed streams.
+ Push(wr FrameWriteRequest)
+
+ // Pop dequeues the next frame to write. Returns false if no frames can
+ // be written. Frames with a given wr.StreamID() are Pop'd in the same
+ // order they are Push'd.
+ Pop() (wr FrameWriteRequest, ok bool)
+}
+
+// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream.
+type OpenStreamOptions struct {
+ // PusherID is zero if the stream was initiated by the client. Otherwise,
+ // PusherID names the stream that pushed the newly opened stream.
+ PusherID uint32
+}
+
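// A minimal FIFO sketch of the WriteScheduler contract above, with
// priorities ignored and CloseStream's queued-frame cleanup elided for
// brevity; the package's random and priority schedulers are the real
// implementations.
package example

import "golang.org/x/net/http2"

type fifoScheduler struct {
	queue []http2.FrameWriteRequest
}

func (s *fifoScheduler) OpenStream(streamID uint32, opts http2.OpenStreamOptions) {}
func (s *fifoScheduler) CloseStream(streamID uint32)                              {}
func (s *fifoScheduler) AdjustStream(streamID uint32, p http2.PriorityParam)      {}

// Push appends and Pop shifts, so frames for a given stream leave in the
// order they arrived, as the interface requires.
func (s *fifoScheduler) Push(wr http2.FrameWriteRequest) { s.queue = append(s.queue, wr) }

func (s *fifoScheduler) Pop() (http2.FrameWriteRequest, bool) {
	if len(s.queue) == 0 {
		return http2.FrameWriteRequest{}, false
	}
	wr := s.queue[0]
	s.queue = s.queue[1:]
	return wr, true
}

var _ http2.WriteScheduler = (*fifoScheduler)(nil)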
+// FrameWriteRequest is a request to write a frame.
+type FrameWriteRequest struct {
// write is the interface value that does the writing, once the
- // writeScheduler (below) has decided to select this frame
- // to write. The write functions are all defined in write.go.
+ // WriteScheduler has selected this frame to write. The write
+ // functions are all defined in write.go.
write writeFramer
- stream *stream // used for prioritization. nil for non-stream frames.
+ // stream is the stream on which this frame will be written.
+ // nil for non-stream frames like PING and SETTINGS.
+ stream *stream
// done, if non-nil, must be a buffered channel with space for
// 1 message and is sent the return value from write (or an
@@ -21,263 +60,183 @@ type frameWriteMsg struct {
done chan error
}
-// for debugging only:
-func (wm frameWriteMsg) String() string {
- var streamID uint32
- if wm.stream != nil {
- streamID = wm.stream.id
- }
- var des string
- if s, ok := wm.write.(fmt.Stringer); ok {
- des = s.String()
- } else {
- des = fmt.Sprintf("%T", wm.write)
- }
- return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
-}
-
-// writeScheduler tracks pending frames to write, priorities, and decides
-// the next one to use. It is not thread-safe.
-type writeScheduler struct {
- // zero are frames not associated with a specific stream.
- // They're sent before any stream-specific freams.
- zero writeQueue
-
- // maxFrameSize is the maximum size of a DATA frame
- // we'll write. Must be non-zero and between 16K-16M.
- maxFrameSize uint32
-
- // sq contains the stream-specific queues, keyed by stream ID.
- // when a stream is idle, it's deleted from the map.
- sq map[uint32]*writeQueue
-
- // canSend is a slice of memory that's reused between frame
- // scheduling decisions to hold the list of writeQueues (from sq)
- // which have enough flow control data to send. After canSend is
- // built, the best is selected.
- canSend []*writeQueue
-
- // pool of empty queues for reuse.
- queuePool []*writeQueue
-}
-
-func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
- if len(q.s) != 0 {
- panic("queue must be empty")
- }
- ws.queuePool = append(ws.queuePool, q)
-}
-
-func (ws *writeScheduler) getEmptyQueue() *writeQueue {
- ln := len(ws.queuePool)
- if ln == 0 {
- return new(writeQueue)
- }
- q := ws.queuePool[ln-1]
- ws.queuePool = ws.queuePool[:ln-1]
- return q
-}
-
-func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
-
-func (ws *writeScheduler) add(wm frameWriteMsg) {
- st := wm.stream
- if st == nil {
- ws.zero.push(wm)
- } else {
- ws.streamQueue(st.id).push(wm)
- }
-}
-
-func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
- if q, ok := ws.sq[streamID]; ok {
- return q
- }
- if ws.sq == nil {
- ws.sq = make(map[uint32]*writeQueue)
- }
- q := ws.getEmptyQueue()
- ws.sq[streamID] = q
- return q
-}
-
-// take returns the most important frame to write and removes it from the scheduler.
-// It is illegal to call this if the scheduler is empty or if there are no connection-level
-// flow control bytes available.
-func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
- if ws.maxFrameSize == 0 {
- panic("internal error: ws.maxFrameSize not initialized or invalid")
- }
-
- // If there any frames not associated with streams, prefer those first.
- // These are usually SETTINGS, etc.
- if !ws.zero.empty() {
- return ws.zero.shift(), true
- }
- if len(ws.sq) == 0 {
- return
- }
-
- // Next, prioritize frames on streams that aren't DATA frames (no cost).
- for id, q := range ws.sq {
- if q.firstIsNoCost() {
- return ws.takeFrom(id, q)
+// StreamID returns the id of the stream this frame will be written to.
+// 0 is used for non-stream frames such as PING and SETTINGS.
+func (wr FrameWriteRequest) StreamID() uint32 {
+ if wr.stream == nil {
+ if se, ok := wr.write.(StreamError); ok {
+ // (*serverConn).resetStream doesn't set
+ // stream because it doesn't necessarily have
+ // one. So special case this type of write
+ // message.
+ return se.StreamID
}
- }
-
- // Now, all that remains are DATA frames with non-zero bytes to
- // send. So pick the best one.
- if len(ws.canSend) != 0 {
- panic("should be empty")
- }
- for _, q := range ws.sq {
- if n := ws.streamWritableBytes(q); n > 0 {
- ws.canSend = append(ws.canSend, q)
- }
- }
- if len(ws.canSend) == 0 {
- return
- }
- defer ws.zeroCanSend()
-
- // TODO: find the best queue
- q := ws.canSend[0]
-
- return ws.takeFrom(q.streamID(), q)
-}
-
-// zeroCanSend is defered from take.
-func (ws *writeScheduler) zeroCanSend() {
- for i := range ws.canSend {
- ws.canSend[i] = nil
- }
- ws.canSend = ws.canSend[:0]
-}
-
-// streamWritableBytes returns the number of DATA bytes we could write
-// from the given queue's stream, if this stream/queue were
-// selected. It is an error to call this if q's head isn't a
-// *writeData.
-func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
- wm := q.head()
- ret := wm.stream.flow.available() // max we can write
- if ret == 0 {
return 0
}
- if int32(ws.maxFrameSize) < ret {
- ret = int32(ws.maxFrameSize)
- }
- if ret == 0 {
- panic("internal error: ws.maxFrameSize not initialized or invalid")
- }
- wd := wm.write.(*writeData)
- if len(wd.p) < int(ret) {
- ret = int32(len(wd.p))
- }
- return ret
+ return wr.stream.id
}
-func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
- wm = q.head()
- // If the first item in this queue costs flow control tokens
- // and we don't have enough, write as much as we can.
- if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
- allowed := wm.stream.flow.available() // max we can write
- if allowed == 0 {
- // No quota available. Caller can try the next stream.
- return frameWriteMsg{}, false
- }
- if int32(ws.maxFrameSize) < allowed {
- allowed = int32(ws.maxFrameSize)
- }
- // TODO: further restrict the allowed size, because even if
- // the peer says it's okay to write 16MB data frames, we might
- // want to write smaller ones to properly weight competing
- // streams' priorities.
-
- if len(wd.p) > int(allowed) {
- wm.stream.flow.take(allowed)
- chunk := wd.p[:allowed]
- wd.p = wd.p[allowed:]
- // Make up a new write message of a valid size, rather
- // than shifting one off the queue.
- return frameWriteMsg{
- stream: wm.stream,
- write: &writeData{
- streamID: wd.streamID,
- p: chunk,
- // even if the original had endStream set, there
- // arebytes remaining because len(wd.p) > allowed,
- // so we know endStream is false:
- endStream: false,
- },
- // our caller is blocking on the final DATA frame, not
- // these intermediates, so no need to wait:
- done: nil,
- }, true
- }
- wm.stream.flow.take(int32(len(wd.p)))
+// DataSize returns the number of flow control bytes that must be consumed
+// to write this entire frame. This is 0 for non-DATA frames.
+func (wr FrameWriteRequest) DataSize() int {
+ if wd, ok := wr.write.(*writeData); ok {
+ return len(wd.p)
}
-
- q.shift()
- if q.empty() {
- ws.putEmptyQueue(q)
- delete(ws.sq, id)
- }
- return wm, true
+ return 0
}
-func (ws *writeScheduler) forgetStream(id uint32) {
- q, ok := ws.sq[id]
- if !ok {
+// Consume consumes min(n, available) bytes from this frame, where available
+// is the number of flow control bytes available on the stream. Consume returns
+// 0, 1, or 2 frames, where the integer return value gives the number of frames
+// returned.
+//
+// If flow control prevents consuming any bytes, this returns (_, _, 0). If
+// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this
+// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and
+// 'rest' contains the remaining bytes. The consumed bytes are deducted from the
+// underlying stream's flow control budget.
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) {
+ var empty FrameWriteRequest
+
+ // Non-DATA frames are always consumed whole.
+ wd, ok := wr.write.(*writeData)
+ if !ok || len(wd.p) == 0 {
+ return wr, empty, 1
+ }
+
+ // Might need to split after applying limits.
+ allowed := wr.stream.flow.available()
+ if n < allowed {
+ allowed = n
+ }
+ if wr.stream.sc.maxFrameSize < allowed {
+ allowed = wr.stream.sc.maxFrameSize
+ }
+ if allowed <= 0 {
+ return empty, empty, 0
+ }
+ if len(wd.p) > int(allowed) {
+ wr.stream.flow.take(allowed)
+ consumed := FrameWriteRequest{
+ stream: wr.stream,
+ write: &writeData{
+ streamID: wd.streamID,
+ p: wd.p[:allowed],
+ // Even if the original had endStream set, there
+ // are bytes remaining because len(wd.p) > allowed,
+ // so we know endStream is false.
+ endStream: false,
+ },
+ // Our caller is blocking on the final DATA frame, not
+ // this intermediate frame, so no need to wait.
+ done: nil,
+ }
+ rest := FrameWriteRequest{
+ stream: wr.stream,
+ write: &writeData{
+ streamID: wd.streamID,
+ p: wd.p[allowed:],
+ endStream: wd.endStream,
+ },
+ done: wr.done,
+ }
+ return consumed, rest, 2
+ }
+
+ // The frame is consumed whole.
+ // NB: This cast cannot overflow because allowed is <= math.MaxInt32.
+ wr.stream.flow.take(int32(len(wd.p)))
+ return wr, empty, 1
+}
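
A hypothetical in-package sketch of the three outcomes (st, its 40-byte flow-control budget, and the 100-byte frame are all made up; "math" is assumed imported):

```go
// st is an open *stream whose flow control currently allows 40 bytes,
// with sc.maxFrameSize larger than that.
wr := FrameWriteRequest{stream: st, write: &writeData{streamID: st.id, p: make([]byte, 100)}}

consumed, rest, n := wr.Consume(math.MaxInt32)
// n == 2: consumed carries the first 40 bytes (which are deducted from
// st's flow-control budget); rest carries the remaining 60.

_, _, n = rest.Consume(math.MaxInt32)
// n == 0: the budget is now exhausted, so nothing can be consumed.
_ = consumed
```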
+
+// String is for debugging only.
+func (wr FrameWriteRequest) String() string {
+ var des string
+ if s, ok := wr.write.(fmt.Stringer); ok {
+ des = s.String()
+ } else {
+ des = fmt.Sprintf("%T", wr.write)
+ }
+ return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des)
+}
+
+// replyToWriter sends err to wr.done and panics if the send must block
+// This does nothing if wr.done is nil.
+func (wr *FrameWriteRequest) replyToWriter(err error) {
+ if wr.done == nil {
return
}
- delete(ws.sq, id)
-
- // But keep it for others later.
- for i := range q.s {
- q.s[i] = frameWriteMsg{}
+ select {
+ case wr.done <- err:
+ default:
+ panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write))
}
- q.s = q.s[:0]
- ws.putEmptyQueue(q)
+ wr.write = nil // prevent use (assume it's tainted after wr.done send)
}
+// writeQueue is used by implementations of WriteScheduler.
type writeQueue struct {
- s []frameWriteMsg
+ s []FrameWriteRequest
}
-// streamID returns the stream ID for a non-empty stream-specific queue.
-func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
-
func (q *writeQueue) empty() bool { return len(q.s) == 0 }
-func (q *writeQueue) push(wm frameWriteMsg) {
- q.s = append(q.s, wm)
+func (q *writeQueue) push(wr FrameWriteRequest) {
+ q.s = append(q.s, wr)
}
-// head returns the next item that would be removed by shift.
-func (q *writeQueue) head() frameWriteMsg {
+func (q *writeQueue) shift() FrameWriteRequest {
if len(q.s) == 0 {
panic("invalid use of queue")
}
- return q.s[0]
-}
-
-func (q *writeQueue) shift() frameWriteMsg {
- if len(q.s) == 0 {
- panic("invalid use of queue")
- }
- wm := q.s[0]
+ wr := q.s[0]
// TODO: less copy-happy queue.
copy(q.s, q.s[1:])
- q.s[len(q.s)-1] = frameWriteMsg{}
+ q.s[len(q.s)-1] = FrameWriteRequest{}
q.s = q.s[:len(q.s)-1]
- return wm
+ return wr
}
-func (q *writeQueue) firstIsNoCost() bool {
- if df, ok := q.s[0].write.(*writeData); ok {
- return len(df.p) == 0
+// consume consumes up to n bytes from q.s[0]. If the frame is
+// entirely consumed, it is removed from the queue. If the frame
+// is partially consumed, the frame is kept with the consumed
+// bytes removed. Returns true iff any bytes were consumed.
+func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
+ if len(q.s) == 0 {
+ return FrameWriteRequest{}, false
}
- return true
+ consumed, rest, numresult := q.s[0].Consume(n)
+ switch numresult {
+ case 0:
+ return FrameWriteRequest{}, false
+ case 1:
+ q.shift()
+ case 2:
+ q.s[0] = rest
+ }
+ return consumed, true
+}
+
+type writeQueuePool []*writeQueue
+
+// put inserts an unused writeQueue into the pool.
+func (p *writeQueuePool) put(q *writeQueue) {
+ for i := range q.s {
+ q.s[i] = FrameWriteRequest{}
+ }
+ q.s = q.s[:0]
+ *p = append(*p, q)
+}
+
+// get returns an empty writeQueue.
+func (p *writeQueuePool) get() *writeQueue {
+ ln := len(*p)
+ if ln == 0 {
+ return new(writeQueue)
+ }
+ x := ln - 1
+ q := (*p)[x]
+ (*p)[x] = nil
+ *p = (*p)[:x]
+ return q
}
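
A short in-package sketch of the intended pool round trip (the queued request is a placeholder):

```go
var pool writeQueuePool

q := pool.get()             // empty queue, possibly recycled
q.push(FrameWriteRequest{}) // queue some work
wr := q.shift()             // drain it again
pool.put(q)                 // zero retained requests, keep the backing array
_ = wr
```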
diff --git a/src/vendor/golang.org/x/net/http2/writesched_priority.go b/src/vendor/golang.org/x/net/http2/writesched_priority.go
new file mode 100644
index 000000000..848fed6ec
--- /dev/null
+++ b/src/vendor/golang.org/x/net/http2/writesched_priority.go
@@ -0,0 +1,452 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+ "sort"
+)
+
+// RFC 7540, Section 5.3.5: the default weight is 16.
+const priorityDefaultWeight = 15 // 16 = 15 + 1
+
+// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
+type PriorityWriteSchedulerConfig struct {
+ // MaxClosedNodesInTree controls the maximum number of closed streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // "It is possible for a stream to become closed while prioritization
+ // information ... is in transit. ... This potentially creates suboptimal
+ // prioritization, since the stream could be given a priority that is
+ // different from what is intended. To avoid these problems, an endpoint
+ // SHOULD retain stream prioritization state for a period after streams
+ // become closed. The longer state is retained, the lower the chance that
+ // streams are assigned incorrect or default priority values."
+ MaxClosedNodesInTree int
+
+ // MaxIdleNodesInTree controls the maximum number of idle streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // Similarly, streams that are in the "idle" state can be assigned
+ // priority or become a parent of other streams. This allows for the
+ // creation of a grouping node in the dependency tree, which enables
+ // more flexible expressions of priority. Idle streams begin with a
+ // default priority (Section 5.3.5).
+ MaxIdleNodesInTree int
+
+ // ThrottleOutOfOrderWrites enables write throttling to help ensure that
+ // data is delivered in priority order. This works around a race where
+ // stream B depends on stream A and both streams are about to call Write
+ // to queue DATA frames. If B wins the race, a naive scheduler would eagerly
+ // write as much data from B as possible, but this is suboptimal because A
+ // is a higher-priority stream. With throttling enabled, we write a small
+ // amount of data from B to minimize the amount of bandwidth that B can
+ // steal from A.
+ ThrottleOutOfOrderWrites bool
+}
+
+// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
+// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
+// If cfg is nil, default options are used.
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
+ if cfg == nil {
+ // For justification of these defaults, see:
+ // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
+ cfg = &PriorityWriteSchedulerConfig{
+ MaxClosedNodesInTree: 10,
+ MaxIdleNodesInTree: 10,
+ ThrottleOutOfOrderWrites: false,
+ }
+ }
+
+ ws := &priorityWriteScheduler{
+ nodes: make(map[uint32]*priorityNode),
+ maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
+ maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
+ enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
+ }
+ ws.nodes[0] = &ws.root
+ if cfg.ThrottleOutOfOrderWrites {
+ ws.writeThrottleLimit = 1024
+ } else {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ return ws
+}
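
Callers select this scheduler through the http2.Server.NewWriteScheduler hook that accompanies these files. A minimal sketch (the address and certificate paths are placeholders):

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443"}
	h2 := &http2.Server{
		// Schedule writes with the RFC 7540 priority tree; nil picks
		// the defaults documented above.
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(nil)
		},
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
```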
+
+type priorityNodeState int
+
+const (
+ priorityNodeOpen priorityNodeState = iota
+ priorityNodeClosed
+ priorityNodeIdle
+)
+
+// priorityNode is a node in an HTTP/2 priority tree.
+// Each node is associated with a single stream ID.
+// See RFC 7540, Section 5.3.
+type priorityNode struct {
+ q writeQueue // queue of pending frames to write
+ id uint32 // id of the stream, or 0 for the root of the tree
+ weight uint8 // the actual weight is weight+1, so the value is in [1,256]
+ state priorityNodeState // open | closed | idle
+ bytes int64 // number of bytes written by this node, or 0 if closed
+ subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
+
+ // These links form the priority tree.
+ parent *priorityNode
+ kids *priorityNode // start of the kids list
+ prev, next *priorityNode // doubly-linked list of siblings
+}
+
+func (n *priorityNode) setParent(parent *priorityNode) {
+ if n == parent {
+ panic("setParent to self")
+ }
+ if n.parent == parent {
+ return
+ }
+ // Unlink from current parent.
+ if parent := n.parent; parent != nil {
+ if n.prev == nil {
+ parent.kids = n.next
+ } else {
+ n.prev.next = n.next
+ }
+ if n.next != nil {
+ n.next.prev = n.prev
+ }
+ }
+ // Link to new parent.
+ // If parent=nil, remove n from the tree.
+ // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
+ n.parent = parent
+ if parent == nil {
+ n.next = nil
+ n.prev = nil
+ } else {
+ n.next = parent.kids
+ n.prev = nil
+ if n.next != nil {
+ n.next.prev = n
+ }
+ parent.kids = n
+ }
+}
+
+func (n *priorityNode) addBytes(b int64) {
+ n.bytes += b
+ for ; n != nil; n = n.parent {
+ n.subtreeBytes += b
+ }
+}
+
+// walkReadyInOrder iterates over the tree in priority order, calling f for each node
+// with a non-empty write queue. When f returns true, this function returns true and the
+// walk halts. tmp is used as scratch space for sorting.
+//
+// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
+// if any ancestor p of n is still open (ignoring the root node).
+func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+ if !n.q.empty() && f(n, openParent) {
+ return true
+ }
+ if n.kids == nil {
+ return false
+ }
+
+ // Don't consider the root "open" when updating openParent since
+ // we can't send data frames on the root stream (only control frames).
+ if n.id != 0 {
+ openParent = openParent || (n.state == priorityNodeOpen)
+ }
+
+ // Common case: only one kid or all kids have the same weight.
+ // Some clients don't use weights; other clients (like web browsers)
+ // use mostly-linear priority trees.
+ w := n.kids.weight
+ needSort := false
+ for k := n.kids.next; k != nil; k = k.next {
+ if k.weight != w {
+ needSort = true
+ break
+ }
+ }
+ if !needSort {
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+ }
+
+ // Uncommon case: sort the child nodes. We remove the kids from the parent,
+ // then re-insert after sorting so we can reuse tmp for future sort calls.
+ *tmp = (*tmp)[:0]
+ for n.kids != nil {
+ *tmp = append(*tmp, n.kids)
+ n.kids.setParent(nil)
+ }
+ sort.Sort(sortPriorityNodeSiblings(*tmp))
+ for i := len(*tmp) - 1; i >= 0; i-- {
+ (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
+ }
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+}
+
+type sortPriorityNodeSiblings []*priorityNode
+
+func (z sortPriorityNodeSiblings) Len() int { return len(z) }
+func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+func (z sortPriorityNodeSiblings) Less(i, k int) bool {
+ // Prefer the subtree that has sent fewer bytes relative to its weight.
+ // See sections 5.3.2 and 5.3.4.
+ wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
+ wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
+ if bi == 0 && bk == 0 {
+ return wi >= wk
+ }
+ if bk == 0 {
+ return false
+ }
+ return bi/bk <= wi/wk
+}
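
A worked example of this ordering: take siblings A (weight field 255, i.e. actual weight 256, with 65536 subtree bytes sent) and B (weight field 127, actual weight 128, with 16384 bytes sent). Less(A, B) compares 65536/16384 = 4 against 256/128 = 2; since 4 > 2, A is not less, so B sorts first. That matches the intent: B has sent only 128 bytes per unit of weight versus A's 256.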
+
+type priorityWriteScheduler struct {
+ // root is the root of the priority tree, where root.id = 0.
+ // The root queues control frames that are not associated with any stream.
+ root priorityNode
+
+ // nodes maps stream ids to priority tree nodes.
+ nodes map[uint32]*priorityNode
+
+ // maxID is the maximum stream id in nodes.
+ maxID uint32
+
+ // lists of nodes that have been closed or are idle, but are kept in
+ // the tree for improved prioritization. When the lengths exceed either
+ // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
+ closedNodes, idleNodes []*priorityNode
+
+ // From the config.
+ maxClosedNodesInTree int
+ maxIdleNodesInTree int
+ writeThrottleLimit int32
+ enableWriteThrottle bool
+
+ // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
+ tmp []*priorityNode
+
+ // pool of empty queues for reuse.
+ queuePool writeQueuePool
+}
+
+func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+ // The stream may be currently idle but cannot be opened or closed.
+ if curr := ws.nodes[streamID]; curr != nil {
+ if curr.state != priorityNodeIdle {
+ panic(fmt.Sprintf("stream %d already opened", streamID))
+ }
+ curr.state = priorityNodeOpen
+ return
+ }
+
+ // RFC 7540, Section 5.3.5:
+ // "All streams are initially assigned a non-exclusive dependency on stream 0x0.
+ // Pushed streams initially depend on their associated stream. In both cases,
+ // streams are assigned a default weight of 16."
+ parent := ws.nodes[options.PusherID]
+ if parent == nil {
+ parent = &ws.root
+ }
+ n := &priorityNode{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: priorityDefaultWeight,
+ state: priorityNodeOpen,
+ }
+ n.setParent(parent)
+ ws.nodes[streamID] = n
+ if streamID > ws.maxID {
+ ws.maxID = streamID
+ }
+}
+
+func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
+ if streamID == 0 {
+ panic("violation of WriteScheduler interface: cannot close stream 0")
+ }
+ if ws.nodes[streamID] == nil {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
+ }
+ if ws.nodes[streamID].state != priorityNodeOpen {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
+ }
+
+ n := ws.nodes[streamID]
+ n.state = priorityNodeClosed
+ n.addBytes(-n.bytes)
+
+ q := n.q
+ ws.queuePool.put(&q)
+ n.q.s = nil
+ if ws.maxClosedNodesInTree > 0 {
+ ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
+ } else {
+ ws.removeNode(n)
+ }
+}
+
+func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+ if streamID == 0 {
+ panic("adjustPriority on root")
+ }
+
+ // If streamID does not exist, there are two cases:
+ // - A closed stream that has been removed (this will have ID <= maxID)
+ // - An idle stream that is being used for "grouping" (this will have ID > maxID)
+ n := ws.nodes[streamID]
+ if n == nil {
+ if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
+ return
+ }
+ ws.maxID = streamID
+ n = &priorityNode{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: priorityDefaultWeight,
+ state: priorityNodeIdle,
+ }
+ n.setParent(&ws.root)
+ ws.nodes[streamID] = n
+ ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
+ }
+
+ // Section 5.3.1: A dependency on a stream that is not currently in the tree
+ // results in that stream being given a default priority (Section 5.3.5).
+ parent := ws.nodes[priority.StreamDep]
+ if parent == nil {
+ n.setParent(&ws.root)
+ n.weight = priorityDefaultWeight
+ return
+ }
+
+ // Ignore if the client tries to make a node its own parent.
+ if n == parent {
+ return
+ }
+
+ // Section 5.3.3:
+ // "If a stream is made dependent on one of its own dependencies, the
+ // formerly dependent stream is first moved to be dependent on the
+ // reprioritized stream's previous parent. The moved dependency retains
+ // its weight."
+ //
+ // That is: if parent depends on n, move parent to depend on n.parent.
+ for x := parent.parent; x != nil; x = x.parent {
+ if x == n {
+ parent.setParent(n.parent)
+ break
+ }
+ }
+
+ // Section 5.3.3: The exclusive flag causes the stream to become the sole
+ // dependency of its parent stream, causing other dependencies to become
+ // dependent on the exclusive stream.
+ if priority.Exclusive {
+ k := parent.kids
+ for k != nil {
+ next := k.next
+ if k != n {
+ k.setParent(n)
+ }
+ k = next
+ }
+ }
+
+ n.setParent(parent)
+ n.weight = priority.Weight
+}
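
Concretely: suppose stream 7 currently depends on stream 5, and a PRIORITY frame makes 5 depend on 7. The ancestor loop above finds 5 among 7's ancestors and first moves 7 up to 5's former parent; only then is 5 linked under 7, so the tree never contains a cycle. If the frame was also exclusive, 7's remaining children are re-parented onto 5 before the final setParent.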
+
+func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
+ var n *priorityNode
+ if id := wr.StreamID(); id == 0 {
+ n = &ws.root
+ } else {
+ n = ws.nodes[id]
+ if n == nil {
+ // id is an idle or closed stream. wr should not be a HEADERS or
+ // DATA frame. However, wr can be a RST_STREAM. In this case, we
+ // push wr onto the root, rather than creating a new priorityNode,
+ // since RST_STREAM is tiny and the stream's priority is unknown
+ // anyway. See issue #17919.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ n = &ws.root
+ }
+ }
+ n.q.push(wr)
+}
+
+func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
+ ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
+ limit := int32(math.MaxInt32)
+ if openParent {
+ limit = ws.writeThrottleLimit
+ }
+ wr, ok = n.q.consume(limit)
+ if !ok {
+ return false
+ }
+ n.addBytes(int64(wr.DataSize()))
+ // If B depends on A and B continuously has data available but A
+ // does not, gradually increase the throttling limit to allow B to
+ // steal more and more bandwidth from A.
+ if openParent {
+ ws.writeThrottleLimit += 1024
+ if ws.writeThrottleLimit < 0 {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ } else if ws.enableWriteThrottle {
+ ws.writeThrottleLimit = 1024
+ }
+ return true
+ })
+ return wr, ok
+}
+
+func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
+ if maxSize == 0 {
+ return
+ }
+ if len(*list) == maxSize {
+ // Remove the oldest node, then shift left.
+ ws.removeNode((*list)[0])
+ x := (*list)[1:]
+ copy(*list, x)
+ *list = (*list)[:len(x)]
+ }
+ *list = append(*list, n)
+}
+
+func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
+ for k := n.kids; k != nil; k = k.next {
+ k.setParent(n.parent)
+ }
+ n.setParent(nil)
+ delete(ws.nodes, n.id)
+}
diff --git a/src/vendor/golang.org/x/net/http2/writesched_random.go b/src/vendor/golang.org/x/net/http2/writesched_random.go
new file mode 100644
index 000000000..36d7919f1
--- /dev/null
+++ b/src/vendor/golang.org/x/net/http2/writesched_random.go
@@ -0,0 +1,72 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "math"
+
+// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2
+// priorities. Control frames like SETTINGS and PING are written before DATA
+// frames, but if no control frames are queued and multiple streams have queued
+// HEADERS or DATA frames, Pop selects a ready stream arbitrarily.
+func NewRandomWriteScheduler() WriteScheduler {
+ return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)}
+}
+
+type randomWriteScheduler struct {
+ // zero are frames not associated with a specific stream.
+ zero writeQueue
+
+ // sq contains the stream-specific queues, keyed by stream ID.
+ // When a stream is idle or closed, it's deleted from the map.
+ sq map[uint32]*writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool writeQueuePool
+}
+
+func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
+ // no-op: idle streams are not tracked
+}
+
+func (ws *randomWriteScheduler) CloseStream(streamID uint32) {
+ q, ok := ws.sq[streamID]
+ if !ok {
+ return
+ }
+ delete(ws.sq, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
+ // no-op: priorities are ignored
+}
+
+func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
+ id := wr.StreamID()
+ if id == 0 {
+ ws.zero.push(wr)
+ return
+ }
+ q, ok := ws.sq[id]
+ if !ok {
+ q = ws.queuePool.get()
+ ws.sq[id] = q
+ }
+ q.push(wr)
+}
+
+func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
+ // Control frames first.
+ if !ws.zero.empty() {
+ return ws.zero.shift(), true
+ }
+ // Iterate over all non-idle streams until finding one that can be consumed.
+ for _, q := range ws.sq {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ return wr, true
+ }
+ }
+ return FrameWriteRequest{}, false
+}
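
Since this constructor's signature matches the http2.Server.NewWriteScheduler hook exactly, opting into the random scheduler is a one-liner. A minimal sketch, assuming the same hook shown earlier:

```go
// Explicitly keep the priority-agnostic scheduler.
h2 := &http2.Server{NewWriteScheduler: http2.NewRandomWriteScheduler}
```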
diff --git a/src/vendor/golang.org/x/net/idna/idna10.0.0.go b/src/vendor/golang.org/x/net/idna/idna10.0.0.go
new file mode 100644
index 000000000..a98a31f40
--- /dev/null
+++ b/src/vendor/golang.org/x/net/idna/idna10.0.0.go
@@ -0,0 +1,734 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.10
+
+// Package idna implements IDNA2008 using the compatibility processing
+// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
+// deal with the transition from IDNA2003.
+//
+// IDNA2008 (Internationalized Domain Names for Applications) is defined in RFC
+// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
+// UTS #46 is defined in https://www.unicode.org/reports/tr46.
+// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
+// differences between these two standards.
+package idna // import "golang.org/x/net/idna"
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/text/secure/bidirule"
+ "golang.org/x/text/unicode/bidi"
+ "golang.org/x/text/unicode/norm"
+)
+
+// NOTE: Unlike common practice in Go APIs, the functions will return a
+// sanitized domain name in case of errors. Browsers sometimes use a partially
+// evaluated string as lookup.
+// TODO: the current error handling is, in my opinion, the least opinionated.
+// Other strategies are also viable, though:
+// Option 1) Return an empty string in case of error, but allow the user to
+// specify explicitly which errors to ignore.
+// Option 2) Return the partially evaluated string if it is itself a valid
+// string, otherwise return the empty string in case of error.
+// Option 3) Option 1 and 2.
+// Option 4) Always return an empty string for now and implement Option 1 as
+// needed, and document that the return string may not be empty in case of
+// error in the future.
+// I think Option 1 is best, but it is quite opinionated.
+
+// ToASCII is a wrapper for Punycode.ToASCII.
+func ToASCII(s string) (string, error) {
+ return Punycode.process(s, true)
+}
+
+// ToUnicode is a wrapper for Punycode.ToUnicode.
+func ToUnicode(s string) (string, error) {
+ return Punycode.process(s, false)
+}
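
These package-level wrappers cover the common conversions; a small sketch using the conversions documented in this file:

```go
package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	a, err := idna.ToASCII("bücher.example.com")
	fmt.Println(a, err) // xn--bcher-kva.example.com <nil>

	u, err := idna.ToUnicode("xn--bcher-kva.example.com")
	fmt.Println(u, err) // bücher.example.com <nil>
}
```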
+
+// An Option configures a Profile at creation time.
+type Option func(*options)
+
+// Transitional sets a Profile to use the Transitional mapping as defined in UTS
+// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
+// transitional mapping provides a compromise between IDNA2003 and IDNA2008
+// compatibility. It is used by most browsers when resolving domain names. This
+// option is only meaningful if combined with MapForLookup.
+func Transitional(transitional bool) Option {
+ return func(o *options) { o.transitional = transitional }
+}
+
+// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
+// are longer than allowed by the RFC.
+func VerifyDNSLength(verify bool) Option {
+ return func(o *options) { o.verifyDNSLength = verify }
+}
+
+// RemoveLeadingDots removes leading label separators. Leading runes that map to
+// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
+//
+// This is the behavior suggested by the UTS #46 and is adopted by some
+// browsers.
+func RemoveLeadingDots(remove bool) Option {
+ return func(o *options) { o.removeLeadingDots = remove }
+}
+
+// ValidateLabels sets whether to check the mandatory label validation criteria
+// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
+// of hyphens ('-'), normalization, validity of runes, and the context rules.
+func ValidateLabels(enable bool) Option {
+ return func(o *options) {
+ // Don't override existing mappings, but set one that at least checks
+ // normalization if it is not set.
+ if o.mapping == nil && enable {
+ o.mapping = normalize
+ }
+ o.trie = trie
+ o.validateLabels = enable
+ o.fromPuny = validateFromPunycode
+ }
+}
+
+// StrictDomainName limits the set of permissible ASCII characters to those
+// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
+// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
+//
+// This option is useful, for instance, for browsers that allow characters
+// outside this range, for example a '_' (U+005F LOW LINE). See
+// http://www.rfc-editor.org/std/std3.txt for more details. This option
+// corresponds to the UseSTD3ASCIIRules option in UTS #46.
+func StrictDomainName(use bool) Option {
+ return func(o *options) {
+ o.trie = trie
+ o.useSTD3Rules = use
+ o.fromPuny = validateFromPunycode
+ }
+}
+
+// NOTE: the following options pull in tables. The tables should not be linked
+// in as long as the options are not used.
+
+// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
+// that relies on proper validation of labels should include this rule.
+func BidiRule() Option {
+ return func(o *options) { o.bidirule = bidirule.ValidString }
+}
+
+// ValidateForRegistration sets validation options to verify that a given IDN is
+// properly formatted for registration as defined by Section 4 of RFC 5891.
+func ValidateForRegistration() Option {
+ return func(o *options) {
+ o.mapping = validateRegistration
+ StrictDomainName(true)(o)
+ ValidateLabels(true)(o)
+ VerifyDNSLength(true)(o)
+ BidiRule()(o)
+ }
+}
+
+// MapForLookup sets validation and mapping options such that a given IDN is
+// transformed for domain name lookup according to the requirements set out in
+// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
+// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
+// to add this check.
+//
+// The mappings include normalization and mapping case, width and other
+// compatibility mappings.
+func MapForLookup() Option {
+ return func(o *options) {
+ o.mapping = validateAndMap
+ StrictDomainName(true)(o)
+ ValidateLabels(true)(o)
+ }
+}
+
+type options struct {
+ transitional bool
+ useSTD3Rules bool
+ validateLabels bool
+ verifyDNSLength bool
+ removeLeadingDots bool
+
+ trie *idnaTrie
+
+ // fromPuny calls validation rules when converting A-labels to U-labels.
+ fromPuny func(p *Profile, s string) error
+
+ // mapping implements a validation and mapping step as defined in RFC 5895
+ // or UTS 46, tailored to, for example, domain registration or lookup.
+ mapping func(p *Profile, s string) (mapped string, isBidi bool, err error)
+
+ // bidirule, if specified, checks whether s conforms to the Bidi Rule
+ // defined in RFC 5893.
+ bidirule func(s string) bool
+}
+
+// A Profile defines the configuration of an IDNA mapper.
+type Profile struct {
+ options
+}
+
+func apply(o *options, opts []Option) {
+ for _, f := range opts {
+ f(o)
+ }
+}
+
+// New creates a new Profile.
+//
+// With no options, the returned Profile is the most permissive and equals the
+// Punycode Profile. Options can be passed to further restrict the Profile. The
+// MapForLookup and ValidateForRegistration options set a collection of options,
+// for lookup and registration purposes respectively, which can be tailored by
+// adding more fine-grained options, where later options override earlier
+// options.
+func New(o ...Option) *Profile {
+ p := &Profile{}
+ apply(&p.options, o)
+ return p
+}
+
+// ToASCII converts a domain or domain label to its ASCII form. For example,
+// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
+// ToASCII("golang") is "golang". If an error is encountered it will return
+// an error and a (partially) processed result.
+func (p *Profile) ToASCII(s string) (string, error) {
+ return p.process(s, true)
+}
+
+// ToUnicode converts a domain or domain label to its Unicode form. For example,
+// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
+// ToUnicode("golang") is "golang". If an error is encountered it will return
+// an error and a (partially) processed result.
+func (p *Profile) ToUnicode(s string) (string, error) {
+ pp := *p
+ pp.transitional = false
+ return pp.process(s, false)
+}
+
+// String reports a string with a description of the profile for debugging
+// purposes. The string format may change with different versions.
+func (p *Profile) String() string {
+ s := ""
+ if p.transitional {
+ s = "Transitional"
+ } else {
+ s = "NonTransitional"
+ }
+ if p.useSTD3Rules {
+ s += ":UseSTD3Rules"
+ }
+ if p.validateLabels {
+ s += ":ValidateLabels"
+ }
+ if p.verifyDNSLength {
+ s += ":VerifyDNSLength"
+ }
+ return s
+}
+
+var (
+ // Punycode is a Profile that does raw punycode processing with a minimum
+ // of validation.
+ Punycode *Profile = punycode
+
+ // Lookup is the recommended profile for looking up domain names, according
+ // to Section 5 of RFC 5891. The exact configuration of this profile may
+ // change over time.
+ Lookup *Profile = lookup
+
+ // Display is the recommended profile for displaying domain names.
+ // The configuration of this profile may change over time.
+ Display *Profile = display
+
+ // Registration is the recommended profile for checking whether a given
+ // IDN is valid for registration, according to Section 4 of RFC 5891.
+ Registration *Profile = registration
+
+ punycode = &Profile{}
+ lookup = &Profile{options{
+ transitional: true,
+ useSTD3Rules: true,
+ validateLabels: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateAndMap,
+ bidirule: bidirule.ValidString,
+ }}
+ display = &Profile{options{
+ useSTD3Rules: true,
+ validateLabels: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateAndMap,
+ bidirule: bidirule.ValidString,
+ }}
+ registration = &Profile{options{
+ useSTD3Rules: true,
+ validateLabels: true,
+ verifyDNSLength: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateRegistration,
+ bidirule: bidirule.ValidString,
+ }}
+
+ // TODO: profiles
+ // Register: recommended for approving domain names: don't do any mappings
+ // but rather reject on invalid input. Bundle or block deviation characters.
+)
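
A short sketch contrasting two of these profiles: Lookup maps input (for example, case-folding) before converting, while Registration rejects instead of mapping. The hostnames are made up.

```go
package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Lookup case-folds and maps, then encodes.
	a, _ := idna.Lookup.ToASCII("www.Bücher.example")
	fmt.Println(a) // www.xn--bcher-kva.example

	// Registration applies STD3 rules strictly: '_' is rejected.
	_, err := idna.Registration.ToASCII("www_host.example")
	fmt.Println(err) // idna: disallowed rune U+005F
}
```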
+
+type labelError struct{ label, code_ string }
+
+func (e labelError) code() string { return e.code_ }
+func (e labelError) Error() string {
+ return fmt.Sprintf("idna: invalid label %q", e.label)
+}
+
+type runeError rune
+
+func (e runeError) code() string { return "P1" }
+func (e runeError) Error() string {
+ return fmt.Sprintf("idna: disallowed rune %U", e)
+}
+
+// process implements the algorithm described in section 4 of UTS #46,
+// see https://www.unicode.org/reports/tr46.
+func (p *Profile) process(s string, toASCII bool) (string, error) {
+ var err error
+ var isBidi bool
+ if p.mapping != nil {
+ s, isBidi, err = p.mapping(p, s)
+ }
+ // Remove leading empty labels.
+ if p.removeLeadingDots {
+ for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
+ }
+ }
+ // TODO: allow for a quick check of the tables data.
+ // It seems like we should only create this error on ToASCII, but the
+ // UTS 46 conformance tests suggest we should always check this.
+ if err == nil && p.verifyDNSLength && s == "" {
+ err = &labelError{s, "A4"}
+ }
+ labels := labelIter{orig: s}
+ for ; !labels.done(); labels.next() {
+ label := labels.label()
+ if label == "" {
+ // Empty labels are not okay. The label iterator skips the last
+ // label if it is empty.
+ if err == nil && p.verifyDNSLength {
+ err = &labelError{s, "A4"}
+ }
+ continue
+ }
+ if strings.HasPrefix(label, acePrefix) {
+ u, err2 := decode(label[len(acePrefix):])
+ if err2 != nil {
+ if err == nil {
+ err = err2
+ }
+ // Spec says keep the old label.
+ continue
+ }
+ isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight
+ labels.set(u)
+ if err == nil && p.validateLabels {
+ err = p.fromPuny(p, u)
+ }
+ if err == nil {
+ // This should be called on NonTransitional, according to the
+ // spec, but that currently does not have any effect. Use the
+ // original profile to preserve options.
+ err = p.validateLabel(u)
+ }
+ } else if err == nil {
+ err = p.validateLabel(label)
+ }
+ }
+ if isBidi && p.bidirule != nil && err == nil {
+ for labels.reset(); !labels.done(); labels.next() {
+ if !p.bidirule(labels.label()) {
+ err = &labelError{s, "B"}
+ break
+ }
+ }
+ }
+ if toASCII {
+ for labels.reset(); !labels.done(); labels.next() {
+ label := labels.label()
+ if !ascii(label) {
+ a, err2 := encode(acePrefix, label)
+ if err == nil {
+ err = err2
+ }
+ label = a
+ labels.set(a)
+ }
+ n := len(label)
+ if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
+ err = &labelError{label, "A4"}
+ }
+ }
+ }
+ s = labels.result()
+ if toASCII && p.verifyDNSLength && err == nil {
+ // Compute the length of the domain name minus the root label and its dot.
+ n := len(s)
+ if n > 0 && s[n-1] == '.' {
+ n--
+ }
+ if len(s) < 1 || n > 253 {
+ err = &labelError{s, "A4"}
+ }
+ }
+ return s, err
+}
+
+func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) {
+ // TODO: consider first doing a quick check to see if any of these checks
+ // need to be done. This will make it slower in the general case, but
+ // faster in the common case.
+ mapped = norm.NFC.String(s)
+ isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft
+ return mapped, isBidi, nil
+}
+
+func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) {
+ // TODO: filter need for normalization in loop below.
+ if !norm.NFC.IsNormalString(s) {
+ return s, false, &labelError{s, "V1"}
+ }
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ if sz == 0 {
+ return s, bidi, runeError(utf8.RuneError)
+ }
+ bidi = bidi || info(v).isBidi(s[i:])
+ // Copy bytes not copied so far.
+ switch p.simplify(info(v).category()) {
+ // TODO: handle the NV8 defined in the Unicode idna data set to allow
+ // for strict conformance to IDNA2008.
+ case valid, deviation:
+ case disallowed, mapped, unknown, ignored:
+ r, _ := utf8.DecodeRuneInString(s[i:])
+ return s, bidi, runeError(r)
+ }
+ i += sz
+ }
+ return s, bidi, nil
+}
+
+func (c info) isBidi(s string) bool {
+ if !c.isMapped() {
+ return c&attributesMask == rtl
+ }
+ // TODO: also store bidi info for mapped data. This is possible, but a bit
+ // cumbersome and not for the common case.
+ p, _ := bidi.LookupString(s)
+ switch p.Class() {
+ case bidi.R, bidi.AL, bidi.AN:
+ return true
+ }
+ return false
+}
+
+func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) {
+ var (
+ b []byte
+ k int
+ )
+ // combinedInfoBits contains the or-ed bits of all runes. We use this
+ // to derive the mayNeedNorm bit later. This may trigger normalization
+ // overeagerly, but it will not do so in the common case. The end result
+ // is another 10% saving on BenchmarkProfile for the common case.
+ var combinedInfoBits info
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ if sz == 0 {
+ b = append(b, s[k:i]...)
+ b = append(b, "\ufffd"...)
+ k = len(s)
+ if err == nil {
+ err = runeError(utf8.RuneError)
+ }
+ break
+ }
+ combinedInfoBits |= info(v)
+ bidi = bidi || info(v).isBidi(s[i:])
+ start := i
+ i += sz
+ // Copy bytes not copied so far.
+ switch p.simplify(info(v).category()) {
+ case valid:
+ continue
+ case disallowed:
+ if err == nil {
+ r, _ := utf8.DecodeRuneInString(s[start:])
+ err = runeError(r)
+ }
+ continue
+ case mapped, deviation:
+ b = append(b, s[k:start]...)
+ b = info(v).appendMapping(b, s[start:i])
+ case ignored:
+ b = append(b, s[k:start]...)
+ // drop the rune
+ case unknown:
+ b = append(b, s[k:start]...)
+ b = append(b, "\ufffd"...)
+ }
+ k = i
+ }
+ if k == 0 {
+ // No changes so far.
+ if combinedInfoBits&mayNeedNorm != 0 {
+ s = norm.NFC.String(s)
+ }
+ } else {
+ b = append(b, s[k:]...)
+ if norm.NFC.QuickSpan(b) != len(b) {
+ b = norm.NFC.Bytes(b)
+ }
+ // TODO: the punycode converters require strings as input.
+ s = string(b)
+ }
+ return s, bidi, err
+}
+
+// A labelIter allows iterating over domain name labels.
+type labelIter struct {
+ orig string
+ slice []string
+ curStart int
+ curEnd int
+ i int
+}
+
+func (l *labelIter) reset() {
+ l.curStart = 0
+ l.curEnd = 0
+ l.i = 0
+}
+
+func (l *labelIter) done() bool {
+ return l.curStart >= len(l.orig)
+}
+
+func (l *labelIter) result() string {
+ if l.slice != nil {
+ return strings.Join(l.slice, ".")
+ }
+ return l.orig
+}
+
+func (l *labelIter) label() string {
+ if l.slice != nil {
+ return l.slice[l.i]
+ }
+ p := strings.IndexByte(l.orig[l.curStart:], '.')
+ l.curEnd = l.curStart + p
+ if p == -1 {
+ l.curEnd = len(l.orig)
+ }
+ return l.orig[l.curStart:l.curEnd]
+}
+
+// next sets the value to the next label. It skips the last label if it is empty.
+func (l *labelIter) next() {
+ l.i++
+ if l.slice != nil {
+ if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
+ l.curStart = len(l.orig)
+ }
+ } else {
+ l.curStart = l.curEnd + 1
+ if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
+ l.curStart = len(l.orig)
+ }
+ }
+}
+
+func (l *labelIter) set(s string) {
+ if l.slice == nil {
+ l.slice = strings.Split(l.orig, ".")
+ }
+ l.slice[l.i] = s
+}
+
+// acePrefix is the ASCII Compatible Encoding prefix.
+const acePrefix = "xn--"
+
+func (p *Profile) simplify(cat category) category {
+ switch cat {
+ case disallowedSTD3Mapped:
+ if p.useSTD3Rules {
+ cat = disallowed
+ } else {
+ cat = mapped
+ }
+ case disallowedSTD3Valid:
+ if p.useSTD3Rules {
+ cat = disallowed
+ } else {
+ cat = valid
+ }
+ case deviation:
+ if !p.transitional {
+ cat = valid
+ }
+ case validNV8, validXV8:
+ // TODO: handle V2008
+ cat = valid
+ }
+ return cat
+}
+
+func validateFromPunycode(p *Profile, s string) error {
+ if !norm.NFC.IsNormalString(s) {
+ return &labelError{s, "V1"}
+ }
+ // TODO: detect whether string may have to be normalized in the following
+ // loop.
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ if sz == 0 {
+ return runeError(utf8.RuneError)
+ }
+ if c := p.simplify(info(v).category()); c != valid && c != deviation {
+ return &labelError{s, "V6"}
+ }
+ i += sz
+ }
+ return nil
+}
+
+const (
+ zwnj = "\u200c"
+ zwj = "\u200d"
+)
+
+type joinState int8
+
+const (
+ stateStart joinState = iota
+ stateVirama
+ stateBefore
+ stateBeforeVirama
+ stateAfter
+ stateFAIL
+)
+
+var joinStates = [][numJoinTypes]joinState{
+ stateStart: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateVirama,
+ },
+ stateVirama: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ },
+ stateBefore: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joiningT: stateBefore,
+ joinZWNJ: stateAfter,
+ joinZWJ: stateFAIL,
+ joinVirama: stateBeforeVirama,
+ },
+ stateBeforeVirama: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joiningT: stateBefore,
+ },
+ stateAfter: {
+ joiningL: stateFAIL,
+ joiningD: stateBefore,
+ joiningT: stateAfter,
+ joiningR: stateStart,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateAfter, // no-op as we can't accept joiners here
+ },
+ stateFAIL: {
+ 0: stateFAIL,
+ joiningL: stateFAIL,
+ joiningD: stateFAIL,
+ joiningT: stateFAIL,
+ joiningR: stateFAIL,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateFAIL,
+ },
+}
+
+// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6 are
+// already implicitly satisfied by the overall implementation.
+func (p *Profile) validateLabel(s string) (err error) {
+ if s == "" {
+ if p.verifyDNSLength {
+ return &labelError{s, "A4"}
+ }
+ return nil
+ }
+ if !p.validateLabels {
+ return nil
+ }
+ trie := p.trie // p.validateLabels is only set if trie is set.
+ if len(s) > 4 && s[2] == '-' && s[3] == '-' {
+ return &labelError{s, "V2"}
+ }
+ if s[0] == '-' || s[len(s)-1] == '-' {
+ return &labelError{s, "V3"}
+ }
+ // TODO: merge the use of this in the trie.
+ v, sz := trie.lookupString(s)
+ x := info(v)
+ if x.isModifier() {
+ return &labelError{s, "V5"}
+ }
+ // Quickly return in the absence of zero-width (non) joiners.
+ if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
+ return nil
+ }
+ st := stateStart
+ for i := 0; ; {
+ jt := x.joinType()
+ if s[i:i+sz] == zwj {
+ jt = joinZWJ
+ } else if s[i:i+sz] == zwnj {
+ jt = joinZWNJ
+ }
+ st = joinStates[st][jt]
+ if x.isViramaModifier() {
+ st = joinStates[st][joinVirama]
+ }
+ if i += sz; i == len(s) {
+ break
+ }
+ v, sz = trie.lookupString(s[i:])
+ x = info(v)
+ }
+ if st == stateFAIL || st == stateAfter {
+ return &labelError{s, "C"}
+ }
+ return nil
+}
+
+func ascii(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
diff --git a/src/vendor/golang.org/x/net/idna/idna9.0.0.go b/src/vendor/golang.org/x/net/idna/idna9.0.0.go
new file mode 100644
index 000000000..8842146b5
--- /dev/null
+++ b/src/vendor/golang.org/x/net/idna/idna9.0.0.go
@@ -0,0 +1,682 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.10
+
+// Package idna implements IDNA2008 using the compatibility processing
+// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
+// deal with the transition from IDNA2003.
+//
+// IDNA2008 (Internationalized Domain Names for Applications) is defined in RFC
+// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
+// UTS #46 is defined in https://www.unicode.org/reports/tr46.
+// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
+// differences between these two standards.
+package idna // import "golang.org/x/net/idna"
+
+import (
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "golang.org/x/text/secure/bidirule"
+ "golang.org/x/text/unicode/norm"
+)
+
+// NOTE: Unlike common practice in Go APIs, the functions will return a
+// sanitized domain name in case of errors. Browsers sometimes use a partially
+// evaluated string as lookup.
+// TODO: the current error handling is, in my opinion, the least opinionated.
+// Other strategies are also viable, though:
+// Option 1) Return an empty string in case of error, but allow the user to
+// specify explicitly which errors to ignore.
+// Option 2) Return the partially evaluated string if it is itself a valid
+// string, otherwise return the empty string in case of error.
+// Option 3) Option 1 and 2.
+// Option 4) Always return an empty string for now and implement Option 1 as
+// needed, and document that the return string may not be empty in case of
+// error in the future.
+// I think Option 1 is best, but it is quite opinionated.
+
+// ToASCII is a wrapper for Punycode.ToASCII.
+func ToASCII(s string) (string, error) {
+ return Punycode.process(s, true)
+}
+
+// ToUnicode is a wrapper for Punycode.ToUnicode.
+func ToUnicode(s string) (string, error) {
+ return Punycode.process(s, false)
+}
+
+// An Option configures a Profile at creation time.
+type Option func(*options)
+
+// Transitional sets a Profile to use the Transitional mapping as defined in UTS
+// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
+// transitional mapping provides a compromise between IDNA2003 and IDNA2008
+// compatibility. It is used by most browsers when resolving domain names. This
+// option is only meaningful if combined with MapForLookup.
+func Transitional(transitional bool) Option {
+ return func(o *options) { o.transitional = transitional }
+}
+
+// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
+// are longer than allowed by the RFC.
+func VerifyDNSLength(verify bool) Option {
+ return func(o *options) { o.verifyDNSLength = verify }
+}
+
+// RemoveLeadingDots removes leading label separators. Leading runes that map to
+// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
+//
+// This is the behavior suggested by the UTS #46 and is adopted by some
+// browsers.
+func RemoveLeadingDots(remove bool) Option {
+ return func(o *options) { o.removeLeadingDots = remove }
+}
+
+// ValidateLabels sets whether to check the mandatory label validation criteria
+// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
+// of hyphens ('-'), normalization, validity of runes, and the context rules.
+func ValidateLabels(enable bool) Option {
+ return func(o *options) {
+ // Don't override existing mappings, but set one that at least checks
+ // normalization if it is not set.
+ if o.mapping == nil && enable {
+ o.mapping = normalize
+ }
+ o.trie = trie
+ o.validateLabels = enable
+ o.fromPuny = validateFromPunycode
+ }
+}
+
+// StrictDomainName limits the set of permissible ASCII characters to those
+// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
+// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
+//
+// This option is useful, for instance, for browsers that allow characters
+// outside this range, for example a '_' (U+005F LOW LINE). See
+// http://www.rfc-editor.org/std/std3.txt for more details. This option
+// corresponds to the UseSTD3ASCIIRules option in UTS #46.
+func StrictDomainName(use bool) Option {
+ return func(o *options) {
+ o.trie = trie
+ o.useSTD3Rules = use
+ o.fromPuny = validateFromPunycode
+ }
+}
+
+// NOTE: the following options pull in tables. The tables should not be linked
+// in as long as the options are not used.
+
+// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
+// that relies on proper validation of labels should include this rule.
+func BidiRule() Option {
+ return func(o *options) { o.bidirule = bidirule.ValidString }
+}
+
+// ValidateForRegistration sets validation options to verify that a given IDN is
+// properly formatted for registration as defined by Section 4 of RFC 5891.
+func ValidateForRegistration() Option {
+ return func(o *options) {
+ o.mapping = validateRegistration
+ StrictDomainName(true)(o)
+ ValidateLabels(true)(o)
+ VerifyDNSLength(true)(o)
+ BidiRule()(o)
+ }
+}
+
+// MapForLookup sets validation and mapping options such that a given IDN is
+// transformed for domain name lookup according to the requirements set out in
+// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
+// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
+// to add this check.
+//
+// The mappings include normalization and mapping case, width and other
+// compatibility mappings.
+func MapForLookup() Option {
+ return func(o *options) {
+ o.mapping = validateAndMap
+ StrictDomainName(true)(o)
+ ValidateLabels(true)(o)
+ RemoveLeadingDots(true)(o)
+ }
+}
+
+type options struct {
+ transitional bool
+ useSTD3Rules bool
+ validateLabels bool
+ verifyDNSLength bool
+ removeLeadingDots bool
+
+ trie *idnaTrie
+
+ // fromPuny calls validation rules when converting A-labels to U-labels.
+ fromPuny func(p *Profile, s string) error
+
+ // mapping implements a validation and mapping step as defined in RFC 5895
+ // or UTS 46, tailored to, for example, domain registration or lookup.
+ mapping func(p *Profile, s string) (string, error)
+
+ // bidirule, if specified, checks whether s conforms to the Bidi Rule
+ // defined in RFC 5893.
+ bidirule func(s string) bool
+}
+
+// A Profile defines the configuration of an IDNA mapper.
+type Profile struct {
+ options
+}
+
+func apply(o *options, opts []Option) {
+ for _, f := range opts {
+ f(o)
+ }
+}
+
+// New creates a new Profile.
+//
+// With no options, the returned Profile is the most permissive and equals the
+// Punycode Profile. Options can be passed to further restrict the Profile. The
+// MapForLookup and ValidateForRegistration options set a collection of options,
+// for lookup and registration purposes respectively, which can be tailored by
+// adding more fine-grained options, where later options override earlier
+// options.
+func New(o ...Option) *Profile {
+ p := &Profile{}
+ apply(&p.options, o)
+ return p
+}
+
+// ToASCII converts a domain or domain label to its ASCII form. For example,
+// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
+// ToASCII("golang") is "golang". If an error is encountered it will return
+// an error and a (partially) processed result.
+func (p *Profile) ToASCII(s string) (string, error) {
+ return p.process(s, true)
+}
+
+// ToUnicode converts a domain or domain label to its Unicode form. For example,
+// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
+// ToUnicode("golang") is "golang". If an error is encountered it will return
+// an error and a (partially) processed result.
+func (p *Profile) ToUnicode(s string) (string, error) {
+ pp := *p
+ pp.transitional = false
+ return pp.process(s, false)
+}
+
+// String reports a string with a description of the profile for debugging
+// purposes. The string format may change with different versions.
+func (p *Profile) String() string {
+ s := ""
+ if p.transitional {
+ s = "Transitional"
+ } else {
+ s = "NonTransitional"
+ }
+ if p.useSTD3Rules {
+ s += ":UseSTD3Rules"
+ }
+ if p.validateLabels {
+ s += ":ValidateLabels"
+ }
+ if p.verifyDNSLength {
+ s += ":VerifyDNSLength"
+ }
+ return s
+}
+
+var (
+ // Punycode is a Profile that does raw punycode processing with a minimum
+ // of validation.
+ Punycode *Profile = punycode
+
+ // Lookup is the recommended profile for looking up domain names, according
+ // to Section 5 of RFC 5891. The exact configuration of this profile may
+ // change over time.
+ Lookup *Profile = lookup
+
+ // Display is the recommended profile for displaying domain names.
+ // The configuration of this profile may change over time.
+ Display *Profile = display
+
+ // Registration is the recommended profile for checking whether a given
+ // IDN is valid for registration, according to Section 4 of RFC 5891.
+ Registration *Profile = registration
+
+ punycode = &Profile{}
+ lookup = &Profile{options{
+ transitional: true,
+ useSTD3Rules: true,
+ validateLabels: true,
+ removeLeadingDots: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateAndMap,
+ bidirule: bidirule.ValidString,
+ }}
+ display = &Profile{options{
+ useSTD3Rules: true,
+ validateLabels: true,
+ removeLeadingDots: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateAndMap,
+ bidirule: bidirule.ValidString,
+ }}
+ registration = &Profile{options{
+ useSTD3Rules: true,
+ validateLabels: true,
+ verifyDNSLength: true,
+ trie: trie,
+ fromPuny: validateFromPunycode,
+ mapping: validateRegistration,
+ bidirule: bidirule.ValidString,
+ }}
+
+ // TODO: profiles
+ // Register: recommended for approving domain names: don't do any mappings
+ // but rather reject on invalid input. Bundle or block deviation characters.
+)
+
+type labelError struct{ label, code_ string }
+
+func (e labelError) code() string { return e.code_ }
+func (e labelError) Error() string {
+ return fmt.Sprintf("idna: invalid label %q", e.label)
+}
+
+type runeError rune
+
+func (e runeError) code() string { return "P1" }
+func (e runeError) Error() string {
+ return fmt.Sprintf("idna: disallowed rune %U", e)
+}
+
+// process implements the algorithm described in section 4 of UTS #46,
+// see https://www.unicode.org/reports/tr46.
+func (p *Profile) process(s string, toASCII bool) (string, error) {
+ var err error
+ if p.mapping != nil {
+ s, err = p.mapping(p, s)
+ }
+ // Remove leading empty labels.
+ if p.removeLeadingDots {
+ for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
+ }
+ }
+ // It seems like we should only create this error on ToASCII, but the
+ // UTS 46 conformance tests suggest we should always check this.
+ if err == nil && p.verifyDNSLength && s == "" {
+ err = &labelError{s, "A4"}
+ }
+ labels := labelIter{orig: s}
+ for ; !labels.done(); labels.next() {
+ label := labels.label()
+ if label == "" {
+ // Empty labels are not okay. The label iterator skips the last
+ // label if it is empty.
+ if err == nil && p.verifyDNSLength {
+ err = &labelError{s, "A4"}
+ }
+ continue
+ }
+ if strings.HasPrefix(label, acePrefix) {
+ u, err2 := decode(label[len(acePrefix):])
+ if err2 != nil {
+ if err == nil {
+ err = err2
+ }
+ // Spec says keep the old label.
+ continue
+ }
+ labels.set(u)
+ if err == nil && p.validateLabels {
+ err = p.fromPuny(p, u)
+ }
+ if err == nil {
+ // This should be called on NonTransitional, according to the
+ // spec, but that currently does not have any effect. Use the
+ // original profile to preserve options.
+ err = p.validateLabel(u)
+ }
+ } else if err == nil {
+ err = p.validateLabel(label)
+ }
+ }
+ if toASCII {
+ for labels.reset(); !labels.done(); labels.next() {
+ label := labels.label()
+ if !ascii(label) {
+ a, err2 := encode(acePrefix, label)
+ if err == nil {
+ err = err2
+ }
+ label = a
+ labels.set(a)
+ }
+ n := len(label)
+ if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
+ err = &labelError{label, "A4"}
+ }
+ }
+ }
+ s = labels.result()
+ if toASCII && p.verifyDNSLength && err == nil {
+ // Compute the length of the domain name minus the root label and its dot.
+ n := len(s)
+ if n > 0 && s[n-1] == '.' {
+ n--
+ }
+ if len(s) < 1 || n > 253 {
+ err = &labelError{s, "A4"}
+ }
+ }
+ return s, err
+}
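+
+// In outline, process runs the UTS 46 pipeline: apply the profile's mapping,
+// strip leading dots if configured, then, per label, either decode an ACE
+// ("xn--") label and validate the result or validate the label directly. For
+// ToASCII, non-ASCII labels are then Punycode-encoded and, when
+// verifyDNSLength is set, label lengths (1-63 bytes) and the total domain
+// length (at most 253 bytes, excluding the root dot) are enforced.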
+
+func normalize(p *Profile, s string) (string, error) {
+ return norm.NFC.String(s), nil
+}
+
+func validateRegistration(p *Profile, s string) (string, error) {
+ if !norm.NFC.IsNormalString(s) {
+ return s, &labelError{s, "V1"}
+ }
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ switch p.simplify(info(v).category()) {
+ // TODO: handle the NV8 defined in the Unicode idna data set to allow
+ // for strict conformance to IDNA2008.
+ case valid, deviation:
+ case disallowed, mapped, unknown, ignored:
+ r, _ := utf8.DecodeRuneInString(s[i:])
+ return s, runeError(r)
+ }
+ i += sz
+ }
+ return s, nil
+}
+
+func validateAndMap(p *Profile, s string) (string, error) {
+ var (
+ err error
+ b []byte
+ k int
+ )
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ start := i
+ i += sz
+ // Copy bytes not copied so far.
+ switch p.simplify(info(v).category()) {
+ case valid:
+ continue
+ case disallowed:
+ if err == nil {
+ r, _ := utf8.DecodeRuneInString(s[start:])
+ err = runeError(r)
+ }
+ continue
+ case mapped, deviation:
+ b = append(b, s[k:start]...)
+ b = info(v).appendMapping(b, s[start:i])
+ case ignored:
+ b = append(b, s[k:start]...)
+ // drop the rune
+ case unknown:
+ b = append(b, s[k:start]...)
+ b = append(b, "\ufffd"...)
+ }
+ k = i
+ }
+ if k == 0 {
+ // No changes so far.
+ s = norm.NFC.String(s)
+ } else {
+ b = append(b, s[k:]...)
+ if norm.NFC.QuickSpan(b) != len(b) {
+ b = norm.NFC.Bytes(b)
+ }
+ // TODO: the punycode converters require strings as input.
+ s = string(b)
+ }
+ return s, err
+}
+
+// A labelIter allows iterating over domain name labels.
+type labelIter struct {
+ orig string
+ slice []string
+ curStart int
+ curEnd int
+ i int
+}
+
+func (l *labelIter) reset() {
+ l.curStart = 0
+ l.curEnd = 0
+ l.i = 0
+}
+
+func (l *labelIter) done() bool {
+ return l.curStart >= len(l.orig)
+}
+
+func (l *labelIter) result() string {
+ if l.slice != nil {
+ return strings.Join(l.slice, ".")
+ }
+ return l.orig
+}
+
+func (l *labelIter) label() string {
+ if l.slice != nil {
+ return l.slice[l.i]
+ }
+ p := strings.IndexByte(l.orig[l.curStart:], '.')
+ l.curEnd = l.curStart + p
+ if p == -1 {
+ l.curEnd = len(l.orig)
+ }
+ return l.orig[l.curStart:l.curEnd]
+}
+
+// next sets the value to the next label. It skips the last label if it is empty.
+func (l *labelIter) next() {
+ l.i++
+ if l.slice != nil {
+ if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
+ l.curStart = len(l.orig)
+ }
+ } else {
+ l.curStart = l.curEnd + 1
+ if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
+ l.curStart = len(l.orig)
+ }
+ }
+}
+
+func (l *labelIter) set(s string) {
+ if l.slice == nil {
+ l.slice = strings.Split(l.orig, ".")
+ }
+ l.slice[l.i] = s
+}
+
+// acePrefix is the ASCII Compatible Encoding prefix.
+const acePrefix = "xn--"
+
+func (p *Profile) simplify(cat category) category {
+ switch cat {
+ case disallowedSTD3Mapped:
+ if p.useSTD3Rules {
+ cat = disallowed
+ } else {
+ cat = mapped
+ }
+ case disallowedSTD3Valid:
+ if p.useSTD3Rules {
+ cat = disallowed
+ } else {
+ cat = valid
+ }
+ case deviation:
+ if !p.transitional {
+ cat = valid
+ }
+ case validNV8, validXV8:
+ // TODO: handle V2008
+ cat = valid
+ }
+ return cat
+}
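+
+// For example (illustrative): ASCII underscore is disallowedSTD3Valid in the
+// UTS 46 data, so simplify reports it as disallowed when useSTD3Rules is set
+// and as valid otherwise; a deviation character such as U+00DF 'ß' becomes
+// valid only under non-transitional processing.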
+
+func validateFromPunycode(p *Profile, s string) error {
+ if !norm.NFC.IsNormalString(s) {
+ return &labelError{s, "V1"}
+ }
+ for i := 0; i < len(s); {
+ v, sz := trie.lookupString(s[i:])
+ if c := p.simplify(info(v).category()); c != valid && c != deviation {
+ return &labelError{s, "V6"}
+ }
+ i += sz
+ }
+ return nil
+}
+
+const (
+ zwnj = "\u200c"
+ zwj = "\u200d"
+)
+
+type joinState int8
+
+const (
+ stateStart joinState = iota
+ stateVirama
+ stateBefore
+ stateBeforeVirama
+ stateAfter
+ stateFAIL
+)
+
+var joinStates = [][numJoinTypes]joinState{
+ stateStart: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateVirama,
+ },
+ stateVirama: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ },
+ stateBefore: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joiningT: stateBefore,
+ joinZWNJ: stateAfter,
+ joinZWJ: stateFAIL,
+ joinVirama: stateBeforeVirama,
+ },
+ stateBeforeVirama: {
+ joiningL: stateBefore,
+ joiningD: stateBefore,
+ joiningT: stateBefore,
+ },
+ stateAfter: {
+ joiningL: stateFAIL,
+ joiningD: stateBefore,
+ joiningT: stateAfter,
+ joiningR: stateStart,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateAfter, // no-op as we can't accept joiners here
+ },
+ stateFAIL: {
+ 0: stateFAIL,
+ joiningL: stateFAIL,
+ joiningD: stateFAIL,
+ joiningT: stateFAIL,
+ joiningR: stateFAIL,
+ joinZWNJ: stateFAIL,
+ joinZWJ: stateFAIL,
+ joinVirama: stateFAIL,
+ },
+}
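+
+// The table above encodes the CONTEXTJ rules of RFC 5892, Appendix A (an
+// informal summary): ZWJ is accepted only directly after a virama, while
+// ZWNJ is accepted after a virama or between a left-/dual-joining character
+// and a right-/dual-joining character, with transparent characters allowed
+// in between. Any other placement drives the machine into stateFAIL, and
+// ending in stateAfter means a ZWNJ was never closed off, which
+// validateLabel reports as error "C".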
+
+// validateLabel validates the criteria from Section 4.1. Items 1, 4, and 6
+// are already implicitly satisfied by the overall implementation.
+func (p *Profile) validateLabel(s string) error {
+ if s == "" {
+ if p.verifyDNSLength {
+ return &labelError{s, "A4"}
+ }
+ return nil
+ }
+ if p.bidirule != nil && !p.bidirule(s) {
+ return &labelError{s, "B"}
+ }
+ if !p.validateLabels {
+ return nil
+ }
+ trie := p.trie // p.validateLabels is only set if trie is set.
+ if len(s) > 4 && s[2] == '-' && s[3] == '-' {
+ return &labelError{s, "V2"}
+ }
+ if s[0] == '-' || s[len(s)-1] == '-' {
+ return &labelError{s, "V3"}
+ }
+ // TODO: merge the use of this in the trie.
+ v, sz := trie.lookupString(s)
+ x := info(v)
+ if x.isModifier() {
+ return &labelError{s, "V5"}
+ }
+ // Quickly return in the absence of zero-width (non) joiners.
+ if !strings.Contains(s, zwj) && !strings.Contains(s, zwnj) {
+ return nil
+ }
+ st := stateStart
+ for i := 0; ; {
+ jt := x.joinType()
+ if s[i:i+sz] == zwj {
+ jt = joinZWJ
+ } else if s[i:i+sz] == zwnj {
+ jt = joinZWNJ
+ }
+ st = joinStates[st][jt]
+ if x.isViramaModifier() {
+ st = joinStates[st][joinVirama]
+ }
+ if i += sz; i == len(s) {
+ break
+ }
+ v, sz = trie.lookupString(s[i:])
+ x = info(v)
+ }
+ if st == stateFAIL || st == stateAfter {
+ return &labelError{s, "C"}
+ }
+ return nil
+}
+
+func ascii(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] >= utf8.RuneSelf {
+ return false
+ }
+ }
+ return true
+}
diff --git a/src/vendor/golang.org/x/net/idna/punycode.go b/src/vendor/golang.org/x/net/idna/punycode.go
new file mode 100644
index 000000000..02c7d59af
--- /dev/null
+++ b/src/vendor/golang.org/x/net/idna/punycode.go
@@ -0,0 +1,203 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package idna
+
+// This file implements the Punycode algorithm from RFC 3492.
+
+import (
+ "math"
+ "strings"
+ "unicode/utf8"
+)
+
+// These parameter values are specified in section 5.
+//
+// All computation is done with int32s, so that overflow behavior is identical
+// regardless of whether int is 32-bit or 64-bit.
+const (
+ base int32 = 36
+ damp int32 = 700
+ initialBias int32 = 72
+ initialN int32 = 128
+ skew int32 = 38
+ tmax int32 = 26
+ tmin int32 = 1
+)
+
+func punyError(s string) error { return &labelError{s, "A3"} }
+
+// decode decodes a string as specified in section 6.2.
+func decode(encoded string) (string, error) {
+ if encoded == "" {
+ return "", nil
+ }
+ pos := 1 + strings.LastIndex(encoded, "-")
+ if pos == 1 {
+ return "", punyError(encoded)
+ }
+ if pos == len(encoded) {
+ return encoded[:len(encoded)-1], nil
+ }
+ output := make([]rune, 0, len(encoded))
+ if pos != 0 {
+ for _, r := range encoded[:pos-1] {
+ output = append(output, r)
+ }
+ }
+ i, n, bias := int32(0), initialN, initialBias
+ for pos < len(encoded) {
+ oldI, w := i, int32(1)
+ for k := base; ; k += base {
+ if pos == len(encoded) {
+ return "", punyError(encoded)
+ }
+ digit, ok := decodeDigit(encoded[pos])
+ if !ok {
+ return "", punyError(encoded)
+ }
+ pos++
+ i += digit * w
+ if i < 0 {
+ return "", punyError(encoded)
+ }
+ t := k - bias
+ if t < tmin {
+ t = tmin
+ } else if t > tmax {
+ t = tmax
+ }
+ if digit < t {
+ break
+ }
+ w *= base - t
+ if w >= math.MaxInt32/base {
+ return "", punyError(encoded)
+ }
+ }
+ x := int32(len(output) + 1)
+ bias = adapt(i-oldI, x, oldI == 0)
+ n += i / x
+ i %= x
+ if n > utf8.MaxRune || len(output) >= 1024 {
+ return "", punyError(encoded)
+ }
+ output = append(output, 0)
+ copy(output[i+1:], output[i:])
+ output[i] = n
+ i++
+ }
+ return string(output), nil
+}
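+
+// Worked example (illustrative): decode("bcher-kva") copies the basic code
+// points "bcher" before the final '-', then consumes the digits "kva" as a
+// single insertion point and value, yielding "bücher", the label behind the
+// "xn--bcher-kva" example in the ToASCII documentation.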
+
+// encode encodes a string as specified in section 6.3 and prepends prefix to
+// the result.
+//
+// The "while h < length(input)" line in the specification becomes "for
+// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
+func encode(prefix, s string) (string, error) {
+ output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
+ copy(output, prefix)
+ delta, n, bias := int32(0), initialN, initialBias
+ b, remaining := int32(0), int32(0)
+ for _, r := range s {
+ if r < 0x80 {
+ b++
+ output = append(output, byte(r))
+ } else {
+ remaining++
+ }
+ }
+ h := b
+ if b > 0 {
+ output = append(output, '-')
+ }
+ for remaining != 0 {
+ m := int32(0x7fffffff)
+ for _, r := range s {
+ if m > r && r >= n {
+ m = r
+ }
+ }
+ delta += (m - n) * (h + 1)
+ if delta < 0 {
+ return "", punyError(s)
+ }
+ n = m
+ for _, r := range s {
+ if r < n {
+ delta++
+ if delta < 0 {
+ return "", punyError(s)
+ }
+ continue
+ }
+ if r > n {
+ continue
+ }
+ q := delta
+ for k := base; ; k += base {
+ t := k - bias
+ if t < tmin {
+ t = tmin
+ } else if t > tmax {
+ t = tmax
+ }
+ if q < t {
+ break
+ }
+ output = append(output, encodeDigit(t+(q-t)%(base-t)))
+ q = (q - t) / (base - t)
+ }
+ output = append(output, encodeDigit(q))
+ bias = adapt(delta, h+1, h == b)
+ delta = 0
+ h++
+ remaining--
+ }
+ delta++
+ n++
+ }
+ return string(output), nil
+}
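+
+// Worked example (illustrative): encode("xn--", "bücher") writes the prefix,
+// copies the five ASCII code points "bcher", appends the '-' delimiter, and
+// encodes the single insertion of 'ü' as the digits "kva", producing
+// "xn--bcher-kva".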
+
+func decodeDigit(x byte) (digit int32, ok bool) {
+ switch {
+ case '0' <= x && x <= '9':
+ return int32(x - ('0' - 26)), true
+ case 'A' <= x && x <= 'Z':
+ return int32(x - 'A'), true
+ case 'a' <= x && x <= 'z':
+ return int32(x - 'a'), true
+ }
+ return 0, false
+}
+
+func encodeDigit(digit int32) byte {
+ switch {
+ case 0 <= digit && digit < 26:
+ return byte(digit + 'a')
+ case 26 <= digit && digit < 36:
+ return byte(digit + ('0' - 26))
+ }
+ panic("idna: internal error in punycode encoding")
+}
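+
+// The digit alphabet maps values 0-25 to 'a'-'z' and values 26-35 to
+// '0'-'9' (section 5). For instance, encodeDigit(10) == 'k' and
+// decodeDigit('9') returns (35, true); uppercase letters decode to the same
+// values as lowercase.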
+
+// adapt is the bias adaptation function specified in section 6.1.
+func adapt(delta, numPoints int32, firstTime bool) int32 {
+ if firstTime {
+ delta /= damp
+ } else {
+ delta /= 2
+ }
+ delta += delta / numPoints
+ k := int32(0)
+ for delta > ((base-tmin)*tmax)/2 {
+ delta /= base - tmin
+ k += base
+ }
+ return k + (base-tmin+1)*delta/(delta+skew)
+}
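+
+// Note (illustrative): the loop bound ((base-tmin)*tmax)/2 evaluates to 455,
+// so adapt repeatedly divides delta by base-tmin == 35 until it is at most
+// 455 before deriving the final bias.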
diff --git a/src/vendor/golang.org/x/net/idna/tables10.0.0.go b/src/vendor/golang.org/x/net/idna/tables10.0.0.go
new file mode 100644
index 000000000..54fddb4b1
--- /dev/null
+++ b/src/vendor/golang.org/x/net/idna/tables10.0.0.go
@@ -0,0 +1,4559 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.10,!go1.13
+
+package idna
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "10.0.0"
+
+var mappings string = "" + // Size: 8175 bytes
+ "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" +
+ "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" +
+ "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" +
+ "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" +
+ "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" +
+ "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" +
+ "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" +
+ "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" +
+ "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" +
+ "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" +
+ "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" +
+ "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" +
+ "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" +
+ "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" +
+ "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" +
+ "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" +
+ "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" +
+ "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" +
+ "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" +
+ "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" +
+ "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" +
+ "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" +
+ "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" +
+ "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" +
+ "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" +
+ "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" +
+ ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" +
+ "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" +
+ "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" +
+ "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" +
+ "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" +
+ "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" +
+ "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" +
+ "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" +
+ "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" +
+ "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" +
+ "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" +
+ "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" +
+ "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" +
+ "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" +
+ "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" +
+ "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" +
+ "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" +
+ "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" +
+ "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" +
+ "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" +
+ "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" +
+ "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" +
+ "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" +
+ "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" +
+ "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" +
+ "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" +
+ "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" +
+ "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" +
+ "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" +
+ "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" +
+ "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" +
+ "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" +
+ "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" +
+ "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" +
+ "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" +
+ "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" +
+ "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" +
+ "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" +
+ "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" +
+ "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" +
+ "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" +
+ "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" +
+ "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" +
+ "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" +
+ "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" +
+ "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" +
+ "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" +
+ " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" +
+ "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" +
+ "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" +
+ "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" +
+ "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" +
+ "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" +
+ "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" +
+ "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" +
+ "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" +
+ "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" +
+ "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" +
+ "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" +
+ "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" +
+ "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" +
+ "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" +
+ "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" +
+ "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" +
+ "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" +
+ "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" +
+ "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" +
+ "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" +
+ "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" +
+ "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" +
+ "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" +
+ "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" +
+ "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" +
+ "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" +
+ "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" +
+ "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" +
+ "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" +
+ "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" +
+ "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" +
+ "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" +
+ "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" +
+ "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" +
+ "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" +
+ "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" +
+ "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" +
+ "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" +
+ "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" +
+ "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" +
+ "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" +
+ "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" +
+ "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" +
+ "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" +
+ "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" +
+ "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" +
+ "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" +
+ "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" +
+ "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" +
+ "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" +
+ "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" +
+ "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" +
+ "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" +
+ "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" +
+ "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" +
+ "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" +
+ "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" +
+ "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" +
+ "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" +
+ "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" +
+ "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" +
+ "頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" +
+ "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻"
+
+var xorData string = "" + // Size: 4855 bytes
+ "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" +
+ "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" +
+ "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" +
+ "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" +
+ "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" +
+ "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" +
+ "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" +
+ "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" +
+ "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" +
+ "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" +
+ "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" +
+ "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" +
+ "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" +
+ "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" +
+ "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" +
+ "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" +
+ "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" +
+ "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" +
+ "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" +
+ "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" +
+ "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" +
+ "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" +
+ "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" +
+ "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" +
+ "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" +
+ "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" +
+ "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" +
+ "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" +
+ "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" +
+ "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" +
+ "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" +
+ "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" +
+ "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" +
+ "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" +
+ "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" +
+ "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" +
+ "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " +
+ "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" +
+ "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" +
+ "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" +
+ "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" +
+ "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" +
+ ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" +
+ "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" +
+ "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" +
+ "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" +
+ "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" +
+ "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" +
+ "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" +
+ "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" +
+ "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" +
+ "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" +
+ "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" +
+ "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" +
+ "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" +
+ "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" +
+ "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" +
+ "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" +
+ "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" +
+ "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" +
+ "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" +
+ "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" +
+ "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" +
+ "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" +
+ "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" +
+ "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" +
+ "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" +
+ "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" +
+ "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" +
+ "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" +
+ "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" +
+ "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" +
+ "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" +
+ "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" +
+ "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" +
+ "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" +
+ "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" +
+ "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" +
+ "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" +
+ "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" +
+ "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" +
+ "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" +
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" +
+ "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" +
+ "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" +
+ "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" +
+ "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" +
+ "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" +
+ "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," +
+ "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" +
+ "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" +
+ "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" +
+ "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" +
+ ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" +
+ "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" +
+ "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" +
+ "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" +
+ "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" +
+ "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" +
+ "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" +
+ "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" +
+ "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" +
+ "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" +
+ "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" +
+ "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" +
+ "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" +
+ "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" +
+ "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" +
+ "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" +
+ "\x08\x1a\x0a\x03\x07\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03\x09\x0c" +
+ "\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06!3\x03" +
+ "\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05\x03\x07" +
+ "<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" +
+ "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" +
+ "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" +
+ "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" +
+ "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" +
+ "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" +
+ "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" +
+ "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" +
+ "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" +
+ "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" +
+ "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" +
+ "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" +
+ "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" +
+ "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" +
+ "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" +
+ "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" +
+ "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" +
+ "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." +
+ "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" +
+ "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" +
+ "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " +
+ "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" +
+ "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" +
+ "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" +
+ "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" +
+ "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" +
+ "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" +
+ "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," +
+ "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" +
+ "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" +
+ "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" +
+ "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" +
+ "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" +
+ "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" +
+ "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" +
+ "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" +
+ "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" +
+ "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" +
+ "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" +
+ "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" +
+ "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" +
+ "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" +
+ "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" +
+ "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" +
+ "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" +
+ "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" +
+ "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" +
+ "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" +
+ "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" +
+ "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" +
+ "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" +
+ "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" +
+ "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" +
+ "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" +
+ "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" +
+ "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" +
+ "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" +
+ "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" +
+ "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" +
+ "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" +
+ "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" +
+ "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" +
+ "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" +
+ "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" +
+ "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" +
+ "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" +
+ "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," +
+ "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" +
+ "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" +
+ "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" +
+ "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" +
+ "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" +
+ "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" +
+ "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" +
+ "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" +
+ "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" +
+ "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" +
+ "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" +
+ "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" +
+ "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" +
+ "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" +
+ "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" +
+ "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" +
+ "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" +
+ "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" +
+ "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" +
+ "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" +
+ "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" +
+ "\x04\x03\x0c?\x05\x03\x0c\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" +
+ "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" +
+ "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" +
+ "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" +
+ "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" +
+ "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" +
+ "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" +
+ "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" +
+ "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" +
+ "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" +
+ "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" +
+ "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" +
+ "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" +
+ "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" +
+ "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" +
+ "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" +
+ "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" +
+ "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" +
+ "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" +
+ "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" +
+ "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" +
+ "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" +
+ "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" +
+ "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" +
+ "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" +
+ "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" +
+ "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" +
+ "\x05\x22\x05\x03\x050\x1d"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+ return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 125:
+ return uint16(idnaValues[n<<6+uint32(b)])
+ default:
+ n -= 125
+ return uint16(idnaSparse.lookup(n, b))
+ }
+}
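+
+// Illustrative reading of the tables: ASCII bypasses the index entirely, so
+// for 'A' (0x41) the result is simply idnaValues[0x41] == 0xe105 (Block 0x1
+// below). For multi-byte UTF-8, each index step narrows to a 64-entry block,
+// and lookupValue(n, b) reads idnaValues[n<<6+uint32(b)] for block numbers
+// below 125, deferring to idnaSparse for the rest.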
+
+// idnaValues: 127 blocks, 8128 entries, 16256 bytes
+// The third block is the zero block.
+var idnaValues = [8128]uint16{
+ // Block 0x0, offset 0x0
+ 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,
+ 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,
+ 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,
+ 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,
+ 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,
+ 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,
+ 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,
+ 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,
+ 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,
+ 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,
+ 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,
+ // Block 0x1, offset 0x40
+ 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,
+ 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,
+ 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,
+ 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,
+ 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,
+ 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,
+ 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,
+ 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,
+ 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,
+ 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,
+ 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,
+ 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,
+ 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,
+ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,
+ 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,
+ 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,
+ 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018,
+ 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a,
+ 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005,
+ 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018,
+ 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018,
+ // Block 0x4, offset 0x100
+ 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,
+ 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,
+ 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,
+ 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,
+ 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,
+ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008,
+ 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,
+ 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,
+ 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,
+ 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,
+ 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199,
+ // Block 0x5, offset 0x140
+ 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,
+ 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008,
+ 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,
+ 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,
+ 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,
+ 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,
+ 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,
+ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,
+ 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,
+ 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,
+ 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9,
+ // Block 0x6, offset 0x180
+ 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,
+ 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,
+ 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,
+ 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,
+ 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155,
+ 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,
+ 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,
+ 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,
+ 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,
+ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,
+ 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9,
+ 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,
+ 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,
+ 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,
+ 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,
+ 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,
+ 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,
+ 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,
+ 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,
+ 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,
+ 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,
+ // Block 0x8, offset 0x200
+ 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,
+ 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,
+ 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,
+ 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,
+ 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,
+ 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,
+ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,
+ 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,
+ 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,
+ 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d,
+ 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,
+ 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,
+ 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,
+ 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,
+ 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a,
+ 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369,
+ 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,
+ 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,
+ 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,
+ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,
+ 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,
+ // Block 0xa, offset 0x280
+ 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d,
+ 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,
+ 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,
+ 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308,
+ 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,
+ 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,
+ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,
+ 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,
+ 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,
+ 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008,
+ 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2,
+ 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,
+ 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,
+ 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,
+ 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,
+ 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,
+ 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,
+ 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,
+ 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,
+ 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,
+ 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,
+ // Block 0xc, offset 0x300
+ 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,
+ 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,
+ 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,
+ 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,
+ 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,
+ 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,
+ 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,
+ 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,
+ 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,
+ 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,
+ 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,
+ 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,
+ 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,
+ 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,
+ 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,
+ 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,
+ 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,
+ 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,
+ 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,
+ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,
+ 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,
+ // Block 0xe, offset 0x380
+ 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,
+ 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,
+ 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008,
+ 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,
+ 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,
+ 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,
+ 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,
+ 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,
+ 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,
+ 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,
+ 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,
+ 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,
+ 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,
+ 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,
+ 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,
+ 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,
+ 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,
+ 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,
+ 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,
+ 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,
+ 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,
+ // Block 0x10, offset 0x400
+ 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,
+ 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,
+ 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,
+ 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,
+ 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,
+ 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,
+ 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,
+ 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,
+ 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,
+ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,
+ 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,
+ // Block 0x11, offset 0x440
+ 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,
+ 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,
+ 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,
+ 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,
+ 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040,
+ 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,
+ 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,
+ 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,
+ 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,
+ 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,
+ 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,
+ 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308,
+ 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,
+ 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,
+ 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,
+ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,
+ 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,
+ 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,
+ 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429,
+ 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,
+ 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,
+ 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,
+ 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,
+ 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,
+ 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,
+ 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,
+ 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,
+ 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,
+ 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,
+ 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,
+ 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,
+ // Block 0x14, offset 0x500
+ 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,
+ 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,
+ 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,
+ 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,
+ 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,
+ 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,
+ 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,
+ 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,
+ 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,
+ 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,
+ 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,
+ // Block 0x15, offset 0x540
+ 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08,
+ 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08,
+ 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08,
+ 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808,
+ 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040,
+ 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08,
+ 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08,
+ 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040,
+ 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040,
+ 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040,
+ 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040,
+ // Block 0x16, offset 0x580
+ 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308,
+ 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008,
+ 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308,
+ 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308,
+ 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1,
+ 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308,
+ 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008,
+ 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008,
+ 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008,
+ 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008,
+ 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008,
+ 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008,
+ 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040,
+ 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008,
+ 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008,
+ 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008,
+ 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040,
+ 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,
+ 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040,
+ 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040,
+ 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008,
+ // Block 0x18, offset 0x600
+ 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040,
+ 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008,
+ 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040,
+ 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008,
+ 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1,
+ 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308,
+ 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008,
+ 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,
+ 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018,
+ 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018,
+ 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040,
+ // Block 0x19, offset 0x640
+ 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008,
+ 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040,
+ 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040,
+ 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008,
+ 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008,
+ 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008,
+ 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040,
+ 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,
+ 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008,
+ 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040,
+ 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040,
+ 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308,
+ 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308,
+ 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040,
+ 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040,
+ 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040,
+ 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008,
+ 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,
+ 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308,
+ 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040,
+ 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008,
+ 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008,
+ 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008,
+ 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008,
+ 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008,
+ 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008,
+ 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040,
+ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,
+ 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008,
+ 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040,
+ 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308,
+ 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008,
+ 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040,
+ 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040,
+ 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040,
+ 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308,
+ 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008,
+ 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,
+ 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040,
+ 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308,
+ 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008,
+ 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008,
+ 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040,
+ 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008,
+ 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008,
+ 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008,
+ 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040,
+ 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,
+ 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008,
+ 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040,
+ 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040,
+ 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008,
+ 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040,
+ 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008,
+ 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9,
+ 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308,
+ 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008,
+ 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008,
+ 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018,
+ 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040,
+ 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008,
+ 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040,
+ 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040,
+ 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040,
+ 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040,
+ 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008,
+ 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008,
+ 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008,
+ 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008,
+ 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040,
+ 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008,
+ // Block 0x20, offset 0x800
+ 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040,
+ 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308,
+ 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040,
+ 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040,
+ 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040,
+ 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308,
+ 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008,
+ 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008,
+ 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040,
+ 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018,
+ 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018,
+ // Block 0x21, offset 0x840
+ 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008,
+ 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008,
+ 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040,
+ 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008,
+ 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008,
+ 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008,
+ 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040,
+ 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,
+ 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008,
+ 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040,
+ 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308,
+ // Block 0x22, offset 0x880
+ 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040,
+ 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008,
+ 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040,
+ 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040,
+ 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040,
+ 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308,
+ 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008,
+ 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,
+ 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040,
+ 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040,
+ 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040,
+ 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008,
+ 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040,
+ 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008,
+ 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018,
+ 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308,
+ 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008,
+ 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008,
+ 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018,
+ 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008,
+ 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040,
+ 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040,
+ 0x90c: 0x0040, 0x90d: 0x0008, 0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040,
+ 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008,
+ 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008,
+ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008,
+ 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040,
+ 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008,
+ 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308,
+ 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308,
+ 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040,
+ // Block 0x25, offset 0x940
+ 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008,
+ 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008,
+ 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008,
+ 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79,
+ 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008,
+ 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008,
+ 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9,
+ 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040,
+ 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59,
+ 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308,
+ 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008,
+ // Block 0x26, offset 0x980
+ 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018,
+ 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,
+ 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308,
+ 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308,
+ 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11,
+ 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308,
+ 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308,
+ 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308,
+ 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308,
+ 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308,
+ 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008,
+ 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008,
+ 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008,
+ 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008,
+ 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008,
+ 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008,
+ 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008,
+ 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008,
+ 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41,
+ 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008,
+ 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1,
+ 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011,
+ 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041,
+ 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9,
+ 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099,
+ 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269,
+ 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1,
+ 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008,
+ 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008,
+ 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008,
+ 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008,
+ 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008,
+ 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008,
+ 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008,
+ 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169,
+ 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9,
+ 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251,
+ 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9,
+ 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359,
+ 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1,
+ 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008,
+ 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008,
+ 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008,
+ 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008,
+ 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008,
+ 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008,
+ 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008,
+ 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008,
+ 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008,
+ 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008,
+ 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008,
+ 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008,
+ 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008,
+ 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008,
+ 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008,
+ 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008,
+ 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008,
+ 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008,
+ 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008,
+ 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008,
+ 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008,
+ 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045,
+ 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008,
+ 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008,
+ 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045,
+ 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008,
+ 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045,
+ 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045,
+ 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489,
+ 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1,
+ 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1,
+ 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591,
+ 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1,
+ 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1,
+ 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771,
+ 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891,
+ 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831,
+ 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951,
+ 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040,
+ 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459,
+ 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040,
+ 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489,
+ 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008,
+ 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008,
+ 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2,
+ 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61,
+ 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045,
+ 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa,
+ 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040,
+ 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9,
+ 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a,
+ 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0,
+ 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d,
+ 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e,
+ 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018,
+ 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018,
+ 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040,
+ 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a,
+ 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018,
+ 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018,
+ 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018,
+ 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018,
+ 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018,
+ 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9,
+ 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018,
+ 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340,
+ 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040,
+ 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340,
+ 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61,
+ 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd,
+ 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61,
+ 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5,
+ 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09,
+ 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359,
+ 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040,
+ 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018,
+ 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018,
+ 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018,
+ 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018,
+ 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018,
+ 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e,
+ 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249,
+ 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41,
+ 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018,
+ 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269,
+ 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018,
+ 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018,
+ 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09,
+ 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9,
+ 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd,
+ 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9,
+ 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018,
+ 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151,
+ 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279,
+ 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399,
+ 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439,
+ 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369,
+ 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61,
+ 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451,
+ 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5,
+ 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018,
+ 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040,
+ 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040,
+ 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040,
+ 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040,
+ 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51,
+ 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601,
+ 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691,
+ 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26,
+ 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6,
+ 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a,
+ 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040,
+ 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040,
+ 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040,
+ 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46,
+ 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06,
+ 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6,
+ 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86,
+ 0xd70: 0x0da6, 0xd71: 0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46,
+ 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199,
+ 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99,
+ 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089,
+ 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9,
+ 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249,
+ 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71,
+ 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9,
+ 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1,
+ 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018,
+ 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018,
+ 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018,
+ 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008,
+ 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008,
+ 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008,
+ 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008,
+ 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008,
+ 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd,
+ 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d,
+ 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9,
+ 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d,
+ 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008,
+ 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008,
+ 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008,
+ 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008,
+ 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008,
+ 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008,
+ 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008,
+ 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018,
+ 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308,
+ 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040,
+ 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018,
+ 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d,
+ 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d,
+ 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 0xe51: 0x291d,
+ 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040,
+ 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040,
+ 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040,
+ 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040,
+ 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040,
+ 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040,
+ 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040,
+ 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008,
+ 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018,
+ 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018,
+ 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018,
+ 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018,
+ 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018,
+ 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018,
+ 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018,
+ 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018,
+ 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018,
+ 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd,
+ 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd,
+ 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d,
+ 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d,
+ 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d,
+ 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd,
+ 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d,
+ 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd,
+ 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d,
+ 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd,
+ 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd,
+ 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d,
+ 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018,
+ 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd,
+ 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d,
+ 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008,
+ 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008,
+ 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008,
+ 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008,
+ 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040,
+ 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd,
+ 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018,
+ 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761,
+ 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1,
+ 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881,
+ 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd,
+ 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d,
+ 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d,
+ 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd,
+ 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d,
+ 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d,
+ 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d,
+ 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd,
+ 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd,
+ 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d,
+ 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d,
+ 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd,
+ 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d,
+ 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999,
+ 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29,
+ 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69,
+ 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69,
+ 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15,
+ 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75,
+ 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded,
+ 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d,
+ 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5,
+ 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d,
+ 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d,
+ 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd,
+ 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9,
+ 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1,
+ 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9,
+ 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549,
+ 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1,
+ 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11,
+ 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91,
+ 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9,
+ 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011,
+ 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209,
+ 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541,
+ 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781,
+ 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979,
+ 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89,
+ 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1,
+ 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99,
+ 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9,
+ 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9,
+ 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069,
+ 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9,
+ 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271,
+ 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9,
+ 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed,
+ 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371,
+ 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9,
+ 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d,
+ 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211,
+ 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1,
+ 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599,
+ 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9,
+ 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671,
+ 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709,
+ 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781,
+ 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1,
+ 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811,
+ 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901,
+ 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1,
+ 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11,
+ 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31,
+ 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51,
+ 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008,
+ 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008,
+ 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008,
+ 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008,
+ 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008,
+ 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008,
+ 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008,
+ 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308,
+ 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308,
+ 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308,
+ 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008,
+ 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008,
+ 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008,
+ 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008,
+ 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11,
+ 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008,
+ 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008,
+ 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008,
+ 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008,
+ 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008,
+ 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018,
+ 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018,
+ 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018,
+ 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008,
+ 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008,
+ 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008,
+ 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,
+ 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008,
+ 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008,
+ 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008,
+ 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008,
+ // Block 0x47, offset 0x11c0
+ 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008,
+ 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008,
+ 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008,
+ 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008,
+ 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008,
+ 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008,
+ 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008,
+ 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008,
+ 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008,
+ 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d,
+ 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008,
+ 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d,
+ 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008,
+ 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008,
+ 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008,
+ 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008,
+ 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008,
+ 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040,
+ 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008,
+ 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040,
+ 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575,
+ 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635,
+ 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008,
+ 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715,
+ 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5,
+ 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008,
+ 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008,
+ 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935,
+ 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5,
+ 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5,
+ 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35,
+ 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5,
+ 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19,
+ 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91,
+ 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040,
+ 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040,
+ 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040,
+ 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040,
+ 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040,
+ 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040,
+ 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001,
+ 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040,
+ 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040,
+ 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9,
+ 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1,
+ 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149,
+ 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2,
+ 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1,
+ 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1,
+ 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479,
+ 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040,
+ 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659,
+ 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721,
+ 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751,
+ 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769,
+ 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799,
+ 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1,
+ 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1,
+ 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9,
+ 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829,
+ 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871,
+ 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9,
+ 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9,
+ 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919,
+ 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931,
+ 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961,
+ 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991,
+ 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1,
+ 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818,
+ 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818,
+ 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040,
+ 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040,
+ 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040,
+ 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09,
+ 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479,
+ 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81,
+ 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1,
+ 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19,
+ 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91,
+ 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1,
+ 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1,
+ 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1,
+ 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1,
+ 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991,
+ 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81,
+ 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a,
+ 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99,
+ 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89,
+ 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79,
+ 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19,
+ 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649,
+ 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9,
+ 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49,
+ 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21,
+ 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9,
+ 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01,
+ 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91,
+ 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9,
+ 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171,
+ 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289,
+ 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1,
+ 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621,
+ 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739,
+ 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1,
+ 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9,
+ 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29,
+ 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079,
+ 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1,
+ 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171,
+ 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261,
+ 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1,
+ 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1,
+ 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171,
+ 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261,
+ 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351,
+ 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441,
+ 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509,
+ 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1,
+ 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081,
+ 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239,
+ 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040,
+ 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040,
+ 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609,
+ 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721,
+ 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839,
+ 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919,
+ 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9,
+ 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9,
+ 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9,
+ 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1,
+ 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989,
+ 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040,
+ 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040,
+ 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040,
+ 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040,
+ 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040,
+ 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040,
+ 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040,
+ 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9,
+ 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12,
+ 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0,
+ 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0,
+ 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55,
+ 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75,
+ 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040,
+ 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308,
+ 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308,
+ 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308,
+ 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2,
+ 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35,
+ 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018,
+ 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56,
+ 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95,
+ 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa,
+ 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95,
+ 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99,
+ 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda,
+ 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040,
+ 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040,
+ 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081,
+ 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141,
+ 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171,
+ 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1,
+ 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1,
+ 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201,
+ 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219,
+ 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249,
+ 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291,
+ 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1,
+ 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9,
+ 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321,
+ 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339,
+ 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369,
+ 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381,
+ 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1,
+ 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9,
+ 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9,
+ 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1,
+ 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441,
+ 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9,
+ 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea,
+ 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2,
+ 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9,
+ 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81,
+ 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2,
+ 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159,
+ 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41,
+ 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9,
+ 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9,
+ 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a,
+ 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09,
+ 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51,
+ 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039,
+ 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279,
+ 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a,
+ 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115,
+ 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 0x16a9: 0x81d5,
+ 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295,
+ 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355,
+ 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415,
+ 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515,
+ 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595,
+ 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5,
+ 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655,
+ 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115,
+ 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735,
+ 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5,
+ 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5,
+ 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5,
+ 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5,
+ 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5,
+ 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715,
+ 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040,
+ 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935,
+ 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040,
+ 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6,
+ 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35,
+ 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040,
+ 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040,
+ 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340,
+ 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08,
+ 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808,
+ 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08,
+ 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908,
+ 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08,
+ 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808,
+ 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040,
+ 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18,
+ 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818,
+ 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040,
+ 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08,
+ 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08,
+ 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08,
+ 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040,
+ 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040,
+ 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040,
+ 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18,
+ 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818,
+ 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040,
+ 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040,
+ 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008,
+ 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008,
+ 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040,
+ 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008,
+ 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008,
+ 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008,
+ 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040,
+ 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008,
+ 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008,
+ 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040,
+ 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040,
+ 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008,
+ 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040,
+ 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008,
+ 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008,
+ 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008,
+ 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308,
+ 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040,
+ 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040,
+ 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040,
+ 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199,
+ 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359,
+ 0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269,
+ 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369,
+ 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9,
+ 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259,
+ 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99,
+ 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089,
+ 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9,
+ 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249,
+ 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269,
+ 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369,
+ 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9,
+ 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259,
+ 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99,
+ 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089,
+ 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9,
+ 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249,
+ 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71,
+ 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9,
+ 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9,
+ 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259,
+ 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99,
+ 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089,
+ 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040,
+ 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040,
+ 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71,
+ 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9,
+ 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1,
+ 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199,
+ 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99,
+ 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089,
+ 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9,
+ 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249,
+ 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71,
+ 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9,
+ 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1,
+ 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199,
+ 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359,
+ 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269,
+ 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9,
+ 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040,
+ 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71,
+ 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9,
+ 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040,
+ 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199,
+ 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359,
+ 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269,
+ 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369,
+ 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9,
+ 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040,
+ 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9,
+ 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040,
+ 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199,
+ 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359,
+ 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269,
+ 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369,
+ 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9,
+ 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259,
+ 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99,
+ 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9,
+ // Block 0x67, offset 0x19c0
+ 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1,
+ 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199,
+ 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359,
+ 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269,
+ 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369,
+ 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9,
+ 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259,
+ 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99,
+ 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089,
+ 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9,
+ 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199,
+ // Block 0x68, offset 0x1a00
+ 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359,
+ 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269,
+ 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369,
+ 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9,
+ 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259,
+ 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99,
+ 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089,
+ 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9,
+ 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249,
+ 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71,
+ 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369,
+ 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9,
+ 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259,
+ 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99,
+ 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089,
+ 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9,
+ 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249,
+ 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71,
+ 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9,
+ 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1,
+ 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259,
+ 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99,
+ 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089,
+ 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9,
+ 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249,
+ 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71,
+ 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9,
+ 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1,
+ 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199,
+ 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359,
+ 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089,
+ 0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9,
+ 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249,
+ 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71,
+ 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9,
+ 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1,
+ 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099,
+ 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429,
+ 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71,
+ 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9,
+ 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9,
+ 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11,
+ 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109,
+ 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1,
+ 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429,
+ 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099,
+ 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429,
+ 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71,
+ 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9,
+ 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01,
+ 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11,
+ 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109,
+ 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1,
+ 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429,
+ 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099,
+ 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429,
+ 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71,
+ 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9,
+ 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01,
+ 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1,
+ 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109,
+ 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1,
+ 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429,
+ 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099,
+ 0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429,
+ 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71,
+ 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9,
+ 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01,
+ 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1,
+ 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41,
+ 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1,
+ 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429,
+ 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099,
+ 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429,
+ 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71,
+ 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9,
+ 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01,
+ 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1,
+ 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41,
+ 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1,
+ 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1,
+ // Block 0x70, offset 0x1c00
+ 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429,
+ 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41,
+ 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079,
+ 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1,
+ 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61,
+ 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9,
+ 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81,
+ 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079,
+ 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1,
+ 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61,
+ 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115,
+ 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135,
+ 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115,
+ 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175,
+ 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115,
+ 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08,
+ 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08,
+ 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08,
+ 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08,
+ 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08,
+ 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411,
+ 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1,
+ 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9,
+ 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231,
+ 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949,
+ 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040,
+ 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429,
+ 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339,
+ 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1,
+ 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351,
+ 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040,
+ // Block 0x73, offset 0x1cc0
+ 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040,
+ 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1,
+ 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9,
+ 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231,
+ 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949,
+ 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040,
+ 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429,
+ 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339,
+ 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1,
+ 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351,
+ 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040,
+ // Block 0x74, offset 0x1d00
+ 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411,
+ 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1,
+ 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9,
+ 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231,
+ 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040,
+ 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249,
+ 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429,
+ 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339,
+ 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1,
+ 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351,
+ 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040,
+ // Block 0x75, offset 0x1d40
+ 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02,
+ 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018,
+ 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2,
+ 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72,
+ 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32,
+ 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2,
+ 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2,
+ 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040,
+ 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199,
+ 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359,
+ 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99,
+ // Block 0x76, offset 0x1d80
+ 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089,
+ 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1,
+ 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018,
+ 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018,
+ 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018,
+ 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018,
+ 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018,
+ 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040,
+ 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018,
+ 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018,
+ 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018,
+ // Block 0x77, offset 0x1dc0
+ 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040,
+ 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040,
+ 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289,
+ 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349,
+ 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409,
+ 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9,
+ 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589,
+ 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649,
+ 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709,
+ 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9,
+ 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040,
+ // Block 0x78, offset 0x1e00
+ 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79,
+ 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39,
+ 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9,
+ 0x1e12: 0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39,
+ 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9,
+ 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79,
+ 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39,
+ 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9,
+ 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059,
+ 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9,
+ 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239,
+ 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9,
+ 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399,
+ 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459,
+ 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309,
+ 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559,
+ 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9,
+ 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679,
+ 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9,
+ 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d,
+ 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9,
+ 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959,
+ 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d,
+ 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d,
+ 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9,
+ 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99,
+ 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9,
+ 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9,
+ 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99,
+ 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39,
+ 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639,
+ 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9,
+ 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d,
+ 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9,
+ 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d,
+ 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd,
+ 0x1ee4: 0xe919, 0x1ee5: 0xe939, 0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979,
+ 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19,
+ 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d,
+ 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d,
+ 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59,
+ // Block 0x7c, offset 0x1f00
+ 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99,
+ 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39,
+ 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9,
+ 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39,
+ 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd,
+ 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19,
+ 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9,
+ 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59,
+ 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd,
+ 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d,
+ 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079,
+ // Block 0x7d, offset 0x1f40
+ 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d,
+ 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d,
+ 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879,
+ 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919,
+ 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd,
+ 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9,
+ 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99,
+ 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39,
+ 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9,
+ 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d,
+ 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79,
+ // Block 0x7e, offset 0x1f80
+ 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19,
+ 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9,
+ 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59,
+ 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9,
+ 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d,
+ 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040,
+ 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040,
+ 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040,
+ 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040,
+ 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040,
+ 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040,
+}
+
+// idnaIndex: 36 blocks, 2304 entries, 4608 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2304]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,
+ 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21,
+ // Block 0x4, offset 0x100
+ 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16,
+ 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d,
+ 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91,
+ 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96,
+ // Block 0x5, offset 0x140
+ 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e,
+ 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6,
+ 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f,
+ 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae,
+ 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6,
+ 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe,
+ 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3,
+ 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b,
+ 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b,
+ 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b,
+ 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b,
+ 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b,
+ 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0,
+ 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5,
+ 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1,
+ 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41,
+ 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f,
+ 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f,
+ 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f,
+ 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f,
+ 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f,
+ 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f,
+ 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f,
+ 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f,
+ 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f,
+ 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f,
+ 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f,
+ 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b,
+ 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f,
+ // Block 0x9, offset 0x240
+ 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f,
+ 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f,
+ 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f,
+ 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f,
+ 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f,
+ 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f,
+ 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f,
+ 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f,
+ // Block 0xa, offset 0x280
+ 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f,
+ 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f,
+ 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f,
+ 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f,
+ 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f,
+ 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f,
+ 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f,
+ 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f,
+ 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f,
+ 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f,
+ 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8,
+ 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0,
+ 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8,
+ 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f,
+ 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f,
+ // Block 0xc, offset 0x300
+ 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f,
+ 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f,
+ 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f,
+ 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa,
+ // Block 0xd, offset 0x340
+ 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba,
+ 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba,
+ 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba,
+ 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba,
+ 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba,
+ 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba,
+ 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba,
+ 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba,
+ // Block 0xe, offset 0x380
+ 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba,
+ 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba,
+ 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba,
+ 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba,
+ 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe,
+ 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c,
+ 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52,
+ 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108,
+ 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e,
+ 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba,
+ 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba,
+ 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c,
+ 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba,
+ 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba,
+ 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba,
+ // Block 0x10, offset 0x400
+ 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e,
+ 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba,
+ 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137,
+ 0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba,
+ 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba,
+ 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba,
+ 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba,
+ 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba,
+ // Block 0x11, offset 0x440
+ 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f,
+ 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba,
+ 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba,
+ 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba,
+ 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba,
+ 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba,
+ 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba,
+ 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba,
+ // Block 0x12, offset 0x480
+ 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f,
+ 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f,
+ 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba,
+ 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba,
+ 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba,
+ 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba,
+ 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba,
+ 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba,
+ 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba,
+ 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f,
+ 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba,
+ 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba,
+ 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba,
+ 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba,
+ 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba,
+ // Block 0x14, offset 0x500
+ 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba,
+ 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba,
+ 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba,
+ 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba,
+ 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f,
+ 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba,
+ 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba,
+ 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154,
+ // Block 0x15, offset 0x540
+ 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f,
+ 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f,
+ 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f,
+ 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155,
+ 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f,
+ 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba,
+ 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba,
+ 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba,
+ // Block 0x16, offset 0x580
+ 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f,
+ 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba,
+ 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba,
+ 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba,
+ 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba,
+ 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba,
+ 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba,
+ 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160,
+ 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba,
+ 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66,
+ 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e,
+ 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b,
+ 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba,
+ 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba,
+ 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba,
+ // Block 0x18, offset 0x600
+ 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba,
+ 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba,
+ 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba,
+ 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba,
+ 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba,
+ 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba,
+ 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba,
+ 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba,
+ // Block 0x19, offset 0x640
+ 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e,
+ 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b,
+ 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b,
+ 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172,
+ 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179,
+ 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba,
+ 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba,
+ 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f,
+ 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f,
+ 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f,
+ 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f,
+ 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f,
+ 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f,
+ 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f,
+ 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f,
+ 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f,
+ 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f,
+ 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f,
+ 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f,
+ 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f,
+ 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f,
+ 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f,
+ 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f,
+ 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f,
+ 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f,
+ 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f,
+ 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f,
+ 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f,
+ 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f,
+ 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f,
+ 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f,
+ 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f,
+ 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f,
+ 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e,
+ 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba,
+ 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba,
+ // Block 0x1e, offset 0x780
+ 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba,
+ 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba,
+ 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba,
+ 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba,
+ 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b,
+ 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba,
+ 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba,
+ 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba,
+ // Block 0x1f, offset 0x7c0
+ 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07,
+ 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17,
+ 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07,
+ 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c,
+ 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b,
+ 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b,
+ 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b,
+ 0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b,
+ 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b,
+ 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b,
+ 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b,
+ 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b,
+ 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b,
+ // Block 0x21, offset 0x840
+ 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184,
+ 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba,
+ 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba,
+ 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba,
+ 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba,
+ 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba,
+ 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba,
+ 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b,
+ 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b,
+ 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b,
+ 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b,
+ 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b,
+ 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b,
+ 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b,
+ 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b,
+ 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b,
+}
+
+// idnaSparseOffset: 264 entries, 528 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778}
+
+// idnaSparseValues: 1915 entries, 7660 bytes
+var idnaSparseValues = [1915]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0249, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x0259, lo: 0xb2, hi: 0xb2},
+ {value: 0x0269, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x0279, lo: 0xb7, hi: 0xb7},
+ {value: 0x0289, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x6, offset 0x34
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0401, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x88},
+ {value: 0x0018, lo: 0x89, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x7, offset 0x3f
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0818, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x82},
+ {value: 0x0818, lo: 0x83, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x8, offset 0x4b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0c08, lo: 0x88, hi: 0x99},
+ {value: 0x0a08, lo: 0x9a, hi: 0xbf},
+ // Block 0x9, offset 0x4f
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0c08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0a08, lo: 0x8e, hi: 0x98},
+ {value: 0x0c08, lo: 0x99, hi: 0x9b},
+ {value: 0x0a08, lo: 0x9c, hi: 0xaa},
+ {value: 0x0c08, lo: 0xab, hi: 0xac},
+ {value: 0x0a08, lo: 0xad, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0a08, lo: 0xb5, hi: 0xb7},
+ {value: 0x0c08, lo: 0xb8, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xa, offset 0x5e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xb, offset 0x63
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0xc, offset 0x6b
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x99},
+ {value: 0x0808, lo: 0x9a, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa3},
+ {value: 0x0808, lo: 0xa4, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa7},
+ {value: 0x0808, lo: 0xa8, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0818, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd, offset 0x77
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0a08, lo: 0xa0, hi: 0xa9},
+ {value: 0x0c08, lo: 0xaa, hi: 0xac},
+ {value: 0x0808, lo: 0xad, hi: 0xad},
+ {value: 0x0c08, lo: 0xae, hi: 0xae},
+ {value: 0x0a08, lo: 0xaf, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb2},
+ {value: 0x0a08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0a08, lo: 0xb6, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xe, offset 0x85
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa1},
+ {value: 0x0840, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xbf},
+ // Block 0xf, offset 0x8a
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x10, offset 0x93
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x85},
+ {value: 0x3008, lo: 0x86, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8c},
+ {value: 0x3b08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x11, offset 0xa3
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x12, offset 0xb1
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xba},
+ {value: 0x3b08, lo: 0xbb, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x13, offset 0xbd
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x14, offset 0xc9
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x89},
+ {value: 0x3b08, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x3008, lo: 0x98, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x15, offset 0xda
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x08f1, lo: 0xb3, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb9},
+ {value: 0x3b08, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x16, offset 0xe4
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0xbf},
+ // Block 0x17, offset 0xeb
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0961, lo: 0x9c, hi: 0x9c},
+ {value: 0x0999, lo: 0x9d, hi: 0x9d},
+ {value: 0x0008, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x18, offset 0xf8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe03d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x19, offset 0x109
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0x1a, offset 0x110
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x1b, offset 0x11b
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3008, lo: 0xa2, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbf},
+ // Block 0x1c, offset 0x12a
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x8c},
+ {value: 0x3308, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x3008, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x1d, offset 0x138
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x86},
+ {value: 0x055d, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8c},
+ {value: 0x055d, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0xe105, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0x1e, offset 0x142
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0018, lo: 0x80, hi: 0xbf},
+ // Block 0x1f, offset 0x144
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa0},
+ {value: 0x2018, lo: 0xa1, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x20, offset 0x149
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa7},
+ {value: 0x2018, lo: 0xa8, hi: 0xbf},
+ // Block 0x21, offset 0x14c
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2018, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0xbf},
+ // Block 0x22, offset 0x14f
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0008, lo: 0x80, hi: 0xbf},
+ // Block 0x23, offset 0x151
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x24, offset 0x15d
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x25, offset 0x168
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x26, offset 0x170
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x27, offset 0x176
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x28, offset 0x17c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x29, offset 0x181
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x2a, offset 0x186
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x2b, offset 0x189
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xbf},
+ // Block 0x2c, offset 0x18d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2d, offset 0x193
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x2e, offset 0x198
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x3b08, lo: 0x94, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x2f, offset 0x1a4
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x30, offset 0x1ae
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xb3},
+ {value: 0x3340, lo: 0xb4, hi: 0xb5},
+ {value: 0x3008, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x31, offset 0x1b4
+ {value: 0x0000, lo: 0x10},
+ {value: 0x3008, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x91},
+ {value: 0x3b08, lo: 0x92, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0x96},
+ {value: 0x0008, lo: 0x97, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x32, offset 0x1c5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x86},
+ {value: 0x0218, lo: 0x87, hi: 0x87},
+ {value: 0x0018, lo: 0x88, hi: 0x8a},
+ {value: 0x33c0, lo: 0x8b, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0208, lo: 0xa0, hi: 0xbf},
+ // Block 0x33, offset 0x1cf
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0208, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x34, offset 0x1d2
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0208, lo: 0x87, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xa9},
+ {value: 0x0208, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x35, offset 0x1da
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x36, offset 0x1dd
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x37, offset 0x1ea
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x38, offset 0x1f2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x39, offset 0x1f6
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0028, lo: 0x9a, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xbf},
+ // Block 0x3a, offset 0x1fd
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x3308, lo: 0x97, hi: 0x98},
+ {value: 0x3008, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x3b, offset 0x205
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x94},
+ {value: 0x3008, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xac},
+ {value: 0x3008, lo: 0xad, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3c, offset 0x215
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbd},
+ {value: 0x3318, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x3d, offset 0x221
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0040, lo: 0x80, hi: 0xbf},
+ // Block 0x3e, offset 0x223
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3008, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x3f, offset 0x22d
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x3808, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x40, offset 0x239
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3808, lo: 0xaa, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xbf},
+ // Block 0x41, offset 0x245
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3008, lo: 0xaa, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3808, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x42, offset 0x251
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x43, offset 0x259
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x44, offset 0x25e
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0e29, lo: 0x80, hi: 0x80},
+ {value: 0x0e41, lo: 0x81, hi: 0x81},
+ {value: 0x0e59, lo: 0x82, hi: 0x82},
+ {value: 0x0e71, lo: 0x83, hi: 0x83},
+ {value: 0x0e89, lo: 0x84, hi: 0x85},
+ {value: 0x0ea1, lo: 0x86, hi: 0x86},
+ {value: 0x0eb9, lo: 0x87, hi: 0x87},
+ {value: 0x057d, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0x45, offset 0x268
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x92},
+ {value: 0x0018, lo: 0x93, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa8},
+ {value: 0x0008, lo: 0xa9, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x46, offset 0x279
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0x47, offset 0x27d
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x87},
+ {value: 0xe045, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0xe045, lo: 0x98, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0xe045, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbf},
+ // Block 0x48, offset 0x288
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x3318, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x49, offset 0x28c
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x24c1, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x4a, offset 0x295
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x24f1, lo: 0xac, hi: 0xac},
+ {value: 0x2529, lo: 0xad, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xae},
+ {value: 0x2579, lo: 0xaf, hi: 0xaf},
+ {value: 0x25b1, lo: 0xb0, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x4b, offset 0x29d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x9f},
+ {value: 0x0080, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xad},
+ {value: 0x0080, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x4c, offset 0x2a3
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa8},
+ {value: 0x09c5, lo: 0xa9, hi: 0xa9},
+ {value: 0x09e5, lo: 0xaa, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xbf},
+ // Block 0x4d, offset 0x2a8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0x4e, offset 0x2ab
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x28c1, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x4f, offset 0x2af
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0e66, lo: 0xb4, hi: 0xb4},
+ {value: 0x292a, lo: 0xb5, hi: 0xb5},
+ {value: 0x0e86, lo: 0xb6, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x50, offset 0x2b5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x9b},
+ {value: 0x2941, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0xbf},
+ // Block 0x51, offset 0x2b9
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x52, offset 0x2bd
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0018, lo: 0xbd, hi: 0xbf},
+ // Block 0x53, offset 0x2c3
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x92},
+ {value: 0x0040, lo: 0x93, hi: 0xab},
+ {value: 0x0018, lo: 0xac, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x54, offset 0x2ca
+ {value: 0x0000, lo: 0x05},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x03f5, lo: 0x90, hi: 0x9f},
+ {value: 0x0ea5, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x55, offset 0x2d0
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x56, offset 0x2d8
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xae},
+ {value: 0xe075, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x57, offset 0x2df
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x58, offset 0x2ea
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xbf},
+ // Block 0x59, offset 0x2f4
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x5a, offset 0x2f8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0x5b, offset 0x2fb
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9e},
+ {value: 0x0edd, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x5c, offset 0x301
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb2},
+ {value: 0x0efd, lo: 0xb3, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x5d, offset 0x305
+ {value: 0x0020, lo: 0x01},
+ {value: 0x0f1d, lo: 0x80, hi: 0xbf},
+ // Block 0x5e, offset 0x307
+ {value: 0x0020, lo: 0x02},
+ {value: 0x171d, lo: 0x80, hi: 0x8f},
+ {value: 0x18fd, lo: 0x90, hi: 0xbf},
+ // Block 0x5f, offset 0x30a
+ {value: 0x0020, lo: 0x01},
+ {value: 0x1efd, lo: 0x80, hi: 0xbf},
+ // Block 0x60, offset 0x30c
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x61, offset 0x30f
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9a},
+ {value: 0x29e2, lo: 0x9b, hi: 0x9b},
+ {value: 0x2a0a, lo: 0x9c, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9e},
+ {value: 0x2a31, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x62, offset 0x319
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x2a69, lo: 0xbf, hi: 0xbf},
+ // Block 0x63, offset 0x31c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0040, lo: 0x80, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x2a1d, lo: 0xb1, hi: 0xb1},
+ {value: 0x2a3d, lo: 0xb2, hi: 0xb2},
+ {value: 0x2a5d, lo: 0xb3, hi: 0xb3},
+ {value: 0x2a7d, lo: 0xb4, hi: 0xb4},
+ {value: 0x2a5d, lo: 0xb5, hi: 0xb5},
+ {value: 0x2a9d, lo: 0xb6, hi: 0xb6},
+ {value: 0x2abd, lo: 0xb7, hi: 0xb7},
+ {value: 0x2add, lo: 0xb8, hi: 0xb9},
+ {value: 0x2afd, lo: 0xba, hi: 0xbb},
+ {value: 0x2b1d, lo: 0xbc, hi: 0xbd},
+ {value: 0x2afd, lo: 0xbe, hi: 0xbf},
+ // Block 0x64, offset 0x32b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x65, offset 0x32f
+ {value: 0x0030, lo: 0x04},
+ {value: 0x2aa2, lo: 0x80, hi: 0x9d},
+ {value: 0x305a, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x30a2, lo: 0xa0, hi: 0xbf},
+ // Block 0x66, offset 0x334
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0x67, offset 0x337
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x68, offset 0x33b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x69, offset 0x340
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x6a, offset 0x345
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb1},
+ {value: 0x0018, lo: 0xb2, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6b, offset 0x34b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xb7},
+ {value: 0x2009, lo: 0xb8, hi: 0xb8},
+ {value: 0x6e89, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xbf},
+ // Block 0x6c, offset 0x351
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x3308, lo: 0x8b, hi: 0x8b},
+ {value: 0x0008, lo: 0x8c, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x6d, offset 0x360
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0208, lo: 0x80, hi: 0xb1},
+ {value: 0x0108, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6e, offset 0x366
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xbf},
+ // Block 0x6f, offset 0x36a
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0008, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x70, offset 0x379
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x71, offset 0x37e
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x91},
+ {value: 0x3008, lo: 0x92, hi: 0x92},
+ {value: 0x3808, lo: 0x93, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x72, offset 0x386
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb9},
+ {value: 0x3008, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x73, offset 0x390
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x74, offset 0x39b
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x75, offset 0x3a3
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8c},
+ {value: 0x3008, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0008, lo: 0xbe, hi: 0xbf},
+ // Block 0x76, offset 0x3b4
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x77, offset 0x3bd
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x9a},
+ {value: 0x0008, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3b08, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x78, offset 0x3cd
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x90},
+ {value: 0x0008, lo: 0x91, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x79, offset 0x3da
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x4465, lo: 0x9c, hi: 0x9c},
+ {value: 0x447d, lo: 0x9d, hi: 0x9d},
+ {value: 0x2971, lo: 0x9e, hi: 0x9e},
+ {value: 0xe06d, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xaf},
+ {value: 0x4495, lo: 0xb0, hi: 0xbf},
+ // Block 0x7a, offset 0x3e4
+ {value: 0x0000, lo: 0x04},
+ {value: 0x44b5, lo: 0x80, hi: 0x8f},
+ {value: 0x44d5, lo: 0x90, hi: 0x9f},
+ {value: 0x44f5, lo: 0xa0, hi: 0xaf},
+ {value: 0x44d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x7b, offset 0x3e9
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3b08, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x7c, offset 0x3f6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x7d, offset 0x3fa
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x7e, offset 0x3ff
+ {value: 0x0020, lo: 0x01},
+ {value: 0x4515, lo: 0x80, hi: 0xbf},
+ // Block 0x7f, offset 0x401
+ {value: 0x0020, lo: 0x03},
+ {value: 0x4d15, lo: 0x80, hi: 0x94},
+ {value: 0x4ad5, lo: 0x95, hi: 0x95},
+ {value: 0x4fb5, lo: 0x96, hi: 0xbf},
+ // Block 0x80, offset 0x405
+ {value: 0x0020, lo: 0x01},
+ {value: 0x54f5, lo: 0x80, hi: 0xbf},
+ // Block 0x81, offset 0x407
+ {value: 0x0020, lo: 0x03},
+ {value: 0x5cf5, lo: 0x80, hi: 0x84},
+ {value: 0x5655, lo: 0x85, hi: 0x85},
+ {value: 0x5d95, lo: 0x86, hi: 0xbf},
+ // Block 0x82, offset 0x40b
+ {value: 0x0020, lo: 0x08},
+ {value: 0x6b55, lo: 0x80, hi: 0x8f},
+ {value: 0x6d15, lo: 0x90, hi: 0x90},
+ {value: 0x6d55, lo: 0x91, hi: 0xab},
+ {value: 0x6ea1, lo: 0xac, hi: 0xac},
+ {value: 0x70b5, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x70d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x83, offset 0x414
+ {value: 0x0020, lo: 0x05},
+ {value: 0x72d5, lo: 0x80, hi: 0xad},
+ {value: 0x6535, lo: 0xae, hi: 0xae},
+ {value: 0x7895, lo: 0xaf, hi: 0xb5},
+ {value: 0x6f55, lo: 0xb6, hi: 0xb6},
+ {value: 0x7975, lo: 0xb7, hi: 0xbf},
+ // Block 0x84, offset 0x41a
+ {value: 0x0028, lo: 0x03},
+ {value: 0x7c21, lo: 0x80, hi: 0x82},
+ {value: 0x7be1, lo: 0x83, hi: 0x83},
+ {value: 0x7c99, lo: 0x84, hi: 0xbf},
+ // Block 0x85, offset 0x41e
+ {value: 0x0038, lo: 0x0f},
+ {value: 0x9db1, lo: 0x80, hi: 0x83},
+ {value: 0x9e59, lo: 0x84, hi: 0x85},
+ {value: 0x9e91, lo: 0x86, hi: 0x87},
+ {value: 0x9ec9, lo: 0x88, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0xa089, lo: 0x92, hi: 0x97},
+ {value: 0xa1a1, lo: 0x98, hi: 0x9c},
+ {value: 0xa281, lo: 0x9d, hi: 0xb3},
+ {value: 0x9d41, lo: 0xb4, hi: 0xb4},
+ {value: 0x9db1, lo: 0xb5, hi: 0xb5},
+ {value: 0xa789, lo: 0xb6, hi: 0xbb},
+ {value: 0xa869, lo: 0xbc, hi: 0xbc},
+ {value: 0xa7f9, lo: 0xbd, hi: 0xbd},
+ {value: 0xa8d9, lo: 0xbe, hi: 0xbf},
+ // Block 0x86, offset 0x42e
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0008, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x87, offset 0x438
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x88, offset 0x43d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x89, offset 0x440
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x8a, offset 0x446
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x8b, offset 0x44d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x8c, offset 0x452
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8d, offset 0x456
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x8e, offset 0x45c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xbf},
+ // Block 0x8f, offset 0x461
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x90, offset 0x46a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x91, offset 0x46f
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x92, offset 0x475
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x97},
+ {value: 0x8ad5, lo: 0x98, hi: 0x9f},
+ {value: 0x8aed, lo: 0xa0, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xbf},
+ // Block 0x93, offset 0x47c
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x8aed, lo: 0xb0, hi: 0xb7},
+ {value: 0x8ad5, lo: 0xb8, hi: 0xbf},
+ // Block 0x94, offset 0x483
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x95, offset 0x48a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x96, offset 0x48e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xae},
+ {value: 0x0018, lo: 0xaf, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x97, offset 0x493
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x98, offset 0x496
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xbf},
+ // Block 0x99, offset 0x49b
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0808, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0808, lo: 0x8a, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbb},
+ {value: 0x0808, lo: 0xbc, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x0808, lo: 0xbf, hi: 0xbf},
+ // Block 0x9a, offset 0x4a7
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0818, lo: 0x97, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0818, lo: 0xb7, hi: 0xbf},
+ // Block 0x9b, offset 0x4ad
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa6},
+ {value: 0x0818, lo: 0xa7, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x9c, offset 0x4b2
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x0818, lo: 0xbb, hi: 0xbf},
+ // Block 0x9d, offset 0x4b9
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0818, lo: 0x96, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0818, lo: 0xbf, hi: 0xbf},
+ // Block 0x9e, offset 0x4c1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbb},
+ {value: 0x0818, lo: 0xbc, hi: 0xbd},
+ {value: 0x0808, lo: 0xbe, hi: 0xbf},
+ // Block 0x9f, offset 0x4c6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0818, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x0818, lo: 0x92, hi: 0xbf},
+ // Block 0xa0, offset 0x4ca
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x94},
+ {value: 0x0808, lo: 0x95, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x98},
+ {value: 0x0808, lo: 0x99, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xa1, offset 0x4da
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0818, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0818, lo: 0x90, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xbc},
+ {value: 0x0818, lo: 0xbd, hi: 0xbf},
+ // Block 0xa2, offset 0x4e1
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xa3, offset 0x4e5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xbf},
+ // Block 0xa4, offset 0x4e9
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0818, lo: 0x98, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb7},
+ {value: 0x0818, lo: 0xb8, hi: 0xbf},
+ // Block 0xa5, offset 0x4f0
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0808, lo: 0x80, hi: 0xbf},
+ // Block 0xa6, offset 0x4f2
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0808, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xa7, offset 0x4f5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x03dd, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xa8, offset 0x4f8
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xbf},
+ // Block 0xa9, offset 0x4fc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0818, lo: 0xa0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xaa, offset 0x500
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xab, offset 0x506
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x91},
+ {value: 0x0018, lo: 0x92, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xac, offset 0x50f
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbc},
+ {value: 0x0340, lo: 0xbd, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0xad, offset 0x51b
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xae, offset 0x522
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb2},
+ {value: 0x3b08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xbf},
+ // Block 0xaf, offset 0x52b
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb0, offset 0x533
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xbe},
+ {value: 0x3008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb1, offset 0x53a
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xb2, offset 0x548
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3808, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xb3, offset 0x555
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xb4, offset 0x562
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x3308, lo: 0x9f, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa9},
+ {value: 0x3b08, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb5, offset 0x56b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xb6, offset 0x56f
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xb7, offset 0x57d
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xb8, offset 0x585
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x85},
+ {value: 0x0018, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xb9, offset 0x590
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xba, offset 0x599
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9b},
+ {value: 0x3308, lo: 0x9c, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xbb, offset 0x59f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbc, offset 0x5a7
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xbd, offset 0x5b0
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb5},
+ {value: 0x3808, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0xbe, offset 0x5ba
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xbf, offset 0x5bd
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0xc0, offset 0x5c9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xbf},
+ // Block 0xc1, offset 0x5cc
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xc2, offset 0x5d1
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xc3, offset 0x5de
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x3b08, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0xbf},
+ // Block 0xc4, offset 0x5e7
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x98},
+ {value: 0x3b08, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xbf},
+ // Block 0xc5, offset 0x5f3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xc6, offset 0x5f6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xc7, offset 0x600
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xbf},
+ // Block 0xc8, offset 0x609
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xa9},
+ {value: 0x3308, lo: 0xaa, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xc9, offset 0x615
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xca, offset 0x622
+ {value: 0x0000, lo: 0x07},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xcb, offset 0x62a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xcc, offset 0x62d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xcd, offset 0x632
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xce, offset 0x635
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xbf},
+ // Block 0xcf, offset 0x638
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xbf},
+ // Block 0xd0, offset 0x63b
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xd1, offset 0x642
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xd2, offset 0x649
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0xd3, offset 0x64d
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0xd4, offset 0x658
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xd5, offset 0x65b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd6, offset 0x661
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xd7, offset 0x666
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xbf},
+ // Block 0xd8, offset 0x66a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xd9, offset 0x66d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xda, offset 0x670
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xdb, offset 0x673
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xdc, offset 0x676
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xdd, offset 0x679
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0xde, offset 0x67e
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x03c0, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xdf, offset 0x688
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xe0, offset 0x68b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xbf},
+ // Block 0xe1, offset 0x68f
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0xb5b9, lo: 0x9e, hi: 0x9e},
+ {value: 0xb601, lo: 0x9f, hi: 0x9f},
+ {value: 0xb649, lo: 0xa0, hi: 0xa0},
+ {value: 0xb6b1, lo: 0xa1, hi: 0xa1},
+ {value: 0xb719, lo: 0xa2, hi: 0xa2},
+ {value: 0xb781, lo: 0xa3, hi: 0xa3},
+ {value: 0xb7e9, lo: 0xa4, hi: 0xa4},
+ {value: 0x3018, lo: 0xa5, hi: 0xa6},
+ {value: 0x3318, lo: 0xa7, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xac},
+ {value: 0x3018, lo: 0xad, hi: 0xb2},
+ {value: 0x0340, lo: 0xb3, hi: 0xba},
+ {value: 0x3318, lo: 0xbb, hi: 0xbf},
+ // Block 0xe2, offset 0x69e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3318, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x84},
+ {value: 0x3318, lo: 0x85, hi: 0x8b},
+ {value: 0x0018, lo: 0x8c, hi: 0xa9},
+ {value: 0x3318, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xba},
+ {value: 0xb851, lo: 0xbb, hi: 0xbb},
+ {value: 0xb899, lo: 0xbc, hi: 0xbc},
+ {value: 0xb8e1, lo: 0xbd, hi: 0xbd},
+ {value: 0xb949, lo: 0xbe, hi: 0xbe},
+ {value: 0xb9b1, lo: 0xbf, hi: 0xbf},
+ // Block 0xe3, offset 0x6aa
+ {value: 0x0000, lo: 0x03},
+ {value: 0xba19, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xbf},
+ // Block 0xe4, offset 0x6ae
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3318, lo: 0x82, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0xbf},
+ // Block 0xe5, offset 0x6b3
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xe6, offset 0x6b8
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0xe7, offset 0x6bc
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0xe8, offset 0x6c1
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x3308, lo: 0xa1, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xe9, offset 0x6ca
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0xea, offset 0x6d5
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x86},
+ {value: 0x0818, lo: 0x87, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xeb, offset 0x6db
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xec, offset 0x6e3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xed, offset 0x6e7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0xee, offset 0x6eb
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0xef, offset 0x6f1
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xf0, offset 0x6f7
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0xc1c1, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xf1, offset 0x6fc
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xbf},
+ // Block 0xf2, offset 0x6ff
+ {value: 0x0000, lo: 0x0f},
+ {value: 0xc7e9, lo: 0x80, hi: 0x80},
+ {value: 0xc839, lo: 0x81, hi: 0x81},
+ {value: 0xc889, lo: 0x82, hi: 0x82},
+ {value: 0xc8d9, lo: 0x83, hi: 0x83},
+ {value: 0xc929, lo: 0x84, hi: 0x84},
+ {value: 0xc979, lo: 0x85, hi: 0x85},
+ {value: 0xc9c9, lo: 0x86, hi: 0x86},
+ {value: 0xca19, lo: 0x87, hi: 0x87},
+ {value: 0xca69, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0xcab9, lo: 0x90, hi: 0x90},
+ {value: 0xcad9, lo: 0x91, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xbf},
+ // Block 0xf3, offset 0x70f
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xf4, offset 0x716
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0xf5, offset 0x719
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0xbf},
+ // Block 0xf6, offset 0x71c
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0xf7, offset 0x720
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0xf8, offset 0x726
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0xf9, offset 0x72b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xfa, offset 0x730
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0xfb, offset 0x735
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xbf},
+ // Block 0xfc, offset 0x738
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0xfd, offset 0x73d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xfe, offset 0x740
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xff, offset 0x743
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x100, offset 0x747
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x101, offset 0x74b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x102, offset 0x74e
+ {value: 0x0020, lo: 0x0f},
+ {value: 0xdeb9, lo: 0x80, hi: 0x89},
+ {value: 0x8dfd, lo: 0x8a, hi: 0x8a},
+ {value: 0xdff9, lo: 0x8b, hi: 0x9c},
+ {value: 0x8e1d, lo: 0x9d, hi: 0x9d},
+ {value: 0xe239, lo: 0x9e, hi: 0xa2},
+ {value: 0x8e3d, lo: 0xa3, hi: 0xa3},
+ {value: 0xe2d9, lo: 0xa4, hi: 0xab},
+ {value: 0x7ed5, lo: 0xac, hi: 0xac},
+ {value: 0xe3d9, lo: 0xad, hi: 0xaf},
+ {value: 0x8e5d, lo: 0xb0, hi: 0xb0},
+ {value: 0xe439, lo: 0xb1, hi: 0xb6},
+ {value: 0x8e7d, lo: 0xb7, hi: 0xb9},
+ {value: 0xe4f9, lo: 0xba, hi: 0xba},
+ {value: 0x8edd, lo: 0xbb, hi: 0xbb},
+ {value: 0xe519, lo: 0xbc, hi: 0xbf},
+ // Block 0x103, offset 0x75e
+ {value: 0x0020, lo: 0x10},
+ {value: 0x937d, lo: 0x80, hi: 0x80},
+ {value: 0xf099, lo: 0x81, hi: 0x86},
+ {value: 0x939d, lo: 0x87, hi: 0x8a},
+ {value: 0xd9f9, lo: 0x8b, hi: 0x8b},
+ {value: 0xf159, lo: 0x8c, hi: 0x96},
+ {value: 0x941d, lo: 0x97, hi: 0x97},
+ {value: 0xf2b9, lo: 0x98, hi: 0xa3},
+ {value: 0x943d, lo: 0xa4, hi: 0xa6},
+ {value: 0xf439, lo: 0xa7, hi: 0xaa},
+ {value: 0x949d, lo: 0xab, hi: 0xab},
+ {value: 0xf4b9, lo: 0xac, hi: 0xac},
+ {value: 0x94bd, lo: 0xad, hi: 0xad},
+ {value: 0xf4d9, lo: 0xae, hi: 0xaf},
+ {value: 0x94dd, lo: 0xb0, hi: 0xb1},
+ {value: 0xf519, lo: 0xb2, hi: 0xbe},
+ {value: 0x2040, lo: 0xbf, hi: 0xbf},
+ // Block 0x104, offset 0x76f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0340, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x9f},
+ {value: 0x0340, lo: 0xa0, hi: 0xbf},
+ // Block 0x105, offset 0x774
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0340, lo: 0x80, hi: 0xbf},
+ // Block 0x106, offset 0x776
+ {value: 0x0000, lo: 0x01},
+ {value: 0x33c0, lo: 0x80, hi: 0xbf},
+ // Block 0x107, offset 0x778
+ {value: 0x0000, lo: 0x02},
+ {value: 0x33c0, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 42114 bytes (41KiB); checksum: 355A58A4
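
For context before the next vendored file: the generated tables below back the golang.org/x/net/idna package, which this change vendors for IDNA2008 domain-name handling. The table data is opaque by design, but the public API it drives is small. A minimal sketch (illustrative only, not part of this patch; it assumes the vendored package is resolvable on the import path):

package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// The Lookup profile applies the mapping/validation rules encoded in
	// the idnaTrie tables when converting a user-supplied name to ASCII.
	ascii, err := idna.Lookup.ToASCII("bücher.example")
	if err != nil {
		panic(err)
	}
	fmt.Println(ascii) // xn--bcher-kva.example

	// ToUnicode reverses the Punycode encoding for display purposes.
	uni, err := idna.Lookup.ToUnicode(ascii)
	if err != nil {
		panic(err)
	}
	fmt.Println(uni) // bücher.example
}
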
diff --git a/src/vendor/golang.org/x/net/idna/tables11.0.0.go b/src/vendor/golang.org/x/net/idna/tables11.0.0.go
new file mode 100644
index 000000000..c515d7ad2
--- /dev/null
+++ b/src/vendor/golang.org/x/net/idna/tables11.0.0.go
@@ -0,0 +1,4653 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build go1.13
+
+package idna
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "11.0.0"
+
+var mappings string = "" + // Size: 8175 bytes
+ "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" +
+ "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" +
+ "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" +
+ "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" +
+ "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" +
+ "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" +
+ "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" +
+ "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" +
+ "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" +
+ "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" +
+ "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" +
+ "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" +
+ "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" +
+ "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" +
+ "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" +
+ "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" +
+ "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" +
+ "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" +
+ "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" +
+ "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" +
+ "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" +
+ "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" +
+ "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" +
+ "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" +
+ "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" +
+ "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" +
+ ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" +
+ "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" +
+ "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" +
+ "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" +
+ "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" +
+ "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" +
+ "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" +
+ "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" +
+ "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" +
+ "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" +
+ "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" +
+ "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" +
+ "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" +
+ "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" +
+ "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" +
+ "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" +
+ "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" +
+ "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" +
+ "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" +
+ "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" +
+ "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" +
+ "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" +
+ "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" +
+ "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" +
+ "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" +
+ "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" +
+ "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" +
+ "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" +
+ "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" +
+ "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" +
+ "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" +
+ "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" +
+ "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" +
+ "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" +
+ "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" +
+ "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" +
+ "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" +
+ "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" +
+ "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" +
+ "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" +
+ "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" +
+ "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" +
+ "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" +
+ "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" +
+ "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" +
+ "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" +
+ "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" +
+ " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" +
+ "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" +
+ "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" +
+ "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" +
+ "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" +
+ "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" +
+ "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" +
+ "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" +
+ "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" +
+ "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" +
+ "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" +
+ "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" +
+ "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" +
+ "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" +
+ "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" +
+ "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" +
+ "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" +
+ "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" +
+ "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" +
+ "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" +
+ "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" +
+ "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" +
+ "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" +
+ "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" +
+ "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" +
+ "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" +
+ "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" +
+ "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" +
+ "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" +
+ "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" +
+ "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" +
+ "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" +
+ "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" +
+ "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" +
+ "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" +
+ "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" +
+ "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" +
+ "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" +
+ "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" +
+ "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" +
+ "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" +
+ "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" +
+ "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" +
+ "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" +
+ "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" +
+ "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" +
+ "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" +
+ "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" +
+ "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" +
+ "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" +
+ "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" +
+ "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" +
+ "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" +
+ "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" +
+ "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" +
+ "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" +
+ "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" +
+ "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" +
+ "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" +
+ "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" +
+ "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" +
+ "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" +
+ "頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" +
+ "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻"
+
+var xorData string = "" + // Size: 4855 bytes
+ "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" +
+ "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" +
+ "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" +
+ "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" +
+ "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" +
+ "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" +
+ "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" +
+ "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" +
+ "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" +
+ "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" +
+ "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" +
+ "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" +
+ "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" +
+ "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" +
+ "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" +
+ "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" +
+ "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" +
+ "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" +
+ "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" +
+ "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" +
+ "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" +
+ "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" +
+ "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" +
+ "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" +
+ "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" +
+ "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" +
+ "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" +
+ "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" +
+ "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" +
+ "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" +
+ "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" +
+ "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" +
+ "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" +
+ "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" +
+ "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" +
+ "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" +
+ "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " +
+ "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" +
+ "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" +
+ "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" +
+ "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" +
+ "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" +
+ ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" +
+ "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" +
+ "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" +
+ "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" +
+ "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" +
+ "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" +
+ "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" +
+ "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" +
+ "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" +
+ "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" +
+ "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" +
+ "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" +
+ "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" +
+ "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" +
+ "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" +
+ "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" +
+ "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" +
+ "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" +
+ "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" +
+ "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" +
+ "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" +
+ "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" +
+ "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" +
+ "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" +
+ "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" +
+ "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" +
+ "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" +
+ "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" +
+ "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" +
+ "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" +
+ "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" +
+ "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" +
+ "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" +
+ "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" +
+ "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" +
+ "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" +
+ "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" +
+ "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" +
+ "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" +
+ "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" +
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" +
+ "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" +
+ "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" +
+ "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" +
+ "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" +
+ "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" +
+ "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," +
+ "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" +
+ "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" +
+ "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" +
+ "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" +
+ ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" +
+ "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" +
+ "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" +
+ "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" +
+ "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" +
+ "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" +
+ "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" +
+ "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" +
+ "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" +
+ "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" +
+ "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" +
+ "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" +
+ "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" +
+ "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" +
+ "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" +
+ "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" +
+ "\x08\x1a\x0a\x03\x07\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03\x09\x0c" +
+ "\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06!3\x03" +
+ "\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05\x03\x07" +
+ "<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" +
+ "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" +
+ "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" +
+ "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" +
+ "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" +
+ "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" +
+ "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" +
+ "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" +
+ "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" +
+ "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" +
+ "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" +
+ "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" +
+ "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" +
+ "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" +
+ "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" +
+ "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" +
+ "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" +
+ "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." +
+ "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" +
+ "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" +
+ "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " +
+ "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" +
+ "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" +
+ "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" +
+ "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" +
+ "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" +
+ "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" +
+ "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," +
+ "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" +
+ "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" +
+ "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" +
+ "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" +
+ "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" +
+ "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" +
+ "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" +
+ "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" +
+ "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" +
+ "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" +
+ "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" +
+ "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" +
+ "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" +
+ "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" +
+ "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" +
+ "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" +
+ "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" +
+ "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" +
+ "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" +
+ "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" +
+ "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" +
+ "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" +
+ "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" +
+ "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" +
+ "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" +
+ "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" +
+ "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" +
+ "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" +
+ "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" +
+ "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" +
+ "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" +
+ "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" +
+ "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" +
+ "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" +
+ "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" +
+ "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" +
+ "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" +
+ "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" +
+ "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," +
+ "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" +
+ "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" +
+ "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" +
+ "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" +
+ "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" +
+ "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" +
+ "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" +
+ "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" +
+ "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" +
+ "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" +
+ "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" +
+ "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" +
+ "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" +
+ "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" +
+ "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" +
+ "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" +
+ "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" +
+ "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" +
+ "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" +
+ "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" +
+ "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" +
+ "\x04\x03\x0c?\x05\x03\x0c\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" +
+ "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" +
+ "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" +
+ "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" +
+ "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" +
+ "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" +
+ "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" +
+ "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" +
+ "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" +
+ "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" +
+ "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" +
+ "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" +
+ "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" +
+ "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" +
+ "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" +
+ "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" +
+ "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" +
+ "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" +
+ "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" +
+ "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" +
+ "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" +
+ "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" +
+ "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" +
+ "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" +
+ "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" +
+ "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" +
+ "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" +
+ "\x05\x22\x05\x03\x050\x1d"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// idnaTrie. Total size: 29404 bytes (28.71 KiB). Checksum: 848c45acb5f7991c.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+ return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 125:
+ return uint16(idnaValues[n<<6+uint32(b)])
+ default:
+ n -= 125
+ return uint16(idnaSparse.lookup(n, b))
+ }
+}
+
+// idnaValues: 127 blocks, 8128 entries, 16256 bytes
+// The third block is the zero block.
+var idnaValues = [8128]uint16{
+ // Block 0x0, offset 0x0
+ 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,
+ 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,
+ 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,
+ 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,
+ 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,
+ 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,
+ 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,
+ 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,
+ 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,
+ 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,
+ 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,
+ // Block 0x1, offset 0x40
+ 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,
+ 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,
+ 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,
+ 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,
+ 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,
+ 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,
+ 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,
+ 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,
+ 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,
+ 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,
+ 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,
+ 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,
+ 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,
+ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,
+ 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,
+ 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,
+ 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018,
+ 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a,
+ 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005,
+ 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018,
+ 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018,
+ // Block 0x4, offset 0x100
+ 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,
+ 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,
+ 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,
+ 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,
+ 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,
+ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008,
+ 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,
+ 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,
+ 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,
+ 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,
+ 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199,
+ // Block 0x5, offset 0x140
+ 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,
+ 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008,
+ 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,
+ 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,
+ 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,
+ 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,
+ 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,
+ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,
+ 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,
+ 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,
+ 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9,
+ // Block 0x6, offset 0x180
+ 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,
+ 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,
+ 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,
+ 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,
+ 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155,
+ 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,
+ 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,
+ 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,
+ 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,
+ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,
+ 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9,
+ 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,
+ 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,
+ 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,
+ 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,
+ 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,
+ 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,
+ 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,
+ 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,
+ 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,
+ 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,
+ // Block 0x8, offset 0x200
+ 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,
+ 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,
+ 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,
+ 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,
+ 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,
+ 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,
+ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,
+ 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,
+ 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,
+ 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d,
+ 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,
+ 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,
+ 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,
+ 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,
+ 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a,
+ 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369,
+ 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,
+ 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,
+ 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,
+ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,
+ 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,
+ // Block 0xa, offset 0x280
+ 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d,
+ 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,
+ 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,
+ 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308,
+ 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,
+ 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,
+ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,
+ 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,
+ 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,
+ 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008,
+ 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2,
+ 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,
+ 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,
+ 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,
+ 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,
+ 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,
+ 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,
+ 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,
+ 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,
+ 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,
+ 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,
+ // Block 0xc, offset 0x300
+ 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,
+ 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,
+ 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,
+ 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,
+ 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,
+ 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,
+ 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,
+ 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,
+ 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,
+ 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,
+ 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,
+ 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,
+ 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,
+ 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,
+ 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,
+ 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,
+ 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,
+ 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,
+ 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,
+ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,
+ 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,
+ // Block 0xe, offset 0x380
+ 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,
+ 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,
+ 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008,
+ 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,
+ 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,
+ 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,
+ 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,
+ 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,
+ 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,
+ 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,
+ 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,
+ 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,
+ 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,
+ 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,
+ 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,
+ 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,
+ 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,
+ 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,
+ 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,
+ 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,
+ 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,
+ // Block 0x10, offset 0x400
+ 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,
+ 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,
+ 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,
+ 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,
+ 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,
+ 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,
+ 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,
+ 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,
+ 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,
+ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,
+ 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,
+ // Block 0x11, offset 0x440
+ 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,
+ 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,
+ 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,
+ 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,
+ 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040,
+ 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,
+ 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,
+ 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,
+ 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,
+ 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,
+ 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,
+ 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308,
+ 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,
+ 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,
+ 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,
+ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,
+ 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,
+ 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,
+ 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429,
+ 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,
+ 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,
+ 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,
+ 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,
+ 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,
+ 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,
+ 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,
+ 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,
+ 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,
+ 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,
+ 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,
+ 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,
+ // Block 0x14, offset 0x500
+ 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,
+ 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,
+ 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,
+ 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,
+ 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,
+ 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,
+ 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,
+ 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,
+ 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,
+ 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,
+ 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,
+ // Block 0x15, offset 0x540
+ 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08,
+ 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08,
+ 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08,
+ 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808,
+ 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040,
+ 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08,
+ 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08,
+ 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040,
+ 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040,
+ 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040,
+ 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040,
+ // Block 0x16, offset 0x580
+ 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308,
+ 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008,
+ 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308,
+ 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308,
+ 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1,
+ 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308,
+ 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008,
+ 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008,
+ 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008,
+ 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008,
+ 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008,
+ 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008,
+ 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040,
+ 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008,
+ 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008,
+ 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008,
+ 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040,
+ 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,
+ 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040,
+ 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040,
+ 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008,
+ // Block 0x18, offset 0x600
+ 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040,
+ 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008,
+ 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040,
+ 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008,
+ 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1,
+ 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308,
+ 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008,
+ 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,
+ 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018,
+ 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018,
+ 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040,
+ // Block 0x19, offset 0x640
+ 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008,
+ 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040,
+ 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040,
+ 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008,
+ 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008,
+ 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008,
+ 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040,
+ 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,
+ 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008,
+ 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040,
+ 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040,
+ 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308,
+ 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308,
+ 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040,
+ 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040,
+ 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040,
+ 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008,
+ 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,
+ 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308,
+ 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040,
+ 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008,
+ 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008,
+ 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008,
+ 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008,
+ 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008,
+ 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008,
+ 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040,
+ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,
+ 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008,
+ 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040,
+ 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308,
+ 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008,
+ 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040,
+ 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040,
+ 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040,
+ 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308,
+ 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008,
+ 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,
+ 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040,
+ 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308,
+ 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008,
+ 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008,
+ 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040,
+ 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008,
+ 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008,
+ 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008,
+ 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040,
+ 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,
+ 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008,
+ 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040,
+ 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040,
+ 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008,
+ 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040,
+ 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008,
+ 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9,
+ 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308,
+ 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008,
+ 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008,
+ 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018,
+ 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040,
+ 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008,
+ 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040,
+ 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040,
+ 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040,
+ 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040,
+ 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008,
+ 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008,
+ 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008,
+ 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008,
+ 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040,
+ 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008,
+ // Block 0x20, offset 0x800
+ 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040,
+ 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308,
+ 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040,
+ 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040,
+ 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040,
+ 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308,
+ 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008,
+ 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008,
+ 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040,
+ 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018,
+ 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018,
+ // Block 0x21, offset 0x840
+ 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008,
+ 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008,
+ 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040,
+ 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008,
+ 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008,
+ 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008,
+ 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040,
+ 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,
+ 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008,
+ 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040,
+ 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308,
+ // Block 0x22, offset 0x880
+ 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040,
+ 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008,
+ 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040,
+ 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040,
+ 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040,
+ 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308,
+ 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008,
+ 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,
+ 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040,
+ 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040,
+ 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040,
+ 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008,
+ 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040,
+ 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008,
+ 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018,
+ 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308,
+ 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008,
+ 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008,
+ 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018,
+ 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008,
+ 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040,
+ 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040,
+ 0x90c: 0x0040, 0x90d: 0x0008, 0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040,
+ 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008,
+ 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008,
+ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008,
+ 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040,
+ 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008,
+ 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308,
+ 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308,
+ 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040,
+ // Block 0x25, offset 0x940
+ 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008,
+ 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008,
+ 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008,
+ 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79,
+ 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008,
+ 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008,
+ 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9,
+ 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040,
+ 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59,
+ 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308,
+ 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008,
+ // Block 0x26, offset 0x980
+ 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018,
+ 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,
+ 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308,
+ 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308,
+ 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11,
+ 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308,
+ 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308,
+ 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308,
+ 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308,
+ 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308,
+ 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008,
+ 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008,
+ 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008,
+ 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008,
+ 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008,
+ 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008,
+ 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008,
+ 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008,
+ 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41,
+ 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008,
+ 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1,
+ 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011,
+ 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041,
+ 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9,
+ 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099,
+ 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269,
+ 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1,
+ 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008,
+ 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008,
+ 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008,
+ 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008,
+ 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008,
+ 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008,
+ 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008,
+ 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169,
+ 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9,
+ 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251,
+ 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9,
+ 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359,
+ 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1,
+ 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008,
+ 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008,
+ 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008,
+ 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008,
+ 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008,
+ 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008,
+ 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008,
+ 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008,
+ 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008,
+ 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008,
+ 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008,
+ 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008,
+ 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008,
+ 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008,
+ 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008,
+ 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008,
+ 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008,
+ 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008,
+ 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008,
+ 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008,
+ 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008,
+ 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045,
+ 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008,
+ 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008,
+ 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045,
+ 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008,
+ 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045,
+ 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045,
+ 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489,
+ 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1,
+ 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1,
+ 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591,
+ 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1,
+ 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1,
+ 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771,
+ 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891,
+ 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831,
+ 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951,
+ 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040,
+ 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459,
+ 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040,
+ 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489,
+ 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008,
+ 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008,
+ 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2,
+ 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61,
+ 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045,
+ 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa,
+ 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040,
+ 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9,
+ 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a,
+ 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0,
+ 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d,
+ 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e,
+ 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018,
+ 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018,
+ 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040,
+ 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a,
+ 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018,
+ 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018,
+ 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018,
+ 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018,
+ 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018,
+ 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9,
+ 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018,
+ 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340,
+ 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040,
+ 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340,
+ 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61,
+ 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd,
+ 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61,
+ 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5,
+ 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09,
+ 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359,
+ 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040,
+ 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018,
+ 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018,
+ 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018,
+ 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018,
+ 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018,
+ 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e,
+ 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249,
+ 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41,
+ 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018,
+ 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269,
+ 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018,
+ 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018,
+ 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09,
+ 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9,
+ 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd,
+ 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9,
+ 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018,
+ 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151,
+ 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279,
+ 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399,
+ 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439,
+ 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369,
+ 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61,
+ 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451,
+ 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5,
+ 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018,
+ 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040,
+ 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040,
+ 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040,
+ 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040,
+ 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51,
+ 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601,
+ 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691,
+ 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26,
+ 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6,
+ 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a,
+ 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040,
+ 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040,
+ 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040,
+ 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46,
+ 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06,
+ 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6,
+ 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86,
+ 0xd70: 0x0da6, 0xd71: 0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46,
+ 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199,
+ 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99,
+ 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089,
+ 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9,
+ 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249,
+ 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71,
+ 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9,
+ 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1,
+ 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018,
+ 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018,
+ 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018,
+ 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008,
+ 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008,
+ 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008,
+ 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008,
+ 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008,
+ 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd,
+ 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d,
+ 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9,
+ 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d,
+ 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008,
+ 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008,
+ 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008,
+ 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008,
+ 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008,
+ 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008,
+ 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008,
+ 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018,
+ 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308,
+ 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040,
+ 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018,
+ 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d,
+ 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d,
+ 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 0xe51: 0x291d,
+ 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040,
+ 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040,
+ 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040,
+ 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040,
+ 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040,
+ 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040,
+ 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040,
+ 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008,
+ 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018,
+ 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018,
+ 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018,
+ 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018,
+ 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018,
+ 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018,
+ 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018,
+ 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018,
+ 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018,
+ 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd,
+ 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd,
+ 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d,
+ 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d,
+ 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d,
+ 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd,
+ 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d,
+ 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd,
+ 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d,
+ 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd,
+ 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd,
+ 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d,
+ 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018,
+ 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd,
+ 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d,
+ 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008,
+ 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008,
+ 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008,
+ 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008,
+ 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040,
+ 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd,
+ 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018,
+ 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761,
+ 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1,
+ 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881,
+ 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd,
+ 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d,
+ 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d,
+ 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd,
+ 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d,
+ 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d,
+ 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d,
+ 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd,
+ 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd,
+ 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d,
+ 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d,
+ 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd,
+ 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d,
+ 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999,
+ 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29,
+ 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69,
+ 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69,
+ 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15,
+ 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75,
+ 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded,
+ 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d,
+ 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5,
+ 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d,
+ 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d,
+ 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd,
+ 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9,
+ 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1,
+ 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9,
+ 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549,
+ 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1,
+ 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11,
+ 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91,
+ 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9,
+ 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011,
+ 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209,
+ 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541,
+ 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781,
+ 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979,
+ 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89,
+ 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1,
+ 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99,
+ 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9,
+ 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9,
+ 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069,
+ 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9,
+ 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271,
+ 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9,
+ 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed,
+ 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371,
+ 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9,
+ 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d,
+ 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211,
+ 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1,
+ 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599,
+ 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9,
+ 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671,
+ 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709,
+ 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781,
+ 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1,
+ 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811,
+ 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901,
+ 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1,
+ 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11,
+ 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31,
+ 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51,
+ 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008,
+ 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008,
+ 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008,
+ 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008,
+ 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008,
+ 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008,
+ 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008,
+ 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308,
+ 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308,
+ 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308,
+ 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008,
+ 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008,
+ 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008,
+ 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008,
+ 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11,
+ 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008,
+ 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008,
+ 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008,
+ 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008,
+ 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008,
+ 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018,
+ 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018,
+ 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018,
+ 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008,
+ 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008,
+ 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008,
+ 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,
+ 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008,
+ 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008,
+ 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008,
+ 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008,
+ // Block 0x47, offset 0x11c0
+ 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008,
+ 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008,
+ 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008,
+ 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008,
+ 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008,
+ 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008,
+ 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008,
+ 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008,
+ 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008,
+ 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d,
+ 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008,
+ 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d,
+ 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008,
+ 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008,
+ 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008,
+ 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008,
+ 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008,
+ 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0008,
+ 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008,
+ 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0008, 0x123a: 0x0040, 0x123b: 0x0040,
+ 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575,
+ 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635,
+ 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008,
+ 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715,
+ 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5,
+ 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008,
+ 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008,
+ 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935,
+ 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5,
+ 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5,
+ 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35,
+ 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5,
+ 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19,
+ 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91,
+ 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040,
+ 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040,
+ 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040,
+ 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040,
+ 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040,
+ 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040,
+ 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001,
+ 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040,
+ 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040,
+ 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9,
+ 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1,
+ 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149,
+ 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2,
+ 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1,
+ 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1,
+ 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479,
+ 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040,
+ 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659,
+ 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721,
+ 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751,
+ 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769,
+ 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799,
+ 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1,
+ 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1,
+ 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9,
+ 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829,
+ 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871,
+ 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9,
+ 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9,
+ 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919,
+ 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931,
+ 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961,
+ 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991,
+ 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1,
+ 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818,
+ 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818,
+ 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040,
+ 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040,
+ 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040,
+ 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09,
+ 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479,
+ 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81,
+ 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1,
+ 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19,
+ 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91,
+ 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1,
+ 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1,
+ 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1,
+ 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1,
+ 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991,
+ 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81,
+ 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a,
+ 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99,
+ 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89,
+ 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79,
+ 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19,
+ 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649,
+ 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9,
+ 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49,
+ 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21,
+ 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9,
+ 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01,
+ 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91,
+ 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9,
+ 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171,
+ 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289,
+ 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1,
+ 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621,
+ 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739,
+ 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1,
+ 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9,
+ 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29,
+ 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079,
+ 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1,
+ 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171,
+ 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261,
+ 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1,
+ 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1,
+ 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171,
+ 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261,
+ 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351,
+ 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441,
+ 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509,
+ 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1,
+ 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081,
+ 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239,
+ 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040,
+ 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040,
+ 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609,
+ 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721,
+ 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839,
+ 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919,
+ 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9,
+ 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9,
+ 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9,
+ 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1,
+ 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0xa949, 0x1501: 0xa981, 0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989,
+ 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040,
+ 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040,
+ 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040,
+ 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040,
+ 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040,
+ 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040,
+ 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040,
+ 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9,
+ 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12,
+ 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0,
+ 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0,
+ 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55,
+ 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75,
+ 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040,
+ 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308,
+ 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308,
+ 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308,
+ 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2,
+ 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35,
+ 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018,
+ 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56,
+ 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95,
+ 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa,
+ 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95,
+ 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99,
+ 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda,
+ 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040,
+ 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040,
+ 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081,
+ 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141,
+ 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171,
+ 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1,
+ 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1,
+ 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201,
+ 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219,
+ 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249,
+ 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291,
+ 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1,
+ 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9,
+ 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321,
+ 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339,
+ 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369,
+ 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381,
+ 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1,
+ 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9,
+ 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9,
+ 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1,
+ 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441,
+ 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9,
+ 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea,
+ 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2,
+ 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9,
+ 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81,
+ 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2,
+ 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159,
+ 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41,
+ 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9,
+ 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9,
+ 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a,
+ 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09,
+ 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51,
+ 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039,
+ 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279,
+ 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a,
+ 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115,
+ 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 0x16a9: 0x81d5,
+ 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295,
+ 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355,
+ 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415,
+ 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515,
+ 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595,
+ 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5,
+ 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655,
+ 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115,
+ 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735,
+ 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5,
+ 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5,
+ 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5,
+ 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5,
+ 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5,
+ 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715,
+ 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040,
+ 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935,
+ 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040,
+ 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6,
+ 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35,
+ 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040,
+ 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040,
+ 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340,
+ 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08,
+ 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808,
+ 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08,
+ 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908,
+ 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08,
+ 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808,
+ 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040,
+ 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18,
+ 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818,
+ 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040,
+ 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08,
+ 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08,
+ 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08,
+ 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040,
+ 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040,
+ 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040,
+ 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18,
+ 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818,
+ 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040,
+ 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040,
+ 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008,
+ 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008,
+ 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040,
+ 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008,
+ 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008,
+ 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008,
+ 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040,
+ 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008,
+ 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008,
+ 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308,
+ 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040,
+ 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008,
+ 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040,
+ 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008,
+ 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008,
+ 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008,
+ 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308,
+ 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040,
+ 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040,
+ 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040,
+ 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199,
+ 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359,
+ 0x184c: 0x0f61, 0x184d: 0x0f71, 0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269,
+ 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369,
+ 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9,
+ 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259,
+ 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99,
+ 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089,
+ 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9,
+ 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249,
+ 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269,
+ 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369,
+ 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9,
+ 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259,
+ 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99,
+ 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089,
+ 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9,
+ 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249,
+ 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71,
+ 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9,
+ 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9,
+ 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259,
+ 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99,
+ 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089,
+ 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040,
+ 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040,
+ 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71,
+ 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9,
+ 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1,
+ 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199,
+ 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99,
+ 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089,
+ 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9,
+ 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249,
+ 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71,
+ 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9,
+ 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1,
+ 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199,
+ 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359,
+ 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269,
+ 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9,
+ 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040,
+ 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71,
+ 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9,
+ 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040,
+ 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199,
+ 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359,
+ 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269,
+ 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369,
+ 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9,
+ 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040,
+ 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9,
+ 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040,
+ 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199,
+ 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359,
+ 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269,
+ 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369,
+ 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9,
+ 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259,
+ 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99,
+ 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9,
+ // Block 0x67, offset 0x19c0
+ 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1,
+ 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199,
+ 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359,
+ 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269,
+ 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369,
+ 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9,
+ 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259,
+ 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99,
+ 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 0x19f5: 0x1089,
+ 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9,
+ 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199,
+ // Block 0x68, offset 0x1a00
+ 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359,
+ 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269,
+ 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369,
+ 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9,
+ 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259,
+ 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99,
+ 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089,
+ 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9,
+ 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249,
+ 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71,
+ 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369,
+ 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9,
+ 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259,
+ 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99,
+ 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089,
+ 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9,
+ 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249,
+ 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71,
+ 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9,
+ 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1,
+ 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259,
+ 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99,
+ 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089,
+ 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9,
+ 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249,
+ 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71,
+ 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9,
+ 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1,
+ 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199,
+ 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359,
+ 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089,
+ 0x1ac6: 0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9,
+ 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249,
+ 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71,
+ 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9,
+ 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1,
+ 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099,
+ 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429,
+ 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71,
+ 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9,
+ 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9,
+ 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11,
+ 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109,
+ 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1,
+ 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429,
+ 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099,
+ 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429,
+ 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71,
+ 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9,
+ 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01,
+ 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11,
+ 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109,
+ 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1,
+ 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429,
+ 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099,
+ 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429,
+ 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71,
+ 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9,
+ 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01,
+ 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1,
+ 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109,
+ 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1,
+ 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429,
+ 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099,
+ 0x1b98: 0x10b1, 0x1b99: 0x10c9, 0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429,
+ 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71,
+ 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9,
+ 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01,
+ 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1,
+ 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41,
+ 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1,
+ 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429,
+ 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099,
+ 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429,
+ 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71,
+ 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9,
+ 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01,
+ 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1,
+ 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41,
+ 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1,
+ 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1,
+ // Block 0x70, offset 0x1c00
+ 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429,
+ 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41,
+ 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079,
+ 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1,
+ 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61,
+ 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9,
+ 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81,
+ 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079,
+ 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1,
+ 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61,
+ 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115,
+ 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135,
+ 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115,
+ 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175,
+ 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115,
+ 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08,
+ 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08,
+ 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08,
+ 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08,
+ 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08,
+ 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411,
+ 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1,
+ 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9,
+ 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231,
+ 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949,
+ 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040,
+ 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429,
+ 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339,
+ 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1,
+ 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351,
+ 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040,
+ // Block 0x73, offset 0x1cc0
+ 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040,
+ 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1,
+ 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9,
+ 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231,
+ 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949,
+ 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040,
+ 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429,
+ 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339,
+ 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1,
+ 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351,
+ 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040,
+ // Block 0x74, offset 0x1d00
+ 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411,
+ 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1,
+ 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9,
+ 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231,
+ 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040,
+ 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249,
+ 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429,
+ 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339,
+ 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1,
+ 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351,
+ 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040,
+ // Block 0x75, offset 0x1d40
+ 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02,
+ 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018,
+ 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2,
+ 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72,
+ 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32,
+ 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2,
+ 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2,
+ 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0018,
+ 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199,
+ 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359,
+ 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99,
+ // Block 0x76, offset 0x1d80
+ 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089,
+ 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1,
+ 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018,
+ 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018,
+ 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018,
+ 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018,
+ 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018,
+ 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040,
+ 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018,
+ 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018,
+ 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018,
+ // Block 0x77, offset 0x1dc0
+ 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040,
+ 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040,
+ 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289,
+ 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349,
+ 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409,
+ 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9,
+ 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589,
+ 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649,
+ 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709,
+ 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9,
+ 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040,
+ // Block 0x78, offset 0x1e00
+ 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79,
+ 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39,
+ 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9,
+ 0x1e12: 0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39,
+ 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9,
+ 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79,
+ 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39,
+ 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9,
+ 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059,
+ 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9,
+ 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239,
+ 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9,
+ 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399,
+ 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459,
+ 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309,
+ 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559,
+ 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9,
+ 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679,
+ 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9,
+ 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d,
+ 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9,
+ 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959,
+ 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d,
+ 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d,
+ 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9,
+ 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99,
+ 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9,
+ 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9,
+ 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99,
+ 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39,
+ 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639,
+ 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9,
+ 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d,
+ 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9,
+ 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d,
+ 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd,
+ 0x1ee4: 0xe919, 0x1ee5: 0xe939, 0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979,
+ 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19,
+ 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d,
+ 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d,
+ 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59,
+ // Block 0x7c, offset 0x1f00
+ 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99,
+ 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39,
+ 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9,
+ 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39,
+ 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd,
+ 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19,
+ 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9,
+ 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59,
+ 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd,
+ 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d,
+ 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079,
+ // Block 0x7d, offset 0x1f40
+ 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d,
+ 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d,
+ 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879,
+ 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919,
+ 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd,
+ 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9,
+ 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99,
+ 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39,
+ 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9,
+ 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d,
+ 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79,
+ // Block 0x7e, offset 0x1f80
+ 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19,
+ 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9,
+ 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59,
+ 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9,
+ 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d,
+ 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040,
+ 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040,
+ 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040,
+ 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040,
+ 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040,
+ 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040,
+}
+
+// idnaIndex: 36 blocks, 2304 entries, 4608 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2304]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,
+ 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21,
+ // Block 0x4, offset 0x100
+ 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16,
+ 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d,
+ 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91,
+ 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96,
+ // Block 0x5, offset 0x140
+ 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e,
+ 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6,
+ 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f,
+ 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae,
+ 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6,
+ 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe,
+ 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3,
+ 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b,
+ 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b,
+ 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b,
+ 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b,
+ 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b,
+ 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0,
+ 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5,
+ 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1,
+ 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41,
+ 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f,
+ 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f,
+ 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f,
+ 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f,
+ 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f,
+ 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f,
+ 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f,
+ 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f,
+ 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f,
+ 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f,
+ 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f,
+ 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b,
+ 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f,
+ // Block 0x9, offset 0x240
+ 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f,
+ 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f,
+ 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f,
+ 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f,
+ 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f,
+ 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f,
+ 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f,
+ 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f,
+ // Block 0xa, offset 0x280
+ 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f,
+ 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f,
+ 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f,
+ 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f,
+ 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f,
+ 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f,
+ 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f,
+ 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f,
+ 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f,
+ 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f,
+ 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 0x2de: 0x46, 0x2df: 0xe8,
+ 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0,
+ 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8,
+ 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f,
+ 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f,
+ // Block 0xc, offset 0x300
+ 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f,
+ 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f,
+ 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f,
+ 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa,
+ // Block 0xd, offset 0x340
+ 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba,
+ 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba,
+ 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba,
+ 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba,
+ 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba,
+ 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba,
+ 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba,
+ 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba,
+ // Block 0xe, offset 0x380
+ 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba,
+ 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba,
+ 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba,
+ 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba,
+ 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe,
+ 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c,
+ 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52,
+ 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108,
+ 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e,
+ 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba,
+ 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba,
+ 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c,
+ 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba,
+ 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0x126, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba,
+ 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0x128, 0x3fd: 0x129, 0x3fe: 0xba, 0x3ff: 0xba,
+ // Block 0x10, offset 0x400
+ 0x400: 0x12a, 0x401: 0x12b, 0x402: 0x12c, 0x403: 0x12d, 0x404: 0x12e, 0x405: 0x12f, 0x406: 0x130, 0x407: 0x131,
+ 0x408: 0x132, 0x409: 0xba, 0x40a: 0x133, 0x40b: 0x134, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba,
+ 0x410: 0x135, 0x411: 0x136, 0x412: 0x137, 0x413: 0x138, 0x414: 0xba, 0x415: 0xba, 0x416: 0x139, 0x417: 0x13a,
+ 0x418: 0x13b, 0x419: 0x13c, 0x41a: 0x13d, 0x41b: 0x13e, 0x41c: 0x13f, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba,
+ 0x420: 0x140, 0x421: 0xba, 0x422: 0x141, 0x423: 0x142, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba,
+ 0x428: 0x143, 0x429: 0x144, 0x42a: 0x145, 0x42b: 0x146, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba,
+ 0x430: 0x147, 0x431: 0x148, 0x432: 0x149, 0x433: 0xba, 0x434: 0x14a, 0x435: 0x14b, 0x436: 0x14c, 0x437: 0xba,
+ 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0x14d, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba,
+ // Block 0x11, offset 0x440
+ 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f,
+ 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x14e, 0x44f: 0xba,
+ 0x450: 0x9b, 0x451: 0x14f, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x150, 0x456: 0xba, 0x457: 0xba,
+ 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba,
+ 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba,
+ 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba,
+ 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba,
+ 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba,
+ // Block 0x12, offset 0x480
+ 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f,
+ 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f,
+ 0x490: 0x151, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba,
+ 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba,
+ 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba,
+ 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba,
+ 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba,
+ 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba,
+ 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba,
+ 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f,
+ 0x4d8: 0x9f, 0x4d9: 0x152, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba,
+ 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba,
+ 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba,
+ 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba,
+ 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba,
+ // Block 0x14, offset 0x500
+ 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba,
+ 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba,
+ 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba,
+ 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba,
+ 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f,
+ 0x528: 0x146, 0x529: 0x153, 0x52a: 0xba, 0x52b: 0x154, 0x52c: 0x155, 0x52d: 0x156, 0x52e: 0x157, 0x52f: 0xba,
+ 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba,
+ 0x538: 0xba, 0x539: 0x158, 0x53a: 0x159, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x15a, 0x53e: 0x15b, 0x53f: 0x15c,
+ // Block 0x15, offset 0x540
+ 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f,
+ 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f,
+ 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f,
+ 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x15d,
+ 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f,
+ 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x15e, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba,
+ 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba,
+ 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba,
+ // Block 0x16, offset 0x580
+ 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x15f, 0x585: 0x160, 0x586: 0x9f, 0x587: 0x9f,
+ 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x161, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba,
+ 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba,
+ 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba,
+ 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba,
+ 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba,
+ 0x5b0: 0x9f, 0x5b1: 0x162, 0x5b2: 0x163, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba,
+ 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x164, 0x5c4: 0x165, 0x5c5: 0x166, 0x5c6: 0x167, 0x5c7: 0x168,
+ 0x5c8: 0x9b, 0x5c9: 0x169, 0x5ca: 0xba, 0x5cb: 0x16a, 0x5cc: 0x9b, 0x5cd: 0x16b, 0x5ce: 0xba, 0x5cf: 0xba,
+ 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66,
+ 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e,
+ 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b,
+ 0x5e8: 0x16c, 0x5e9: 0x16d, 0x5ea: 0x16e, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba,
+ 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba,
+ 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba,
+ // Block 0x18, offset 0x600
+ 0x600: 0x16f, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba,
+ 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba,
+ 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba,
+ 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba,
+ 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x170, 0x624: 0x6f, 0x625: 0x171, 0x626: 0xba, 0x627: 0xba,
+ 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba,
+ 0x630: 0xba, 0x631: 0x172, 0x632: 0x173, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba,
+ 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x174, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba,
+ // Block 0x19, offset 0x640
+ 0x640: 0x175, 0x641: 0x9b, 0x642: 0x176, 0x643: 0x177, 0x644: 0x73, 0x645: 0x74, 0x646: 0x178, 0x647: 0x179,
+ 0x648: 0x75, 0x649: 0x17a, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b,
+ 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b,
+ 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x17b, 0x65c: 0x9b, 0x65d: 0x17c, 0x65e: 0x9b, 0x65f: 0x17d,
+ 0x660: 0x17e, 0x661: 0x17f, 0x662: 0x180, 0x663: 0xba, 0x664: 0x181, 0x665: 0x182, 0x666: 0x183, 0x667: 0x184,
+ 0x668: 0xba, 0x669: 0x185, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba,
+ 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba,
+ 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f,
+ 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f,
+ 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f,
+ 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x186, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f,
+ 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f,
+ 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f,
+ 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f,
+ 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f,
+ 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f,
+ 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f,
+ 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x187, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f,
+ 0x6e0: 0x188, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f,
+ 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f,
+ 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f,
+ 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f,
+ 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f,
+ 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f,
+ 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f,
+ 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f,
+ 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f,
+ 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f,
+ 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x189, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f,
+ 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f,
+ 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f,
+ 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f,
+ 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f,
+ 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x18a,
+ 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba,
+ 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba,
+ // Block 0x1e, offset 0x780
+ 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba,
+ 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba,
+ 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba,
+ 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba,
+ 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x18b, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x18c, 0x7a7: 0x7b,
+ 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba,
+ 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba,
+ 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba,
+ // Block 0x1f, offset 0x7c0
+ 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07,
+ 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17,
+ 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07,
+ 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c,
+ 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b,
+ 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b,
+ 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b,
+ 0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b,
+ 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b,
+ 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b,
+ 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b,
+ 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b,
+ 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b,
+ // Block 0x21, offset 0x840
+ 0x840: 0x18d, 0x841: 0x18e, 0x842: 0xba, 0x843: 0xba, 0x844: 0x18f, 0x845: 0x18f, 0x846: 0x18f, 0x847: 0x190,
+ 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba,
+ 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba,
+ 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba,
+ 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba,
+ 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba,
+ 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba,
+ 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b,
+ 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b,
+ 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b,
+ 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b,
+ 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b,
+ 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b,
+ 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b,
+ 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b,
+ 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b,
+}
+
+// idnaSparseOffset: 276 entries, 552 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x86, 0x8b, 0x94, 0xa4, 0xb2, 0xbe, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x224, 0x22e, 0x23a, 0x246, 0x252, 0x25a, 0x25f, 0x269, 0x27a, 0x27e, 0x289, 0x28d, 0x296, 0x29e, 0x2a4, 0x2a9, 0x2ac, 0x2b0, 0x2b6, 0x2ba, 0x2be, 0x2c2, 0x2c7, 0x2cd, 0x2d5, 0x2dc, 0x2e7, 0x2f1, 0x2f5, 0x2f8, 0x2fe, 0x302, 0x304, 0x307, 0x309, 0x30c, 0x316, 0x319, 0x328, 0x32c, 0x331, 0x334, 0x338, 0x33d, 0x342, 0x348, 0x34e, 0x35d, 0x363, 0x367, 0x376, 0x37b, 0x383, 0x38d, 0x398, 0x3a0, 0x3b1, 0x3ba, 0x3ca, 0x3d7, 0x3e1, 0x3e6, 0x3f3, 0x3f7, 0x3fc, 0x3fe, 0x402, 0x404, 0x408, 0x411, 0x417, 0x41b, 0x42b, 0x435, 0x43a, 0x43d, 0x443, 0x44a, 0x44f, 0x453, 0x459, 0x45e, 0x467, 0x46c, 0x472, 0x479, 0x480, 0x487, 0x48b, 0x490, 0x493, 0x498, 0x4a4, 0x4aa, 0x4af, 0x4b6, 0x4be, 0x4c3, 0x4c7, 0x4d7, 0x4de, 0x4e2, 0x4e6, 0x4ed, 0x4ef, 0x4f2, 0x4f5, 0x4f9, 0x502, 0x506, 0x50e, 0x516, 0x51c, 0x525, 0x531, 0x538, 0x541, 0x54b, 0x552, 0x560, 0x56d, 0x57a, 0x583, 0x587, 0x596, 0x59e, 0x5a9, 0x5b2, 0x5b8, 0x5c0, 0x5c9, 0x5d3, 0x5d6, 0x5e2, 0x5eb, 0x5ee, 0x5f3, 0x5fe, 0x607, 0x613, 0x616, 0x620, 0x629, 0x635, 0x642, 0x64f, 0x65d, 0x664, 0x667, 0x66c, 0x66f, 0x672, 0x675, 0x67c, 0x683, 0x687, 0x692, 0x695, 0x698, 0x69b, 0x6a1, 0x6a6, 0x6aa, 0x6ad, 0x6b0, 0x6b3, 0x6b6, 0x6b9, 0x6be, 0x6c8, 0x6cb, 0x6cf, 0x6de, 0x6ea, 0x6ee, 0x6f3, 0x6f7, 0x6fc, 0x700, 0x705, 0x70e, 0x719, 0x71f, 0x727, 0x72a, 0x72d, 0x731, 0x735, 0x73b, 0x741, 0x746, 0x749, 0x759, 0x760, 0x763, 0x766, 0x76a, 0x770, 0x775, 0x77a, 0x782, 0x787, 0x78b, 0x78f, 0x792, 0x795, 0x799, 0x79d, 0x7a0, 0x7b0, 0x7c1, 0x7c6, 0x7c8, 0x7ca}
+
+// idnaSparseValues: 1997 entries, 7988 bytes
+var idnaSparseValues = [1997]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0249, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x0259, lo: 0xb2, hi: 0xb2},
+ {value: 0x0269, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x0279, lo: 0xb7, hi: 0xb7},
+ {value: 0x0289, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x6, offset 0x33
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0401, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0018, lo: 0x89, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x7, offset 0x3e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0818, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x82},
+ {value: 0x0818, lo: 0x83, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xae},
+ {value: 0x0808, lo: 0xaf, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x8, offset 0x4a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0c08, lo: 0x88, hi: 0x99},
+ {value: 0x0a08, lo: 0x9a, hi: 0xbf},
+ // Block 0x9, offset 0x4e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0c08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0a08, lo: 0x8e, hi: 0x98},
+ {value: 0x0c08, lo: 0x99, hi: 0x9b},
+ {value: 0x0a08, lo: 0x9c, hi: 0xaa},
+ {value: 0x0c08, lo: 0xab, hi: 0xac},
+ {value: 0x0a08, lo: 0xad, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0a08, lo: 0xb5, hi: 0xb7},
+ {value: 0x0c08, lo: 0xb8, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xa, offset 0x5d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xb, offset 0x62
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0808, lo: 0x80, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbf},
+ // Block 0xc, offset 0x6c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x99},
+ {value: 0x0808, lo: 0x9a, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa3},
+ {value: 0x0808, lo: 0xa4, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa7},
+ {value: 0x0808, lo: 0xa8, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0818, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd, offset 0x78
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0a08, lo: 0xa0, hi: 0xa9},
+ {value: 0x0c08, lo: 0xaa, hi: 0xac},
+ {value: 0x0808, lo: 0xad, hi: 0xad},
+ {value: 0x0c08, lo: 0xae, hi: 0xae},
+ {value: 0x0a08, lo: 0xaf, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb2},
+ {value: 0x0a08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0a08, lo: 0xb6, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xe, offset 0x86
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0xa1},
+ {value: 0x0840, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xbf},
+ // Block 0xf, offset 0x8b
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x10, offset 0x94
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x85},
+ {value: 0x3008, lo: 0x86, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8c},
+ {value: 0x3b08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x11, offset 0xa4
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x12, offset 0xb2
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xba},
+ {value: 0x3b08, lo: 0xbb, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x13, offset 0xbe
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x14, offset 0xca
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x89},
+ {value: 0x3b08, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x3008, lo: 0x98, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x15, offset 0xdb
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x08f1, lo: 0xb3, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb9},
+ {value: 0x3b08, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x16, offset 0xe5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0xbf},
+ // Block 0x17, offset 0xec
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0961, lo: 0x9c, hi: 0x9c},
+ {value: 0x0999, lo: 0x9d, hi: 0x9d},
+ {value: 0x0008, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x18, offset 0xf9
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe03d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x19, offset 0x10a
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0x1a, offset 0x111
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x1b, offset 0x11c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3008, lo: 0xa2, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbf},
+ // Block 0x1c, offset 0x12b
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x8c},
+ {value: 0x3308, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x3008, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x1d, offset 0x139
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x86},
+ {value: 0x055d, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8c},
+ {value: 0x055d, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0xe105, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0x1e, offset 0x143
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0018, lo: 0x80, hi: 0xbf},
+ // Block 0x1f, offset 0x145
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa0},
+ {value: 0x2018, lo: 0xa1, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x20, offset 0x14a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa7},
+ {value: 0x2018, lo: 0xa8, hi: 0xbf},
+ // Block 0x21, offset 0x14d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2018, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0xbf},
+ // Block 0x22, offset 0x150
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0008, lo: 0x80, hi: 0xbf},
+ // Block 0x23, offset 0x152
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x24, offset 0x15e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x25, offset 0x169
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x26, offset 0x171
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x27, offset 0x177
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x28, offset 0x17d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x29, offset 0x182
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x2a, offset 0x187
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x2b, offset 0x18a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xbf},
+ // Block 0x2c, offset 0x18e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2d, offset 0x194
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x2e, offset 0x199
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x3b08, lo: 0x94, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x2f, offset 0x1a5
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x30, offset 0x1af
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xb3},
+ {value: 0x3340, lo: 0xb4, hi: 0xb5},
+ {value: 0x3008, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x31, offset 0x1b5
+ {value: 0x0000, lo: 0x10},
+ {value: 0x3008, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x91},
+ {value: 0x3b08, lo: 0x92, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0x96},
+ {value: 0x0008, lo: 0x97, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x32, offset 0x1c6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x86},
+ {value: 0x0218, lo: 0x87, hi: 0x87},
+ {value: 0x0018, lo: 0x88, hi: 0x8a},
+ {value: 0x33c0, lo: 0x8b, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0208, lo: 0xa0, hi: 0xbf},
+ // Block 0x33, offset 0x1d0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0208, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x34, offset 0x1d3
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0208, lo: 0x87, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xa9},
+ {value: 0x0208, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x35, offset 0x1db
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x36, offset 0x1de
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x37, offset 0x1eb
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x38, offset 0x1f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x39, offset 0x1f7
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0028, lo: 0x9a, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xbf},
+ // Block 0x3a, offset 0x1fe
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x3308, lo: 0x97, hi: 0x98},
+ {value: 0x3008, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x3b, offset 0x206
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x94},
+ {value: 0x3008, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xac},
+ {value: 0x3008, lo: 0xad, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3c, offset 0x216
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbd},
+ {value: 0x3318, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x3d, offset 0x222
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0040, lo: 0x80, hi: 0xbf},
+ // Block 0x3e, offset 0x224
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3008, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x3f, offset 0x22e
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x3808, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x40, offset 0x23a
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3808, lo: 0xaa, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xbf},
+ // Block 0x41, offset 0x246
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3008, lo: 0xaa, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3808, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x42, offset 0x252
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x43, offset 0x25a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x44, offset 0x25f
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0e29, lo: 0x80, hi: 0x80},
+ {value: 0x0e41, lo: 0x81, hi: 0x81},
+ {value: 0x0e59, lo: 0x82, hi: 0x82},
+ {value: 0x0e71, lo: 0x83, hi: 0x83},
+ {value: 0x0e89, lo: 0x84, hi: 0x85},
+ {value: 0x0ea1, lo: 0x86, hi: 0x86},
+ {value: 0x0eb9, lo: 0x87, hi: 0x87},
+ {value: 0x057d, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0x45, offset 0x269
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x92},
+ {value: 0x0018, lo: 0x93, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa8},
+ {value: 0x0008, lo: 0xa9, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x46, offset 0x27a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0x47, offset 0x27e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x87},
+ {value: 0xe045, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0xe045, lo: 0x98, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0xe045, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbf},
+ // Block 0x48, offset 0x289
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x3318, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x49, offset 0x28d
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x24c1, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x4a, offset 0x296
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x24f1, lo: 0xac, hi: 0xac},
+ {value: 0x2529, lo: 0xad, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xae},
+ {value: 0x2579, lo: 0xaf, hi: 0xaf},
+ {value: 0x25b1, lo: 0xb0, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x4b, offset 0x29e
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x9f},
+ {value: 0x0080, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xad},
+ {value: 0x0080, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x4c, offset 0x2a4
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa8},
+ {value: 0x09c5, lo: 0xa9, hi: 0xa9},
+ {value: 0x09e5, lo: 0xaa, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xbf},
+ // Block 0x4d, offset 0x2a9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0x4e, offset 0x2ac
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x28c1, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x4f, offset 0x2b0
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0e66, lo: 0xb4, hi: 0xb4},
+ {value: 0x292a, lo: 0xb5, hi: 0xb5},
+ {value: 0x0e86, lo: 0xb6, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x50, offset 0x2b6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x9b},
+ {value: 0x2941, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0xbf},
+ // Block 0x51, offset 0x2ba
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x52, offset 0x2be
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0xbf},
+ // Block 0x53, offset 0x2c2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x54, offset 0x2c7
+ {value: 0x0000, lo: 0x05},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x03f5, lo: 0x90, hi: 0x9f},
+ {value: 0x0ea5, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x55, offset 0x2cd
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x56, offset 0x2d5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xae},
+ {value: 0xe075, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x57, offset 0x2dc
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x58, offset 0x2e7
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xbf},
+ // Block 0x59, offset 0x2f1
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x5a, offset 0x2f5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0xbf},
+ // Block 0x5b, offset 0x2f8
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9e},
+ {value: 0x0edd, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x5c, offset 0x2fe
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb2},
+ {value: 0x0efd, lo: 0xb3, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x5d, offset 0x302
+ {value: 0x0020, lo: 0x01},
+ {value: 0x0f1d, lo: 0x80, hi: 0xbf},
+ // Block 0x5e, offset 0x304
+ {value: 0x0020, lo: 0x02},
+ {value: 0x171d, lo: 0x80, hi: 0x8f},
+ {value: 0x18fd, lo: 0x90, hi: 0xbf},
+ // Block 0x5f, offset 0x307
+ {value: 0x0020, lo: 0x01},
+ {value: 0x1efd, lo: 0x80, hi: 0xbf},
+ // Block 0x60, offset 0x309
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x61, offset 0x30c
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9a},
+ {value: 0x29e2, lo: 0x9b, hi: 0x9b},
+ {value: 0x2a0a, lo: 0x9c, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9e},
+ {value: 0x2a31, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x62, offset 0x316
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x2a69, lo: 0xbf, hi: 0xbf},
+ // Block 0x63, offset 0x319
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0040, lo: 0x80, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb0},
+ {value: 0x2a1d, lo: 0xb1, hi: 0xb1},
+ {value: 0x2a3d, lo: 0xb2, hi: 0xb2},
+ {value: 0x2a5d, lo: 0xb3, hi: 0xb3},
+ {value: 0x2a7d, lo: 0xb4, hi: 0xb4},
+ {value: 0x2a5d, lo: 0xb5, hi: 0xb5},
+ {value: 0x2a9d, lo: 0xb6, hi: 0xb6},
+ {value: 0x2abd, lo: 0xb7, hi: 0xb7},
+ {value: 0x2add, lo: 0xb8, hi: 0xb9},
+ {value: 0x2afd, lo: 0xba, hi: 0xbb},
+ {value: 0x2b1d, lo: 0xbc, hi: 0xbd},
+ {value: 0x2afd, lo: 0xbe, hi: 0xbf},
+ // Block 0x64, offset 0x328
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x65, offset 0x32c
+ {value: 0x0030, lo: 0x04},
+ {value: 0x2aa2, lo: 0x80, hi: 0x9d},
+ {value: 0x305a, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x30a2, lo: 0xa0, hi: 0xbf},
+ // Block 0x66, offset 0x331
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x67, offset 0x334
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x68, offset 0x338
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x69, offset 0x33d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x6a, offset 0x342
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb1},
+ {value: 0x0018, lo: 0xb2, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6b, offset 0x348
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xb7},
+ {value: 0x2009, lo: 0xb8, hi: 0xb8},
+ {value: 0x6e89, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xbf},
+ // Block 0x6c, offset 0x34e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x3308, lo: 0x8b, hi: 0x8b},
+ {value: 0x0008, lo: 0x8c, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x6d, offset 0x35d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0208, lo: 0x80, hi: 0xb1},
+ {value: 0x0108, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6e, offset 0x363
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xbf},
+ // Block 0x6f, offset 0x367
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0008, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x70, offset 0x376
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x71, offset 0x37b
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x91},
+ {value: 0x3008, lo: 0x92, hi: 0x92},
+ {value: 0x3808, lo: 0x93, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x72, offset 0x383
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb9},
+ {value: 0x3008, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x73, offset 0x38d
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x74, offset 0x398
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x75, offset 0x3a0
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8c},
+ {value: 0x3008, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0008, lo: 0xbe, hi: 0xbf},
+ // Block 0x76, offset 0x3b1
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x77, offset 0x3ba
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x9a},
+ {value: 0x0008, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3b08, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x78, offset 0x3ca
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x90},
+ {value: 0x0008, lo: 0x91, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x79, offset 0x3d7
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x4465, lo: 0x9c, hi: 0x9c},
+ {value: 0x447d, lo: 0x9d, hi: 0x9d},
+ {value: 0x2971, lo: 0x9e, hi: 0x9e},
+ {value: 0xe06d, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xaf},
+ {value: 0x4495, lo: 0xb0, hi: 0xbf},
+ // Block 0x7a, offset 0x3e1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x44b5, lo: 0x80, hi: 0x8f},
+ {value: 0x44d5, lo: 0x90, hi: 0x9f},
+ {value: 0x44f5, lo: 0xa0, hi: 0xaf},
+ {value: 0x44d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x7b, offset 0x3e6
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3b08, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x7c, offset 0x3f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x7d, offset 0x3f7
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x7e, offset 0x3fc
+ {value: 0x0020, lo: 0x01},
+ {value: 0x4515, lo: 0x80, hi: 0xbf},
+ // Block 0x7f, offset 0x3fe
+ {value: 0x0020, lo: 0x03},
+ {value: 0x4d15, lo: 0x80, hi: 0x94},
+ {value: 0x4ad5, lo: 0x95, hi: 0x95},
+ {value: 0x4fb5, lo: 0x96, hi: 0xbf},
+ // Block 0x80, offset 0x402
+ {value: 0x0020, lo: 0x01},
+ {value: 0x54f5, lo: 0x80, hi: 0xbf},
+ // Block 0x81, offset 0x404
+ {value: 0x0020, lo: 0x03},
+ {value: 0x5cf5, lo: 0x80, hi: 0x84},
+ {value: 0x5655, lo: 0x85, hi: 0x85},
+ {value: 0x5d95, lo: 0x86, hi: 0xbf},
+ // Block 0x82, offset 0x408
+ {value: 0x0020, lo: 0x08},
+ {value: 0x6b55, lo: 0x80, hi: 0x8f},
+ {value: 0x6d15, lo: 0x90, hi: 0x90},
+ {value: 0x6d55, lo: 0x91, hi: 0xab},
+ {value: 0x6ea1, lo: 0xac, hi: 0xac},
+ {value: 0x70b5, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x70d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x83, offset 0x411
+ {value: 0x0020, lo: 0x05},
+ {value: 0x72d5, lo: 0x80, hi: 0xad},
+ {value: 0x6535, lo: 0xae, hi: 0xae},
+ {value: 0x7895, lo: 0xaf, hi: 0xb5},
+ {value: 0x6f55, lo: 0xb6, hi: 0xb6},
+ {value: 0x7975, lo: 0xb7, hi: 0xbf},
+ // Block 0x84, offset 0x417
+ {value: 0x0028, lo: 0x03},
+ {value: 0x7c21, lo: 0x80, hi: 0x82},
+ {value: 0x7be1, lo: 0x83, hi: 0x83},
+ {value: 0x7c99, lo: 0x84, hi: 0xbf},
+ // Block 0x85, offset 0x41b
+ {value: 0x0038, lo: 0x0f},
+ {value: 0x9db1, lo: 0x80, hi: 0x83},
+ {value: 0x9e59, lo: 0x84, hi: 0x85},
+ {value: 0x9e91, lo: 0x86, hi: 0x87},
+ {value: 0x9ec9, lo: 0x88, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0xa089, lo: 0x92, hi: 0x97},
+ {value: 0xa1a1, lo: 0x98, hi: 0x9c},
+ {value: 0xa281, lo: 0x9d, hi: 0xb3},
+ {value: 0x9d41, lo: 0xb4, hi: 0xb4},
+ {value: 0x9db1, lo: 0xb5, hi: 0xb5},
+ {value: 0xa789, lo: 0xb6, hi: 0xbb},
+ {value: 0xa869, lo: 0xbc, hi: 0xbc},
+ {value: 0xa7f9, lo: 0xbd, hi: 0xbd},
+ {value: 0xa8d9, lo: 0xbe, hi: 0xbf},
+ // Block 0x86, offset 0x42b
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0008, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x87, offset 0x435
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x88, offset 0x43a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x89, offset 0x43d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x8a, offset 0x443
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x8b, offset 0x44a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x8c, offset 0x44f
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8d, offset 0x453
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x8e, offset 0x459
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xbf},
+ // Block 0x8f, offset 0x45e
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x90, offset 0x467
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x91, offset 0x46c
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x92, offset 0x472
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x97},
+ {value: 0x8ad5, lo: 0x98, hi: 0x9f},
+ {value: 0x8aed, lo: 0xa0, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xbf},
+ // Block 0x93, offset 0x479
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x8aed, lo: 0xb0, hi: 0xb7},
+ {value: 0x8ad5, lo: 0xb8, hi: 0xbf},
+ // Block 0x94, offset 0x480
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x95, offset 0x487
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x96, offset 0x48b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xae},
+ {value: 0x0018, lo: 0xaf, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x97, offset 0x490
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x98, offset 0x493
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xbf},
+ // Block 0x99, offset 0x498
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0808, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0808, lo: 0x8a, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbb},
+ {value: 0x0808, lo: 0xbc, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x0808, lo: 0xbf, hi: 0xbf},
+ // Block 0x9a, offset 0x4a4
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0818, lo: 0x97, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0818, lo: 0xb7, hi: 0xbf},
+ // Block 0x9b, offset 0x4aa
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa6},
+ {value: 0x0818, lo: 0xa7, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x9c, offset 0x4af
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x0818, lo: 0xbb, hi: 0xbf},
+ // Block 0x9d, offset 0x4b6
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0818, lo: 0x96, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0818, lo: 0xbf, hi: 0xbf},
+ // Block 0x9e, offset 0x4be
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbb},
+ {value: 0x0818, lo: 0xbc, hi: 0xbd},
+ {value: 0x0808, lo: 0xbe, hi: 0xbf},
+ // Block 0x9f, offset 0x4c3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0818, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x0818, lo: 0x92, hi: 0xbf},
+ // Block 0xa0, offset 0x4c7
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x94},
+ {value: 0x0808, lo: 0x95, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x98},
+ {value: 0x0808, lo: 0x99, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xa1, offset 0x4d7
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0818, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0818, lo: 0x90, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xbc},
+ {value: 0x0818, lo: 0xbd, hi: 0xbf},
+ // Block 0xa2, offset 0x4de
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xa3, offset 0x4e2
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xbf},
+ // Block 0xa4, offset 0x4e6
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0818, lo: 0x98, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb7},
+ {value: 0x0818, lo: 0xb8, hi: 0xbf},
+ // Block 0xa5, offset 0x4ed
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0808, lo: 0x80, hi: 0xbf},
+ // Block 0xa6, offset 0x4ef
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0808, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xa7, offset 0x4f2
+ {value: 0x0000, lo: 0x02},
+ {value: 0x03dd, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xa8, offset 0x4f5
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xbf},
+ // Block 0xa9, offset 0x4f9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0908, lo: 0x80, hi: 0x80},
+ {value: 0x0a08, lo: 0x81, hi: 0xa1},
+ {value: 0x0c08, lo: 0xa2, hi: 0xa2},
+ {value: 0x0a08, lo: 0xa3, hi: 0xa3},
+ {value: 0x3308, lo: 0xa4, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xaa, offset 0x502
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0818, lo: 0xa0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xab, offset 0x506
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0xa6},
+ {value: 0x0808, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0a08, lo: 0xb0, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb3},
+ {value: 0x0a08, lo: 0xb4, hi: 0xbf},
+ // Block 0xac, offset 0x50e
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x84},
+ {value: 0x0808, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x90},
+ {value: 0x0a18, lo: 0x91, hi: 0x93},
+ {value: 0x0c18, lo: 0x94, hi: 0x94},
+ {value: 0x0818, lo: 0x95, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xad, offset 0x516
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xae, offset 0x51c
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x91},
+ {value: 0x0018, lo: 0x92, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xaf, offset 0x525
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0xb0, offset 0x531
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb1, offset 0x538
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb2},
+ {value: 0x3b08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xbf},
+ // Block 0xb2, offset 0x541
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb3, offset 0x54b
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xbe},
+ {value: 0x3008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb4, offset 0x552
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xb5, offset 0x560
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3808, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xb6, offset 0x56d
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xb7, offset 0x57a
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x3308, lo: 0x9f, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa9},
+ {value: 0x3b08, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb8, offset 0x583
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xb9, offset 0x587
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xba, offset 0x596
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xbb, offset 0x59e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x85},
+ {value: 0x0018, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xbc, offset 0x5a9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbd, offset 0x5b2
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9b},
+ {value: 0x3308, lo: 0x9c, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xbe, offset 0x5b8
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbf, offset 0x5c0
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xc0, offset 0x5c9
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb5},
+ {value: 0x3808, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0xc1, offset 0x5d3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xc2, offset 0x5d6
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0xc3, offset 0x5e2
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xc4, offset 0x5eb
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xbf},
+ // Block 0xc5, offset 0x5ee
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xc6, offset 0x5f3
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0xc7, offset 0x5fe
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x3b08, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0xbf},
+ // Block 0xc8, offset 0x607
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x98},
+ {value: 0x3b08, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xbf},
+ // Block 0xc9, offset 0x613
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xca, offset 0x616
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xcb, offset 0x620
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xbf},
+ // Block 0xcc, offset 0x629
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xa9},
+ {value: 0x3308, lo: 0xaa, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xcd, offset 0x635
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xce, offset 0x642
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xbf},
+ // Block 0xcf, offset 0x64f
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x3008, lo: 0x93, hi: 0x94},
+ {value: 0x3308, lo: 0x95, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x96},
+ {value: 0x3b08, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xbf},
+ // Block 0xd0, offset 0x65d
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xd1, offset 0x664
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xd2, offset 0x667
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xd3, offset 0x66c
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xd4, offset 0x66f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xbf},
+ // Block 0xd5, offset 0x672
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xbf},
+ // Block 0xd6, offset 0x675
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xd7, offset 0x67c
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xd8, offset 0x683
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0xd9, offset 0x687
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0xda, offset 0x692
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xdb, offset 0x695
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0xdc, offset 0x698
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0xdd, offset 0x69b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xde, offset 0x6a1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xdf, offset 0x6a6
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xbf},
+ // Block 0xe0, offset 0x6aa
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xe1, offset 0x6ad
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xe2, offset 0x6b0
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xe3, offset 0x6b3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xe4, offset 0x6b6
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0xe5, offset 0x6b9
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0xe6, offset 0x6be
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x03c0, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xe7, offset 0x6c8
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xe8, offset 0x6cb
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xbf},
+ // Block 0xe9, offset 0x6cf
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0xb5b9, lo: 0x9e, hi: 0x9e},
+ {value: 0xb601, lo: 0x9f, hi: 0x9f},
+ {value: 0xb649, lo: 0xa0, hi: 0xa0},
+ {value: 0xb6b1, lo: 0xa1, hi: 0xa1},
+ {value: 0xb719, lo: 0xa2, hi: 0xa2},
+ {value: 0xb781, lo: 0xa3, hi: 0xa3},
+ {value: 0xb7e9, lo: 0xa4, hi: 0xa4},
+ {value: 0x3018, lo: 0xa5, hi: 0xa6},
+ {value: 0x3318, lo: 0xa7, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xac},
+ {value: 0x3018, lo: 0xad, hi: 0xb2},
+ {value: 0x0340, lo: 0xb3, hi: 0xba},
+ {value: 0x3318, lo: 0xbb, hi: 0xbf},
+ // Block 0xea, offset 0x6de
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3318, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x84},
+ {value: 0x3318, lo: 0x85, hi: 0x8b},
+ {value: 0x0018, lo: 0x8c, hi: 0xa9},
+ {value: 0x3318, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xba},
+ {value: 0xb851, lo: 0xbb, hi: 0xbb},
+ {value: 0xb899, lo: 0xbc, hi: 0xbc},
+ {value: 0xb8e1, lo: 0xbd, hi: 0xbd},
+ {value: 0xb949, lo: 0xbe, hi: 0xbe},
+ {value: 0xb9b1, lo: 0xbf, hi: 0xbf},
+ // Block 0xeb, offset 0x6ea
+ {value: 0x0000, lo: 0x03},
+ {value: 0xba19, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xbf},
+ // Block 0xec, offset 0x6ee
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3318, lo: 0x82, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0xbf},
+ // Block 0xed, offset 0x6f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0xee, offset 0x6f7
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xef, offset 0x6fc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0xf0, offset 0x700
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0xf1, offset 0x705
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x3308, lo: 0xa1, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xf2, offset 0x70e
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0xf3, offset 0x719
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x86},
+ {value: 0x0818, lo: 0x87, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xf4, offset 0x71f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xf5, offset 0x727
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xb0},
+ {value: 0x0818, lo: 0xb1, hi: 0xbf},
+ // Block 0xf6, offset 0x72a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0818, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xf7, offset 0x72d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xf8, offset 0x731
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0xf9, offset 0x735
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0xfa, offset 0x73b
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xfb, offset 0x741
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0xc1c1, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xfc, offset 0x746
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xbf},
+ // Block 0xfd, offset 0x749
+ {value: 0x0000, lo: 0x0f},
+ {value: 0xc7e9, lo: 0x80, hi: 0x80},
+ {value: 0xc839, lo: 0x81, hi: 0x81},
+ {value: 0xc889, lo: 0x82, hi: 0x82},
+ {value: 0xc8d9, lo: 0x83, hi: 0x83},
+ {value: 0xc929, lo: 0x84, hi: 0x84},
+ {value: 0xc979, lo: 0x85, hi: 0x85},
+ {value: 0xc9c9, lo: 0x86, hi: 0x86},
+ {value: 0xca19, lo: 0x87, hi: 0x87},
+ {value: 0xca69, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0xcab9, lo: 0x90, hi: 0x90},
+ {value: 0xcad9, lo: 0x91, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xbf},
+ // Block 0xfe, offset 0x759
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xff, offset 0x760
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x100, offset 0x763
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0xbf},
+ // Block 0x101, offset 0x766
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x102, offset 0x76a
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x103, offset 0x770
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0x104, offset 0x775
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x105, offset 0x77a
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb2},
+ {value: 0x0018, lo: 0xb3, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x106, offset 0x782
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa2},
+ {value: 0x0040, lo: 0xa3, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x107, offset 0x787
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x108, offset 0x78b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0x109, offset 0x78f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0x10a, offset 0x792
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x10b, offset 0x795
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x10c, offset 0x799
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x10d, offset 0x79d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x10e, offset 0x7a0
+ {value: 0x0020, lo: 0x0f},
+ {value: 0xdeb9, lo: 0x80, hi: 0x89},
+ {value: 0x8dfd, lo: 0x8a, hi: 0x8a},
+ {value: 0xdff9, lo: 0x8b, hi: 0x9c},
+ {value: 0x8e1d, lo: 0x9d, hi: 0x9d},
+ {value: 0xe239, lo: 0x9e, hi: 0xa2},
+ {value: 0x8e3d, lo: 0xa3, hi: 0xa3},
+ {value: 0xe2d9, lo: 0xa4, hi: 0xab},
+ {value: 0x7ed5, lo: 0xac, hi: 0xac},
+ {value: 0xe3d9, lo: 0xad, hi: 0xaf},
+ {value: 0x8e5d, lo: 0xb0, hi: 0xb0},
+ {value: 0xe439, lo: 0xb1, hi: 0xb6},
+ {value: 0x8e7d, lo: 0xb7, hi: 0xb9},
+ {value: 0xe4f9, lo: 0xba, hi: 0xba},
+ {value: 0x8edd, lo: 0xbb, hi: 0xbb},
+ {value: 0xe519, lo: 0xbc, hi: 0xbf},
+ // Block 0x10f, offset 0x7b0
+ {value: 0x0020, lo: 0x10},
+ {value: 0x937d, lo: 0x80, hi: 0x80},
+ {value: 0xf099, lo: 0x81, hi: 0x86},
+ {value: 0x939d, lo: 0x87, hi: 0x8a},
+ {value: 0xd9f9, lo: 0x8b, hi: 0x8b},
+ {value: 0xf159, lo: 0x8c, hi: 0x96},
+ {value: 0x941d, lo: 0x97, hi: 0x97},
+ {value: 0xf2b9, lo: 0x98, hi: 0xa3},
+ {value: 0x943d, lo: 0xa4, hi: 0xa6},
+ {value: 0xf439, lo: 0xa7, hi: 0xaa},
+ {value: 0x949d, lo: 0xab, hi: 0xab},
+ {value: 0xf4b9, lo: 0xac, hi: 0xac},
+ {value: 0x94bd, lo: 0xad, hi: 0xad},
+ {value: 0xf4d9, lo: 0xae, hi: 0xaf},
+ {value: 0x94dd, lo: 0xb0, hi: 0xb1},
+ {value: 0xf519, lo: 0xb2, hi: 0xbe},
+ {value: 0x2040, lo: 0xbf, hi: 0xbf},
+ // Block 0x110, offset 0x7c1
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0340, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x9f},
+ {value: 0x0340, lo: 0xa0, hi: 0xbf},
+ // Block 0x111, offset 0x7c6
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0340, lo: 0x80, hi: 0xbf},
+ // Block 0x112, offset 0x7c8
+ {value: 0x0000, lo: 0x01},
+ {value: 0x33c0, lo: 0x80, hi: 0xbf},
+ // Block 0x113, offset 0x7ca
+ {value: 0x0000, lo: 0x02},
+ {value: 0x33c0, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 42466 bytes (41KiB); checksum: 355A58A4
diff --git a/src/vendor/golang.org/x/net/idna/tables9.0.0.go b/src/vendor/golang.org/x/net/idna/tables9.0.0.go
new file mode 100644
index 000000000..8b65fa167
--- /dev/null
+++ b/src/vendor/golang.org/x/net/idna/tables9.0.0.go
@@ -0,0 +1,4486 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// +build !go1.10
+
+package idna
+
+// UnicodeVersion is the Unicode version from which the tables in this package are derived.
+const UnicodeVersion = "9.0.0"
+
+var mappings string = "" + // Size: 8175 bytes
+ "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" +
+ "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" +
+ "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" +
+ "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" +
+ "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" +
+ "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" +
+ "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" +
+ "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" +
+ "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" +
+ "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" +
+ "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" +
+ "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" +
+ "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" +
+ "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" +
+ "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" +
+ "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" +
+ "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" +
+ "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" +
+ "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" +
+ "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" +
+ "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" +
+ "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" +
+ "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" +
+ "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" +
+ "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" +
+ "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" +
+ ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" +
+ "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" +
+ "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" +
+ "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" +
+ "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" +
+ "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" +
+ "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" +
+ "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" +
+ "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" +
+ "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" +
+ "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" +
+ "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" +
+ "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" +
+ "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" +
+ "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" +
+ "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" +
+ "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" +
+ "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" +
+ "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" +
+ "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" +
+ "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" +
+ "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" +
+ "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" +
+ "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" +
+ "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" +
+ "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" +
+ "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" +
+ "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" +
+ "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" +
+ "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" +
+ "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" +
+ "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" +
+ "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" +
+ "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" +
+ "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" +
+ "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" +
+ "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" +
+ "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" +
+ "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" +
+ "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" +
+ "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" +
+ "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" +
+ "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" +
+ "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" +
+ "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" +
+ "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" +
+ "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" +
+ " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" +
+ "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" +
+ "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" +
+ "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" +
+ "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" +
+ "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" +
+ "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" +
+ "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" +
+ "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" +
+ "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" +
+ "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" +
+ "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" +
+ "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" +
+ "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" +
+ "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" +
+ "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" +
+ "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" +
+ "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" +
+ "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" +
+ "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" +
+ "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" +
+ "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" +
+ "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" +
+ "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" +
+ "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" +
+ "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" +
+ "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" +
+ "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" +
+ "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" +
+ "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" +
+ "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" +
+ "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" +
+ "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" +
+ "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" +
+ "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" +
+ "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" +
+ "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" +
+ "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" +
+ "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" +
+ "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" +
+ "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" +
+ "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" +
+ "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" +
+ "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" +
+ "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" +
+ "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" +
+ "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" +
+ "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" +
+ "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" +
+ "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" +
+ "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" +
+ "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" +
+ "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" +
+ "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" +
+ "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" +
+ "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" +
+ "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" +
+ "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" +
+ "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" +
+ "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" +
+ "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" +
+ "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" +
+ "頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" +
+ "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻"
+
+var xorData string = "" + // Size: 4855 bytes
+ "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" +
+ "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" +
+ "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" +
+ "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" +
+ "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" +
+ "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" +
+ "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" +
+ "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" +
+ "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" +
+ "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" +
+ "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" +
+ "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" +
+ "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" +
+ "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" +
+ "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" +
+ "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" +
+ "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" +
+ "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" +
+ "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" +
+ "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" +
+ "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" +
+ "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" +
+ "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" +
+ "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" +
+ "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" +
+ "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" +
+ "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" +
+ "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" +
+ "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" +
+ "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" +
+ "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" +
+ "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" +
+ "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" +
+ "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" +
+ "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" +
+ "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" +
+ "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " +
+ "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" +
+ "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" +
+ "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" +
+ "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" +
+ "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" +
+ ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" +
+ "\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" +
+ "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" +
+ "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" +
+ "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" +
+ "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" +
+ "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" +
+ "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" +
+ "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" +
+ "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" +
+ "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" +
+ "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" +
+ "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" +
+ "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" +
+ "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" +
+ "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" +
+ "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" +
+ "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" +
+ "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" +
+ "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" +
+ "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" +
+ "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" +
+ "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" +
+ "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" +
+ "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" +
+ "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" +
+ "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" +
+ "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" +
+ "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" +
+ "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" +
+ "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" +
+ "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" +
+ "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" +
+ "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" +
+ "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" +
+ "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" +
+ "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" +
+ "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" +
+ "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" +
+ "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" +
+ "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" +
+ "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" +
+ "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" +
+ "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" +
+ "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" +
+ "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" +
+ "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," +
+ "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" +
+ "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" +
+ "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" +
+ "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" +
+ ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" +
+ "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" +
+ "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" +
+ "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" +
+ "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" +
+ "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" +
+ "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" +
+ "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" +
+ "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" +
+ "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" +
+ "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" +
+ "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" +
+ "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" +
+ "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" +
+ "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" +
+ "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" +
+ "\x08\x1a\x0a\x03\x07\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03\x09\x0c" +
+ "\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06!3\x03" +
+ "\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05\x03\x07" +
+ "<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" +
+ "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" +
+ "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" +
+ "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" +
+ "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" +
+ "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" +
+ "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" +
+ "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" +
+ "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" +
+ "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" +
+ "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" +
+ "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" +
+ "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" +
+ "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" +
+ "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" +
+ "\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" +
+ "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" +
+ "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." +
+ "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" +
+ "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" +
+ "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " +
+ "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" +
+ "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" +
+ "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" +
+ "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" +
+ "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" +
+ "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" +
+ "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," +
+ "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" +
+ "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" +
+ "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" +
+ "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" +
+ "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" +
+ "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" +
+ "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" +
+ "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" +
+ "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" +
+ "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" +
+ "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" +
+ "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" +
+ "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" +
+ "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" +
+ "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" +
+ "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" +
+ "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" +
+ "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" +
+ "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" +
+ "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" +
+ "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" +
+ "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" +
+ "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" +
+ "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" +
+ "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" +
+ "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" +
+ "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" +
+ "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" +
+ "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" +
+ "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" +
+ "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" +
+ "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" +
+ "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" +
+ "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" +
+ "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" +
+ "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" +
+ "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" +
+ "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" +
+ "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," +
+ "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" +
+ "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" +
+ "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" +
+ "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" +
+ "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" +
+ "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" +
+ "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" +
+ "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" +
+ "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" +
+ "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" +
+ "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" +
+ "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" +
+ "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" +
+ "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" +
+ "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" +
+ "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" +
+ "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" +
+ "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" +
+ "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" +
+ "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" +
+ "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" +
+ "\x04\x03\x0c?\x05\x03\x0c\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" +
+ "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" +
+ "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" +
+ "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" +
+ "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" +
+ "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" +
+ "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" +
+ "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" +
+ "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" +
+ "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" +
+ "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" +
+ "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" +
+ "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" +
+ "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" +
+ "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" +
+ "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" +
+ "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" +
+ "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" +
+ "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" +
+ "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" +
+ "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" +
+ "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" +
+ "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" +
+ "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" +
+ "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" +
+ "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" +
+ "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" +
+ "\x05\x22\x05\x03\x050\x1d"
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
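+
+// Illustrative usage sketch (editorial note, not part of the generated
+// file): a caller typically scans a byte slice rune by rune, e.g.
+//
+//	for i := 0; i < len(src); {
+//		v, sz := t.lookup(src[i:])
+//		if sz == 0 {
+//			break // src ends mid-rune; more input is needed
+//		}
+//		_ = v // v is the IDNA trie value for this rune
+//		i += sz
+//	}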
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
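+
+// Note (editorial): the *Unsafe variants skip the length and
+// continuation-byte checks performed by lookup/lookupString, so they are
+// only correct when s is already known to start with a complete, valid
+// UTF-8 encoded rune; on truncated input they can panic with an index
+// out of range error.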
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+ c0 := s[0]
+ switch {
+ case c0 < 0x80: // is ASCII
+ return idnaValues[c0], 1
+ case c0 < 0xC2:
+ return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+ case c0 < 0xE0: // 2-byte UTF-8
+ if len(s) < 2 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c1), 2
+ case c0 < 0xF0: // 3-byte UTF-8
+ if len(s) < 3 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c2), 3
+ case c0 < 0xF8: // 4-byte UTF-8
+ if len(s) < 4 {
+ return 0, 0
+ }
+ i := idnaIndex[c0]
+ c1 := s[1]
+ if c1 < 0x80 || 0xC0 <= c1 {
+ return 0, 1 // Illegal UTF-8: not a continuation byte.
+ }
+ o := uint32(i)<<6 + uint32(c1)
+ i = idnaIndex[o]
+ c2 := s[2]
+ if c2 < 0x80 || 0xC0 <= c2 {
+ return 0, 2 // Illegal UTF-8: not a continuation byte.
+ }
+ o = uint32(i)<<6 + uint32(c2)
+ i = idnaIndex[o]
+ c3 := s[3]
+ if c3 < 0x80 || 0xC0 <= c3 {
+ return 0, 3 // Illegal UTF-8: not a continuation byte.
+ }
+ return t.lookupValue(uint32(i), c3), 4
+ }
+ // Illegal rune
+ return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+ c0 := s[0]
+ if c0 < 0x80 { // is ASCII
+ return idnaValues[c0]
+ }
+ i := idnaIndex[c0]
+ if c0 < 0xE0 { // 2-byte UTF-8
+ return t.lookupValue(uint32(i), s[1])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+ if c0 < 0xF0 { // 3-byte UTF-8
+ return t.lookupValue(uint32(i), s[2])
+ }
+ i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+ if c0 < 0xF8 { // 4-byte UTF-8
+ return t.lookupValue(uint32(i), s[3])
+ }
+ return 0
+}
+
+// idnaTrie. Total size: 28600 bytes (27.93 KiB). Checksum: 95575047b5d8fff.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+ return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+ switch {
+ case n < 124:
+ return uint16(idnaValues[n<<6+uint32(b)])
+ default:
+ n -= 124
+ return uint16(idnaSparse.lookup(n, b))
+ }
+}
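+
+// Worked trace (editorial; concrete indices depend on the generated
+// tables): for the 2-byte sequence 0xC3 0xA9 ('é'), lookup computes
+// i := idnaIndex[0xC3] and returns lookupValue(uint32(i), 0xA9), which
+// reads idnaValues[i<<6+0xA9] while i < 124; handles of 124 and above
+// are redirected, after subtracting 124, to the idnaSparse table.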
+
+// idnaValues: 126 blocks, 8064 entries, 16128 bytes
+// The third block is the zero block.
+var idnaValues = [8064]uint16{
+ // Block 0x0, offset 0x0
+ 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080,
+ 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080,
+ 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080,
+ 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080,
+ 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080,
+ 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080,
+ 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080,
+ 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080,
+ 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008,
+ 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080,
+ 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080,
+ // Block 0x1, offset 0x40
+ 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105,
+ 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105,
+ 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105,
+ 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105,
+ 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080,
+ 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008,
+ 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008,
+ 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008,
+ 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008,
+ 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080,
+ 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080,
+ // Block 0x2, offset 0x80
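+ // (no entries printed: this is the zero block noted above; all values are zero)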
+ // Block 0x3, offset 0xc0
+ 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040,
+ 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040,
+ 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040,
+ 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040,
+ 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040,
+ 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018,
+ 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018,
+ 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a,
+ 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005,
+ 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018,
+ 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018,
+ // Block 0x4, offset 0x100
+ 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008,
+ 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008,
+ 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008,
+ 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008,
+ 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008,
+ 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008,
+ 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008,
+ 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008,
+ 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008,
+ 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d,
+ 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199,
+ // Block 0x5, offset 0x140
+ 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d,
+ 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008,
+ 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008,
+ 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008,
+ 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008,
+ 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008,
+ 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008,
+ 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008,
+ 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008,
+ 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d,
+ 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9,
+ // Block 0x6, offset 0x180
+ 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008,
+ 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d,
+ 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d,
+ 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d,
+ 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155,
+ 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008,
+ 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d,
+ 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd,
+ 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d,
+ 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008,
+ 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9,
+ 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d,
+ 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d,
+ 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d,
+ 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008,
+ 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008,
+ 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008,
+ 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008,
+ 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008,
+ 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008,
+ 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008,
+ // Block 0x8, offset 0x200
+ 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008,
+ 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008,
+ 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008,
+ 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008,
+ 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008,
+ 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008,
+ 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008,
+ 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008,
+ 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008,
+ 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d,
+ 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008,
+ // Block 0x9, offset 0x240
+ 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018,
+ 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008,
+ 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008,
+ 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018,
+ 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a,
+ 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369,
+ 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018,
+ 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018,
+ 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018,
+ 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018,
+ 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018,
+ // Block 0xa, offset 0x280
+ 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d,
+ 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308,
+ 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308,
+ 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308,
+ 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308,
+ 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308,
+ 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308,
+ 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308,
+ 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008,
+ 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008,
+ 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2,
+ 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040,
+ 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105,
+ 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105,
+ 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105,
+ 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d,
+ 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d,
+ 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008,
+ 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008,
+ 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008,
+ 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008,
+ // Block 0xc, offset 0x300
+ 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008,
+ 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008,
+ 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd,
+ 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008,
+ 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008,
+ 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008,
+ 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008,
+ 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008,
+ 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd,
+ 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008,
+ 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d,
+ // Block 0xd, offset 0x340
+ 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008,
+ 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008,
+ 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008,
+ 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008,
+ 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008,
+ 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008,
+ 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008,
+ 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008,
+ 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008,
+ 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008,
+ 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008,
+ // Block 0xe, offset 0x380
+ 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308,
+ 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008,
+ 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008,
+ 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008,
+ 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008,
+ 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008,
+ 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008,
+ 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008,
+ 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008,
+ 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008,
+ 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d,
+ 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d,
+ 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008,
+ 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008,
+ 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008,
+ 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008,
+ 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008,
+ 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008,
+ 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008,
+ 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008,
+ 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008,
+ // Block 0x10, offset 0x400
+ 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008,
+ 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008,
+ 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008,
+ 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008,
+ 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008,
+ 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008,
+ 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008,
+ 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008,
+ 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5,
+ 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5,
+ 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5,
+ // Block 0x11, offset 0x440
+ 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840,
+ 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818,
+ 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308,
+ 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308,
+ 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040,
+ 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08,
+ 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08,
+ 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08,
+ 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08,
+ 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08,
+ 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08,
+ // Block 0x12, offset 0x480
+ 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08,
+ 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308,
+ 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308,
+ 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308,
+ 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308,
+ 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808,
+ 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808,
+ 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08,
+ 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429,
+ 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08,
+ 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08,
+ 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08,
+ 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08,
+ 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308,
+ 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840,
+ 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308,
+ 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018,
+ 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08,
+ 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008,
+ 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08,
+ 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08,
+ // Block 0x14, offset 0x500
+ 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818,
+ 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818,
+ 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308,
+ 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08,
+ 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08,
+ 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08,
+ 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08,
+ 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08,
+ 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308,
+ 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308,
+ 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308,
+ // Block 0x15, offset 0x540
+ 0x540: 0x3008, 0x541: 0x3308, 0x542: 0x3308, 0x543: 0x3308, 0x544: 0x3308, 0x545: 0x3308,
+ 0x546: 0x3308, 0x547: 0x3308, 0x548: 0x3308, 0x549: 0x3008, 0x54a: 0x3008, 0x54b: 0x3008,
+ 0x54c: 0x3008, 0x54d: 0x3b08, 0x54e: 0x3008, 0x54f: 0x3008, 0x550: 0x0008, 0x551: 0x3308,
+ 0x552: 0x3308, 0x553: 0x3308, 0x554: 0x3308, 0x555: 0x3308, 0x556: 0x3308, 0x557: 0x3308,
+ 0x558: 0x04c9, 0x559: 0x0501, 0x55a: 0x0539, 0x55b: 0x0571, 0x55c: 0x05a9, 0x55d: 0x05e1,
+ 0x55e: 0x0619, 0x55f: 0x0651, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x3308, 0x563: 0x3308,
+ 0x564: 0x0018, 0x565: 0x0018, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0008,
+ 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008,
+ 0x570: 0x0018, 0x571: 0x0008, 0x572: 0x0008, 0x573: 0x0008, 0x574: 0x0008, 0x575: 0x0008,
+ 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0008, 0x57b: 0x0008,
+ 0x57c: 0x0008, 0x57d: 0x0008, 0x57e: 0x0008, 0x57f: 0x0008,
+ // Block 0x16, offset 0x580
+ 0x580: 0x0008, 0x581: 0x3308, 0x582: 0x3008, 0x583: 0x3008, 0x584: 0x0040, 0x585: 0x0008,
+ 0x586: 0x0008, 0x587: 0x0008, 0x588: 0x0008, 0x589: 0x0008, 0x58a: 0x0008, 0x58b: 0x0008,
+ 0x58c: 0x0008, 0x58d: 0x0040, 0x58e: 0x0040, 0x58f: 0x0008, 0x590: 0x0008, 0x591: 0x0040,
+ 0x592: 0x0040, 0x593: 0x0008, 0x594: 0x0008, 0x595: 0x0008, 0x596: 0x0008, 0x597: 0x0008,
+ 0x598: 0x0008, 0x599: 0x0008, 0x59a: 0x0008, 0x59b: 0x0008, 0x59c: 0x0008, 0x59d: 0x0008,
+ 0x59e: 0x0008, 0x59f: 0x0008, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x0008, 0x5a3: 0x0008,
+ 0x5a4: 0x0008, 0x5a5: 0x0008, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0040,
+ 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008,
+ 0x5b0: 0x0008, 0x5b1: 0x0040, 0x5b2: 0x0008, 0x5b3: 0x0040, 0x5b4: 0x0040, 0x5b5: 0x0040,
+ 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0040, 0x5bb: 0x0040,
+ 0x5bc: 0x3308, 0x5bd: 0x0008, 0x5be: 0x3008, 0x5bf: 0x3008,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x3008, 0x5c1: 0x3308, 0x5c2: 0x3308, 0x5c3: 0x3308, 0x5c4: 0x3308, 0x5c5: 0x0040,
+ 0x5c6: 0x0040, 0x5c7: 0x3008, 0x5c8: 0x3008, 0x5c9: 0x0040, 0x5ca: 0x0040, 0x5cb: 0x3008,
+ 0x5cc: 0x3008, 0x5cd: 0x3b08, 0x5ce: 0x0008, 0x5cf: 0x0040, 0x5d0: 0x0040, 0x5d1: 0x0040,
+ 0x5d2: 0x0040, 0x5d3: 0x0040, 0x5d4: 0x0040, 0x5d5: 0x0040, 0x5d6: 0x0040, 0x5d7: 0x3008,
+ 0x5d8: 0x0040, 0x5d9: 0x0040, 0x5da: 0x0040, 0x5db: 0x0040, 0x5dc: 0x0689, 0x5dd: 0x06c1,
+ 0x5de: 0x0040, 0x5df: 0x06f9, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x3308, 0x5e3: 0x3308,
+ 0x5e4: 0x0040, 0x5e5: 0x0040, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0008,
+ 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008,
+ 0x5f0: 0x0008, 0x5f1: 0x0008, 0x5f2: 0x0018, 0x5f3: 0x0018, 0x5f4: 0x0018, 0x5f5: 0x0018,
+ 0x5f6: 0x0018, 0x5f7: 0x0018, 0x5f8: 0x0018, 0x5f9: 0x0018, 0x5fa: 0x0018, 0x5fb: 0x0018,
+ 0x5fc: 0x0040, 0x5fd: 0x0040, 0x5fe: 0x0040, 0x5ff: 0x0040,
+ // Block 0x18, offset 0x600
+ 0x600: 0x0040, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3008, 0x604: 0x0040, 0x605: 0x0008,
+ 0x606: 0x0008, 0x607: 0x0008, 0x608: 0x0008, 0x609: 0x0008, 0x60a: 0x0008, 0x60b: 0x0040,
+ 0x60c: 0x0040, 0x60d: 0x0040, 0x60e: 0x0040, 0x60f: 0x0008, 0x610: 0x0008, 0x611: 0x0040,
+ 0x612: 0x0040, 0x613: 0x0008, 0x614: 0x0008, 0x615: 0x0008, 0x616: 0x0008, 0x617: 0x0008,
+ 0x618: 0x0008, 0x619: 0x0008, 0x61a: 0x0008, 0x61b: 0x0008, 0x61c: 0x0008, 0x61d: 0x0008,
+ 0x61e: 0x0008, 0x61f: 0x0008, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x0008, 0x623: 0x0008,
+ 0x624: 0x0008, 0x625: 0x0008, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0040,
+ 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008,
+ 0x630: 0x0008, 0x631: 0x0040, 0x632: 0x0008, 0x633: 0x0731, 0x634: 0x0040, 0x635: 0x0008,
+ 0x636: 0x0769, 0x637: 0x0040, 0x638: 0x0008, 0x639: 0x0008, 0x63a: 0x0040, 0x63b: 0x0040,
+ 0x63c: 0x3308, 0x63d: 0x0040, 0x63e: 0x3008, 0x63f: 0x3008,
+ // Block 0x19, offset 0x640
+ 0x640: 0x3008, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x0040, 0x644: 0x0040, 0x645: 0x0040,
+ 0x646: 0x0040, 0x647: 0x3308, 0x648: 0x3308, 0x649: 0x0040, 0x64a: 0x0040, 0x64b: 0x3308,
+ 0x64c: 0x3308, 0x64d: 0x3b08, 0x64e: 0x0040, 0x64f: 0x0040, 0x650: 0x0040, 0x651: 0x3308,
+ 0x652: 0x0040, 0x653: 0x0040, 0x654: 0x0040, 0x655: 0x0040, 0x656: 0x0040, 0x657: 0x0040,
+ 0x658: 0x0040, 0x659: 0x07a1, 0x65a: 0x07d9, 0x65b: 0x0811, 0x65c: 0x0008, 0x65d: 0x0040,
+ 0x65e: 0x0849, 0x65f: 0x0040, 0x660: 0x0040, 0x661: 0x0040, 0x662: 0x0040, 0x663: 0x0040,
+ 0x664: 0x0040, 0x665: 0x0040, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0008,
+ 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008,
+ 0x670: 0x3308, 0x671: 0x3308, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0008, 0x675: 0x3308,
+ 0x676: 0x0040, 0x677: 0x0040, 0x678: 0x0040, 0x679: 0x0040, 0x67a: 0x0040, 0x67b: 0x0040,
+ 0x67c: 0x0040, 0x67d: 0x0040, 0x67e: 0x0040, 0x67f: 0x0040,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x0040, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x3008, 0x684: 0x0040, 0x685: 0x0008,
+ 0x686: 0x0008, 0x687: 0x0008, 0x688: 0x0008, 0x689: 0x0008, 0x68a: 0x0008, 0x68b: 0x0008,
+ 0x68c: 0x0008, 0x68d: 0x0008, 0x68e: 0x0040, 0x68f: 0x0008, 0x690: 0x0008, 0x691: 0x0008,
+ 0x692: 0x0040, 0x693: 0x0008, 0x694: 0x0008, 0x695: 0x0008, 0x696: 0x0008, 0x697: 0x0008,
+ 0x698: 0x0008, 0x699: 0x0008, 0x69a: 0x0008, 0x69b: 0x0008, 0x69c: 0x0008, 0x69d: 0x0008,
+ 0x69e: 0x0008, 0x69f: 0x0008, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x0008, 0x6a3: 0x0008,
+ 0x6a4: 0x0008, 0x6a5: 0x0008, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0040,
+ 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008,
+ 0x6b0: 0x0008, 0x6b1: 0x0040, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0040, 0x6b5: 0x0008,
+ 0x6b6: 0x0008, 0x6b7: 0x0008, 0x6b8: 0x0008, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040,
+ 0x6bc: 0x3308, 0x6bd: 0x0008, 0x6be: 0x3008, 0x6bf: 0x3008,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x3008, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3308, 0x6c4: 0x3308, 0x6c5: 0x3308,
+ 0x6c6: 0x0040, 0x6c7: 0x3308, 0x6c8: 0x3308, 0x6c9: 0x3008, 0x6ca: 0x0040, 0x6cb: 0x3008,
+ 0x6cc: 0x3008, 0x6cd: 0x3b08, 0x6ce: 0x0040, 0x6cf: 0x0040, 0x6d0: 0x0008, 0x6d1: 0x0040,
+ 0x6d2: 0x0040, 0x6d3: 0x0040, 0x6d4: 0x0040, 0x6d5: 0x0040, 0x6d6: 0x0040, 0x6d7: 0x0040,
+ 0x6d8: 0x0040, 0x6d9: 0x0040, 0x6da: 0x0040, 0x6db: 0x0040, 0x6dc: 0x0040, 0x6dd: 0x0040,
+ 0x6de: 0x0040, 0x6df: 0x0040, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x3308, 0x6e3: 0x3308,
+ 0x6e4: 0x0040, 0x6e5: 0x0040, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0008,
+ 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008,
+ 0x6f0: 0x0018, 0x6f1: 0x0018, 0x6f2: 0x0040, 0x6f3: 0x0040, 0x6f4: 0x0040, 0x6f5: 0x0040,
+ 0x6f6: 0x0040, 0x6f7: 0x0040, 0x6f8: 0x0040, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040,
+ 0x6fc: 0x0040, 0x6fd: 0x0040, 0x6fe: 0x0040, 0x6ff: 0x0040,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x0040, 0x701: 0x3308, 0x702: 0x3008, 0x703: 0x3008, 0x704: 0x0040, 0x705: 0x0008,
+ 0x706: 0x0008, 0x707: 0x0008, 0x708: 0x0008, 0x709: 0x0008, 0x70a: 0x0008, 0x70b: 0x0008,
+ 0x70c: 0x0008, 0x70d: 0x0040, 0x70e: 0x0040, 0x70f: 0x0008, 0x710: 0x0008, 0x711: 0x0040,
+ 0x712: 0x0040, 0x713: 0x0008, 0x714: 0x0008, 0x715: 0x0008, 0x716: 0x0008, 0x717: 0x0008,
+ 0x718: 0x0008, 0x719: 0x0008, 0x71a: 0x0008, 0x71b: 0x0008, 0x71c: 0x0008, 0x71d: 0x0008,
+ 0x71e: 0x0008, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x0008, 0x723: 0x0008,
+ 0x724: 0x0008, 0x725: 0x0008, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0040,
+ 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008,
+ 0x730: 0x0008, 0x731: 0x0040, 0x732: 0x0008, 0x733: 0x0008, 0x734: 0x0040, 0x735: 0x0008,
+ 0x736: 0x0008, 0x737: 0x0008, 0x738: 0x0008, 0x739: 0x0008, 0x73a: 0x0040, 0x73b: 0x0040,
+ 0x73c: 0x3308, 0x73d: 0x0008, 0x73e: 0x3008, 0x73f: 0x3308,
+ // Block 0x1d, offset 0x740
+ 0x740: 0x3008, 0x741: 0x3308, 0x742: 0x3308, 0x743: 0x3308, 0x744: 0x3308, 0x745: 0x0040,
+ 0x746: 0x0040, 0x747: 0x3008, 0x748: 0x3008, 0x749: 0x0040, 0x74a: 0x0040, 0x74b: 0x3008,
+ 0x74c: 0x3008, 0x74d: 0x3b08, 0x74e: 0x0040, 0x74f: 0x0040, 0x750: 0x0040, 0x751: 0x0040,
+ 0x752: 0x0040, 0x753: 0x0040, 0x754: 0x0040, 0x755: 0x0040, 0x756: 0x3308, 0x757: 0x3008,
+ 0x758: 0x0040, 0x759: 0x0040, 0x75a: 0x0040, 0x75b: 0x0040, 0x75c: 0x0881, 0x75d: 0x08b9,
+ 0x75e: 0x0040, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x3308, 0x763: 0x3308,
+ 0x764: 0x0040, 0x765: 0x0040, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0008,
+ 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008,
+ 0x770: 0x0018, 0x771: 0x0008, 0x772: 0x0018, 0x773: 0x0018, 0x774: 0x0018, 0x775: 0x0018,
+ 0x776: 0x0018, 0x777: 0x0018, 0x778: 0x0040, 0x779: 0x0040, 0x77a: 0x0040, 0x77b: 0x0040,
+ 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x0040, 0x77f: 0x0040,
+ // Block 0x1e, offset 0x780
+ 0x780: 0x0040, 0x781: 0x0040, 0x782: 0x3308, 0x783: 0x0008, 0x784: 0x0040, 0x785: 0x0008,
+ 0x786: 0x0008, 0x787: 0x0008, 0x788: 0x0008, 0x789: 0x0008, 0x78a: 0x0008, 0x78b: 0x0040,
+ 0x78c: 0x0040, 0x78d: 0x0040, 0x78e: 0x0008, 0x78f: 0x0008, 0x790: 0x0008, 0x791: 0x0040,
+ 0x792: 0x0008, 0x793: 0x0008, 0x794: 0x0008, 0x795: 0x0008, 0x796: 0x0040, 0x797: 0x0040,
+ 0x798: 0x0040, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0008, 0x79d: 0x0040,
+ 0x79e: 0x0008, 0x79f: 0x0008, 0x7a0: 0x0040, 0x7a1: 0x0040, 0x7a2: 0x0040, 0x7a3: 0x0008,
+ 0x7a4: 0x0008, 0x7a5: 0x0040, 0x7a6: 0x0040, 0x7a7: 0x0040, 0x7a8: 0x0008, 0x7a9: 0x0008,
+ 0x7aa: 0x0008, 0x7ab: 0x0040, 0x7ac: 0x0040, 0x7ad: 0x0040, 0x7ae: 0x0008, 0x7af: 0x0008,
+ 0x7b0: 0x0008, 0x7b1: 0x0008, 0x7b2: 0x0008, 0x7b3: 0x0008, 0x7b4: 0x0008, 0x7b5: 0x0008,
+ 0x7b6: 0x0008, 0x7b7: 0x0008, 0x7b8: 0x0008, 0x7b9: 0x0008, 0x7ba: 0x0040, 0x7bb: 0x0040,
+ 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x3008, 0x7bf: 0x3008,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x3308, 0x7c1: 0x3008, 0x7c2: 0x3008, 0x7c3: 0x3008, 0x7c4: 0x3008, 0x7c5: 0x0040,
+ 0x7c6: 0x3308, 0x7c7: 0x3308, 0x7c8: 0x3308, 0x7c9: 0x0040, 0x7ca: 0x3308, 0x7cb: 0x3308,
+ 0x7cc: 0x3308, 0x7cd: 0x3b08, 0x7ce: 0x0040, 0x7cf: 0x0040, 0x7d0: 0x0040, 0x7d1: 0x0040,
+ 0x7d2: 0x0040, 0x7d3: 0x0040, 0x7d4: 0x0040, 0x7d5: 0x3308, 0x7d6: 0x3308, 0x7d7: 0x0040,
+ 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0040, 0x7dd: 0x0040,
+ 0x7de: 0x0040, 0x7df: 0x0040, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x3308, 0x7e3: 0x3308,
+ 0x7e4: 0x0040, 0x7e5: 0x0040, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0008,
+ 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008,
+ 0x7f0: 0x0040, 0x7f1: 0x0040, 0x7f2: 0x0040, 0x7f3: 0x0040, 0x7f4: 0x0040, 0x7f5: 0x0040,
+ 0x7f6: 0x0040, 0x7f7: 0x0040, 0x7f8: 0x0018, 0x7f9: 0x0018, 0x7fa: 0x0018, 0x7fb: 0x0018,
+ 0x7fc: 0x0018, 0x7fd: 0x0018, 0x7fe: 0x0018, 0x7ff: 0x0018,
+ // Block 0x20, offset 0x800
+ 0x800: 0x0008, 0x801: 0x3308, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x0040, 0x805: 0x0008,
+ 0x806: 0x0008, 0x807: 0x0008, 0x808: 0x0008, 0x809: 0x0008, 0x80a: 0x0008, 0x80b: 0x0008,
+ 0x80c: 0x0008, 0x80d: 0x0040, 0x80e: 0x0008, 0x80f: 0x0008, 0x810: 0x0008, 0x811: 0x0040,
+ 0x812: 0x0008, 0x813: 0x0008, 0x814: 0x0008, 0x815: 0x0008, 0x816: 0x0008, 0x817: 0x0008,
+ 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0008, 0x81c: 0x0008, 0x81d: 0x0008,
+ 0x81e: 0x0008, 0x81f: 0x0008, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x0008, 0x823: 0x0008,
+ 0x824: 0x0008, 0x825: 0x0008, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0040,
+ 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008,
+ 0x830: 0x0008, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0008, 0x834: 0x0040, 0x835: 0x0008,
+ 0x836: 0x0008, 0x837: 0x0008, 0x838: 0x0008, 0x839: 0x0008, 0x83a: 0x0040, 0x83b: 0x0040,
+ 0x83c: 0x3308, 0x83d: 0x0008, 0x83e: 0x3008, 0x83f: 0x3308,
+ // Block 0x21, offset 0x840
+ 0x840: 0x3008, 0x841: 0x3008, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x3008, 0x845: 0x0040,
+ 0x846: 0x3308, 0x847: 0x3008, 0x848: 0x3008, 0x849: 0x0040, 0x84a: 0x3008, 0x84b: 0x3008,
+ 0x84c: 0x3308, 0x84d: 0x3b08, 0x84e: 0x0040, 0x84f: 0x0040, 0x850: 0x0040, 0x851: 0x0040,
+ 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0040, 0x855: 0x3008, 0x856: 0x3008, 0x857: 0x0040,
+ 0x858: 0x0040, 0x859: 0x0040, 0x85a: 0x0040, 0x85b: 0x0040, 0x85c: 0x0040, 0x85d: 0x0040,
+ 0x85e: 0x0008, 0x85f: 0x0040, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x3308, 0x863: 0x3308,
+ 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008,
+ 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008,
+ 0x870: 0x0040, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0040, 0x874: 0x0040, 0x875: 0x0040,
+ 0x876: 0x0040, 0x877: 0x0040, 0x878: 0x0040, 0x879: 0x0040, 0x87a: 0x0040, 0x87b: 0x0040,
+ 0x87c: 0x0040, 0x87d: 0x0040, 0x87e: 0x0040, 0x87f: 0x0040,
+ // Block 0x22, offset 0x880
+ 0x880: 0x3008, 0x881: 0x3308, 0x882: 0x3308, 0x883: 0x3308, 0x884: 0x3308, 0x885: 0x0040,
+ 0x886: 0x3008, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008,
+ 0x88c: 0x3008, 0x88d: 0x3b08, 0x88e: 0x0008, 0x88f: 0x0018, 0x890: 0x0040, 0x891: 0x0040,
+ 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x3008,
+ 0x898: 0x0018, 0x899: 0x0018, 0x89a: 0x0018, 0x89b: 0x0018, 0x89c: 0x0018, 0x89d: 0x0018,
+ 0x89e: 0x0018, 0x89f: 0x0008, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308,
+ 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008,
+ 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008,
+ 0x8b0: 0x0018, 0x8b1: 0x0018, 0x8b2: 0x0018, 0x8b3: 0x0018, 0x8b4: 0x0018, 0x8b5: 0x0018,
+ 0x8b6: 0x0018, 0x8b7: 0x0018, 0x8b8: 0x0018, 0x8b9: 0x0018, 0x8ba: 0x0008, 0x8bb: 0x0008,
+ 0x8bc: 0x0008, 0x8bd: 0x0008, 0x8be: 0x0008, 0x8bf: 0x0008,
+ // Block 0x23, offset 0x8c0
+ 0x8c0: 0x0040, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x0040, 0x8c4: 0x0008, 0x8c5: 0x0040,
+ 0x8c6: 0x0040, 0x8c7: 0x0008, 0x8c8: 0x0008, 0x8c9: 0x0040, 0x8ca: 0x0008, 0x8cb: 0x0040,
+ 0x8cc: 0x0040, 0x8cd: 0x0008, 0x8ce: 0x0040, 0x8cf: 0x0040, 0x8d0: 0x0040, 0x8d1: 0x0040,
+ 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0008,
+ 0x8d8: 0x0040, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0008, 0x8dd: 0x0008,
+ 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0040, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008,
+ 0x8e4: 0x0040, 0x8e5: 0x0008, 0x8e6: 0x0040, 0x8e7: 0x0008, 0x8e8: 0x0040, 0x8e9: 0x0040,
+ 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0040, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008,
+ 0x8f0: 0x0008, 0x8f1: 0x3308, 0x8f2: 0x0008, 0x8f3: 0x0929, 0x8f4: 0x3308, 0x8f5: 0x3308,
+ 0x8f6: 0x3308, 0x8f7: 0x3308, 0x8f8: 0x3308, 0x8f9: 0x3308, 0x8fa: 0x0040, 0x8fb: 0x3308,
+ 0x8fc: 0x3308, 0x8fd: 0x0008, 0x8fe: 0x0040, 0x8ff: 0x0040,
+ // Block 0x24, offset 0x900
+ 0x900: 0x0008, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x09d1, 0x904: 0x0008, 0x905: 0x0008,
+ 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0040, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008,
+ 0x90c: 0x0008, 0x90d: 0x0a09, 0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008,
+ 0x912: 0x0a41, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0a79,
+ 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0ab1, 0x91d: 0x0008,
+ 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008,
+ 0x924: 0x0008, 0x925: 0x0008, 0x926: 0x0008, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0ae9,
+ 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0040, 0x92e: 0x0040, 0x92f: 0x0040,
+ 0x930: 0x0040, 0x931: 0x3308, 0x932: 0x3308, 0x933: 0x0b21, 0x934: 0x3308, 0x935: 0x0b59,
+ 0x936: 0x0b91, 0x937: 0x0bc9, 0x938: 0x0c19, 0x939: 0x0c51, 0x93a: 0x3308, 0x93b: 0x3308,
+ 0x93c: 0x3308, 0x93d: 0x3308, 0x93e: 0x3308, 0x93f: 0x3008,
+ // Block 0x25, offset 0x940
+ 0x940: 0x3308, 0x941: 0x0ca1, 0x942: 0x3308, 0x943: 0x3308, 0x944: 0x3b08, 0x945: 0x0018,
+ 0x946: 0x3308, 0x947: 0x3308, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008,
+ 0x94c: 0x0008, 0x94d: 0x3308, 0x94e: 0x3308, 0x94f: 0x3308, 0x950: 0x3308, 0x951: 0x3308,
+ 0x952: 0x3308, 0x953: 0x0cd9, 0x954: 0x3308, 0x955: 0x3308, 0x956: 0x3308, 0x957: 0x3308,
+ 0x958: 0x0040, 0x959: 0x3308, 0x95a: 0x3308, 0x95b: 0x3308, 0x95c: 0x3308, 0x95d: 0x0d11,
+ 0x95e: 0x3308, 0x95f: 0x3308, 0x960: 0x3308, 0x961: 0x3308, 0x962: 0x0d49, 0x963: 0x3308,
+ 0x964: 0x3308, 0x965: 0x3308, 0x966: 0x3308, 0x967: 0x0d81, 0x968: 0x3308, 0x969: 0x3308,
+ 0x96a: 0x3308, 0x96b: 0x3308, 0x96c: 0x0db9, 0x96d: 0x3308, 0x96e: 0x3308, 0x96f: 0x3308,
+ 0x970: 0x3308, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x3308, 0x974: 0x3308, 0x975: 0x3308,
+ 0x976: 0x3308, 0x977: 0x3308, 0x978: 0x3308, 0x979: 0x0df1, 0x97a: 0x3308, 0x97b: 0x3308,
+ 0x97c: 0x3308, 0x97d: 0x0040, 0x97e: 0x0018, 0x97f: 0x0018,
+ // Block 0x26, offset 0x980
+ 0x980: 0x0008, 0x981: 0x0008, 0x982: 0x0008, 0x983: 0x0008, 0x984: 0x0008, 0x985: 0x0008,
+ 0x986: 0x0008, 0x987: 0x0008, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008,
+ 0x98c: 0x0008, 0x98d: 0x0008, 0x98e: 0x0008, 0x98f: 0x0008, 0x990: 0x0008, 0x991: 0x0008,
+ 0x992: 0x0008, 0x993: 0x0008, 0x994: 0x0008, 0x995: 0x0008, 0x996: 0x0008, 0x997: 0x0008,
+ 0x998: 0x0008, 0x999: 0x0008, 0x99a: 0x0008, 0x99b: 0x0008, 0x99c: 0x0008, 0x99d: 0x0008,
+ 0x99e: 0x0008, 0x99f: 0x0008, 0x9a0: 0x0008, 0x9a1: 0x0008, 0x9a2: 0x0008, 0x9a3: 0x0008,
+ 0x9a4: 0x0008, 0x9a5: 0x0008, 0x9a6: 0x0008, 0x9a7: 0x0008, 0x9a8: 0x0008, 0x9a9: 0x0008,
+ 0x9aa: 0x0008, 0x9ab: 0x0008, 0x9ac: 0x0039, 0x9ad: 0x0ed1, 0x9ae: 0x0ee9, 0x9af: 0x0008,
+ 0x9b0: 0x0ef9, 0x9b1: 0x0f09, 0x9b2: 0x0f19, 0x9b3: 0x0f31, 0x9b4: 0x0249, 0x9b5: 0x0f41,
+ 0x9b6: 0x0259, 0x9b7: 0x0f51, 0x9b8: 0x0359, 0x9b9: 0x0f61, 0x9ba: 0x0f71, 0x9bb: 0x0008,
+ 0x9bc: 0x00d9, 0x9bd: 0x0f81, 0x9be: 0x0f99, 0x9bf: 0x0269,
+ // Block 0x27, offset 0x9c0
+ 0x9c0: 0x0fa9, 0x9c1: 0x0fb9, 0x9c2: 0x0279, 0x9c3: 0x0039, 0x9c4: 0x0fc9, 0x9c5: 0x0fe1,
+ 0x9c6: 0x059d, 0x9c7: 0x0ee9, 0x9c8: 0x0ef9, 0x9c9: 0x0f09, 0x9ca: 0x0ff9, 0x9cb: 0x1011,
+ 0x9cc: 0x1029, 0x9cd: 0x0f31, 0x9ce: 0x0008, 0x9cf: 0x0f51, 0x9d0: 0x0f61, 0x9d1: 0x1041,
+ 0x9d2: 0x00d9, 0x9d3: 0x1059, 0x9d4: 0x05b5, 0x9d5: 0x05b5, 0x9d6: 0x0f99, 0x9d7: 0x0fa9,
+ 0x9d8: 0x0fb9, 0x9d9: 0x059d, 0x9da: 0x1071, 0x9db: 0x1089, 0x9dc: 0x05cd, 0x9dd: 0x1099,
+ 0x9de: 0x10b1, 0x9df: 0x10c9, 0x9e0: 0x10e1, 0x9e1: 0x10f9, 0x9e2: 0x0f41, 0x9e3: 0x0269,
+ 0x9e4: 0x0fb9, 0x9e5: 0x1089, 0x9e6: 0x1099, 0x9e7: 0x10b1, 0x9e8: 0x1111, 0x9e9: 0x10e1,
+ 0x9ea: 0x10f9, 0x9eb: 0x0008, 0x9ec: 0x0008, 0x9ed: 0x0008, 0x9ee: 0x0008, 0x9ef: 0x0008,
+ 0x9f0: 0x0008, 0x9f1: 0x0008, 0x9f2: 0x0008, 0x9f3: 0x0008, 0x9f4: 0x0008, 0x9f5: 0x0008,
+ 0x9f6: 0x0008, 0x9f7: 0x0008, 0x9f8: 0x1129, 0x9f9: 0x0008, 0x9fa: 0x0008, 0x9fb: 0x0008,
+ 0x9fc: 0x0008, 0x9fd: 0x0008, 0x9fe: 0x0008, 0x9ff: 0x0008,
+ // Block 0x28, offset 0xa00
+ 0xa00: 0x0008, 0xa01: 0x0008, 0xa02: 0x0008, 0xa03: 0x0008, 0xa04: 0x0008, 0xa05: 0x0008,
+ 0xa06: 0x0008, 0xa07: 0x0008, 0xa08: 0x0008, 0xa09: 0x0008, 0xa0a: 0x0008, 0xa0b: 0x0008,
+ 0xa0c: 0x0008, 0xa0d: 0x0008, 0xa0e: 0x0008, 0xa0f: 0x0008, 0xa10: 0x0008, 0xa11: 0x0008,
+ 0xa12: 0x0008, 0xa13: 0x0008, 0xa14: 0x0008, 0xa15: 0x0008, 0xa16: 0x0008, 0xa17: 0x0008,
+ 0xa18: 0x0008, 0xa19: 0x0008, 0xa1a: 0x0008, 0xa1b: 0x1141, 0xa1c: 0x1159, 0xa1d: 0x1169,
+ 0xa1e: 0x1181, 0xa1f: 0x1029, 0xa20: 0x1199, 0xa21: 0x11a9, 0xa22: 0x11c1, 0xa23: 0x11d9,
+ 0xa24: 0x11f1, 0xa25: 0x1209, 0xa26: 0x1221, 0xa27: 0x05e5, 0xa28: 0x1239, 0xa29: 0x1251,
+ 0xa2a: 0xe17d, 0xa2b: 0x1269, 0xa2c: 0x1281, 0xa2d: 0x1299, 0xa2e: 0x12b1, 0xa2f: 0x12c9,
+ 0xa30: 0x12e1, 0xa31: 0x12f9, 0xa32: 0x1311, 0xa33: 0x1329, 0xa34: 0x1341, 0xa35: 0x1359,
+ 0xa36: 0x1371, 0xa37: 0x1389, 0xa38: 0x05fd, 0xa39: 0x13a1, 0xa3a: 0x13b9, 0xa3b: 0x13d1,
+ 0xa3c: 0x13e1, 0xa3d: 0x13f9, 0xa3e: 0x1411, 0xa3f: 0x1429,
+ // Block 0x29, offset 0xa40
+ 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008,
+ 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008,
+ 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008,
+ 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0xe00d, 0xa57: 0x0008,
+ 0xa58: 0xe00d, 0xa59: 0x0008, 0xa5a: 0xe00d, 0xa5b: 0x0008, 0xa5c: 0xe00d, 0xa5d: 0x0008,
+ 0xa5e: 0xe00d, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008,
+ 0xa64: 0xe00d, 0xa65: 0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008,
+ 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008,
+ 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008,
+ 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008,
+ 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008,
+ // Block 0x2a, offset 0xa80
+ 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008,
+ 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008,
+ 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008,
+ 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008,
+ 0xa98: 0x0008, 0xa99: 0x0008, 0xa9a: 0x0615, 0xa9b: 0x0635, 0xa9c: 0x0008, 0xa9d: 0x0008,
+ 0xa9e: 0x1441, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008,
+ 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008,
+ 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008,
+ 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008,
+ 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008,
+ 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008,
+ // Block 0x2b, offset 0xac0
+ 0xac0: 0x0008, 0xac1: 0x0008, 0xac2: 0x0008, 0xac3: 0x0008, 0xac4: 0x0008, 0xac5: 0x0008,
+ 0xac6: 0x0040, 0xac7: 0x0040, 0xac8: 0xe045, 0xac9: 0xe045, 0xaca: 0xe045, 0xacb: 0xe045,
+ 0xacc: 0xe045, 0xacd: 0xe045, 0xace: 0x0040, 0xacf: 0x0040, 0xad0: 0x0008, 0xad1: 0x0008,
+ 0xad2: 0x0008, 0xad3: 0x0008, 0xad4: 0x0008, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008,
+ 0xad8: 0x0040, 0xad9: 0xe045, 0xada: 0x0040, 0xadb: 0xe045, 0xadc: 0x0040, 0xadd: 0xe045,
+ 0xade: 0x0040, 0xadf: 0xe045, 0xae0: 0x0008, 0xae1: 0x0008, 0xae2: 0x0008, 0xae3: 0x0008,
+ 0xae4: 0x0008, 0xae5: 0x0008, 0xae6: 0x0008, 0xae7: 0x0008, 0xae8: 0xe045, 0xae9: 0xe045,
+ 0xaea: 0xe045, 0xaeb: 0xe045, 0xaec: 0xe045, 0xaed: 0xe045, 0xaee: 0xe045, 0xaef: 0xe045,
+ 0xaf0: 0x0008, 0xaf1: 0x1459, 0xaf2: 0x0008, 0xaf3: 0x1471, 0xaf4: 0x0008, 0xaf5: 0x1489,
+ 0xaf6: 0x0008, 0xaf7: 0x14a1, 0xaf8: 0x0008, 0xaf9: 0x14b9, 0xafa: 0x0008, 0xafb: 0x14d1,
+ 0xafc: 0x0008, 0xafd: 0x14e9, 0xafe: 0x0040, 0xaff: 0x0040,
+ // Block 0x2c, offset 0xb00
+ 0xb00: 0x1501, 0xb01: 0x1531, 0xb02: 0x1561, 0xb03: 0x1591, 0xb04: 0x15c1, 0xb05: 0x15f1,
+ 0xb06: 0x1621, 0xb07: 0x1651, 0xb08: 0x1501, 0xb09: 0x1531, 0xb0a: 0x1561, 0xb0b: 0x1591,
+ 0xb0c: 0x15c1, 0xb0d: 0x15f1, 0xb0e: 0x1621, 0xb0f: 0x1651, 0xb10: 0x1681, 0xb11: 0x16b1,
+ 0xb12: 0x16e1, 0xb13: 0x1711, 0xb14: 0x1741, 0xb15: 0x1771, 0xb16: 0x17a1, 0xb17: 0x17d1,
+ 0xb18: 0x1681, 0xb19: 0x16b1, 0xb1a: 0x16e1, 0xb1b: 0x1711, 0xb1c: 0x1741, 0xb1d: 0x1771,
+ 0xb1e: 0x17a1, 0xb1f: 0x17d1, 0xb20: 0x1801, 0xb21: 0x1831, 0xb22: 0x1861, 0xb23: 0x1891,
+ 0xb24: 0x18c1, 0xb25: 0x18f1, 0xb26: 0x1921, 0xb27: 0x1951, 0xb28: 0x1801, 0xb29: 0x1831,
+ 0xb2a: 0x1861, 0xb2b: 0x1891, 0xb2c: 0x18c1, 0xb2d: 0x18f1, 0xb2e: 0x1921, 0xb2f: 0x1951,
+ 0xb30: 0x0008, 0xb31: 0x0008, 0xb32: 0x1981, 0xb33: 0x19b1, 0xb34: 0x19d9, 0xb35: 0x0040,
+ 0xb36: 0x0008, 0xb37: 0x1a01, 0xb38: 0xe045, 0xb39: 0xe045, 0xb3a: 0x064d, 0xb3b: 0x1459,
+ 0xb3c: 0x19b1, 0xb3d: 0x0666, 0xb3e: 0x1a31, 0xb3f: 0x0686,
+ // Block 0x2d, offset 0xb40
+ 0xb40: 0x06a6, 0xb41: 0x1a4a, 0xb42: 0x1a79, 0xb43: 0x1aa9, 0xb44: 0x1ad1, 0xb45: 0x0040,
+ 0xb46: 0x0008, 0xb47: 0x1af9, 0xb48: 0x06c5, 0xb49: 0x1471, 0xb4a: 0x06dd, 0xb4b: 0x1489,
+ 0xb4c: 0x1aa9, 0xb4d: 0x1b2a, 0xb4e: 0x1b5a, 0xb4f: 0x1b8a, 0xb50: 0x0008, 0xb51: 0x0008,
+ 0xb52: 0x0008, 0xb53: 0x1bb9, 0xb54: 0x0040, 0xb55: 0x0040, 0xb56: 0x0008, 0xb57: 0x0008,
+ 0xb58: 0xe045, 0xb59: 0xe045, 0xb5a: 0x06f5, 0xb5b: 0x14a1, 0xb5c: 0x0040, 0xb5d: 0x1bd2,
+ 0xb5e: 0x1c02, 0xb5f: 0x1c32, 0xb60: 0x0008, 0xb61: 0x0008, 0xb62: 0x0008, 0xb63: 0x1c61,
+ 0xb64: 0x0008, 0xb65: 0x0008, 0xb66: 0x0008, 0xb67: 0x0008, 0xb68: 0xe045, 0xb69: 0xe045,
+ 0xb6a: 0x070d, 0xb6b: 0x14d1, 0xb6c: 0xe04d, 0xb6d: 0x1c7a, 0xb6e: 0x03d2, 0xb6f: 0x1caa,
+ 0xb70: 0x0040, 0xb71: 0x0040, 0xb72: 0x1cb9, 0xb73: 0x1ce9, 0xb74: 0x1d11, 0xb75: 0x0040,
+ 0xb76: 0x0008, 0xb77: 0x1d39, 0xb78: 0x0725, 0xb79: 0x14b9, 0xb7a: 0x0515, 0xb7b: 0x14e9,
+ 0xb7c: 0x1ce9, 0xb7d: 0x073e, 0xb7e: 0x075e, 0xb7f: 0x0040,
+ // Block 0x2e, offset 0xb80
+ 0xb80: 0x000a, 0xb81: 0x000a, 0xb82: 0x000a, 0xb83: 0x000a, 0xb84: 0x000a, 0xb85: 0x000a,
+ 0xb86: 0x000a, 0xb87: 0x000a, 0xb88: 0x000a, 0xb89: 0x000a, 0xb8a: 0x000a, 0xb8b: 0x03c0,
+ 0xb8c: 0x0003, 0xb8d: 0x0003, 0xb8e: 0x0340, 0xb8f: 0x0b40, 0xb90: 0x0018, 0xb91: 0xe00d,
+ 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x077e,
+ 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018,
+ 0xb9e: 0x0018, 0xb9f: 0x0018, 0xba0: 0x0018, 0xba1: 0x0018, 0xba2: 0x0018, 0xba3: 0x0018,
+ 0xba4: 0x0040, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0018, 0xba8: 0x0040, 0xba9: 0x0040,
+ 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x000a,
+ 0xbb0: 0x0018, 0xbb1: 0x0018, 0xbb2: 0x0018, 0xbb3: 0x1d69, 0xbb4: 0x1da1, 0xbb5: 0x0018,
+ 0xbb6: 0x1df1, 0xbb7: 0x1e29, 0xbb8: 0x0018, 0xbb9: 0x0018, 0xbba: 0x0018, 0xbbb: 0x0018,
+ 0xbbc: 0x1e7a, 0xbbd: 0x0018, 0xbbe: 0x079e, 0xbbf: 0x0018,
+ // Block 0x2f, offset 0xbc0
+ 0xbc0: 0x0018, 0xbc1: 0x0018, 0xbc2: 0x0018, 0xbc3: 0x0018, 0xbc4: 0x0018, 0xbc5: 0x0018,
+ 0xbc6: 0x0018, 0xbc7: 0x1e92, 0xbc8: 0x1eaa, 0xbc9: 0x1ec2, 0xbca: 0x0018, 0xbcb: 0x0018,
+ 0xbcc: 0x0018, 0xbcd: 0x0018, 0xbce: 0x0018, 0xbcf: 0x0018, 0xbd0: 0x0018, 0xbd1: 0x0018,
+ 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x1ed9,
+ 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018,
+ 0xbde: 0x0018, 0xbdf: 0x000a, 0xbe0: 0x03c0, 0xbe1: 0x0340, 0xbe2: 0x0340, 0xbe3: 0x0340,
+ 0xbe4: 0x03c0, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0040, 0xbe8: 0x0040, 0xbe9: 0x0040,
+ 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x0340,
+ 0xbf0: 0x1f41, 0xbf1: 0x0f41, 0xbf2: 0x0040, 0xbf3: 0x0040, 0xbf4: 0x1f51, 0xbf5: 0x1f61,
+ 0xbf6: 0x1f71, 0xbf7: 0x1f81, 0xbf8: 0x1f91, 0xbf9: 0x1fa1, 0xbfa: 0x1fb2, 0xbfb: 0x07bd,
+ 0xbfc: 0x1fc2, 0xbfd: 0x1fd2, 0xbfe: 0x1fe2, 0xbff: 0x0f71,
+ // Block 0x30, offset 0xc00
+ 0xc00: 0x1f41, 0xc01: 0x00c9, 0xc02: 0x0069, 0xc03: 0x0079, 0xc04: 0x1f51, 0xc05: 0x1f61,
+ 0xc06: 0x1f71, 0xc07: 0x1f81, 0xc08: 0x1f91, 0xc09: 0x1fa1, 0xc0a: 0x1fb2, 0xc0b: 0x07d5,
+ 0xc0c: 0x1fc2, 0xc0d: 0x1fd2, 0xc0e: 0x1fe2, 0xc0f: 0x0040, 0xc10: 0x0039, 0xc11: 0x0f09,
+ 0xc12: 0x00d9, 0xc13: 0x0369, 0xc14: 0x0ff9, 0xc15: 0x0249, 0xc16: 0x0f51, 0xc17: 0x0359,
+ 0xc18: 0x0f61, 0xc19: 0x0f71, 0xc1a: 0x0f99, 0xc1b: 0x01d9, 0xc1c: 0x0fa9, 0xc1d: 0x0040,
+ 0xc1e: 0x0040, 0xc1f: 0x0040, 0xc20: 0x0018, 0xc21: 0x0018, 0xc22: 0x0018, 0xc23: 0x0018,
+ 0xc24: 0x0018, 0xc25: 0x0018, 0xc26: 0x0018, 0xc27: 0x0018, 0xc28: 0x1ff1, 0xc29: 0x0018,
+ 0xc2a: 0x0018, 0xc2b: 0x0018, 0xc2c: 0x0018, 0xc2d: 0x0018, 0xc2e: 0x0018, 0xc2f: 0x0018,
+ 0xc30: 0x0018, 0xc31: 0x0018, 0xc32: 0x0018, 0xc33: 0x0018, 0xc34: 0x0018, 0xc35: 0x0018,
+ 0xc36: 0x0018, 0xc37: 0x0018, 0xc38: 0x0018, 0xc39: 0x0018, 0xc3a: 0x0018, 0xc3b: 0x0018,
+ 0xc3c: 0x0018, 0xc3d: 0x0018, 0xc3e: 0x0018, 0xc3f: 0x0040,
+ // Block 0x31, offset 0xc40
+ 0xc40: 0x07ee, 0xc41: 0x080e, 0xc42: 0x1159, 0xc43: 0x082d, 0xc44: 0x0018, 0xc45: 0x084e,
+ 0xc46: 0x086e, 0xc47: 0x1011, 0xc48: 0x0018, 0xc49: 0x088d, 0xc4a: 0x0f31, 0xc4b: 0x0249,
+ 0xc4c: 0x0249, 0xc4d: 0x0249, 0xc4e: 0x0249, 0xc4f: 0x2009, 0xc50: 0x0f41, 0xc51: 0x0f41,
+ 0xc52: 0x0359, 0xc53: 0x0359, 0xc54: 0x0018, 0xc55: 0x0f71, 0xc56: 0x2021, 0xc57: 0x0018,
+ 0xc58: 0x0018, 0xc59: 0x0f99, 0xc5a: 0x2039, 0xc5b: 0x0269, 0xc5c: 0x0269, 0xc5d: 0x0269,
+ 0xc5e: 0x0018, 0xc5f: 0x0018, 0xc60: 0x2049, 0xc61: 0x08ad, 0xc62: 0x2061, 0xc63: 0x0018,
+ 0xc64: 0x13d1, 0xc65: 0x0018, 0xc66: 0x2079, 0xc67: 0x0018, 0xc68: 0x13d1, 0xc69: 0x0018,
+ 0xc6a: 0x0f51, 0xc6b: 0x2091, 0xc6c: 0x0ee9, 0xc6d: 0x1159, 0xc6e: 0x0018, 0xc6f: 0x0f09,
+ 0xc70: 0x0f09, 0xc71: 0x1199, 0xc72: 0x0040, 0xc73: 0x0f61, 0xc74: 0x00d9, 0xc75: 0x20a9,
+ 0xc76: 0x20c1, 0xc77: 0x20d9, 0xc78: 0x20f1, 0xc79: 0x0f41, 0xc7a: 0x0018, 0xc7b: 0x08cd,
+ 0xc7c: 0x2109, 0xc7d: 0x10b1, 0xc7e: 0x10b1, 0xc7f: 0x2109,
+ // Block 0x32, offset 0xc80
+ 0xc80: 0x08ed, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0ef9,
+ 0xc86: 0x0ef9, 0xc87: 0x0f09, 0xc88: 0x0f41, 0xc89: 0x0259, 0xc8a: 0x0018, 0xc8b: 0x0018,
+ 0xc8c: 0x0018, 0xc8d: 0x0018, 0xc8e: 0x0008, 0xc8f: 0x0018, 0xc90: 0x2121, 0xc91: 0x2151,
+ 0xc92: 0x2181, 0xc93: 0x21b9, 0xc94: 0x21e9, 0xc95: 0x2219, 0xc96: 0x2249, 0xc97: 0x2279,
+ 0xc98: 0x22a9, 0xc99: 0x22d9, 0xc9a: 0x2309, 0xc9b: 0x2339, 0xc9c: 0x2369, 0xc9d: 0x2399,
+ 0xc9e: 0x23c9, 0xc9f: 0x23f9, 0xca0: 0x0f41, 0xca1: 0x2421, 0xca2: 0x0905, 0xca3: 0x2439,
+ 0xca4: 0x1089, 0xca5: 0x2451, 0xca6: 0x0925, 0xca7: 0x2469, 0xca8: 0x2491, 0xca9: 0x0369,
+ 0xcaa: 0x24a9, 0xcab: 0x0945, 0xcac: 0x0359, 0xcad: 0x1159, 0xcae: 0x0ef9, 0xcaf: 0x0f61,
+ 0xcb0: 0x0f41, 0xcb1: 0x2421, 0xcb2: 0x0965, 0xcb3: 0x2439, 0xcb4: 0x1089, 0xcb5: 0x2451,
+ 0xcb6: 0x0985, 0xcb7: 0x2469, 0xcb8: 0x2491, 0xcb9: 0x0369, 0xcba: 0x24a9, 0xcbb: 0x09a5,
+ 0xcbc: 0x0359, 0xcbd: 0x1159, 0xcbe: 0x0ef9, 0xcbf: 0x0f61,
+ // Block 0x33, offset 0xcc0
+ 0xcc0: 0x0018, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0018,
+ 0xcc6: 0x0018, 0xcc7: 0x0018, 0xcc8: 0x0018, 0xcc9: 0x0018, 0xcca: 0x0018, 0xccb: 0x0040,
+ 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040,
+ 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040,
+ 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0040, 0xcdd: 0x0040,
+ 0xcde: 0x0040, 0xcdf: 0x0040, 0xce0: 0x00c9, 0xce1: 0x0069, 0xce2: 0x0079, 0xce3: 0x1f51,
+ 0xce4: 0x1f61, 0xce5: 0x1f71, 0xce6: 0x1f81, 0xce7: 0x1f91, 0xce8: 0x1fa1, 0xce9: 0x2601,
+ 0xcea: 0x2619, 0xceb: 0x2631, 0xcec: 0x2649, 0xced: 0x2661, 0xcee: 0x2679, 0xcef: 0x2691,
+ 0xcf0: 0x26a9, 0xcf1: 0x26c1, 0xcf2: 0x26d9, 0xcf3: 0x26f1, 0xcf4: 0x0a06, 0xcf5: 0x0a26,
+ 0xcf6: 0x0a46, 0xcf7: 0x0a66, 0xcf8: 0x0a86, 0xcf9: 0x0aa6, 0xcfa: 0x0ac6, 0xcfb: 0x0ae6,
+ 0xcfc: 0x0b06, 0xcfd: 0x270a, 0xcfe: 0x2732, 0xcff: 0x275a,
+ // Block 0x34, offset 0xd00
+ 0xd00: 0x2782, 0xd01: 0x27aa, 0xd02: 0x27d2, 0xd03: 0x27fa, 0xd04: 0x2822, 0xd05: 0x284a,
+ 0xd06: 0x2872, 0xd07: 0x289a, 0xd08: 0x0040, 0xd09: 0x0040, 0xd0a: 0x0040, 0xd0b: 0x0040,
+ 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040,
+ 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040,
+ 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0b26, 0xd1d: 0x0b46,
+ 0xd1e: 0x0b66, 0xd1f: 0x0b86, 0xd20: 0x0ba6, 0xd21: 0x0bc6, 0xd22: 0x0be6, 0xd23: 0x0c06,
+ 0xd24: 0x0c26, 0xd25: 0x0c46, 0xd26: 0x0c66, 0xd27: 0x0c86, 0xd28: 0x0ca6, 0xd29: 0x0cc6,
+ 0xd2a: 0x0ce6, 0xd2b: 0x0d06, 0xd2c: 0x0d26, 0xd2d: 0x0d46, 0xd2e: 0x0d66, 0xd2f: 0x0d86,
+ 0xd30: 0x0da6, 0xd31: 0x0dc6, 0xd32: 0x0de6, 0xd33: 0x0e06, 0xd34: 0x0e26, 0xd35: 0x0e46,
+ 0xd36: 0x0039, 0xd37: 0x0ee9, 0xd38: 0x1159, 0xd39: 0x0ef9, 0xd3a: 0x0f09, 0xd3b: 0x1199,
+ 0xd3c: 0x0f31, 0xd3d: 0x0249, 0xd3e: 0x0f41, 0xd3f: 0x0259,
+ // Block 0x35, offset 0xd40
+ 0xd40: 0x0f51, 0xd41: 0x0359, 0xd42: 0x0f61, 0xd43: 0x0f71, 0xd44: 0x00d9, 0xd45: 0x0f99,
+ 0xd46: 0x2039, 0xd47: 0x0269, 0xd48: 0x01d9, 0xd49: 0x0fa9, 0xd4a: 0x0fb9, 0xd4b: 0x1089,
+ 0xd4c: 0x0279, 0xd4d: 0x0369, 0xd4e: 0x0289, 0xd4f: 0x13d1, 0xd50: 0x0039, 0xd51: 0x0ee9,
+ 0xd52: 0x1159, 0xd53: 0x0ef9, 0xd54: 0x0f09, 0xd55: 0x1199, 0xd56: 0x0f31, 0xd57: 0x0249,
+ 0xd58: 0x0f41, 0xd59: 0x0259, 0xd5a: 0x0f51, 0xd5b: 0x0359, 0xd5c: 0x0f61, 0xd5d: 0x0f71,
+ 0xd5e: 0x00d9, 0xd5f: 0x0f99, 0xd60: 0x2039, 0xd61: 0x0269, 0xd62: 0x01d9, 0xd63: 0x0fa9,
+ 0xd64: 0x0fb9, 0xd65: 0x1089, 0xd66: 0x0279, 0xd67: 0x0369, 0xd68: 0x0289, 0xd69: 0x13d1,
+ 0xd6a: 0x1f41, 0xd6b: 0x0018, 0xd6c: 0x0018, 0xd6d: 0x0018, 0xd6e: 0x0018, 0xd6f: 0x0018,
+ 0xd70: 0x0018, 0xd71: 0x0018, 0xd72: 0x0018, 0xd73: 0x0018, 0xd74: 0x0018, 0xd75: 0x0018,
+ 0xd76: 0x0018, 0xd77: 0x0018, 0xd78: 0x0018, 0xd79: 0x0018, 0xd7a: 0x0018, 0xd7b: 0x0018,
+ 0xd7c: 0x0018, 0xd7d: 0x0018, 0xd7e: 0x0018, 0xd7f: 0x0018,
+ // Block 0x36, offset 0xd80
+ 0xd80: 0x0008, 0xd81: 0x0008, 0xd82: 0x0008, 0xd83: 0x0008, 0xd84: 0x0008, 0xd85: 0x0008,
+ 0xd86: 0x0008, 0xd87: 0x0008, 0xd88: 0x0008, 0xd89: 0x0008, 0xd8a: 0x0008, 0xd8b: 0x0008,
+ 0xd8c: 0x0008, 0xd8d: 0x0008, 0xd8e: 0x0008, 0xd8f: 0x0008, 0xd90: 0x0008, 0xd91: 0x0008,
+ 0xd92: 0x0008, 0xd93: 0x0008, 0xd94: 0x0008, 0xd95: 0x0008, 0xd96: 0x0008, 0xd97: 0x0008,
+ 0xd98: 0x0008, 0xd99: 0x0008, 0xd9a: 0x0008, 0xd9b: 0x0008, 0xd9c: 0x0008, 0xd9d: 0x0008,
+ 0xd9e: 0x0008, 0xd9f: 0x0040, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0x2971, 0xda3: 0x0ebd,
+ 0xda4: 0x2989, 0xda5: 0x0008, 0xda6: 0x0008, 0xda7: 0xe07d, 0xda8: 0x0008, 0xda9: 0xe01d,
+ 0xdaa: 0x0008, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0x0fe1, 0xdae: 0x1281, 0xdaf: 0x0fc9,
+ 0xdb0: 0x1141, 0xdb1: 0x0008, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0008, 0xdb5: 0xe01d,
+ 0xdb6: 0x0008, 0xdb7: 0x0008, 0xdb8: 0x0008, 0xdb9: 0x0008, 0xdba: 0x0008, 0xdbb: 0x0008,
+ 0xdbc: 0x0259, 0xdbd: 0x1089, 0xdbe: 0x29a1, 0xdbf: 0x29b9,
+ // Block 0x37, offset 0xdc0
+ 0xdc0: 0xe00d, 0xdc1: 0x0008, 0xdc2: 0xe00d, 0xdc3: 0x0008, 0xdc4: 0xe00d, 0xdc5: 0x0008,
+ 0xdc6: 0xe00d, 0xdc7: 0x0008, 0xdc8: 0xe00d, 0xdc9: 0x0008, 0xdca: 0xe00d, 0xdcb: 0x0008,
+ 0xdcc: 0xe00d, 0xdcd: 0x0008, 0xdce: 0xe00d, 0xdcf: 0x0008, 0xdd0: 0xe00d, 0xdd1: 0x0008,
+ 0xdd2: 0xe00d, 0xdd3: 0x0008, 0xdd4: 0xe00d, 0xdd5: 0x0008, 0xdd6: 0xe00d, 0xdd7: 0x0008,
+ 0xdd8: 0xe00d, 0xdd9: 0x0008, 0xdda: 0xe00d, 0xddb: 0x0008, 0xddc: 0xe00d, 0xddd: 0x0008,
+ 0xdde: 0xe00d, 0xddf: 0x0008, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0xe00d, 0xde3: 0x0008,
+ 0xde4: 0x0008, 0xde5: 0x0018, 0xde6: 0x0018, 0xde7: 0x0018, 0xde8: 0x0018, 0xde9: 0x0018,
+ 0xdea: 0x0018, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0xe01d, 0xdee: 0x0008, 0xdef: 0x3308,
+ 0xdf0: 0x3308, 0xdf1: 0x3308, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0040, 0xdf5: 0x0040,
+ 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0018, 0xdfa: 0x0018, 0xdfb: 0x0018,
+ 0xdfc: 0x0018, 0xdfd: 0x0018, 0xdfe: 0x0018, 0xdff: 0x0018,
+ // Block 0x38, offset 0xe00
+ 0xe00: 0x26fd, 0xe01: 0x271d, 0xe02: 0x273d, 0xe03: 0x275d, 0xe04: 0x277d, 0xe05: 0x279d,
+ 0xe06: 0x27bd, 0xe07: 0x27dd, 0xe08: 0x27fd, 0xe09: 0x281d, 0xe0a: 0x283d, 0xe0b: 0x285d,
+ 0xe0c: 0x287d, 0xe0d: 0x289d, 0xe0e: 0x28bd, 0xe0f: 0x28dd, 0xe10: 0x28fd, 0xe11: 0x291d,
+ 0xe12: 0x293d, 0xe13: 0x295d, 0xe14: 0x297d, 0xe15: 0x299d, 0xe16: 0x0040, 0xe17: 0x0040,
+ 0xe18: 0x0040, 0xe19: 0x0040, 0xe1a: 0x0040, 0xe1b: 0x0040, 0xe1c: 0x0040, 0xe1d: 0x0040,
+ 0xe1e: 0x0040, 0xe1f: 0x0040, 0xe20: 0x0040, 0xe21: 0x0040, 0xe22: 0x0040, 0xe23: 0x0040,
+ 0xe24: 0x0040, 0xe25: 0x0040, 0xe26: 0x0040, 0xe27: 0x0040, 0xe28: 0x0040, 0xe29: 0x0040,
+ 0xe2a: 0x0040, 0xe2b: 0x0040, 0xe2c: 0x0040, 0xe2d: 0x0040, 0xe2e: 0x0040, 0xe2f: 0x0040,
+ 0xe30: 0x0040, 0xe31: 0x0040, 0xe32: 0x0040, 0xe33: 0x0040, 0xe34: 0x0040, 0xe35: 0x0040,
+ 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0040, 0xe3a: 0x0040, 0xe3b: 0x0040,
+ 0xe3c: 0x0040, 0xe3d: 0x0040, 0xe3e: 0x0040, 0xe3f: 0x0040,
+ // Block 0x39, offset 0xe40
+ 0xe40: 0x000a, 0xe41: 0x0018, 0xe42: 0x29d1, 0xe43: 0x0018, 0xe44: 0x0018, 0xe45: 0x0008,
+ 0xe46: 0x0008, 0xe47: 0x0008, 0xe48: 0x0018, 0xe49: 0x0018, 0xe4a: 0x0018, 0xe4b: 0x0018,
+ 0xe4c: 0x0018, 0xe4d: 0x0018, 0xe4e: 0x0018, 0xe4f: 0x0018, 0xe50: 0x0018, 0xe51: 0x0018,
+ 0xe52: 0x0018, 0xe53: 0x0018, 0xe54: 0x0018, 0xe55: 0x0018, 0xe56: 0x0018, 0xe57: 0x0018,
+ 0xe58: 0x0018, 0xe59: 0x0018, 0xe5a: 0x0018, 0xe5b: 0x0018, 0xe5c: 0x0018, 0xe5d: 0x0018,
+ 0xe5e: 0x0018, 0xe5f: 0x0018, 0xe60: 0x0018, 0xe61: 0x0018, 0xe62: 0x0018, 0xe63: 0x0018,
+ 0xe64: 0x0018, 0xe65: 0x0018, 0xe66: 0x0018, 0xe67: 0x0018, 0xe68: 0x0018, 0xe69: 0x0018,
+ 0xe6a: 0x3308, 0xe6b: 0x3308, 0xe6c: 0x3308, 0xe6d: 0x3308, 0xe6e: 0x3018, 0xe6f: 0x3018,
+ 0xe70: 0x0018, 0xe71: 0x0018, 0xe72: 0x0018, 0xe73: 0x0018, 0xe74: 0x0018, 0xe75: 0x0018,
+ 0xe76: 0xe125, 0xe77: 0x0018, 0xe78: 0x29bd, 0xe79: 0x29dd, 0xe7a: 0x29fd, 0xe7b: 0x0018,
+ 0xe7c: 0x0008, 0xe7d: 0x0018, 0xe7e: 0x0018, 0xe7f: 0x0018,
+ // Block 0x3a, offset 0xe80
+ 0xe80: 0x2b3d, 0xe81: 0x2b5d, 0xe82: 0x2b7d, 0xe83: 0x2b9d, 0xe84: 0x2bbd, 0xe85: 0x2bdd,
+ 0xe86: 0x2bdd, 0xe87: 0x2bdd, 0xe88: 0x2bfd, 0xe89: 0x2bfd, 0xe8a: 0x2bfd, 0xe8b: 0x2bfd,
+ 0xe8c: 0x2c1d, 0xe8d: 0x2c1d, 0xe8e: 0x2c1d, 0xe8f: 0x2c3d, 0xe90: 0x2c5d, 0xe91: 0x2c5d,
+ 0xe92: 0x2a7d, 0xe93: 0x2a7d, 0xe94: 0x2c5d, 0xe95: 0x2c5d, 0xe96: 0x2c7d, 0xe97: 0x2c7d,
+ 0xe98: 0x2c5d, 0xe99: 0x2c5d, 0xe9a: 0x2a7d, 0xe9b: 0x2a7d, 0xe9c: 0x2c5d, 0xe9d: 0x2c5d,
+ 0xe9e: 0x2c3d, 0xe9f: 0x2c3d, 0xea0: 0x2c9d, 0xea1: 0x2c9d, 0xea2: 0x2cbd, 0xea3: 0x2cbd,
+ 0xea4: 0x0040, 0xea5: 0x2cdd, 0xea6: 0x2cfd, 0xea7: 0x2d1d, 0xea8: 0x2d1d, 0xea9: 0x2d3d,
+ 0xeaa: 0x2d5d, 0xeab: 0x2d7d, 0xeac: 0x2d9d, 0xead: 0x2dbd, 0xeae: 0x2ddd, 0xeaf: 0x2dfd,
+ 0xeb0: 0x2e1d, 0xeb1: 0x2e3d, 0xeb2: 0x2e3d, 0xeb3: 0x2e5d, 0xeb4: 0x2e7d, 0xeb5: 0x2e7d,
+ 0xeb6: 0x2e9d, 0xeb7: 0x2ebd, 0xeb8: 0x2e5d, 0xeb9: 0x2edd, 0xeba: 0x2efd, 0xebb: 0x2edd,
+ 0xebc: 0x2e5d, 0xebd: 0x2f1d, 0xebe: 0x2f3d, 0xebf: 0x2f5d,
+ // Block 0x3b, offset 0xec0
+ 0xec0: 0x2f7d, 0xec1: 0x2f9d, 0xec2: 0x2cfd, 0xec3: 0x2cdd, 0xec4: 0x2fbd, 0xec5: 0x2fdd,
+ 0xec6: 0x2ffd, 0xec7: 0x301d, 0xec8: 0x303d, 0xec9: 0x305d, 0xeca: 0x307d, 0xecb: 0x309d,
+ 0xecc: 0x30bd, 0xecd: 0x30dd, 0xece: 0x30fd, 0xecf: 0x0040, 0xed0: 0x0018, 0xed1: 0x0018,
+ 0xed2: 0x311d, 0xed3: 0x313d, 0xed4: 0x315d, 0xed5: 0x317d, 0xed6: 0x319d, 0xed7: 0x31bd,
+ 0xed8: 0x31dd, 0xed9: 0x31fd, 0xeda: 0x321d, 0xedb: 0x323d, 0xedc: 0x315d, 0xedd: 0x325d,
+ 0xede: 0x327d, 0xedf: 0x329d, 0xee0: 0x0008, 0xee1: 0x0008, 0xee2: 0x0008, 0xee3: 0x0008,
+ 0xee4: 0x0008, 0xee5: 0x0008, 0xee6: 0x0008, 0xee7: 0x0008, 0xee8: 0x0008, 0xee9: 0x0008,
+ 0xeea: 0x0008, 0xeeb: 0x0008, 0xeec: 0x0008, 0xeed: 0x0008, 0xeee: 0x0008, 0xeef: 0x0008,
+ 0xef0: 0x0008, 0xef1: 0x0008, 0xef2: 0x0008, 0xef3: 0x0008, 0xef4: 0x0008, 0xef5: 0x0008,
+ 0xef6: 0x0008, 0xef7: 0x0008, 0xef8: 0x0008, 0xef9: 0x0008, 0xefa: 0x0008, 0xefb: 0x0040,
+ 0xefc: 0x0040, 0xefd: 0x0040, 0xefe: 0x0040, 0xeff: 0x0040,
+ // Block 0x3c, offset 0xf00
+ 0xf00: 0x36a2, 0xf01: 0x36d2, 0xf02: 0x3702, 0xf03: 0x3732, 0xf04: 0x32bd, 0xf05: 0x32dd,
+ 0xf06: 0x32fd, 0xf07: 0x331d, 0xf08: 0x0018, 0xf09: 0x0018, 0xf0a: 0x0018, 0xf0b: 0x0018,
+ 0xf0c: 0x0018, 0xf0d: 0x0018, 0xf0e: 0x0018, 0xf0f: 0x0018, 0xf10: 0x333d, 0xf11: 0x3761,
+ 0xf12: 0x3779, 0xf13: 0x3791, 0xf14: 0x37a9, 0xf15: 0x37c1, 0xf16: 0x37d9, 0xf17: 0x37f1,
+ 0xf18: 0x3809, 0xf19: 0x3821, 0xf1a: 0x3839, 0xf1b: 0x3851, 0xf1c: 0x3869, 0xf1d: 0x3881,
+ 0xf1e: 0x3899, 0xf1f: 0x38b1, 0xf20: 0x335d, 0xf21: 0x337d, 0xf22: 0x339d, 0xf23: 0x33bd,
+ 0xf24: 0x33dd, 0xf25: 0x33dd, 0xf26: 0x33fd, 0xf27: 0x341d, 0xf28: 0x343d, 0xf29: 0x345d,
+ 0xf2a: 0x347d, 0xf2b: 0x349d, 0xf2c: 0x34bd, 0xf2d: 0x34dd, 0xf2e: 0x34fd, 0xf2f: 0x351d,
+ 0xf30: 0x353d, 0xf31: 0x355d, 0xf32: 0x357d, 0xf33: 0x359d, 0xf34: 0x35bd, 0xf35: 0x35dd,
+ 0xf36: 0x35fd, 0xf37: 0x361d, 0xf38: 0x363d, 0xf39: 0x365d, 0xf3a: 0x367d, 0xf3b: 0x369d,
+ 0xf3c: 0x38c9, 0xf3d: 0x3901, 0xf3e: 0x36bd, 0xf3f: 0x0018,
+ // Block 0x3d, offset 0xf40
+ 0xf40: 0x36dd, 0xf41: 0x36fd, 0xf42: 0x371d, 0xf43: 0x373d, 0xf44: 0x375d, 0xf45: 0x377d,
+ 0xf46: 0x379d, 0xf47: 0x37bd, 0xf48: 0x37dd, 0xf49: 0x37fd, 0xf4a: 0x381d, 0xf4b: 0x383d,
+ 0xf4c: 0x385d, 0xf4d: 0x387d, 0xf4e: 0x389d, 0xf4f: 0x38bd, 0xf50: 0x38dd, 0xf51: 0x38fd,
+ 0xf52: 0x391d, 0xf53: 0x393d, 0xf54: 0x395d, 0xf55: 0x397d, 0xf56: 0x399d, 0xf57: 0x39bd,
+ 0xf58: 0x39dd, 0xf59: 0x39fd, 0xf5a: 0x3a1d, 0xf5b: 0x3a3d, 0xf5c: 0x3a5d, 0xf5d: 0x3a7d,
+ 0xf5e: 0x3a9d, 0xf5f: 0x3abd, 0xf60: 0x3add, 0xf61: 0x3afd, 0xf62: 0x3b1d, 0xf63: 0x3b3d,
+ 0xf64: 0x3b5d, 0xf65: 0x3b7d, 0xf66: 0x127d, 0xf67: 0x3b9d, 0xf68: 0x3bbd, 0xf69: 0x3bdd,
+ 0xf6a: 0x3bfd, 0xf6b: 0x3c1d, 0xf6c: 0x3c3d, 0xf6d: 0x3c5d, 0xf6e: 0x239d, 0xf6f: 0x3c7d,
+ 0xf70: 0x3c9d, 0xf71: 0x3939, 0xf72: 0x3951, 0xf73: 0x3969, 0xf74: 0x3981, 0xf75: 0x3999,
+ 0xf76: 0x39b1, 0xf77: 0x39c9, 0xf78: 0x39e1, 0xf79: 0x39f9, 0xf7a: 0x3a11, 0xf7b: 0x3a29,
+ 0xf7c: 0x3a41, 0xf7d: 0x3a59, 0xf7e: 0x3a71, 0xf7f: 0x3a89,
+ // Block 0x3e, offset 0xf80
+ 0xf80: 0x3aa1, 0xf81: 0x3ac9, 0xf82: 0x3af1, 0xf83: 0x3b19, 0xf84: 0x3b41, 0xf85: 0x3b69,
+ 0xf86: 0x3b91, 0xf87: 0x3bb9, 0xf88: 0x3be1, 0xf89: 0x3c09, 0xf8a: 0x3c39, 0xf8b: 0x3c69,
+ 0xf8c: 0x3c99, 0xf8d: 0x3cbd, 0xf8e: 0x3cb1, 0xf8f: 0x3cdd, 0xf90: 0x3cfd, 0xf91: 0x3d15,
+ 0xf92: 0x3d2d, 0xf93: 0x3d45, 0xf94: 0x3d5d, 0xf95: 0x3d5d, 0xf96: 0x3d45, 0xf97: 0x3d75,
+ 0xf98: 0x07bd, 0xf99: 0x3d8d, 0xf9a: 0x3da5, 0xf9b: 0x3dbd, 0xf9c: 0x3dd5, 0xf9d: 0x3ded,
+ 0xf9e: 0x3e05, 0xf9f: 0x3e1d, 0xfa0: 0x3e35, 0xfa1: 0x3e4d, 0xfa2: 0x3e65, 0xfa3: 0x3e7d,
+ 0xfa4: 0x3e95, 0xfa5: 0x3e95, 0xfa6: 0x3ead, 0xfa7: 0x3ead, 0xfa8: 0x3ec5, 0xfa9: 0x3ec5,
+ 0xfaa: 0x3edd, 0xfab: 0x3ef5, 0xfac: 0x3f0d, 0xfad: 0x3f25, 0xfae: 0x3f3d, 0xfaf: 0x3f3d,
+ 0xfb0: 0x3f55, 0xfb1: 0x3f55, 0xfb2: 0x3f55, 0xfb3: 0x3f6d, 0xfb4: 0x3f85, 0xfb5: 0x3f9d,
+ 0xfb6: 0x3fb5, 0xfb7: 0x3f9d, 0xfb8: 0x3fcd, 0xfb9: 0x3fe5, 0xfba: 0x3f6d, 0xfbb: 0x3ffd,
+ 0xfbc: 0x4015, 0xfbd: 0x4015, 0xfbe: 0x4015, 0xfbf: 0x0040,
+ // Block 0x3f, offset 0xfc0
+ 0xfc0: 0x3cc9, 0xfc1: 0x3d31, 0xfc2: 0x3d99, 0xfc3: 0x3e01, 0xfc4: 0x3e51, 0xfc5: 0x3eb9,
+ 0xfc6: 0x3f09, 0xfc7: 0x3f59, 0xfc8: 0x3fd9, 0xfc9: 0x4041, 0xfca: 0x4091, 0xfcb: 0x40e1,
+ 0xfcc: 0x4131, 0xfcd: 0x4199, 0xfce: 0x4201, 0xfcf: 0x4251, 0xfd0: 0x42a1, 0xfd1: 0x42d9,
+ 0xfd2: 0x4329, 0xfd3: 0x4391, 0xfd4: 0x43f9, 0xfd5: 0x4431, 0xfd6: 0x44b1, 0xfd7: 0x4549,
+ 0xfd8: 0x45c9, 0xfd9: 0x4619, 0xfda: 0x4699, 0xfdb: 0x4719, 0xfdc: 0x4781, 0xfdd: 0x47d1,
+ 0xfde: 0x4821, 0xfdf: 0x4871, 0xfe0: 0x48d9, 0xfe1: 0x4959, 0xfe2: 0x49c1, 0xfe3: 0x4a11,
+ 0xfe4: 0x4a61, 0xfe5: 0x4ab1, 0xfe6: 0x4ae9, 0xfe7: 0x4b21, 0xfe8: 0x4b59, 0xfe9: 0x4b91,
+ 0xfea: 0x4be1, 0xfeb: 0x4c31, 0xfec: 0x4cb1, 0xfed: 0x4d01, 0xfee: 0x4d69, 0xfef: 0x4de9,
+ 0xff0: 0x4e39, 0xff1: 0x4e71, 0xff2: 0x4ea9, 0xff3: 0x4f29, 0xff4: 0x4f91, 0xff5: 0x5011,
+ 0xff6: 0x5061, 0xff7: 0x50e1, 0xff8: 0x5119, 0xff9: 0x5169, 0xffa: 0x51b9, 0xffb: 0x5209,
+ 0xffc: 0x5259, 0xffd: 0x52a9, 0xffe: 0x5311, 0xfff: 0x5361,
+ // Block 0x40, offset 0x1000
+ 0x1000: 0x5399, 0x1001: 0x53e9, 0x1002: 0x5439, 0x1003: 0x5489, 0x1004: 0x54f1, 0x1005: 0x5541,
+ 0x1006: 0x5591, 0x1007: 0x55e1, 0x1008: 0x5661, 0x1009: 0x56c9, 0x100a: 0x5701, 0x100b: 0x5781,
+ 0x100c: 0x57b9, 0x100d: 0x5821, 0x100e: 0x5889, 0x100f: 0x58d9, 0x1010: 0x5929, 0x1011: 0x5979,
+ 0x1012: 0x59e1, 0x1013: 0x5a19, 0x1014: 0x5a69, 0x1015: 0x5ad1, 0x1016: 0x5b09, 0x1017: 0x5b89,
+ 0x1018: 0x5bd9, 0x1019: 0x5c01, 0x101a: 0x5c29, 0x101b: 0x5c51, 0x101c: 0x5c79, 0x101d: 0x5ca1,
+ 0x101e: 0x5cc9, 0x101f: 0x5cf1, 0x1020: 0x5d19, 0x1021: 0x5d41, 0x1022: 0x5d69, 0x1023: 0x5d99,
+ 0x1024: 0x5dc9, 0x1025: 0x5df9, 0x1026: 0x5e29, 0x1027: 0x5e59, 0x1028: 0x5e89, 0x1029: 0x5eb9,
+ 0x102a: 0x5ee9, 0x102b: 0x5f19, 0x102c: 0x5f49, 0x102d: 0x5f79, 0x102e: 0x5fa9, 0x102f: 0x5fd9,
+ 0x1030: 0x6009, 0x1031: 0x402d, 0x1032: 0x6039, 0x1033: 0x6051, 0x1034: 0x404d, 0x1035: 0x6069,
+ 0x1036: 0x6081, 0x1037: 0x6099, 0x1038: 0x406d, 0x1039: 0x406d, 0x103a: 0x60b1, 0x103b: 0x60c9,
+ 0x103c: 0x6101, 0x103d: 0x6139, 0x103e: 0x6171, 0x103f: 0x61a9,
+ // Block 0x41, offset 0x1040
+ 0x1040: 0x6211, 0x1041: 0x6229, 0x1042: 0x408d, 0x1043: 0x6241, 0x1044: 0x6259, 0x1045: 0x6271,
+ 0x1046: 0x6289, 0x1047: 0x62a1, 0x1048: 0x40ad, 0x1049: 0x62b9, 0x104a: 0x62e1, 0x104b: 0x62f9,
+ 0x104c: 0x40cd, 0x104d: 0x40cd, 0x104e: 0x6311, 0x104f: 0x6329, 0x1050: 0x6341, 0x1051: 0x40ed,
+ 0x1052: 0x410d, 0x1053: 0x412d, 0x1054: 0x414d, 0x1055: 0x416d, 0x1056: 0x6359, 0x1057: 0x6371,
+ 0x1058: 0x6389, 0x1059: 0x63a1, 0x105a: 0x63b9, 0x105b: 0x418d, 0x105c: 0x63d1, 0x105d: 0x63e9,
+ 0x105e: 0x6401, 0x105f: 0x41ad, 0x1060: 0x41cd, 0x1061: 0x6419, 0x1062: 0x41ed, 0x1063: 0x420d,
+ 0x1064: 0x422d, 0x1065: 0x6431, 0x1066: 0x424d, 0x1067: 0x6449, 0x1068: 0x6479, 0x1069: 0x6211,
+ 0x106a: 0x426d, 0x106b: 0x428d, 0x106c: 0x42ad, 0x106d: 0x42cd, 0x106e: 0x64b1, 0x106f: 0x64f1,
+ 0x1070: 0x6539, 0x1071: 0x6551, 0x1072: 0x42ed, 0x1073: 0x6569, 0x1074: 0x6581, 0x1075: 0x6599,
+ 0x1076: 0x430d, 0x1077: 0x65b1, 0x1078: 0x65c9, 0x1079: 0x65b1, 0x107a: 0x65e1, 0x107b: 0x65f9,
+ 0x107c: 0x432d, 0x107d: 0x6611, 0x107e: 0x6629, 0x107f: 0x6611,
+ // Block 0x42, offset 0x1080
+ 0x1080: 0x434d, 0x1081: 0x436d, 0x1082: 0x0040, 0x1083: 0x6641, 0x1084: 0x6659, 0x1085: 0x6671,
+ 0x1086: 0x6689, 0x1087: 0x0040, 0x1088: 0x66c1, 0x1089: 0x66d9, 0x108a: 0x66f1, 0x108b: 0x6709,
+ 0x108c: 0x6721, 0x108d: 0x6739, 0x108e: 0x6401, 0x108f: 0x6751, 0x1090: 0x6769, 0x1091: 0x6781,
+ 0x1092: 0x438d, 0x1093: 0x6799, 0x1094: 0x6289, 0x1095: 0x43ad, 0x1096: 0x43cd, 0x1097: 0x67b1,
+ 0x1098: 0x0040, 0x1099: 0x43ed, 0x109a: 0x67c9, 0x109b: 0x67e1, 0x109c: 0x67f9, 0x109d: 0x6811,
+ 0x109e: 0x6829, 0x109f: 0x6859, 0x10a0: 0x6889, 0x10a1: 0x68b1, 0x10a2: 0x68d9, 0x10a3: 0x6901,
+ 0x10a4: 0x6929, 0x10a5: 0x6951, 0x10a6: 0x6979, 0x10a7: 0x69a1, 0x10a8: 0x69c9, 0x10a9: 0x69f1,
+ 0x10aa: 0x6a21, 0x10ab: 0x6a51, 0x10ac: 0x6a81, 0x10ad: 0x6ab1, 0x10ae: 0x6ae1, 0x10af: 0x6b11,
+ 0x10b0: 0x6b41, 0x10b1: 0x6b71, 0x10b2: 0x6ba1, 0x10b3: 0x6bd1, 0x10b4: 0x6c01, 0x10b5: 0x6c31,
+ 0x10b6: 0x6c61, 0x10b7: 0x6c91, 0x10b8: 0x6cc1, 0x10b9: 0x6cf1, 0x10ba: 0x6d21, 0x10bb: 0x6d51,
+ 0x10bc: 0x6d81, 0x10bd: 0x6db1, 0x10be: 0x6de1, 0x10bf: 0x440d,
+ // Block 0x43, offset 0x10c0
+ 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008,
+ 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008,
+ 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008,
+ 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008,
+ 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0xe00d, 0x10dd: 0x0008,
+ 0x10de: 0xe00d, 0x10df: 0x0008, 0x10e0: 0xe00d, 0x10e1: 0x0008, 0x10e2: 0xe00d, 0x10e3: 0x0008,
+ 0x10e4: 0xe00d, 0x10e5: 0x0008, 0x10e6: 0xe00d, 0x10e7: 0x0008, 0x10e8: 0xe00d, 0x10e9: 0x0008,
+ 0x10ea: 0xe00d, 0x10eb: 0x0008, 0x10ec: 0xe00d, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x3308,
+ 0x10f0: 0x3318, 0x10f1: 0x3318, 0x10f2: 0x3318, 0x10f3: 0x0018, 0x10f4: 0x3308, 0x10f5: 0x3308,
+ 0x10f6: 0x3308, 0x10f7: 0x3308, 0x10f8: 0x3308, 0x10f9: 0x3308, 0x10fa: 0x3308, 0x10fb: 0x3308,
+ 0x10fc: 0x3308, 0x10fd: 0x3308, 0x10fe: 0x0018, 0x10ff: 0x0008,
+ // Block 0x44, offset 0x1100
+ 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008,
+ 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008,
+ 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008,
+ 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008,
+ 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0x0ea1, 0x111d: 0x6e11,
+ 0x111e: 0x3308, 0x111f: 0x3308, 0x1120: 0x0008, 0x1121: 0x0008, 0x1122: 0x0008, 0x1123: 0x0008,
+ 0x1124: 0x0008, 0x1125: 0x0008, 0x1126: 0x0008, 0x1127: 0x0008, 0x1128: 0x0008, 0x1129: 0x0008,
+ 0x112a: 0x0008, 0x112b: 0x0008, 0x112c: 0x0008, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x0008,
+ 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0x0008, 0x1133: 0x0008, 0x1134: 0x0008, 0x1135: 0x0008,
+ 0x1136: 0x0008, 0x1137: 0x0008, 0x1138: 0x0008, 0x1139: 0x0008, 0x113a: 0x0008, 0x113b: 0x0008,
+ 0x113c: 0x0008, 0x113d: 0x0008, 0x113e: 0x0008, 0x113f: 0x0008,
+ // Block 0x45, offset 0x1140
+ 0x1140: 0x0018, 0x1141: 0x0018, 0x1142: 0x0018, 0x1143: 0x0018, 0x1144: 0x0018, 0x1145: 0x0018,
+ 0x1146: 0x0018, 0x1147: 0x0018, 0x1148: 0x0018, 0x1149: 0x0018, 0x114a: 0x0018, 0x114b: 0x0018,
+ 0x114c: 0x0018, 0x114d: 0x0018, 0x114e: 0x0018, 0x114f: 0x0018, 0x1150: 0x0018, 0x1151: 0x0018,
+ 0x1152: 0x0018, 0x1153: 0x0018, 0x1154: 0x0018, 0x1155: 0x0018, 0x1156: 0x0018, 0x1157: 0x0008,
+ 0x1158: 0x0008, 0x1159: 0x0008, 0x115a: 0x0008, 0x115b: 0x0008, 0x115c: 0x0008, 0x115d: 0x0008,
+ 0x115e: 0x0008, 0x115f: 0x0008, 0x1160: 0x0018, 0x1161: 0x0018, 0x1162: 0xe00d, 0x1163: 0x0008,
+ 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008,
+ 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008,
+ 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0xe00d, 0x1173: 0x0008, 0x1174: 0xe00d, 0x1175: 0x0008,
+ 0x1176: 0xe00d, 0x1177: 0x0008, 0x1178: 0xe00d, 0x1179: 0x0008, 0x117a: 0xe00d, 0x117b: 0x0008,
+ 0x117c: 0xe00d, 0x117d: 0x0008, 0x117e: 0xe00d, 0x117f: 0x0008,
+ // Block 0x46, offset 0x1180
+ 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008,
+ 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0xe00d, 0x1189: 0x0008, 0x118a: 0xe00d, 0x118b: 0x0008,
+ 0x118c: 0xe00d, 0x118d: 0x0008, 0x118e: 0xe00d, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008,
+ 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0xe00d, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008,
+ 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008,
+ 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008,
+ 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008,
+ 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008,
+ 0x11b0: 0xe0fd, 0x11b1: 0x0008, 0x11b2: 0x0008, 0x11b3: 0x0008, 0x11b4: 0x0008, 0x11b5: 0x0008,
+ 0x11b6: 0x0008, 0x11b7: 0x0008, 0x11b8: 0x0008, 0x11b9: 0xe01d, 0x11ba: 0x0008, 0x11bb: 0xe03d,
+ 0x11bc: 0x0008, 0x11bd: 0x442d, 0x11be: 0xe00d, 0x11bf: 0x0008,
+ // Block 0x47, offset 0x11c0
+ 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008,
+ 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0x0008, 0x11c9: 0x0018, 0x11ca: 0x0018, 0x11cb: 0xe03d,
+ 0x11cc: 0x0008, 0x11cd: 0x11d9, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008,
+ 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008,
+ 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008,
+ 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008,
+ 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008,
+ 0x11ea: 0x6e29, 0x11eb: 0x1029, 0x11ec: 0x11c1, 0x11ed: 0x6e41, 0x11ee: 0x1221, 0x11ef: 0x0040,
+ 0x11f0: 0x6e59, 0x11f1: 0x6e71, 0x11f2: 0x1239, 0x11f3: 0x444d, 0x11f4: 0xe00d, 0x11f5: 0x0008,
+ 0x11f6: 0xe00d, 0x11f7: 0x0008, 0x11f8: 0x0040, 0x11f9: 0x0040, 0x11fa: 0x0040, 0x11fb: 0x0040,
+ 0x11fc: 0x0040, 0x11fd: 0x0040, 0x11fe: 0x0040, 0x11ff: 0x0040,
+ // Block 0x48, offset 0x1200
+ 0x1200: 0x64d5, 0x1201: 0x64f5, 0x1202: 0x6515, 0x1203: 0x6535, 0x1204: 0x6555, 0x1205: 0x6575,
+ 0x1206: 0x6595, 0x1207: 0x65b5, 0x1208: 0x65d5, 0x1209: 0x65f5, 0x120a: 0x6615, 0x120b: 0x6635,
+ 0x120c: 0x6655, 0x120d: 0x6675, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0x6695, 0x1211: 0x0008,
+ 0x1212: 0x66b5, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x66d5, 0x1216: 0x66f5, 0x1217: 0x6715,
+ 0x1218: 0x6735, 0x1219: 0x6755, 0x121a: 0x6775, 0x121b: 0x6795, 0x121c: 0x67b5, 0x121d: 0x67d5,
+ 0x121e: 0x67f5, 0x121f: 0x0008, 0x1220: 0x6815, 0x1221: 0x0008, 0x1222: 0x6835, 0x1223: 0x0008,
+ 0x1224: 0x0008, 0x1225: 0x6855, 0x1226: 0x6875, 0x1227: 0x0008, 0x1228: 0x0008, 0x1229: 0x0008,
+ 0x122a: 0x6895, 0x122b: 0x68b5, 0x122c: 0x68d5, 0x122d: 0x68f5, 0x122e: 0x6915, 0x122f: 0x6935,
+ 0x1230: 0x6955, 0x1231: 0x6975, 0x1232: 0x6995, 0x1233: 0x69b5, 0x1234: 0x69d5, 0x1235: 0x69f5,
+ 0x1236: 0x6a15, 0x1237: 0x6a35, 0x1238: 0x6a55, 0x1239: 0x6a75, 0x123a: 0x6a95, 0x123b: 0x6ab5,
+ 0x123c: 0x6ad5, 0x123d: 0x6af5, 0x123e: 0x6b15, 0x123f: 0x6b35,
+ // Block 0x49, offset 0x1240
+ 0x1240: 0x7a95, 0x1241: 0x7ab5, 0x1242: 0x7ad5, 0x1243: 0x7af5, 0x1244: 0x7b15, 0x1245: 0x7b35,
+ 0x1246: 0x7b55, 0x1247: 0x7b75, 0x1248: 0x7b95, 0x1249: 0x7bb5, 0x124a: 0x7bd5, 0x124b: 0x7bf5,
+ 0x124c: 0x7c15, 0x124d: 0x7c35, 0x124e: 0x7c55, 0x124f: 0x6ec9, 0x1250: 0x6ef1, 0x1251: 0x6f19,
+ 0x1252: 0x7c75, 0x1253: 0x7c95, 0x1254: 0x7cb5, 0x1255: 0x6f41, 0x1256: 0x6f69, 0x1257: 0x6f91,
+ 0x1258: 0x7cd5, 0x1259: 0x7cf5, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x0040,
+ 0x125e: 0x0040, 0x125f: 0x0040, 0x1260: 0x0040, 0x1261: 0x0040, 0x1262: 0x0040, 0x1263: 0x0040,
+ 0x1264: 0x0040, 0x1265: 0x0040, 0x1266: 0x0040, 0x1267: 0x0040, 0x1268: 0x0040, 0x1269: 0x0040,
+ 0x126a: 0x0040, 0x126b: 0x0040, 0x126c: 0x0040, 0x126d: 0x0040, 0x126e: 0x0040, 0x126f: 0x0040,
+ 0x1270: 0x0040, 0x1271: 0x0040, 0x1272: 0x0040, 0x1273: 0x0040, 0x1274: 0x0040, 0x1275: 0x0040,
+ 0x1276: 0x0040, 0x1277: 0x0040, 0x1278: 0x0040, 0x1279: 0x0040, 0x127a: 0x0040, 0x127b: 0x0040,
+ 0x127c: 0x0040, 0x127d: 0x0040, 0x127e: 0x0040, 0x127f: 0x0040,
+ // Block 0x4a, offset 0x1280
+ 0x1280: 0x6fb9, 0x1281: 0x6fd1, 0x1282: 0x6fe9, 0x1283: 0x7d15, 0x1284: 0x7d35, 0x1285: 0x7001,
+ 0x1286: 0x7001, 0x1287: 0x0040, 0x1288: 0x0040, 0x1289: 0x0040, 0x128a: 0x0040, 0x128b: 0x0040,
+ 0x128c: 0x0040, 0x128d: 0x0040, 0x128e: 0x0040, 0x128f: 0x0040, 0x1290: 0x0040, 0x1291: 0x0040,
+ 0x1292: 0x0040, 0x1293: 0x7019, 0x1294: 0x7041, 0x1295: 0x7069, 0x1296: 0x7091, 0x1297: 0x70b9,
+ 0x1298: 0x0040, 0x1299: 0x0040, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x70e1,
+ 0x129e: 0x3308, 0x129f: 0x7109, 0x12a0: 0x7131, 0x12a1: 0x20a9, 0x12a2: 0x20f1, 0x12a3: 0x7149,
+ 0x12a4: 0x7161, 0x12a5: 0x7179, 0x12a6: 0x7191, 0x12a7: 0x71a9, 0x12a8: 0x71c1, 0x12a9: 0x1fb2,
+ 0x12aa: 0x71d9, 0x12ab: 0x7201, 0x12ac: 0x7229, 0x12ad: 0x7261, 0x12ae: 0x7299, 0x12af: 0x72c1,
+ 0x12b0: 0x72e9, 0x12b1: 0x7311, 0x12b2: 0x7339, 0x12b3: 0x7361, 0x12b4: 0x7389, 0x12b5: 0x73b1,
+ 0x12b6: 0x73d9, 0x12b7: 0x0040, 0x12b8: 0x7401, 0x12b9: 0x7429, 0x12ba: 0x7451, 0x12bb: 0x7479,
+ 0x12bc: 0x74a1, 0x12bd: 0x0040, 0x12be: 0x74c9, 0x12bf: 0x0040,
+ // Block 0x4b, offset 0x12c0
+ 0x12c0: 0x74f1, 0x12c1: 0x7519, 0x12c2: 0x0040, 0x12c3: 0x7541, 0x12c4: 0x7569, 0x12c5: 0x0040,
+ 0x12c6: 0x7591, 0x12c7: 0x75b9, 0x12c8: 0x75e1, 0x12c9: 0x7609, 0x12ca: 0x7631, 0x12cb: 0x7659,
+ 0x12cc: 0x7681, 0x12cd: 0x76a9, 0x12ce: 0x76d1, 0x12cf: 0x76f9, 0x12d0: 0x7721, 0x12d1: 0x7721,
+ 0x12d2: 0x7739, 0x12d3: 0x7739, 0x12d4: 0x7739, 0x12d5: 0x7739, 0x12d6: 0x7751, 0x12d7: 0x7751,
+ 0x12d8: 0x7751, 0x12d9: 0x7751, 0x12da: 0x7769, 0x12db: 0x7769, 0x12dc: 0x7769, 0x12dd: 0x7769,
+ 0x12de: 0x7781, 0x12df: 0x7781, 0x12e0: 0x7781, 0x12e1: 0x7781, 0x12e2: 0x7799, 0x12e3: 0x7799,
+ 0x12e4: 0x7799, 0x12e5: 0x7799, 0x12e6: 0x77b1, 0x12e7: 0x77b1, 0x12e8: 0x77b1, 0x12e9: 0x77b1,
+ 0x12ea: 0x77c9, 0x12eb: 0x77c9, 0x12ec: 0x77c9, 0x12ed: 0x77c9, 0x12ee: 0x77e1, 0x12ef: 0x77e1,
+ 0x12f0: 0x77e1, 0x12f1: 0x77e1, 0x12f2: 0x77f9, 0x12f3: 0x77f9, 0x12f4: 0x77f9, 0x12f5: 0x77f9,
+ 0x12f6: 0x7811, 0x12f7: 0x7811, 0x12f8: 0x7811, 0x12f9: 0x7811, 0x12fa: 0x7829, 0x12fb: 0x7829,
+ 0x12fc: 0x7829, 0x12fd: 0x7829, 0x12fe: 0x7841, 0x12ff: 0x7841,
+ // Block 0x4c, offset 0x1300
+ 0x1300: 0x7841, 0x1301: 0x7841, 0x1302: 0x7859, 0x1303: 0x7859, 0x1304: 0x7871, 0x1305: 0x7871,
+ 0x1306: 0x7889, 0x1307: 0x7889, 0x1308: 0x78a1, 0x1309: 0x78a1, 0x130a: 0x78b9, 0x130b: 0x78b9,
+ 0x130c: 0x78d1, 0x130d: 0x78d1, 0x130e: 0x78e9, 0x130f: 0x78e9, 0x1310: 0x78e9, 0x1311: 0x78e9,
+ 0x1312: 0x7901, 0x1313: 0x7901, 0x1314: 0x7901, 0x1315: 0x7901, 0x1316: 0x7919, 0x1317: 0x7919,
+ 0x1318: 0x7919, 0x1319: 0x7919, 0x131a: 0x7931, 0x131b: 0x7931, 0x131c: 0x7931, 0x131d: 0x7931,
+ 0x131e: 0x7949, 0x131f: 0x7949, 0x1320: 0x7961, 0x1321: 0x7961, 0x1322: 0x7961, 0x1323: 0x7961,
+ 0x1324: 0x7979, 0x1325: 0x7979, 0x1326: 0x7991, 0x1327: 0x7991, 0x1328: 0x7991, 0x1329: 0x7991,
+ 0x132a: 0x79a9, 0x132b: 0x79a9, 0x132c: 0x79a9, 0x132d: 0x79a9, 0x132e: 0x79c1, 0x132f: 0x79c1,
+ 0x1330: 0x79d9, 0x1331: 0x79d9, 0x1332: 0x0818, 0x1333: 0x0818, 0x1334: 0x0818, 0x1335: 0x0818,
+ 0x1336: 0x0818, 0x1337: 0x0818, 0x1338: 0x0818, 0x1339: 0x0818, 0x133a: 0x0818, 0x133b: 0x0818,
+ 0x133c: 0x0818, 0x133d: 0x0818, 0x133e: 0x0818, 0x133f: 0x0818,
+ // Block 0x4d, offset 0x1340
+ 0x1340: 0x0818, 0x1341: 0x0818, 0x1342: 0x0040, 0x1343: 0x0040, 0x1344: 0x0040, 0x1345: 0x0040,
+ 0x1346: 0x0040, 0x1347: 0x0040, 0x1348: 0x0040, 0x1349: 0x0040, 0x134a: 0x0040, 0x134b: 0x0040,
+ 0x134c: 0x0040, 0x134d: 0x0040, 0x134e: 0x0040, 0x134f: 0x0040, 0x1350: 0x0040, 0x1351: 0x0040,
+ 0x1352: 0x0040, 0x1353: 0x79f1, 0x1354: 0x79f1, 0x1355: 0x79f1, 0x1356: 0x79f1, 0x1357: 0x7a09,
+ 0x1358: 0x7a09, 0x1359: 0x7a21, 0x135a: 0x7a21, 0x135b: 0x7a39, 0x135c: 0x7a39, 0x135d: 0x0479,
+ 0x135e: 0x7a51, 0x135f: 0x7a51, 0x1360: 0x7a69, 0x1361: 0x7a69, 0x1362: 0x7a81, 0x1363: 0x7a81,
+ 0x1364: 0x7a99, 0x1365: 0x7a99, 0x1366: 0x7a99, 0x1367: 0x7a99, 0x1368: 0x7ab1, 0x1369: 0x7ab1,
+ 0x136a: 0x7ac9, 0x136b: 0x7ac9, 0x136c: 0x7af1, 0x136d: 0x7af1, 0x136e: 0x7b19, 0x136f: 0x7b19,
+ 0x1370: 0x7b41, 0x1371: 0x7b41, 0x1372: 0x7b69, 0x1373: 0x7b69, 0x1374: 0x7b91, 0x1375: 0x7b91,
+ 0x1376: 0x7bb9, 0x1377: 0x7bb9, 0x1378: 0x7bb9, 0x1379: 0x7be1, 0x137a: 0x7be1, 0x137b: 0x7be1,
+ 0x137c: 0x7c09, 0x137d: 0x7c09, 0x137e: 0x7c09, 0x137f: 0x7c09,
+ // Block 0x4e, offset 0x1380
+ 0x1380: 0x85f9, 0x1381: 0x8621, 0x1382: 0x8649, 0x1383: 0x8671, 0x1384: 0x8699, 0x1385: 0x86c1,
+ 0x1386: 0x86e9, 0x1387: 0x8711, 0x1388: 0x8739, 0x1389: 0x8761, 0x138a: 0x8789, 0x138b: 0x87b1,
+ 0x138c: 0x87d9, 0x138d: 0x8801, 0x138e: 0x8829, 0x138f: 0x8851, 0x1390: 0x8879, 0x1391: 0x88a1,
+ 0x1392: 0x88c9, 0x1393: 0x88f1, 0x1394: 0x8919, 0x1395: 0x8941, 0x1396: 0x8969, 0x1397: 0x8991,
+ 0x1398: 0x89b9, 0x1399: 0x89e1, 0x139a: 0x8a09, 0x139b: 0x8a31, 0x139c: 0x8a59, 0x139d: 0x8a81,
+ 0x139e: 0x8aaa, 0x139f: 0x8ada, 0x13a0: 0x8b0a, 0x13a1: 0x8b3a, 0x13a2: 0x8b6a, 0x13a3: 0x8b9a,
+ 0x13a4: 0x8bc9, 0x13a5: 0x8bf1, 0x13a6: 0x7c71, 0x13a7: 0x8c19, 0x13a8: 0x7be1, 0x13a9: 0x7c99,
+ 0x13aa: 0x8c41, 0x13ab: 0x8c69, 0x13ac: 0x7d39, 0x13ad: 0x8c91, 0x13ae: 0x7d61, 0x13af: 0x7d89,
+ 0x13b0: 0x8cb9, 0x13b1: 0x8ce1, 0x13b2: 0x7e29, 0x13b3: 0x8d09, 0x13b4: 0x7e51, 0x13b5: 0x7e79,
+ 0x13b6: 0x8d31, 0x13b7: 0x8d59, 0x13b8: 0x7ec9, 0x13b9: 0x8d81, 0x13ba: 0x7ef1, 0x13bb: 0x7f19,
+ 0x13bc: 0x83a1, 0x13bd: 0x83c9, 0x13be: 0x8441, 0x13bf: 0x8469,
+ // Block 0x4f, offset 0x13c0
+ 0x13c0: 0x8491, 0x13c1: 0x8531, 0x13c2: 0x8559, 0x13c3: 0x8581, 0x13c4: 0x85a9, 0x13c5: 0x8649,
+ 0x13c6: 0x8671, 0x13c7: 0x8699, 0x13c8: 0x8da9, 0x13c9: 0x8739, 0x13ca: 0x8dd1, 0x13cb: 0x8df9,
+ 0x13cc: 0x8829, 0x13cd: 0x8e21, 0x13ce: 0x8851, 0x13cf: 0x8879, 0x13d0: 0x8a81, 0x13d1: 0x8e49,
+ 0x13d2: 0x8e71, 0x13d3: 0x89b9, 0x13d4: 0x8e99, 0x13d5: 0x89e1, 0x13d6: 0x8a09, 0x13d7: 0x7c21,
+ 0x13d8: 0x7c49, 0x13d9: 0x8ec1, 0x13da: 0x7c71, 0x13db: 0x8ee9, 0x13dc: 0x7cc1, 0x13dd: 0x7ce9,
+ 0x13de: 0x7d11, 0x13df: 0x7d39, 0x13e0: 0x8f11, 0x13e1: 0x7db1, 0x13e2: 0x7dd9, 0x13e3: 0x7e01,
+ 0x13e4: 0x7e29, 0x13e5: 0x8f39, 0x13e6: 0x7ec9, 0x13e7: 0x7f41, 0x13e8: 0x7f69, 0x13e9: 0x7f91,
+ 0x13ea: 0x7fb9, 0x13eb: 0x7fe1, 0x13ec: 0x8031, 0x13ed: 0x8059, 0x13ee: 0x8081, 0x13ef: 0x80a9,
+ 0x13f0: 0x80d1, 0x13f1: 0x80f9, 0x13f2: 0x8f61, 0x13f3: 0x8121, 0x13f4: 0x8149, 0x13f5: 0x8171,
+ 0x13f6: 0x8199, 0x13f7: 0x81c1, 0x13f8: 0x81e9, 0x13f9: 0x8239, 0x13fa: 0x8261, 0x13fb: 0x8289,
+ 0x13fc: 0x82b1, 0x13fd: 0x82d9, 0x13fe: 0x8301, 0x13ff: 0x8329,
+ // Block 0x50, offset 0x1400
+ 0x1400: 0x8351, 0x1401: 0x8379, 0x1402: 0x83f1, 0x1403: 0x8419, 0x1404: 0x84b9, 0x1405: 0x84e1,
+ 0x1406: 0x8509, 0x1407: 0x8531, 0x1408: 0x8559, 0x1409: 0x85d1, 0x140a: 0x85f9, 0x140b: 0x8621,
+ 0x140c: 0x8649, 0x140d: 0x8f89, 0x140e: 0x86c1, 0x140f: 0x86e9, 0x1410: 0x8711, 0x1411: 0x8739,
+ 0x1412: 0x87b1, 0x1413: 0x87d9, 0x1414: 0x8801, 0x1415: 0x8829, 0x1416: 0x8fb1, 0x1417: 0x88a1,
+ 0x1418: 0x88c9, 0x1419: 0x8fd9, 0x141a: 0x8941, 0x141b: 0x8969, 0x141c: 0x8991, 0x141d: 0x89b9,
+ 0x141e: 0x9001, 0x141f: 0x7c71, 0x1420: 0x8ee9, 0x1421: 0x7d39, 0x1422: 0x8f11, 0x1423: 0x7e29,
+ 0x1424: 0x8f39, 0x1425: 0x7ec9, 0x1426: 0x9029, 0x1427: 0x80d1, 0x1428: 0x9051, 0x1429: 0x9079,
+ 0x142a: 0x90a1, 0x142b: 0x8531, 0x142c: 0x8559, 0x142d: 0x8649, 0x142e: 0x8829, 0x142f: 0x8fb1,
+ 0x1430: 0x89b9, 0x1431: 0x9001, 0x1432: 0x90c9, 0x1433: 0x9101, 0x1434: 0x9139, 0x1435: 0x9171,
+ 0x1436: 0x9199, 0x1437: 0x91c1, 0x1438: 0x91e9, 0x1439: 0x9211, 0x143a: 0x9239, 0x143b: 0x9261,
+ 0x143c: 0x9289, 0x143d: 0x92b1, 0x143e: 0x92d9, 0x143f: 0x9301,
+ // Block 0x51, offset 0x1440
+ 0x1440: 0x9329, 0x1441: 0x9351, 0x1442: 0x9379, 0x1443: 0x93a1, 0x1444: 0x93c9, 0x1445: 0x93f1,
+ 0x1446: 0x9419, 0x1447: 0x9441, 0x1448: 0x9469, 0x1449: 0x9491, 0x144a: 0x94b9, 0x144b: 0x94e1,
+ 0x144c: 0x9079, 0x144d: 0x9509, 0x144e: 0x9531, 0x144f: 0x9559, 0x1450: 0x9581, 0x1451: 0x9171,
+ 0x1452: 0x9199, 0x1453: 0x91c1, 0x1454: 0x91e9, 0x1455: 0x9211, 0x1456: 0x9239, 0x1457: 0x9261,
+ 0x1458: 0x9289, 0x1459: 0x92b1, 0x145a: 0x92d9, 0x145b: 0x9301, 0x145c: 0x9329, 0x145d: 0x9351,
+ 0x145e: 0x9379, 0x145f: 0x93a1, 0x1460: 0x93c9, 0x1461: 0x93f1, 0x1462: 0x9419, 0x1463: 0x9441,
+ 0x1464: 0x9469, 0x1465: 0x9491, 0x1466: 0x94b9, 0x1467: 0x94e1, 0x1468: 0x9079, 0x1469: 0x9509,
+ 0x146a: 0x9531, 0x146b: 0x9559, 0x146c: 0x9581, 0x146d: 0x9491, 0x146e: 0x94b9, 0x146f: 0x94e1,
+ 0x1470: 0x9079, 0x1471: 0x9051, 0x1472: 0x90a1, 0x1473: 0x8211, 0x1474: 0x8059, 0x1475: 0x8081,
+ 0x1476: 0x80a9, 0x1477: 0x9491, 0x1478: 0x94b9, 0x1479: 0x94e1, 0x147a: 0x8211, 0x147b: 0x8239,
+ 0x147c: 0x95a9, 0x147d: 0x95a9, 0x147e: 0x0018, 0x147f: 0x0018,
+ // Block 0x52, offset 0x1480
+ 0x1480: 0x0040, 0x1481: 0x0040, 0x1482: 0x0040, 0x1483: 0x0040, 0x1484: 0x0040, 0x1485: 0x0040,
+ 0x1486: 0x0040, 0x1487: 0x0040, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040,
+ 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x95d1, 0x1491: 0x9609,
+ 0x1492: 0x9609, 0x1493: 0x9641, 0x1494: 0x9679, 0x1495: 0x96b1, 0x1496: 0x96e9, 0x1497: 0x9721,
+ 0x1498: 0x9759, 0x1499: 0x9759, 0x149a: 0x9791, 0x149b: 0x97c9, 0x149c: 0x9801, 0x149d: 0x9839,
+ 0x149e: 0x9871, 0x149f: 0x98a9, 0x14a0: 0x98a9, 0x14a1: 0x98e1, 0x14a2: 0x9919, 0x14a3: 0x9919,
+ 0x14a4: 0x9951, 0x14a5: 0x9951, 0x14a6: 0x9989, 0x14a7: 0x99c1, 0x14a8: 0x99c1, 0x14a9: 0x99f9,
+ 0x14aa: 0x9a31, 0x14ab: 0x9a31, 0x14ac: 0x9a69, 0x14ad: 0x9a69, 0x14ae: 0x9aa1, 0x14af: 0x9ad9,
+ 0x14b0: 0x9ad9, 0x14b1: 0x9b11, 0x14b2: 0x9b11, 0x14b3: 0x9b49, 0x14b4: 0x9b81, 0x14b5: 0x9bb9,
+ 0x14b6: 0x9bf1, 0x14b7: 0x9bf1, 0x14b8: 0x9c29, 0x14b9: 0x9c61, 0x14ba: 0x9c99, 0x14bb: 0x9cd1,
+ 0x14bc: 0x9d09, 0x14bd: 0x9d09, 0x14be: 0x9d41, 0x14bf: 0x9d79,
+ // Block 0x53, offset 0x14c0
+ 0x14c0: 0xa949, 0x14c1: 0xa981, 0x14c2: 0xa9b9, 0x14c3: 0xa8a1, 0x14c4: 0x9bb9, 0x14c5: 0x9989,
+ 0x14c6: 0xa9f1, 0x14c7: 0xaa29, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040,
+ 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x0040, 0x14d1: 0x0040,
+ 0x14d2: 0x0040, 0x14d3: 0x0040, 0x14d4: 0x0040, 0x14d5: 0x0040, 0x14d6: 0x0040, 0x14d7: 0x0040,
+ 0x14d8: 0x0040, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040,
+ 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x0040, 0x14e1: 0x0040, 0x14e2: 0x0040, 0x14e3: 0x0040,
+ 0x14e4: 0x0040, 0x14e5: 0x0040, 0x14e6: 0x0040, 0x14e7: 0x0040, 0x14e8: 0x0040, 0x14e9: 0x0040,
+ 0x14ea: 0x0040, 0x14eb: 0x0040, 0x14ec: 0x0040, 0x14ed: 0x0040, 0x14ee: 0x0040, 0x14ef: 0x0040,
+ 0x14f0: 0xaa61, 0x14f1: 0xaa99, 0x14f2: 0xaad1, 0x14f3: 0xab19, 0x14f4: 0xab61, 0x14f5: 0xaba9,
+ 0x14f6: 0xabf1, 0x14f7: 0xac39, 0x14f8: 0xac81, 0x14f9: 0xacc9, 0x14fa: 0xad02, 0x14fb: 0xae12,
+ 0x14fc: 0xae91, 0x14fd: 0x0018, 0x14fe: 0x0040, 0x14ff: 0x0040,
+ // Block 0x54, offset 0x1500
+ 0x1500: 0x33c0, 0x1501: 0x33c0, 0x1502: 0x33c0, 0x1503: 0x33c0, 0x1504: 0x33c0, 0x1505: 0x33c0,
+ 0x1506: 0x33c0, 0x1507: 0x33c0, 0x1508: 0x33c0, 0x1509: 0x33c0, 0x150a: 0x33c0, 0x150b: 0x33c0,
+ 0x150c: 0x33c0, 0x150d: 0x33c0, 0x150e: 0x33c0, 0x150f: 0x33c0, 0x1510: 0xaeda, 0x1511: 0x7d55,
+ 0x1512: 0x0040, 0x1513: 0xaeea, 0x1514: 0x03c2, 0x1515: 0xaefa, 0x1516: 0xaf0a, 0x1517: 0x7d75,
+ 0x1518: 0x7d95, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040,
+ 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x3308, 0x1521: 0x3308, 0x1522: 0x3308, 0x1523: 0x3308,
+ 0x1524: 0x3308, 0x1525: 0x3308, 0x1526: 0x3308, 0x1527: 0x3308, 0x1528: 0x3308, 0x1529: 0x3308,
+ 0x152a: 0x3308, 0x152b: 0x3308, 0x152c: 0x3308, 0x152d: 0x3308, 0x152e: 0x3308, 0x152f: 0x3308,
+ 0x1530: 0x0040, 0x1531: 0x7db5, 0x1532: 0x7dd5, 0x1533: 0xaf1a, 0x1534: 0xaf1a, 0x1535: 0x1fd2,
+ 0x1536: 0x1fe2, 0x1537: 0xaf2a, 0x1538: 0xaf3a, 0x1539: 0x7df5, 0x153a: 0x7e15, 0x153b: 0x7e35,
+ 0x153c: 0x7df5, 0x153d: 0x7e55, 0x153e: 0x7e75, 0x153f: 0x7e55,
+ // Block 0x55, offset 0x1540
+ 0x1540: 0x7e95, 0x1541: 0x7eb5, 0x1542: 0x7ed5, 0x1543: 0x7eb5, 0x1544: 0x7ef5, 0x1545: 0x0018,
+ 0x1546: 0x0018, 0x1547: 0xaf4a, 0x1548: 0xaf5a, 0x1549: 0x7f16, 0x154a: 0x7f36, 0x154b: 0x7f56,
+ 0x154c: 0x7f76, 0x154d: 0xaf1a, 0x154e: 0xaf1a, 0x154f: 0xaf1a, 0x1550: 0xaeda, 0x1551: 0x7f95,
+ 0x1552: 0x0040, 0x1553: 0x0040, 0x1554: 0x03c2, 0x1555: 0xaeea, 0x1556: 0xaf0a, 0x1557: 0xaefa,
+ 0x1558: 0x7fb5, 0x1559: 0x1fd2, 0x155a: 0x1fe2, 0x155b: 0xaf2a, 0x155c: 0xaf3a, 0x155d: 0x7e95,
+ 0x155e: 0x7ef5, 0x155f: 0xaf6a, 0x1560: 0xaf7a, 0x1561: 0xaf8a, 0x1562: 0x1fb2, 0x1563: 0xaf99,
+ 0x1564: 0xafaa, 0x1565: 0xafba, 0x1566: 0x1fc2, 0x1567: 0x0040, 0x1568: 0xafca, 0x1569: 0xafda,
+ 0x156a: 0xafea, 0x156b: 0xaffa, 0x156c: 0x0040, 0x156d: 0x0040, 0x156e: 0x0040, 0x156f: 0x0040,
+ 0x1570: 0x7fd6, 0x1571: 0xb009, 0x1572: 0x7ff6, 0x1573: 0x0808, 0x1574: 0x8016, 0x1575: 0x0040,
+ 0x1576: 0x8036, 0x1577: 0xb031, 0x1578: 0x8056, 0x1579: 0xb059, 0x157a: 0x8076, 0x157b: 0xb081,
+ 0x157c: 0x8096, 0x157d: 0xb0a9, 0x157e: 0x80b6, 0x157f: 0xb0d1,
+ // Block 0x56, offset 0x1580
+ 0x1580: 0xb0f9, 0x1581: 0xb111, 0x1582: 0xb111, 0x1583: 0xb129, 0x1584: 0xb129, 0x1585: 0xb141,
+ 0x1586: 0xb141, 0x1587: 0xb159, 0x1588: 0xb159, 0x1589: 0xb171, 0x158a: 0xb171, 0x158b: 0xb171,
+ 0x158c: 0xb171, 0x158d: 0xb189, 0x158e: 0xb189, 0x158f: 0xb1a1, 0x1590: 0xb1a1, 0x1591: 0xb1a1,
+ 0x1592: 0xb1a1, 0x1593: 0xb1b9, 0x1594: 0xb1b9, 0x1595: 0xb1d1, 0x1596: 0xb1d1, 0x1597: 0xb1d1,
+ 0x1598: 0xb1d1, 0x1599: 0xb1e9, 0x159a: 0xb1e9, 0x159b: 0xb1e9, 0x159c: 0xb1e9, 0x159d: 0xb201,
+ 0x159e: 0xb201, 0x159f: 0xb201, 0x15a0: 0xb201, 0x15a1: 0xb219, 0x15a2: 0xb219, 0x15a3: 0xb219,
+ 0x15a4: 0xb219, 0x15a5: 0xb231, 0x15a6: 0xb231, 0x15a7: 0xb231, 0x15a8: 0xb231, 0x15a9: 0xb249,
+ 0x15aa: 0xb249, 0x15ab: 0xb261, 0x15ac: 0xb261, 0x15ad: 0xb279, 0x15ae: 0xb279, 0x15af: 0xb291,
+ 0x15b0: 0xb291, 0x15b1: 0xb2a9, 0x15b2: 0xb2a9, 0x15b3: 0xb2a9, 0x15b4: 0xb2a9, 0x15b5: 0xb2c1,
+ 0x15b6: 0xb2c1, 0x15b7: 0xb2c1, 0x15b8: 0xb2c1, 0x15b9: 0xb2d9, 0x15ba: 0xb2d9, 0x15bb: 0xb2d9,
+ 0x15bc: 0xb2d9, 0x15bd: 0xb2f1, 0x15be: 0xb2f1, 0x15bf: 0xb2f1,
+ // Block 0x57, offset 0x15c0
+ 0x15c0: 0xb2f1, 0x15c1: 0xb309, 0x15c2: 0xb309, 0x15c3: 0xb309, 0x15c4: 0xb309, 0x15c5: 0xb321,
+ 0x15c6: 0xb321, 0x15c7: 0xb321, 0x15c8: 0xb321, 0x15c9: 0xb339, 0x15ca: 0xb339, 0x15cb: 0xb339,
+ 0x15cc: 0xb339, 0x15cd: 0xb351, 0x15ce: 0xb351, 0x15cf: 0xb351, 0x15d0: 0xb351, 0x15d1: 0xb369,
+ 0x15d2: 0xb369, 0x15d3: 0xb369, 0x15d4: 0xb369, 0x15d5: 0xb381, 0x15d6: 0xb381, 0x15d7: 0xb381,
+ 0x15d8: 0xb381, 0x15d9: 0xb399, 0x15da: 0xb399, 0x15db: 0xb399, 0x15dc: 0xb399, 0x15dd: 0xb3b1,
+ 0x15de: 0xb3b1, 0x15df: 0xb3b1, 0x15e0: 0xb3b1, 0x15e1: 0xb3c9, 0x15e2: 0xb3c9, 0x15e3: 0xb3c9,
+ 0x15e4: 0xb3c9, 0x15e5: 0xb3e1, 0x15e6: 0xb3e1, 0x15e7: 0xb3e1, 0x15e8: 0xb3e1, 0x15e9: 0xb3f9,
+ 0x15ea: 0xb3f9, 0x15eb: 0xb3f9, 0x15ec: 0xb3f9, 0x15ed: 0xb411, 0x15ee: 0xb411, 0x15ef: 0x7ab1,
+ 0x15f0: 0x7ab1, 0x15f1: 0xb429, 0x15f2: 0xb429, 0x15f3: 0xb429, 0x15f4: 0xb429, 0x15f5: 0xb441,
+ 0x15f6: 0xb441, 0x15f7: 0xb469, 0x15f8: 0xb469, 0x15f9: 0xb491, 0x15fa: 0xb491, 0x15fb: 0xb4b9,
+ 0x15fc: 0xb4b9, 0x15fd: 0x0040, 0x15fe: 0x0040, 0x15ff: 0x03c0,
+ // Block 0x58, offset 0x1600
+ 0x1600: 0x0040, 0x1601: 0xaefa, 0x1602: 0xb4e2, 0x1603: 0xaf6a, 0x1604: 0xafda, 0x1605: 0xafea,
+ 0x1606: 0xaf7a, 0x1607: 0xb4f2, 0x1608: 0x1fd2, 0x1609: 0x1fe2, 0x160a: 0xaf8a, 0x160b: 0x1fb2,
+ 0x160c: 0xaeda, 0x160d: 0xaf99, 0x160e: 0x29d1, 0x160f: 0xb502, 0x1610: 0x1f41, 0x1611: 0x00c9,
+ 0x1612: 0x0069, 0x1613: 0x0079, 0x1614: 0x1f51, 0x1615: 0x1f61, 0x1616: 0x1f71, 0x1617: 0x1f81,
+ 0x1618: 0x1f91, 0x1619: 0x1fa1, 0x161a: 0xaeea, 0x161b: 0x03c2, 0x161c: 0xafaa, 0x161d: 0x1fc2,
+ 0x161e: 0xafba, 0x161f: 0xaf0a, 0x1620: 0xaffa, 0x1621: 0x0039, 0x1622: 0x0ee9, 0x1623: 0x1159,
+ 0x1624: 0x0ef9, 0x1625: 0x0f09, 0x1626: 0x1199, 0x1627: 0x0f31, 0x1628: 0x0249, 0x1629: 0x0f41,
+ 0x162a: 0x0259, 0x162b: 0x0f51, 0x162c: 0x0359, 0x162d: 0x0f61, 0x162e: 0x0f71, 0x162f: 0x00d9,
+ 0x1630: 0x0f99, 0x1631: 0x2039, 0x1632: 0x0269, 0x1633: 0x01d9, 0x1634: 0x0fa9, 0x1635: 0x0fb9,
+ 0x1636: 0x1089, 0x1637: 0x0279, 0x1638: 0x0369, 0x1639: 0x0289, 0x163a: 0x13d1, 0x163b: 0xaf4a,
+ 0x163c: 0xafca, 0x163d: 0xaf5a, 0x163e: 0xb512, 0x163f: 0xaf1a,
+ // Block 0x59, offset 0x1640
+ 0x1640: 0x1caa, 0x1641: 0x0039, 0x1642: 0x0ee9, 0x1643: 0x1159, 0x1644: 0x0ef9, 0x1645: 0x0f09,
+ 0x1646: 0x1199, 0x1647: 0x0f31, 0x1648: 0x0249, 0x1649: 0x0f41, 0x164a: 0x0259, 0x164b: 0x0f51,
+ 0x164c: 0x0359, 0x164d: 0x0f61, 0x164e: 0x0f71, 0x164f: 0x00d9, 0x1650: 0x0f99, 0x1651: 0x2039,
+ 0x1652: 0x0269, 0x1653: 0x01d9, 0x1654: 0x0fa9, 0x1655: 0x0fb9, 0x1656: 0x1089, 0x1657: 0x0279,
+ 0x1658: 0x0369, 0x1659: 0x0289, 0x165a: 0x13d1, 0x165b: 0xaf2a, 0x165c: 0xb522, 0x165d: 0xaf3a,
+ 0x165e: 0xb532, 0x165f: 0x80d5, 0x1660: 0x80f5, 0x1661: 0x29d1, 0x1662: 0x8115, 0x1663: 0x8115,
+ 0x1664: 0x8135, 0x1665: 0x8155, 0x1666: 0x8175, 0x1667: 0x8195, 0x1668: 0x81b5, 0x1669: 0x81d5,
+ 0x166a: 0x81f5, 0x166b: 0x8215, 0x166c: 0x8235, 0x166d: 0x8255, 0x166e: 0x8275, 0x166f: 0x8295,
+ 0x1670: 0x82b5, 0x1671: 0x82d5, 0x1672: 0x82f5, 0x1673: 0x8315, 0x1674: 0x8335, 0x1675: 0x8355,
+ 0x1676: 0x8375, 0x1677: 0x8395, 0x1678: 0x83b5, 0x1679: 0x83d5, 0x167a: 0x83f5, 0x167b: 0x8415,
+ 0x167c: 0x81b5, 0x167d: 0x8435, 0x167e: 0x8455, 0x167f: 0x8215,
+ // Block 0x5a, offset 0x1680
+ 0x1680: 0x8475, 0x1681: 0x8495, 0x1682: 0x84b5, 0x1683: 0x84d5, 0x1684: 0x84f5, 0x1685: 0x8515,
+ 0x1686: 0x8535, 0x1687: 0x8555, 0x1688: 0x84d5, 0x1689: 0x8575, 0x168a: 0x84d5, 0x168b: 0x8595,
+ 0x168c: 0x8595, 0x168d: 0x85b5, 0x168e: 0x85b5, 0x168f: 0x85d5, 0x1690: 0x8515, 0x1691: 0x85f5,
+ 0x1692: 0x8615, 0x1693: 0x85f5, 0x1694: 0x8635, 0x1695: 0x8615, 0x1696: 0x8655, 0x1697: 0x8655,
+ 0x1698: 0x8675, 0x1699: 0x8675, 0x169a: 0x8695, 0x169b: 0x8695, 0x169c: 0x8615, 0x169d: 0x8115,
+ 0x169e: 0x86b5, 0x169f: 0x86d5, 0x16a0: 0x0040, 0x16a1: 0x86f5, 0x16a2: 0x8715, 0x16a3: 0x8735,
+ 0x16a4: 0x8755, 0x16a5: 0x8735, 0x16a6: 0x8775, 0x16a7: 0x8795, 0x16a8: 0x87b5, 0x16a9: 0x87b5,
+ 0x16aa: 0x87d5, 0x16ab: 0x87d5, 0x16ac: 0x87f5, 0x16ad: 0x87f5, 0x16ae: 0x87d5, 0x16af: 0x87d5,
+ 0x16b0: 0x8815, 0x16b1: 0x8835, 0x16b2: 0x8855, 0x16b3: 0x8875, 0x16b4: 0x8895, 0x16b5: 0x88b5,
+ 0x16b6: 0x88b5, 0x16b7: 0x88b5, 0x16b8: 0x88d5, 0x16b9: 0x88d5, 0x16ba: 0x88d5, 0x16bb: 0x88d5,
+ 0x16bc: 0x87b5, 0x16bd: 0x87b5, 0x16be: 0x87b5, 0x16bf: 0x0040,
+ // Block 0x5b, offset 0x16c0
+ 0x16c0: 0x0040, 0x16c1: 0x0040, 0x16c2: 0x8715, 0x16c3: 0x86f5, 0x16c4: 0x88f5, 0x16c5: 0x86f5,
+ 0x16c6: 0x8715, 0x16c7: 0x86f5, 0x16c8: 0x0040, 0x16c9: 0x0040, 0x16ca: 0x8915, 0x16cb: 0x8715,
+ 0x16cc: 0x8935, 0x16cd: 0x88f5, 0x16ce: 0x8935, 0x16cf: 0x8715, 0x16d0: 0x0040, 0x16d1: 0x0040,
+ 0x16d2: 0x8955, 0x16d3: 0x8975, 0x16d4: 0x8875, 0x16d5: 0x8935, 0x16d6: 0x88f5, 0x16d7: 0x8935,
+ 0x16d8: 0x0040, 0x16d9: 0x0040, 0x16da: 0x8995, 0x16db: 0x89b5, 0x16dc: 0x8995, 0x16dd: 0x0040,
+ 0x16de: 0x0040, 0x16df: 0x0040, 0x16e0: 0xb541, 0x16e1: 0xb559, 0x16e2: 0xb571, 0x16e3: 0x89d6,
+ 0x16e4: 0xb589, 0x16e5: 0xb5a1, 0x16e6: 0x89f5, 0x16e7: 0x0040, 0x16e8: 0x8a15, 0x16e9: 0x8a35,
+ 0x16ea: 0x8a55, 0x16eb: 0x8a35, 0x16ec: 0x8a75, 0x16ed: 0x8a95, 0x16ee: 0x8ab5, 0x16ef: 0x0040,
+ 0x16f0: 0x0040, 0x16f1: 0x0040, 0x16f2: 0x0040, 0x16f3: 0x0040, 0x16f4: 0x0040, 0x16f5: 0x0040,
+ 0x16f6: 0x0040, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0340, 0x16fa: 0x0340, 0x16fb: 0x0340,
+ 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040,
+ // Block 0x5c, offset 0x1700
+ 0x1700: 0x0a08, 0x1701: 0x0a08, 0x1702: 0x0a08, 0x1703: 0x0a08, 0x1704: 0x0a08, 0x1705: 0x0c08,
+ 0x1706: 0x0808, 0x1707: 0x0c08, 0x1708: 0x0818, 0x1709: 0x0c08, 0x170a: 0x0c08, 0x170b: 0x0808,
+ 0x170c: 0x0808, 0x170d: 0x0908, 0x170e: 0x0c08, 0x170f: 0x0c08, 0x1710: 0x0c08, 0x1711: 0x0c08,
+ 0x1712: 0x0c08, 0x1713: 0x0a08, 0x1714: 0x0a08, 0x1715: 0x0a08, 0x1716: 0x0a08, 0x1717: 0x0908,
+ 0x1718: 0x0a08, 0x1719: 0x0a08, 0x171a: 0x0a08, 0x171b: 0x0a08, 0x171c: 0x0a08, 0x171d: 0x0c08,
+ 0x171e: 0x0a08, 0x171f: 0x0a08, 0x1720: 0x0a08, 0x1721: 0x0c08, 0x1722: 0x0808, 0x1723: 0x0808,
+ 0x1724: 0x0c08, 0x1725: 0x3308, 0x1726: 0x3308, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0040,
+ 0x172a: 0x0040, 0x172b: 0x0a18, 0x172c: 0x0a18, 0x172d: 0x0a18, 0x172e: 0x0a18, 0x172f: 0x0c18,
+ 0x1730: 0x0818, 0x1731: 0x0818, 0x1732: 0x0818, 0x1733: 0x0818, 0x1734: 0x0818, 0x1735: 0x0818,
+ 0x1736: 0x0818, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040,
+ 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040,
+ // Block 0x5d, offset 0x1740
+ 0x1740: 0x0a08, 0x1741: 0x0c08, 0x1742: 0x0a08, 0x1743: 0x0c08, 0x1744: 0x0c08, 0x1745: 0x0c08,
+ 0x1746: 0x0a08, 0x1747: 0x0a08, 0x1748: 0x0a08, 0x1749: 0x0c08, 0x174a: 0x0a08, 0x174b: 0x0a08,
+ 0x174c: 0x0c08, 0x174d: 0x0a08, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0a08, 0x1751: 0x0c08,
+ 0x1752: 0x0040, 0x1753: 0x0040, 0x1754: 0x0040, 0x1755: 0x0040, 0x1756: 0x0040, 0x1757: 0x0040,
+ 0x1758: 0x0040, 0x1759: 0x0818, 0x175a: 0x0818, 0x175b: 0x0818, 0x175c: 0x0818, 0x175d: 0x0040,
+ 0x175e: 0x0040, 0x175f: 0x0040, 0x1760: 0x0040, 0x1761: 0x0040, 0x1762: 0x0040, 0x1763: 0x0040,
+ 0x1764: 0x0040, 0x1765: 0x0040, 0x1766: 0x0040, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0c18,
+ 0x176a: 0x0c18, 0x176b: 0x0c18, 0x176c: 0x0c18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0818,
+ 0x1770: 0x0040, 0x1771: 0x0040, 0x1772: 0x0040, 0x1773: 0x0040, 0x1774: 0x0040, 0x1775: 0x0040,
+ 0x1776: 0x0040, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040,
+ 0x177c: 0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040,
+ // Block 0x5e, offset 0x1780
+ 0x1780: 0x3308, 0x1781: 0x3308, 0x1782: 0x3008, 0x1783: 0x3008, 0x1784: 0x0040, 0x1785: 0x0008,
+ 0x1786: 0x0008, 0x1787: 0x0008, 0x1788: 0x0008, 0x1789: 0x0008, 0x178a: 0x0008, 0x178b: 0x0008,
+ 0x178c: 0x0008, 0x178d: 0x0040, 0x178e: 0x0040, 0x178f: 0x0008, 0x1790: 0x0008, 0x1791: 0x0040,
+ 0x1792: 0x0040, 0x1793: 0x0008, 0x1794: 0x0008, 0x1795: 0x0008, 0x1796: 0x0008, 0x1797: 0x0008,
+ 0x1798: 0x0008, 0x1799: 0x0008, 0x179a: 0x0008, 0x179b: 0x0008, 0x179c: 0x0008, 0x179d: 0x0008,
+ 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x0008, 0x17a3: 0x0008,
+ 0x17a4: 0x0008, 0x17a5: 0x0008, 0x17a6: 0x0008, 0x17a7: 0x0008, 0x17a8: 0x0008, 0x17a9: 0x0040,
+ 0x17aa: 0x0008, 0x17ab: 0x0008, 0x17ac: 0x0008, 0x17ad: 0x0008, 0x17ae: 0x0008, 0x17af: 0x0008,
+ 0x17b0: 0x0008, 0x17b1: 0x0040, 0x17b2: 0x0008, 0x17b3: 0x0008, 0x17b4: 0x0040, 0x17b5: 0x0008,
+ 0x17b6: 0x0008, 0x17b7: 0x0008, 0x17b8: 0x0008, 0x17b9: 0x0008, 0x17ba: 0x0040, 0x17bb: 0x0040,
+ 0x17bc: 0x3308, 0x17bd: 0x0008, 0x17be: 0x3008, 0x17bf: 0x3008,
+ // Block 0x5f, offset 0x17c0
+ 0x17c0: 0x3308, 0x17c1: 0x3008, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x3008, 0x17c5: 0x0040,
+ 0x17c6: 0x0040, 0x17c7: 0x3008, 0x17c8: 0x3008, 0x17c9: 0x0040, 0x17ca: 0x0040, 0x17cb: 0x3008,
+ 0x17cc: 0x3008, 0x17cd: 0x3808, 0x17ce: 0x0040, 0x17cf: 0x0040, 0x17d0: 0x0008, 0x17d1: 0x0040,
+ 0x17d2: 0x0040, 0x17d3: 0x0040, 0x17d4: 0x0040, 0x17d5: 0x0040, 0x17d6: 0x0040, 0x17d7: 0x3008,
+ 0x17d8: 0x0040, 0x17d9: 0x0040, 0x17da: 0x0040, 0x17db: 0x0040, 0x17dc: 0x0040, 0x17dd: 0x0008,
+ 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x3008, 0x17e3: 0x3008,
+ 0x17e4: 0x0040, 0x17e5: 0x0040, 0x17e6: 0x3308, 0x17e7: 0x3308, 0x17e8: 0x3308, 0x17e9: 0x3308,
+ 0x17ea: 0x3308, 0x17eb: 0x3308, 0x17ec: 0x3308, 0x17ed: 0x0040, 0x17ee: 0x0040, 0x17ef: 0x0040,
+ 0x17f0: 0x3308, 0x17f1: 0x3308, 0x17f2: 0x3308, 0x17f3: 0x3308, 0x17f4: 0x3308, 0x17f5: 0x0040,
+ 0x17f6: 0x0040, 0x17f7: 0x0040, 0x17f8: 0x0040, 0x17f9: 0x0040, 0x17fa: 0x0040, 0x17fb: 0x0040,
+ 0x17fc: 0x0040, 0x17fd: 0x0040, 0x17fe: 0x0040, 0x17ff: 0x0040,
+ // Block 0x60, offset 0x1800
+ 0x1800: 0x0039, 0x1801: 0x0ee9, 0x1802: 0x1159, 0x1803: 0x0ef9, 0x1804: 0x0f09, 0x1805: 0x1199,
+ 0x1806: 0x0f31, 0x1807: 0x0249, 0x1808: 0x0f41, 0x1809: 0x0259, 0x180a: 0x0f51, 0x180b: 0x0359,
+ 0x180c: 0x0f61, 0x180d: 0x0f71, 0x180e: 0x00d9, 0x180f: 0x0f99, 0x1810: 0x2039, 0x1811: 0x0269,
+ 0x1812: 0x01d9, 0x1813: 0x0fa9, 0x1814: 0x0fb9, 0x1815: 0x1089, 0x1816: 0x0279, 0x1817: 0x0369,
+ 0x1818: 0x0289, 0x1819: 0x13d1, 0x181a: 0x0039, 0x181b: 0x0ee9, 0x181c: 0x1159, 0x181d: 0x0ef9,
+ 0x181e: 0x0f09, 0x181f: 0x1199, 0x1820: 0x0f31, 0x1821: 0x0249, 0x1822: 0x0f41, 0x1823: 0x0259,
+ 0x1824: 0x0f51, 0x1825: 0x0359, 0x1826: 0x0f61, 0x1827: 0x0f71, 0x1828: 0x00d9, 0x1829: 0x0f99,
+ 0x182a: 0x2039, 0x182b: 0x0269, 0x182c: 0x01d9, 0x182d: 0x0fa9, 0x182e: 0x0fb9, 0x182f: 0x1089,
+ 0x1830: 0x0279, 0x1831: 0x0369, 0x1832: 0x0289, 0x1833: 0x13d1, 0x1834: 0x0039, 0x1835: 0x0ee9,
+ 0x1836: 0x1159, 0x1837: 0x0ef9, 0x1838: 0x0f09, 0x1839: 0x1199, 0x183a: 0x0f31, 0x183b: 0x0249,
+ 0x183c: 0x0f41, 0x183d: 0x0259, 0x183e: 0x0f51, 0x183f: 0x0359,
+ // Block 0x61, offset 0x1840
+ 0x1840: 0x0f61, 0x1841: 0x0f71, 0x1842: 0x00d9, 0x1843: 0x0f99, 0x1844: 0x2039, 0x1845: 0x0269,
+ 0x1846: 0x01d9, 0x1847: 0x0fa9, 0x1848: 0x0fb9, 0x1849: 0x1089, 0x184a: 0x0279, 0x184b: 0x0369,
+ 0x184c: 0x0289, 0x184d: 0x13d1, 0x184e: 0x0039, 0x184f: 0x0ee9, 0x1850: 0x1159, 0x1851: 0x0ef9,
+ 0x1852: 0x0f09, 0x1853: 0x1199, 0x1854: 0x0f31, 0x1855: 0x0040, 0x1856: 0x0f41, 0x1857: 0x0259,
+ 0x1858: 0x0f51, 0x1859: 0x0359, 0x185a: 0x0f61, 0x185b: 0x0f71, 0x185c: 0x00d9, 0x185d: 0x0f99,
+ 0x185e: 0x2039, 0x185f: 0x0269, 0x1860: 0x01d9, 0x1861: 0x0fa9, 0x1862: 0x0fb9, 0x1863: 0x1089,
+ 0x1864: 0x0279, 0x1865: 0x0369, 0x1866: 0x0289, 0x1867: 0x13d1, 0x1868: 0x0039, 0x1869: 0x0ee9,
+ 0x186a: 0x1159, 0x186b: 0x0ef9, 0x186c: 0x0f09, 0x186d: 0x1199, 0x186e: 0x0f31, 0x186f: 0x0249,
+ 0x1870: 0x0f41, 0x1871: 0x0259, 0x1872: 0x0f51, 0x1873: 0x0359, 0x1874: 0x0f61, 0x1875: 0x0f71,
+ 0x1876: 0x00d9, 0x1877: 0x0f99, 0x1878: 0x2039, 0x1879: 0x0269, 0x187a: 0x01d9, 0x187b: 0x0fa9,
+ 0x187c: 0x0fb9, 0x187d: 0x1089, 0x187e: 0x0279, 0x187f: 0x0369,
+ // Block 0x62, offset 0x1880
+ 0x1880: 0x0289, 0x1881: 0x13d1, 0x1882: 0x0039, 0x1883: 0x0ee9, 0x1884: 0x1159, 0x1885: 0x0ef9,
+ 0x1886: 0x0f09, 0x1887: 0x1199, 0x1888: 0x0f31, 0x1889: 0x0249, 0x188a: 0x0f41, 0x188b: 0x0259,
+ 0x188c: 0x0f51, 0x188d: 0x0359, 0x188e: 0x0f61, 0x188f: 0x0f71, 0x1890: 0x00d9, 0x1891: 0x0f99,
+ 0x1892: 0x2039, 0x1893: 0x0269, 0x1894: 0x01d9, 0x1895: 0x0fa9, 0x1896: 0x0fb9, 0x1897: 0x1089,
+ 0x1898: 0x0279, 0x1899: 0x0369, 0x189a: 0x0289, 0x189b: 0x13d1, 0x189c: 0x0039, 0x189d: 0x0040,
+ 0x189e: 0x1159, 0x189f: 0x0ef9, 0x18a0: 0x0040, 0x18a1: 0x0040, 0x18a2: 0x0f31, 0x18a3: 0x0040,
+ 0x18a4: 0x0040, 0x18a5: 0x0259, 0x18a6: 0x0f51, 0x18a7: 0x0040, 0x18a8: 0x0040, 0x18a9: 0x0f71,
+ 0x18aa: 0x00d9, 0x18ab: 0x0f99, 0x18ac: 0x2039, 0x18ad: 0x0040, 0x18ae: 0x01d9, 0x18af: 0x0fa9,
+ 0x18b0: 0x0fb9, 0x18b1: 0x1089, 0x18b2: 0x0279, 0x18b3: 0x0369, 0x18b4: 0x0289, 0x18b5: 0x13d1,
+ 0x18b6: 0x0039, 0x18b7: 0x0ee9, 0x18b8: 0x1159, 0x18b9: 0x0ef9, 0x18ba: 0x0040, 0x18bb: 0x1199,
+ 0x18bc: 0x0040, 0x18bd: 0x0249, 0x18be: 0x0f41, 0x18bf: 0x0259,
+ // Block 0x63, offset 0x18c0
+ 0x18c0: 0x0f51, 0x18c1: 0x0359, 0x18c2: 0x0f61, 0x18c3: 0x0f71, 0x18c4: 0x0040, 0x18c5: 0x0f99,
+ 0x18c6: 0x2039, 0x18c7: 0x0269, 0x18c8: 0x01d9, 0x18c9: 0x0fa9, 0x18ca: 0x0fb9, 0x18cb: 0x1089,
+ 0x18cc: 0x0279, 0x18cd: 0x0369, 0x18ce: 0x0289, 0x18cf: 0x13d1, 0x18d0: 0x0039, 0x18d1: 0x0ee9,
+ 0x18d2: 0x1159, 0x18d3: 0x0ef9, 0x18d4: 0x0f09, 0x18d5: 0x1199, 0x18d6: 0x0f31, 0x18d7: 0x0249,
+ 0x18d8: 0x0f41, 0x18d9: 0x0259, 0x18da: 0x0f51, 0x18db: 0x0359, 0x18dc: 0x0f61, 0x18dd: 0x0f71,
+ 0x18de: 0x00d9, 0x18df: 0x0f99, 0x18e0: 0x2039, 0x18e1: 0x0269, 0x18e2: 0x01d9, 0x18e3: 0x0fa9,
+ 0x18e4: 0x0fb9, 0x18e5: 0x1089, 0x18e6: 0x0279, 0x18e7: 0x0369, 0x18e8: 0x0289, 0x18e9: 0x13d1,
+ 0x18ea: 0x0039, 0x18eb: 0x0ee9, 0x18ec: 0x1159, 0x18ed: 0x0ef9, 0x18ee: 0x0f09, 0x18ef: 0x1199,
+ 0x18f0: 0x0f31, 0x18f1: 0x0249, 0x18f2: 0x0f41, 0x18f3: 0x0259, 0x18f4: 0x0f51, 0x18f5: 0x0359,
+ 0x18f6: 0x0f61, 0x18f7: 0x0f71, 0x18f8: 0x00d9, 0x18f9: 0x0f99, 0x18fa: 0x2039, 0x18fb: 0x0269,
+ 0x18fc: 0x01d9, 0x18fd: 0x0fa9, 0x18fe: 0x0fb9, 0x18ff: 0x1089,
+ // Block 0x64, offset 0x1900
+ 0x1900: 0x0279, 0x1901: 0x0369, 0x1902: 0x0289, 0x1903: 0x13d1, 0x1904: 0x0039, 0x1905: 0x0ee9,
+ 0x1906: 0x0040, 0x1907: 0x0ef9, 0x1908: 0x0f09, 0x1909: 0x1199, 0x190a: 0x0f31, 0x190b: 0x0040,
+ 0x190c: 0x0040, 0x190d: 0x0259, 0x190e: 0x0f51, 0x190f: 0x0359, 0x1910: 0x0f61, 0x1911: 0x0f71,
+ 0x1912: 0x00d9, 0x1913: 0x0f99, 0x1914: 0x2039, 0x1915: 0x0040, 0x1916: 0x01d9, 0x1917: 0x0fa9,
+ 0x1918: 0x0fb9, 0x1919: 0x1089, 0x191a: 0x0279, 0x191b: 0x0369, 0x191c: 0x0289, 0x191d: 0x0040,
+ 0x191e: 0x0039, 0x191f: 0x0ee9, 0x1920: 0x1159, 0x1921: 0x0ef9, 0x1922: 0x0f09, 0x1923: 0x1199,
+ 0x1924: 0x0f31, 0x1925: 0x0249, 0x1926: 0x0f41, 0x1927: 0x0259, 0x1928: 0x0f51, 0x1929: 0x0359,
+ 0x192a: 0x0f61, 0x192b: 0x0f71, 0x192c: 0x00d9, 0x192d: 0x0f99, 0x192e: 0x2039, 0x192f: 0x0269,
+ 0x1930: 0x01d9, 0x1931: 0x0fa9, 0x1932: 0x0fb9, 0x1933: 0x1089, 0x1934: 0x0279, 0x1935: 0x0369,
+ 0x1936: 0x0289, 0x1937: 0x13d1, 0x1938: 0x0039, 0x1939: 0x0ee9, 0x193a: 0x0040, 0x193b: 0x0ef9,
+ 0x193c: 0x0f09, 0x193d: 0x1199, 0x193e: 0x0f31, 0x193f: 0x0040,
+ // Block 0x65, offset 0x1940
+ 0x1940: 0x0f41, 0x1941: 0x0259, 0x1942: 0x0f51, 0x1943: 0x0359, 0x1944: 0x0f61, 0x1945: 0x0040,
+ 0x1946: 0x00d9, 0x1947: 0x0040, 0x1948: 0x0040, 0x1949: 0x0040, 0x194a: 0x01d9, 0x194b: 0x0fa9,
+ 0x194c: 0x0fb9, 0x194d: 0x1089, 0x194e: 0x0279, 0x194f: 0x0369, 0x1950: 0x0289, 0x1951: 0x0040,
+ 0x1952: 0x0039, 0x1953: 0x0ee9, 0x1954: 0x1159, 0x1955: 0x0ef9, 0x1956: 0x0f09, 0x1957: 0x1199,
+ 0x1958: 0x0f31, 0x1959: 0x0249, 0x195a: 0x0f41, 0x195b: 0x0259, 0x195c: 0x0f51, 0x195d: 0x0359,
+ 0x195e: 0x0f61, 0x195f: 0x0f71, 0x1960: 0x00d9, 0x1961: 0x0f99, 0x1962: 0x2039, 0x1963: 0x0269,
+ 0x1964: 0x01d9, 0x1965: 0x0fa9, 0x1966: 0x0fb9, 0x1967: 0x1089, 0x1968: 0x0279, 0x1969: 0x0369,
+ 0x196a: 0x0289, 0x196b: 0x13d1, 0x196c: 0x0039, 0x196d: 0x0ee9, 0x196e: 0x1159, 0x196f: 0x0ef9,
+ 0x1970: 0x0f09, 0x1971: 0x1199, 0x1972: 0x0f31, 0x1973: 0x0249, 0x1974: 0x0f41, 0x1975: 0x0259,
+ 0x1976: 0x0f51, 0x1977: 0x0359, 0x1978: 0x0f61, 0x1979: 0x0f71, 0x197a: 0x00d9, 0x197b: 0x0f99,
+ 0x197c: 0x2039, 0x197d: 0x0269, 0x197e: 0x01d9, 0x197f: 0x0fa9,
+ // Block 0x66, offset 0x1980
+ 0x1980: 0x0fb9, 0x1981: 0x1089, 0x1982: 0x0279, 0x1983: 0x0369, 0x1984: 0x0289, 0x1985: 0x13d1,
+ 0x1986: 0x0039, 0x1987: 0x0ee9, 0x1988: 0x1159, 0x1989: 0x0ef9, 0x198a: 0x0f09, 0x198b: 0x1199,
+ 0x198c: 0x0f31, 0x198d: 0x0249, 0x198e: 0x0f41, 0x198f: 0x0259, 0x1990: 0x0f51, 0x1991: 0x0359,
+ 0x1992: 0x0f61, 0x1993: 0x0f71, 0x1994: 0x00d9, 0x1995: 0x0f99, 0x1996: 0x2039, 0x1997: 0x0269,
+ 0x1998: 0x01d9, 0x1999: 0x0fa9, 0x199a: 0x0fb9, 0x199b: 0x1089, 0x199c: 0x0279, 0x199d: 0x0369,
+ 0x199e: 0x0289, 0x199f: 0x13d1, 0x19a0: 0x0039, 0x19a1: 0x0ee9, 0x19a2: 0x1159, 0x19a3: 0x0ef9,
+ 0x19a4: 0x0f09, 0x19a5: 0x1199, 0x19a6: 0x0f31, 0x19a7: 0x0249, 0x19a8: 0x0f41, 0x19a9: 0x0259,
+ 0x19aa: 0x0f51, 0x19ab: 0x0359, 0x19ac: 0x0f61, 0x19ad: 0x0f71, 0x19ae: 0x00d9, 0x19af: 0x0f99,
+ 0x19b0: 0x2039, 0x19b1: 0x0269, 0x19b2: 0x01d9, 0x19b3: 0x0fa9, 0x19b4: 0x0fb9, 0x19b5: 0x1089,
+ 0x19b6: 0x0279, 0x19b7: 0x0369, 0x19b8: 0x0289, 0x19b9: 0x13d1, 0x19ba: 0x0039, 0x19bb: 0x0ee9,
+ 0x19bc: 0x1159, 0x19bd: 0x0ef9, 0x19be: 0x0f09, 0x19bf: 0x1199,
+ // Block 0x67, offset 0x19c0
+ 0x19c0: 0x0f31, 0x19c1: 0x0249, 0x19c2: 0x0f41, 0x19c3: 0x0259, 0x19c4: 0x0f51, 0x19c5: 0x0359,
+ 0x19c6: 0x0f61, 0x19c7: 0x0f71, 0x19c8: 0x00d9, 0x19c9: 0x0f99, 0x19ca: 0x2039, 0x19cb: 0x0269,
+ 0x19cc: 0x01d9, 0x19cd: 0x0fa9, 0x19ce: 0x0fb9, 0x19cf: 0x1089, 0x19d0: 0x0279, 0x19d1: 0x0369,
+ 0x19d2: 0x0289, 0x19d3: 0x13d1, 0x19d4: 0x0039, 0x19d5: 0x0ee9, 0x19d6: 0x1159, 0x19d7: 0x0ef9,
+ 0x19d8: 0x0f09, 0x19d9: 0x1199, 0x19da: 0x0f31, 0x19db: 0x0249, 0x19dc: 0x0f41, 0x19dd: 0x0259,
+ 0x19de: 0x0f51, 0x19df: 0x0359, 0x19e0: 0x0f61, 0x19e1: 0x0f71, 0x19e2: 0x00d9, 0x19e3: 0x0f99,
+ 0x19e4: 0x2039, 0x19e5: 0x0269, 0x19e6: 0x01d9, 0x19e7: 0x0fa9, 0x19e8: 0x0fb9, 0x19e9: 0x1089,
+ 0x19ea: 0x0279, 0x19eb: 0x0369, 0x19ec: 0x0289, 0x19ed: 0x13d1, 0x19ee: 0x0039, 0x19ef: 0x0ee9,
+ 0x19f0: 0x1159, 0x19f1: 0x0ef9, 0x19f2: 0x0f09, 0x19f3: 0x1199, 0x19f4: 0x0f31, 0x19f5: 0x0249,
+ 0x19f6: 0x0f41, 0x19f7: 0x0259, 0x19f8: 0x0f51, 0x19f9: 0x0359, 0x19fa: 0x0f61, 0x19fb: 0x0f71,
+ 0x19fc: 0x00d9, 0x19fd: 0x0f99, 0x19fe: 0x2039, 0x19ff: 0x0269,
+ // Block 0x68, offset 0x1a00
+ 0x1a00: 0x01d9, 0x1a01: 0x0fa9, 0x1a02: 0x0fb9, 0x1a03: 0x1089, 0x1a04: 0x0279, 0x1a05: 0x0369,
+ 0x1a06: 0x0289, 0x1a07: 0x13d1, 0x1a08: 0x0039, 0x1a09: 0x0ee9, 0x1a0a: 0x1159, 0x1a0b: 0x0ef9,
+ 0x1a0c: 0x0f09, 0x1a0d: 0x1199, 0x1a0e: 0x0f31, 0x1a0f: 0x0249, 0x1a10: 0x0f41, 0x1a11: 0x0259,
+ 0x1a12: 0x0f51, 0x1a13: 0x0359, 0x1a14: 0x0f61, 0x1a15: 0x0f71, 0x1a16: 0x00d9, 0x1a17: 0x0f99,
+ 0x1a18: 0x2039, 0x1a19: 0x0269, 0x1a1a: 0x01d9, 0x1a1b: 0x0fa9, 0x1a1c: 0x0fb9, 0x1a1d: 0x1089,
+ 0x1a1e: 0x0279, 0x1a1f: 0x0369, 0x1a20: 0x0289, 0x1a21: 0x13d1, 0x1a22: 0x0039, 0x1a23: 0x0ee9,
+ 0x1a24: 0x1159, 0x1a25: 0x0ef9, 0x1a26: 0x0f09, 0x1a27: 0x1199, 0x1a28: 0x0f31, 0x1a29: 0x0249,
+ 0x1a2a: 0x0f41, 0x1a2b: 0x0259, 0x1a2c: 0x0f51, 0x1a2d: 0x0359, 0x1a2e: 0x0f61, 0x1a2f: 0x0f71,
+ 0x1a30: 0x00d9, 0x1a31: 0x0f99, 0x1a32: 0x2039, 0x1a33: 0x0269, 0x1a34: 0x01d9, 0x1a35: 0x0fa9,
+ 0x1a36: 0x0fb9, 0x1a37: 0x1089, 0x1a38: 0x0279, 0x1a39: 0x0369, 0x1a3a: 0x0289, 0x1a3b: 0x13d1,
+ 0x1a3c: 0x0039, 0x1a3d: 0x0ee9, 0x1a3e: 0x1159, 0x1a3f: 0x0ef9,
+ // Block 0x69, offset 0x1a40
+ 0x1a40: 0x0f09, 0x1a41: 0x1199, 0x1a42: 0x0f31, 0x1a43: 0x0249, 0x1a44: 0x0f41, 0x1a45: 0x0259,
+ 0x1a46: 0x0f51, 0x1a47: 0x0359, 0x1a48: 0x0f61, 0x1a49: 0x0f71, 0x1a4a: 0x00d9, 0x1a4b: 0x0f99,
+ 0x1a4c: 0x2039, 0x1a4d: 0x0269, 0x1a4e: 0x01d9, 0x1a4f: 0x0fa9, 0x1a50: 0x0fb9, 0x1a51: 0x1089,
+ 0x1a52: 0x0279, 0x1a53: 0x0369, 0x1a54: 0x0289, 0x1a55: 0x13d1, 0x1a56: 0x0039, 0x1a57: 0x0ee9,
+ 0x1a58: 0x1159, 0x1a59: 0x0ef9, 0x1a5a: 0x0f09, 0x1a5b: 0x1199, 0x1a5c: 0x0f31, 0x1a5d: 0x0249,
+ 0x1a5e: 0x0f41, 0x1a5f: 0x0259, 0x1a60: 0x0f51, 0x1a61: 0x0359, 0x1a62: 0x0f61, 0x1a63: 0x0f71,
+ 0x1a64: 0x00d9, 0x1a65: 0x0f99, 0x1a66: 0x2039, 0x1a67: 0x0269, 0x1a68: 0x01d9, 0x1a69: 0x0fa9,
+ 0x1a6a: 0x0fb9, 0x1a6b: 0x1089, 0x1a6c: 0x0279, 0x1a6d: 0x0369, 0x1a6e: 0x0289, 0x1a6f: 0x13d1,
+ 0x1a70: 0x0039, 0x1a71: 0x0ee9, 0x1a72: 0x1159, 0x1a73: 0x0ef9, 0x1a74: 0x0f09, 0x1a75: 0x1199,
+ 0x1a76: 0x0f31, 0x1a77: 0x0249, 0x1a78: 0x0f41, 0x1a79: 0x0259, 0x1a7a: 0x0f51, 0x1a7b: 0x0359,
+ 0x1a7c: 0x0f61, 0x1a7d: 0x0f71, 0x1a7e: 0x00d9, 0x1a7f: 0x0f99,
+ // Block 0x6a, offset 0x1a80
+ 0x1a80: 0x2039, 0x1a81: 0x0269, 0x1a82: 0x01d9, 0x1a83: 0x0fa9, 0x1a84: 0x0fb9, 0x1a85: 0x1089,
+ 0x1a86: 0x0279, 0x1a87: 0x0369, 0x1a88: 0x0289, 0x1a89: 0x13d1, 0x1a8a: 0x0039, 0x1a8b: 0x0ee9,
+ 0x1a8c: 0x1159, 0x1a8d: 0x0ef9, 0x1a8e: 0x0f09, 0x1a8f: 0x1199, 0x1a90: 0x0f31, 0x1a91: 0x0249,
+ 0x1a92: 0x0f41, 0x1a93: 0x0259, 0x1a94: 0x0f51, 0x1a95: 0x0359, 0x1a96: 0x0f61, 0x1a97: 0x0f71,
+ 0x1a98: 0x00d9, 0x1a99: 0x0f99, 0x1a9a: 0x2039, 0x1a9b: 0x0269, 0x1a9c: 0x01d9, 0x1a9d: 0x0fa9,
+ 0x1a9e: 0x0fb9, 0x1a9f: 0x1089, 0x1aa0: 0x0279, 0x1aa1: 0x0369, 0x1aa2: 0x0289, 0x1aa3: 0x13d1,
+ 0x1aa4: 0xba81, 0x1aa5: 0xba99, 0x1aa6: 0x0040, 0x1aa7: 0x0040, 0x1aa8: 0xbab1, 0x1aa9: 0x1099,
+ 0x1aaa: 0x10b1, 0x1aab: 0x10c9, 0x1aac: 0xbac9, 0x1aad: 0xbae1, 0x1aae: 0xbaf9, 0x1aaf: 0x1429,
+ 0x1ab0: 0x1a31, 0x1ab1: 0xbb11, 0x1ab2: 0xbb29, 0x1ab3: 0xbb41, 0x1ab4: 0xbb59, 0x1ab5: 0xbb71,
+ 0x1ab6: 0xbb89, 0x1ab7: 0x2109, 0x1ab8: 0x1111, 0x1ab9: 0x1429, 0x1aba: 0xbba1, 0x1abb: 0xbbb9,
+ 0x1abc: 0xbbd1, 0x1abd: 0x10e1, 0x1abe: 0x10f9, 0x1abf: 0xbbe9,
+ // Block 0x6b, offset 0x1ac0
+ 0x1ac0: 0x2079, 0x1ac1: 0xbc01, 0x1ac2: 0xbab1, 0x1ac3: 0x1099, 0x1ac4: 0x10b1, 0x1ac5: 0x10c9,
+ 0x1ac6: 0xbac9, 0x1ac7: 0xbae1, 0x1ac8: 0xbaf9, 0x1ac9: 0x1429, 0x1aca: 0x1a31, 0x1acb: 0xbb11,
+ 0x1acc: 0xbb29, 0x1acd: 0xbb41, 0x1ace: 0xbb59, 0x1acf: 0xbb71, 0x1ad0: 0xbb89, 0x1ad1: 0x2109,
+ 0x1ad2: 0x1111, 0x1ad3: 0xbba1, 0x1ad4: 0xbba1, 0x1ad5: 0xbbb9, 0x1ad6: 0xbbd1, 0x1ad7: 0x10e1,
+ 0x1ad8: 0x10f9, 0x1ad9: 0xbbe9, 0x1ada: 0x2079, 0x1adb: 0xbc21, 0x1adc: 0xbac9, 0x1add: 0x1429,
+ 0x1ade: 0xbb11, 0x1adf: 0x10e1, 0x1ae0: 0x1111, 0x1ae1: 0x2109, 0x1ae2: 0xbab1, 0x1ae3: 0x1099,
+ 0x1ae4: 0x10b1, 0x1ae5: 0x10c9, 0x1ae6: 0xbac9, 0x1ae7: 0xbae1, 0x1ae8: 0xbaf9, 0x1ae9: 0x1429,
+ 0x1aea: 0x1a31, 0x1aeb: 0xbb11, 0x1aec: 0xbb29, 0x1aed: 0xbb41, 0x1aee: 0xbb59, 0x1aef: 0xbb71,
+ 0x1af0: 0xbb89, 0x1af1: 0x2109, 0x1af2: 0x1111, 0x1af3: 0x1429, 0x1af4: 0xbba1, 0x1af5: 0xbbb9,
+ 0x1af6: 0xbbd1, 0x1af7: 0x10e1, 0x1af8: 0x10f9, 0x1af9: 0xbbe9, 0x1afa: 0x2079, 0x1afb: 0xbc01,
+ 0x1afc: 0xbab1, 0x1afd: 0x1099, 0x1afe: 0x10b1, 0x1aff: 0x10c9,
+ // Block 0x6c, offset 0x1b00
+ 0x1b00: 0xbac9, 0x1b01: 0xbae1, 0x1b02: 0xbaf9, 0x1b03: 0x1429, 0x1b04: 0x1a31, 0x1b05: 0xbb11,
+ 0x1b06: 0xbb29, 0x1b07: 0xbb41, 0x1b08: 0xbb59, 0x1b09: 0xbb71, 0x1b0a: 0xbb89, 0x1b0b: 0x2109,
+ 0x1b0c: 0x1111, 0x1b0d: 0xbba1, 0x1b0e: 0xbba1, 0x1b0f: 0xbbb9, 0x1b10: 0xbbd1, 0x1b11: 0x10e1,
+ 0x1b12: 0x10f9, 0x1b13: 0xbbe9, 0x1b14: 0x2079, 0x1b15: 0xbc21, 0x1b16: 0xbac9, 0x1b17: 0x1429,
+ 0x1b18: 0xbb11, 0x1b19: 0x10e1, 0x1b1a: 0x1111, 0x1b1b: 0x2109, 0x1b1c: 0xbab1, 0x1b1d: 0x1099,
+ 0x1b1e: 0x10b1, 0x1b1f: 0x10c9, 0x1b20: 0xbac9, 0x1b21: 0xbae1, 0x1b22: 0xbaf9, 0x1b23: 0x1429,
+ 0x1b24: 0x1a31, 0x1b25: 0xbb11, 0x1b26: 0xbb29, 0x1b27: 0xbb41, 0x1b28: 0xbb59, 0x1b29: 0xbb71,
+ 0x1b2a: 0xbb89, 0x1b2b: 0x2109, 0x1b2c: 0x1111, 0x1b2d: 0x1429, 0x1b2e: 0xbba1, 0x1b2f: 0xbbb9,
+ 0x1b30: 0xbbd1, 0x1b31: 0x10e1, 0x1b32: 0x10f9, 0x1b33: 0xbbe9, 0x1b34: 0x2079, 0x1b35: 0xbc01,
+ 0x1b36: 0xbab1, 0x1b37: 0x1099, 0x1b38: 0x10b1, 0x1b39: 0x10c9, 0x1b3a: 0xbac9, 0x1b3b: 0xbae1,
+ 0x1b3c: 0xbaf9, 0x1b3d: 0x1429, 0x1b3e: 0x1a31, 0x1b3f: 0xbb11,
+ // Block 0x6d, offset 0x1b40
+ 0x1b40: 0xbb29, 0x1b41: 0xbb41, 0x1b42: 0xbb59, 0x1b43: 0xbb71, 0x1b44: 0xbb89, 0x1b45: 0x2109,
+ 0x1b46: 0x1111, 0x1b47: 0xbba1, 0x1b48: 0xbba1, 0x1b49: 0xbbb9, 0x1b4a: 0xbbd1, 0x1b4b: 0x10e1,
+ 0x1b4c: 0x10f9, 0x1b4d: 0xbbe9, 0x1b4e: 0x2079, 0x1b4f: 0xbc21, 0x1b50: 0xbac9, 0x1b51: 0x1429,
+ 0x1b52: 0xbb11, 0x1b53: 0x10e1, 0x1b54: 0x1111, 0x1b55: 0x2109, 0x1b56: 0xbab1, 0x1b57: 0x1099,
+ 0x1b58: 0x10b1, 0x1b59: 0x10c9, 0x1b5a: 0xbac9, 0x1b5b: 0xbae1, 0x1b5c: 0xbaf9, 0x1b5d: 0x1429,
+ 0x1b5e: 0x1a31, 0x1b5f: 0xbb11, 0x1b60: 0xbb29, 0x1b61: 0xbb41, 0x1b62: 0xbb59, 0x1b63: 0xbb71,
+ 0x1b64: 0xbb89, 0x1b65: 0x2109, 0x1b66: 0x1111, 0x1b67: 0x1429, 0x1b68: 0xbba1, 0x1b69: 0xbbb9,
+ 0x1b6a: 0xbbd1, 0x1b6b: 0x10e1, 0x1b6c: 0x10f9, 0x1b6d: 0xbbe9, 0x1b6e: 0x2079, 0x1b6f: 0xbc01,
+ 0x1b70: 0xbab1, 0x1b71: 0x1099, 0x1b72: 0x10b1, 0x1b73: 0x10c9, 0x1b74: 0xbac9, 0x1b75: 0xbae1,
+ 0x1b76: 0xbaf9, 0x1b77: 0x1429, 0x1b78: 0x1a31, 0x1b79: 0xbb11, 0x1b7a: 0xbb29, 0x1b7b: 0xbb41,
+ 0x1b7c: 0xbb59, 0x1b7d: 0xbb71, 0x1b7e: 0xbb89, 0x1b7f: 0x2109,
+ // Block 0x6e, offset 0x1b80
+ 0x1b80: 0x1111, 0x1b81: 0xbba1, 0x1b82: 0xbba1, 0x1b83: 0xbbb9, 0x1b84: 0xbbd1, 0x1b85: 0x10e1,
+ 0x1b86: 0x10f9, 0x1b87: 0xbbe9, 0x1b88: 0x2079, 0x1b89: 0xbc21, 0x1b8a: 0xbac9, 0x1b8b: 0x1429,
+ 0x1b8c: 0xbb11, 0x1b8d: 0x10e1, 0x1b8e: 0x1111, 0x1b8f: 0x2109, 0x1b90: 0xbab1, 0x1b91: 0x1099,
+ 0x1b92: 0x10b1, 0x1b93: 0x10c9, 0x1b94: 0xbac9, 0x1b95: 0xbae1, 0x1b96: 0xbaf9, 0x1b97: 0x1429,
+ 0x1b98: 0x1a31, 0x1b99: 0xbb11, 0x1b9a: 0xbb29, 0x1b9b: 0xbb41, 0x1b9c: 0xbb59, 0x1b9d: 0xbb71,
+ 0x1b9e: 0xbb89, 0x1b9f: 0x2109, 0x1ba0: 0x1111, 0x1ba1: 0x1429, 0x1ba2: 0xbba1, 0x1ba3: 0xbbb9,
+ 0x1ba4: 0xbbd1, 0x1ba5: 0x10e1, 0x1ba6: 0x10f9, 0x1ba7: 0xbbe9, 0x1ba8: 0x2079, 0x1ba9: 0xbc01,
+ 0x1baa: 0xbab1, 0x1bab: 0x1099, 0x1bac: 0x10b1, 0x1bad: 0x10c9, 0x1bae: 0xbac9, 0x1baf: 0xbae1,
+ 0x1bb0: 0xbaf9, 0x1bb1: 0x1429, 0x1bb2: 0x1a31, 0x1bb3: 0xbb11, 0x1bb4: 0xbb29, 0x1bb5: 0xbb41,
+ 0x1bb6: 0xbb59, 0x1bb7: 0xbb71, 0x1bb8: 0xbb89, 0x1bb9: 0x2109, 0x1bba: 0x1111, 0x1bbb: 0xbba1,
+ 0x1bbc: 0xbba1, 0x1bbd: 0xbbb9, 0x1bbe: 0xbbd1, 0x1bbf: 0x10e1,
+ // Block 0x6f, offset 0x1bc0
+ 0x1bc0: 0x10f9, 0x1bc1: 0xbbe9, 0x1bc2: 0x2079, 0x1bc3: 0xbc21, 0x1bc4: 0xbac9, 0x1bc5: 0x1429,
+ 0x1bc6: 0xbb11, 0x1bc7: 0x10e1, 0x1bc8: 0x1111, 0x1bc9: 0x2109, 0x1bca: 0xbc41, 0x1bcb: 0xbc41,
+ 0x1bcc: 0x0040, 0x1bcd: 0x0040, 0x1bce: 0x1f41, 0x1bcf: 0x00c9, 0x1bd0: 0x0069, 0x1bd1: 0x0079,
+ 0x1bd2: 0x1f51, 0x1bd3: 0x1f61, 0x1bd4: 0x1f71, 0x1bd5: 0x1f81, 0x1bd6: 0x1f91, 0x1bd7: 0x1fa1,
+ 0x1bd8: 0x1f41, 0x1bd9: 0x00c9, 0x1bda: 0x0069, 0x1bdb: 0x0079, 0x1bdc: 0x1f51, 0x1bdd: 0x1f61,
+ 0x1bde: 0x1f71, 0x1bdf: 0x1f81, 0x1be0: 0x1f91, 0x1be1: 0x1fa1, 0x1be2: 0x1f41, 0x1be3: 0x00c9,
+ 0x1be4: 0x0069, 0x1be5: 0x0079, 0x1be6: 0x1f51, 0x1be7: 0x1f61, 0x1be8: 0x1f71, 0x1be9: 0x1f81,
+ 0x1bea: 0x1f91, 0x1beb: 0x1fa1, 0x1bec: 0x1f41, 0x1bed: 0x00c9, 0x1bee: 0x0069, 0x1bef: 0x0079,
+ 0x1bf0: 0x1f51, 0x1bf1: 0x1f61, 0x1bf2: 0x1f71, 0x1bf3: 0x1f81, 0x1bf4: 0x1f91, 0x1bf5: 0x1fa1,
+ 0x1bf6: 0x1f41, 0x1bf7: 0x00c9, 0x1bf8: 0x0069, 0x1bf9: 0x0079, 0x1bfa: 0x1f51, 0x1bfb: 0x1f61,
+ 0x1bfc: 0x1f71, 0x1bfd: 0x1f81, 0x1bfe: 0x1f91, 0x1bff: 0x1fa1,
+ // Block 0x70, offset 0x1c00
+ 0x1c00: 0xe115, 0x1c01: 0xe115, 0x1c02: 0xe135, 0x1c03: 0xe135, 0x1c04: 0xe115, 0x1c05: 0xe115,
+ 0x1c06: 0xe175, 0x1c07: 0xe175, 0x1c08: 0xe115, 0x1c09: 0xe115, 0x1c0a: 0xe135, 0x1c0b: 0xe135,
+ 0x1c0c: 0xe115, 0x1c0d: 0xe115, 0x1c0e: 0xe1f5, 0x1c0f: 0xe1f5, 0x1c10: 0xe115, 0x1c11: 0xe115,
+ 0x1c12: 0xe135, 0x1c13: 0xe135, 0x1c14: 0xe115, 0x1c15: 0xe115, 0x1c16: 0xe175, 0x1c17: 0xe175,
+ 0x1c18: 0xe115, 0x1c19: 0xe115, 0x1c1a: 0xe135, 0x1c1b: 0xe135, 0x1c1c: 0xe115, 0x1c1d: 0xe115,
+ 0x1c1e: 0x8b05, 0x1c1f: 0x8b05, 0x1c20: 0x04b5, 0x1c21: 0x04b5, 0x1c22: 0x0a08, 0x1c23: 0x0a08,
+ 0x1c24: 0x0a08, 0x1c25: 0x0a08, 0x1c26: 0x0a08, 0x1c27: 0x0a08, 0x1c28: 0x0a08, 0x1c29: 0x0a08,
+ 0x1c2a: 0x0a08, 0x1c2b: 0x0a08, 0x1c2c: 0x0a08, 0x1c2d: 0x0a08, 0x1c2e: 0x0a08, 0x1c2f: 0x0a08,
+ 0x1c30: 0x0a08, 0x1c31: 0x0a08, 0x1c32: 0x0a08, 0x1c33: 0x0a08, 0x1c34: 0x0a08, 0x1c35: 0x0a08,
+ 0x1c36: 0x0a08, 0x1c37: 0x0a08, 0x1c38: 0x0a08, 0x1c39: 0x0a08, 0x1c3a: 0x0a08, 0x1c3b: 0x0a08,
+ 0x1c3c: 0x0a08, 0x1c3d: 0x0a08, 0x1c3e: 0x0a08, 0x1c3f: 0x0a08,
+ // Block 0x71, offset 0x1c40
+ 0x1c40: 0xb189, 0x1c41: 0xb1a1, 0x1c42: 0xb201, 0x1c43: 0xb249, 0x1c44: 0x0040, 0x1c45: 0xb411,
+ 0x1c46: 0xb291, 0x1c47: 0xb219, 0x1c48: 0xb309, 0x1c49: 0xb429, 0x1c4a: 0xb399, 0x1c4b: 0xb3b1,
+ 0x1c4c: 0xb3c9, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0xb369, 0x1c51: 0xb2d9,
+ 0x1c52: 0xb381, 0x1c53: 0xb279, 0x1c54: 0xb2c1, 0x1c55: 0xb1d1, 0x1c56: 0xb1e9, 0x1c57: 0xb231,
+ 0x1c58: 0xb261, 0x1c59: 0xb2f1, 0x1c5a: 0xb321, 0x1c5b: 0xb351, 0x1c5c: 0xbc59, 0x1c5d: 0x7949,
+ 0x1c5e: 0xbc71, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040,
+ 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0x0040, 0x1c69: 0xb429,
+ 0x1c6a: 0xb399, 0x1c6b: 0xb3b1, 0x1c6c: 0xb3c9, 0x1c6d: 0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339,
+ 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1,
+ 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0x0040, 0x1c7b: 0xb351,
+ 0x1c7c: 0x0040, 0x1c7d: 0x0040, 0x1c7e: 0x0040, 0x1c7f: 0x0040,
+ // Block 0x72, offset 0x1c80
+ 0x1c80: 0x0040, 0x1c81: 0x0040, 0x1c82: 0xb201, 0x1c83: 0x0040, 0x1c84: 0x0040, 0x1c85: 0x0040,
+ 0x1c86: 0x0040, 0x1c87: 0xb219, 0x1c88: 0x0040, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1,
+ 0x1c8c: 0x0040, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0x0040, 0x1c91: 0xb2d9,
+ 0x1c92: 0xb381, 0x1c93: 0x0040, 0x1c94: 0xb2c1, 0x1c95: 0x0040, 0x1c96: 0x0040, 0x1c97: 0xb231,
+ 0x1c98: 0x0040, 0x1c99: 0xb2f1, 0x1c9a: 0x0040, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x7949,
+ 0x1c9e: 0x0040, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040,
+ 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429,
+ 0x1caa: 0xb399, 0x1cab: 0x0040, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339,
+ 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1,
+ 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351,
+ 0x1cbc: 0xbc59, 0x1cbd: 0x0040, 0x1cbe: 0xbc71, 0x1cbf: 0x0040,
+ // Block 0x73, offset 0x1cc0
+ 0x1cc0: 0xb189, 0x1cc1: 0xb1a1, 0x1cc2: 0xb201, 0x1cc3: 0xb249, 0x1cc4: 0xb3f9, 0x1cc5: 0xb411,
+ 0x1cc6: 0xb291, 0x1cc7: 0xb219, 0x1cc8: 0xb309, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1,
+ 0x1ccc: 0xb3c9, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0xb369, 0x1cd1: 0xb2d9,
+ 0x1cd2: 0xb381, 0x1cd3: 0xb279, 0x1cd4: 0xb2c1, 0x1cd5: 0xb1d1, 0x1cd6: 0xb1e9, 0x1cd7: 0xb231,
+ 0x1cd8: 0xb261, 0x1cd9: 0xb2f1, 0x1cda: 0xb321, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x0040,
+ 0x1cde: 0x0040, 0x1cdf: 0x0040, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0xb249,
+ 0x1ce4: 0x0040, 0x1ce5: 0xb411, 0x1ce6: 0xb291, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429,
+ 0x1cea: 0x0040, 0x1ceb: 0xb3b1, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339,
+ 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0xb279, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1,
+ 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0xb261, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351,
+ 0x1cfc: 0x0040, 0x1cfd: 0x0040, 0x1cfe: 0x0040, 0x1cff: 0x0040,
+ // Block 0x74, offset 0x1d00
+ 0x1d00: 0x0040, 0x1d01: 0xbca2, 0x1d02: 0xbcba, 0x1d03: 0xbcd2, 0x1d04: 0xbcea, 0x1d05: 0xbd02,
+ 0x1d06: 0xbd1a, 0x1d07: 0xbd32, 0x1d08: 0xbd4a, 0x1d09: 0xbd62, 0x1d0a: 0xbd7a, 0x1d0b: 0x0018,
+ 0x1d0c: 0x0018, 0x1d0d: 0x0040, 0x1d0e: 0x0040, 0x1d0f: 0x0040, 0x1d10: 0xbd92, 0x1d11: 0xbdb2,
+ 0x1d12: 0xbdd2, 0x1d13: 0xbdf2, 0x1d14: 0xbe12, 0x1d15: 0xbe32, 0x1d16: 0xbe52, 0x1d17: 0xbe72,
+ 0x1d18: 0xbe92, 0x1d19: 0xbeb2, 0x1d1a: 0xbed2, 0x1d1b: 0xbef2, 0x1d1c: 0xbf12, 0x1d1d: 0xbf32,
+ 0x1d1e: 0xbf52, 0x1d1f: 0xbf72, 0x1d20: 0xbf92, 0x1d21: 0xbfb2, 0x1d22: 0xbfd2, 0x1d23: 0xbff2,
+ 0x1d24: 0xc012, 0x1d25: 0xc032, 0x1d26: 0xc052, 0x1d27: 0xc072, 0x1d28: 0xc092, 0x1d29: 0xc0b2,
+ 0x1d2a: 0xc0d1, 0x1d2b: 0x1159, 0x1d2c: 0x0269, 0x1d2d: 0x6671, 0x1d2e: 0xc111, 0x1d2f: 0x0040,
+ 0x1d30: 0x0039, 0x1d31: 0x0ee9, 0x1d32: 0x1159, 0x1d33: 0x0ef9, 0x1d34: 0x0f09, 0x1d35: 0x1199,
+ 0x1d36: 0x0f31, 0x1d37: 0x0249, 0x1d38: 0x0f41, 0x1d39: 0x0259, 0x1d3a: 0x0f51, 0x1d3b: 0x0359,
+ 0x1d3c: 0x0f61, 0x1d3d: 0x0f71, 0x1d3e: 0x00d9, 0x1d3f: 0x0f99,
+ // Block 0x75, offset 0x1d40
+ 0x1d40: 0x2039, 0x1d41: 0x0269, 0x1d42: 0x01d9, 0x1d43: 0x0fa9, 0x1d44: 0x0fb9, 0x1d45: 0x1089,
+ 0x1d46: 0x0279, 0x1d47: 0x0369, 0x1d48: 0x0289, 0x1d49: 0x13d1, 0x1d4a: 0xc129, 0x1d4b: 0x65b1,
+ 0x1d4c: 0xc141, 0x1d4d: 0x1441, 0x1d4e: 0xc159, 0x1d4f: 0xc179, 0x1d50: 0x0018, 0x1d51: 0x0018,
+ 0x1d52: 0x0018, 0x1d53: 0x0018, 0x1d54: 0x0018, 0x1d55: 0x0018, 0x1d56: 0x0018, 0x1d57: 0x0018,
+ 0x1d58: 0x0018, 0x1d59: 0x0018, 0x1d5a: 0x0018, 0x1d5b: 0x0018, 0x1d5c: 0x0018, 0x1d5d: 0x0018,
+ 0x1d5e: 0x0018, 0x1d5f: 0x0018, 0x1d60: 0x0018, 0x1d61: 0x0018, 0x1d62: 0x0018, 0x1d63: 0x0018,
+ 0x1d64: 0x0018, 0x1d65: 0x0018, 0x1d66: 0x0018, 0x1d67: 0x0018, 0x1d68: 0x0018, 0x1d69: 0x0018,
+ 0x1d6a: 0xc191, 0x1d6b: 0xc1a9, 0x1d6c: 0x0040, 0x1d6d: 0x0040, 0x1d6e: 0x0040, 0x1d6f: 0x0040,
+ 0x1d70: 0x0018, 0x1d71: 0x0018, 0x1d72: 0x0018, 0x1d73: 0x0018, 0x1d74: 0x0018, 0x1d75: 0x0018,
+ 0x1d76: 0x0018, 0x1d77: 0x0018, 0x1d78: 0x0018, 0x1d79: 0x0018, 0x1d7a: 0x0018, 0x1d7b: 0x0018,
+ 0x1d7c: 0x0018, 0x1d7d: 0x0018, 0x1d7e: 0x0018, 0x1d7f: 0x0018,
+ // Block 0x76, offset 0x1d80
+ 0x1d80: 0xc1d9, 0x1d81: 0xc211, 0x1d82: 0xc249, 0x1d83: 0x0040, 0x1d84: 0x0040, 0x1d85: 0x0040,
+ 0x1d86: 0x0040, 0x1d87: 0x0040, 0x1d88: 0x0040, 0x1d89: 0x0040, 0x1d8a: 0x0040, 0x1d8b: 0x0040,
+ 0x1d8c: 0x0040, 0x1d8d: 0x0040, 0x1d8e: 0x0040, 0x1d8f: 0x0040, 0x1d90: 0xc269, 0x1d91: 0xc289,
+ 0x1d92: 0xc2a9, 0x1d93: 0xc2c9, 0x1d94: 0xc2e9, 0x1d95: 0xc309, 0x1d96: 0xc329, 0x1d97: 0xc349,
+ 0x1d98: 0xc369, 0x1d99: 0xc389, 0x1d9a: 0xc3a9, 0x1d9b: 0xc3c9, 0x1d9c: 0xc3e9, 0x1d9d: 0xc409,
+ 0x1d9e: 0xc429, 0x1d9f: 0xc449, 0x1da0: 0xc469, 0x1da1: 0xc489, 0x1da2: 0xc4a9, 0x1da3: 0xc4c9,
+ 0x1da4: 0xc4e9, 0x1da5: 0xc509, 0x1da6: 0xc529, 0x1da7: 0xc549, 0x1da8: 0xc569, 0x1da9: 0xc589,
+ 0x1daa: 0xc5a9, 0x1dab: 0xc5c9, 0x1dac: 0xc5e9, 0x1dad: 0xc609, 0x1dae: 0xc629, 0x1daf: 0xc649,
+ 0x1db0: 0xc669, 0x1db1: 0xc689, 0x1db2: 0xc6a9, 0x1db3: 0xc6c9, 0x1db4: 0xc6e9, 0x1db5: 0xc709,
+ 0x1db6: 0xc729, 0x1db7: 0xc749, 0x1db8: 0xc769, 0x1db9: 0xc789, 0x1dba: 0xc7a9, 0x1dbb: 0xc7c9,
+ 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040,
+ // Block 0x77, offset 0x1dc0
+ 0x1dc0: 0xcaf9, 0x1dc1: 0xcb19, 0x1dc2: 0xcb39, 0x1dc3: 0x8b1d, 0x1dc4: 0xcb59, 0x1dc5: 0xcb79,
+ 0x1dc6: 0xcb99, 0x1dc7: 0xcbb9, 0x1dc8: 0xcbd9, 0x1dc9: 0xcbf9, 0x1dca: 0xcc19, 0x1dcb: 0xcc39,
+ 0x1dcc: 0xcc59, 0x1dcd: 0x8b3d, 0x1dce: 0xcc79, 0x1dcf: 0xcc99, 0x1dd0: 0xccb9, 0x1dd1: 0xccd9,
+ 0x1dd2: 0x8b5d, 0x1dd3: 0xccf9, 0x1dd4: 0xcd19, 0x1dd5: 0xc429, 0x1dd6: 0x8b7d, 0x1dd7: 0xcd39,
+ 0x1dd8: 0xcd59, 0x1dd9: 0xcd79, 0x1dda: 0xcd99, 0x1ddb: 0xcdb9, 0x1ddc: 0x8b9d, 0x1ddd: 0xcdd9,
+ 0x1dde: 0xcdf9, 0x1ddf: 0xce19, 0x1de0: 0xce39, 0x1de1: 0xce59, 0x1de2: 0xc789, 0x1de3: 0xce79,
+ 0x1de4: 0xce99, 0x1de5: 0xceb9, 0x1de6: 0xced9, 0x1de7: 0xcef9, 0x1de8: 0xcf19, 0x1de9: 0xcf39,
+ 0x1dea: 0xcf59, 0x1deb: 0xcf79, 0x1dec: 0xcf99, 0x1ded: 0xcfb9, 0x1dee: 0xcfd9, 0x1def: 0xcff9,
+ 0x1df0: 0xd019, 0x1df1: 0xd039, 0x1df2: 0xd039, 0x1df3: 0xd039, 0x1df4: 0x8bbd, 0x1df5: 0xd059,
+ 0x1df6: 0xd079, 0x1df7: 0xd099, 0x1df8: 0x8bdd, 0x1df9: 0xd0b9, 0x1dfa: 0xd0d9, 0x1dfb: 0xd0f9,
+ 0x1dfc: 0xd119, 0x1dfd: 0xd139, 0x1dfe: 0xd159, 0x1dff: 0xd179,
+ // Block 0x78, offset 0x1e00
+ 0x1e00: 0xd199, 0x1e01: 0xd1b9, 0x1e02: 0xd1d9, 0x1e03: 0xd1f9, 0x1e04: 0xd219, 0x1e05: 0xd239,
+ 0x1e06: 0xd239, 0x1e07: 0xd259, 0x1e08: 0xd279, 0x1e09: 0xd299, 0x1e0a: 0xd2b9, 0x1e0b: 0xd2d9,
+ 0x1e0c: 0xd2f9, 0x1e0d: 0xd319, 0x1e0e: 0xd339, 0x1e0f: 0xd359, 0x1e10: 0xd379, 0x1e11: 0xd399,
+ 0x1e12: 0xd3b9, 0x1e13: 0xd3d9, 0x1e14: 0xd3f9, 0x1e15: 0xd419, 0x1e16: 0xd439, 0x1e17: 0xd459,
+ 0x1e18: 0xd479, 0x1e19: 0x8bfd, 0x1e1a: 0xd499, 0x1e1b: 0xd4b9, 0x1e1c: 0xd4d9, 0x1e1d: 0xc309,
+ 0x1e1e: 0xd4f9, 0x1e1f: 0xd519, 0x1e20: 0x8c1d, 0x1e21: 0x8c3d, 0x1e22: 0xd539, 0x1e23: 0xd559,
+ 0x1e24: 0xd579, 0x1e25: 0xd599, 0x1e26: 0xd5b9, 0x1e27: 0xd5d9, 0x1e28: 0x2040, 0x1e29: 0xd5f9,
+ 0x1e2a: 0xd619, 0x1e2b: 0xd619, 0x1e2c: 0x8c5d, 0x1e2d: 0xd639, 0x1e2e: 0xd659, 0x1e2f: 0xd679,
+ 0x1e30: 0xd699, 0x1e31: 0x8c7d, 0x1e32: 0xd6b9, 0x1e33: 0xd6d9, 0x1e34: 0x2040, 0x1e35: 0xd6f9,
+ 0x1e36: 0xd719, 0x1e37: 0xd739, 0x1e38: 0xd759, 0x1e39: 0xd779, 0x1e3a: 0xd799, 0x1e3b: 0x8c9d,
+ 0x1e3c: 0xd7b9, 0x1e3d: 0x8cbd, 0x1e3e: 0xd7d9, 0x1e3f: 0xd7f9,
+ // Block 0x79, offset 0x1e40
+ 0x1e40: 0xd819, 0x1e41: 0xd839, 0x1e42: 0xd859, 0x1e43: 0xd879, 0x1e44: 0xd899, 0x1e45: 0xd8b9,
+ 0x1e46: 0xd8d9, 0x1e47: 0xd8f9, 0x1e48: 0xd919, 0x1e49: 0x8cdd, 0x1e4a: 0xd939, 0x1e4b: 0xd959,
+ 0x1e4c: 0xd979, 0x1e4d: 0xd999, 0x1e4e: 0xd9b9, 0x1e4f: 0x8cfd, 0x1e50: 0xd9d9, 0x1e51: 0x8d1d,
+ 0x1e52: 0x8d3d, 0x1e53: 0xd9f9, 0x1e54: 0xda19, 0x1e55: 0xda19, 0x1e56: 0xda39, 0x1e57: 0x8d5d,
+ 0x1e58: 0x8d7d, 0x1e59: 0xda59, 0x1e5a: 0xda79, 0x1e5b: 0xda99, 0x1e5c: 0xdab9, 0x1e5d: 0xdad9,
+ 0x1e5e: 0xdaf9, 0x1e5f: 0xdb19, 0x1e60: 0xdb39, 0x1e61: 0xdb59, 0x1e62: 0xdb79, 0x1e63: 0xdb99,
+ 0x1e64: 0x8d9d, 0x1e65: 0xdbb9, 0x1e66: 0xdbd9, 0x1e67: 0xdbf9, 0x1e68: 0xdc19, 0x1e69: 0xdbf9,
+ 0x1e6a: 0xdc39, 0x1e6b: 0xdc59, 0x1e6c: 0xdc79, 0x1e6d: 0xdc99, 0x1e6e: 0xdcb9, 0x1e6f: 0xdcd9,
+ 0x1e70: 0xdcf9, 0x1e71: 0xdd19, 0x1e72: 0xdd39, 0x1e73: 0xdd59, 0x1e74: 0xdd79, 0x1e75: 0xdd99,
+ 0x1e76: 0xddb9, 0x1e77: 0xddd9, 0x1e78: 0x8dbd, 0x1e79: 0xddf9, 0x1e7a: 0xde19, 0x1e7b: 0xde39,
+ 0x1e7c: 0xde59, 0x1e7d: 0xde79, 0x1e7e: 0x8ddd, 0x1e7f: 0xde99,
+ // Block 0x7a, offset 0x1e80
+ 0x1e80: 0xe599, 0x1e81: 0xe5b9, 0x1e82: 0xe5d9, 0x1e83: 0xe5f9, 0x1e84: 0xe619, 0x1e85: 0xe639,
+ 0x1e86: 0x8efd, 0x1e87: 0xe659, 0x1e88: 0xe679, 0x1e89: 0xe699, 0x1e8a: 0xe6b9, 0x1e8b: 0xe6d9,
+ 0x1e8c: 0xe6f9, 0x1e8d: 0x8f1d, 0x1e8e: 0xe719, 0x1e8f: 0xe739, 0x1e90: 0x8f3d, 0x1e91: 0x8f5d,
+ 0x1e92: 0xe759, 0x1e93: 0xe779, 0x1e94: 0xe799, 0x1e95: 0xe7b9, 0x1e96: 0xe7d9, 0x1e97: 0xe7f9,
+ 0x1e98: 0xe819, 0x1e99: 0xe839, 0x1e9a: 0xe859, 0x1e9b: 0x8f7d, 0x1e9c: 0xe879, 0x1e9d: 0x8f9d,
+ 0x1e9e: 0xe899, 0x1e9f: 0x2040, 0x1ea0: 0xe8b9, 0x1ea1: 0xe8d9, 0x1ea2: 0xe8f9, 0x1ea3: 0x8fbd,
+ 0x1ea4: 0xe919, 0x1ea5: 0xe939, 0x1ea6: 0x8fdd, 0x1ea7: 0x8ffd, 0x1ea8: 0xe959, 0x1ea9: 0xe979,
+ 0x1eaa: 0xe999, 0x1eab: 0xe9b9, 0x1eac: 0xe9d9, 0x1ead: 0xe9d9, 0x1eae: 0xe9f9, 0x1eaf: 0xea19,
+ 0x1eb0: 0xea39, 0x1eb1: 0xea59, 0x1eb2: 0xea79, 0x1eb3: 0xea99, 0x1eb4: 0xeab9, 0x1eb5: 0x901d,
+ 0x1eb6: 0xead9, 0x1eb7: 0x903d, 0x1eb8: 0xeaf9, 0x1eb9: 0x905d, 0x1eba: 0xeb19, 0x1ebb: 0x907d,
+ 0x1ebc: 0x909d, 0x1ebd: 0x90bd, 0x1ebe: 0xeb39, 0x1ebf: 0xeb59,
+ // Block 0x7b, offset 0x1ec0
+ 0x1ec0: 0xeb79, 0x1ec1: 0x90dd, 0x1ec2: 0x90fd, 0x1ec3: 0x911d, 0x1ec4: 0x913d, 0x1ec5: 0xeb99,
+ 0x1ec6: 0xebb9, 0x1ec7: 0xebb9, 0x1ec8: 0xebd9, 0x1ec9: 0xebf9, 0x1eca: 0xec19, 0x1ecb: 0xec39,
+ 0x1ecc: 0xec59, 0x1ecd: 0x915d, 0x1ece: 0xec79, 0x1ecf: 0xec99, 0x1ed0: 0xecb9, 0x1ed1: 0xecd9,
+ 0x1ed2: 0x917d, 0x1ed3: 0xecf9, 0x1ed4: 0x919d, 0x1ed5: 0x91bd, 0x1ed6: 0xed19, 0x1ed7: 0xed39,
+ 0x1ed8: 0xed59, 0x1ed9: 0xed79, 0x1eda: 0xed99, 0x1edb: 0xedb9, 0x1edc: 0x91dd, 0x1edd: 0x91fd,
+ 0x1ede: 0x921d, 0x1edf: 0x2040, 0x1ee0: 0xedd9, 0x1ee1: 0x923d, 0x1ee2: 0xedf9, 0x1ee3: 0xee19,
+ 0x1ee4: 0xee39, 0x1ee5: 0x925d, 0x1ee6: 0xee59, 0x1ee7: 0xee79, 0x1ee8: 0xee99, 0x1ee9: 0xeeb9,
+ 0x1eea: 0xeed9, 0x1eeb: 0x927d, 0x1eec: 0xeef9, 0x1eed: 0xef19, 0x1eee: 0xef39, 0x1eef: 0xef59,
+ 0x1ef0: 0xef79, 0x1ef1: 0xef99, 0x1ef2: 0x929d, 0x1ef3: 0x92bd, 0x1ef4: 0xefb9, 0x1ef5: 0x92dd,
+ 0x1ef6: 0xefd9, 0x1ef7: 0x92fd, 0x1ef8: 0xeff9, 0x1ef9: 0xf019, 0x1efa: 0xf039, 0x1efb: 0x931d,
+ 0x1efc: 0x933d, 0x1efd: 0xf059, 0x1efe: 0x935d, 0x1eff: 0xf079,
+ // Block 0x7c, offset 0x1f00
+ 0x1f00: 0xf6b9, 0x1f01: 0xf6d9, 0x1f02: 0xf6f9, 0x1f03: 0xf719, 0x1f04: 0xf739, 0x1f05: 0x951d,
+ 0x1f06: 0xf759, 0x1f07: 0xf779, 0x1f08: 0xf799, 0x1f09: 0xf7b9, 0x1f0a: 0xf7d9, 0x1f0b: 0x953d,
+ 0x1f0c: 0x955d, 0x1f0d: 0xf7f9, 0x1f0e: 0xf819, 0x1f0f: 0xf839, 0x1f10: 0xf859, 0x1f11: 0xf879,
+ 0x1f12: 0xf899, 0x1f13: 0x957d, 0x1f14: 0xf8b9, 0x1f15: 0xf8d9, 0x1f16: 0xf8f9, 0x1f17: 0xf919,
+ 0x1f18: 0x959d, 0x1f19: 0x95bd, 0x1f1a: 0xf939, 0x1f1b: 0xf959, 0x1f1c: 0xf979, 0x1f1d: 0x95dd,
+ 0x1f1e: 0xf999, 0x1f1f: 0xf9b9, 0x1f20: 0x6815, 0x1f21: 0x95fd, 0x1f22: 0xf9d9, 0x1f23: 0xf9f9,
+ 0x1f24: 0xfa19, 0x1f25: 0x961d, 0x1f26: 0xfa39, 0x1f27: 0xfa59, 0x1f28: 0xfa79, 0x1f29: 0xfa99,
+ 0x1f2a: 0xfab9, 0x1f2b: 0xfad9, 0x1f2c: 0xfaf9, 0x1f2d: 0x963d, 0x1f2e: 0xfb19, 0x1f2f: 0xfb39,
+ 0x1f30: 0xfb59, 0x1f31: 0x965d, 0x1f32: 0xfb79, 0x1f33: 0xfb99, 0x1f34: 0xfbb9, 0x1f35: 0xfbd9,
+ 0x1f36: 0x7b35, 0x1f37: 0x967d, 0x1f38: 0xfbf9, 0x1f39: 0xfc19, 0x1f3a: 0xfc39, 0x1f3b: 0x969d,
+ 0x1f3c: 0xfc59, 0x1f3d: 0x96bd, 0x1f3e: 0xfc79, 0x1f3f: 0xfc79,
+ // Block 0x7d, offset 0x1f40
+ 0x1f40: 0xfc99, 0x1f41: 0x96dd, 0x1f42: 0xfcb9, 0x1f43: 0xfcd9, 0x1f44: 0xfcf9, 0x1f45: 0xfd19,
+ 0x1f46: 0xfd39, 0x1f47: 0xfd59, 0x1f48: 0xfd79, 0x1f49: 0x96fd, 0x1f4a: 0xfd99, 0x1f4b: 0xfdb9,
+ 0x1f4c: 0xfdd9, 0x1f4d: 0xfdf9, 0x1f4e: 0xfe19, 0x1f4f: 0xfe39, 0x1f50: 0x971d, 0x1f51: 0xfe59,
+ 0x1f52: 0x973d, 0x1f53: 0x975d, 0x1f54: 0x977d, 0x1f55: 0xfe79, 0x1f56: 0xfe99, 0x1f57: 0xfeb9,
+ 0x1f58: 0xfed9, 0x1f59: 0xfef9, 0x1f5a: 0xff19, 0x1f5b: 0xff39, 0x1f5c: 0xff59, 0x1f5d: 0x979d,
+ 0x1f5e: 0x0040, 0x1f5f: 0x0040, 0x1f60: 0x0040, 0x1f61: 0x0040, 0x1f62: 0x0040, 0x1f63: 0x0040,
+ 0x1f64: 0x0040, 0x1f65: 0x0040, 0x1f66: 0x0040, 0x1f67: 0x0040, 0x1f68: 0x0040, 0x1f69: 0x0040,
+ 0x1f6a: 0x0040, 0x1f6b: 0x0040, 0x1f6c: 0x0040, 0x1f6d: 0x0040, 0x1f6e: 0x0040, 0x1f6f: 0x0040,
+ 0x1f70: 0x0040, 0x1f71: 0x0040, 0x1f72: 0x0040, 0x1f73: 0x0040, 0x1f74: 0x0040, 0x1f75: 0x0040,
+ 0x1f76: 0x0040, 0x1f77: 0x0040, 0x1f78: 0x0040, 0x1f79: 0x0040, 0x1f7a: 0x0040, 0x1f7b: 0x0040,
+ 0x1f7c: 0x0040, 0x1f7d: 0x0040, 0x1f7e: 0x0040, 0x1f7f: 0x0040,
+}
+
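+// Editorial note (not part of the generated file): idnaValues above and
+// idnaIndex below form a conventional two-stage lookup table. Each value
+// block holds 64 entries, so a block's starting offset is block<<6; the
+// "// Block 0xNN, offset 0xNNNN" comments in the data reflect exactly that.
+// A minimal sketch of the addressing, with hypothetical names (lookupSketch
+// and its parameters are illustrative assumptions, not the generated API):
+//
+//	func lookupSketch(index, values []uint16, hi, lo byte) uint16 {
+//		block := uint32(index[hi])              // index selects a 64-entry block
+//		return values[block<<6|uint32(lo&0x3f)] // low 6 bits select within it
+//	}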
+// idnaIndex: 35 blocks, 2240 entries, 4480 bytes
+// Block 0 is the zero block.
+var idnaIndex = [2240]uint16{
+ // Block 0x0, offset 0x0
+ // Block 0x1, offset 0x40
+ // Block 0x2, offset 0x80
+ // Block 0x3, offset 0xc0
+ 0xc2: 0x01, 0xc3: 0x7c, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05,
+ 0xc8: 0x06, 0xc9: 0x7d, 0xca: 0x7e, 0xcb: 0x07, 0xcc: 0x7f, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a,
+ 0xd0: 0x80, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x81, 0xd6: 0x82, 0xd7: 0x83,
+ 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x84, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87,
+ 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07,
+ 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c,
+ 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20,
+ // Block 0x4, offset 0x100
+ 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x13, 0x126: 0x14, 0x127: 0x15,
+ 0x128: 0x16, 0x129: 0x17, 0x12a: 0x18, 0x12b: 0x19, 0x12c: 0x1a, 0x12d: 0x1b, 0x12e: 0x1c, 0x12f: 0x8d,
+ 0x130: 0x8e, 0x131: 0x1d, 0x132: 0x1e, 0x133: 0x1f, 0x134: 0x8f, 0x135: 0x20, 0x136: 0x90, 0x137: 0x91,
+ 0x138: 0x92, 0x139: 0x93, 0x13a: 0x21, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x22, 0x13e: 0x23, 0x13f: 0x96,
+ // Block 0x5, offset 0x140
+ 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e,
+ 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6,
+ 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f,
+ 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae,
+ 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6,
+ 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe,
+ 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x24, 0x175: 0x25, 0x176: 0x26, 0x177: 0xc3,
+ 0x178: 0x27, 0x179: 0x27, 0x17a: 0x28, 0x17b: 0x27, 0x17c: 0xc4, 0x17d: 0x29, 0x17e: 0x2a, 0x17f: 0x2b,
+ // Block 0x6, offset 0x180
+ 0x180: 0x2c, 0x181: 0x2d, 0x182: 0x2e, 0x183: 0xc5, 0x184: 0x2f, 0x185: 0x30, 0x186: 0xc6, 0x187: 0x9b,
+ 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xca,
+ 0x190: 0xcb, 0x191: 0x31, 0x192: 0x32, 0x193: 0x33, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b,
+ 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b,
+ 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b,
+ 0x1a8: 0xcc, 0x1a9: 0xcd, 0x1aa: 0x9b, 0x1ab: 0xce, 0x1ac: 0x9b, 0x1ad: 0xcf, 0x1ae: 0xd0, 0x1af: 0xd1,
+ 0x1b0: 0xd2, 0x1b1: 0x34, 0x1b2: 0x27, 0x1b3: 0x35, 0x1b4: 0xd3, 0x1b5: 0xd4, 0x1b6: 0xd5, 0x1b7: 0xd6,
+ 0x1b8: 0xd7, 0x1b9: 0xd8, 0x1ba: 0xd9, 0x1bb: 0xda, 0x1bc: 0xdb, 0x1bd: 0xdc, 0x1be: 0xdd, 0x1bf: 0x36,
+ // Block 0x7, offset 0x1c0
+ 0x1c0: 0x37, 0x1c1: 0xde, 0x1c2: 0xdf, 0x1c3: 0xe0, 0x1c4: 0xe1, 0x1c5: 0x38, 0x1c6: 0x39, 0x1c7: 0xe2,
+ 0x1c8: 0xe3, 0x1c9: 0x3a, 0x1ca: 0x3b, 0x1cb: 0x3c, 0x1cc: 0x3d, 0x1cd: 0x3e, 0x1ce: 0x3f, 0x1cf: 0x40,
+ 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f,
+ 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f,
+ 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f,
+ 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f,
+ 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f,
+ 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f,
+ // Block 0x8, offset 0x200
+ 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f,
+ 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f,
+ 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f,
+ 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f,
+ 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f,
+ 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f,
+ 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b,
+ 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f,
+ // Block 0x9, offset 0x240
+ 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f,
+ 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f,
+ 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f,
+ 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f,
+ 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f,
+ 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f,
+ 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f,
+ 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f,
+ // Block 0xa, offset 0x280
+ 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f,
+ 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f,
+ 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f,
+ 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f,
+ 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f,
+ 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f,
+ 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f,
+ 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe4,
+ // Block 0xb, offset 0x2c0
+ 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f,
+ 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f,
+ 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe5, 0x2d3: 0xe6, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f,
+ 0x2d8: 0xe7, 0x2d9: 0x41, 0x2da: 0x42, 0x2db: 0xe8, 0x2dc: 0x43, 0x2dd: 0x44, 0x2de: 0x45, 0x2df: 0xe9,
+ 0x2e0: 0xea, 0x2e1: 0xeb, 0x2e2: 0xec, 0x2e3: 0xed, 0x2e4: 0xee, 0x2e5: 0xef, 0x2e6: 0xf0, 0x2e7: 0xf1,
+ 0x2e8: 0xf2, 0x2e9: 0xf3, 0x2ea: 0xf4, 0x2eb: 0xf5, 0x2ec: 0xf6, 0x2ed: 0xf7, 0x2ee: 0xf8, 0x2ef: 0xf9,
+ 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f,
+ 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f,
+ // Block 0xc, offset 0x300
+ 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f,
+ 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f,
+ 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f,
+ 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xfa, 0x31f: 0xfb,
+ // Block 0xd, offset 0x340
+ 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba,
+ 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba,
+ 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba,
+ 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba,
+ 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba,
+ 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba,
+ 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba,
+ 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba,
+ // Block 0xe, offset 0x380
+ 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba,
+ 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba,
+ 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba,
+ 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba,
+ 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfc, 0x3a5: 0xfd, 0x3a6: 0xfe, 0x3a7: 0xff,
+ 0x3a8: 0x46, 0x3a9: 0x100, 0x3aa: 0x101, 0x3ab: 0x47, 0x3ac: 0x48, 0x3ad: 0x49, 0x3ae: 0x4a, 0x3af: 0x4b,
+ 0x3b0: 0x102, 0x3b1: 0x4c, 0x3b2: 0x4d, 0x3b3: 0x4e, 0x3b4: 0x4f, 0x3b5: 0x50, 0x3b6: 0x103, 0x3b7: 0x51,
+ 0x3b8: 0x52, 0x3b9: 0x53, 0x3ba: 0x54, 0x3bb: 0x55, 0x3bc: 0x56, 0x3bd: 0x57, 0x3be: 0x58, 0x3bf: 0x59,
+ // Block 0xf, offset 0x3c0
+ 0x3c0: 0x104, 0x3c1: 0x105, 0x3c2: 0x9f, 0x3c3: 0x106, 0x3c4: 0x107, 0x3c5: 0x9b, 0x3c6: 0x108, 0x3c7: 0x109,
+ 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x10a, 0x3cb: 0x10b, 0x3cc: 0x10c, 0x3cd: 0x10d, 0x3ce: 0x10e, 0x3cf: 0x10f,
+ 0x3d0: 0x110, 0x3d1: 0x9f, 0x3d2: 0x111, 0x3d3: 0x112, 0x3d4: 0x113, 0x3d5: 0x114, 0x3d6: 0xba, 0x3d7: 0xba,
+ 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x115, 0x3dd: 0x116, 0x3de: 0xba, 0x3df: 0xba,
+ 0x3e0: 0x117, 0x3e1: 0x118, 0x3e2: 0x119, 0x3e3: 0x11a, 0x3e4: 0x11b, 0x3e5: 0xba, 0x3e6: 0x11c, 0x3e7: 0x11d,
+ 0x3e8: 0x11e, 0x3e9: 0x11f, 0x3ea: 0x120, 0x3eb: 0x5a, 0x3ec: 0x121, 0x3ed: 0x122, 0x3ee: 0x5b, 0x3ef: 0xba,
+ 0x3f0: 0x123, 0x3f1: 0x124, 0x3f2: 0x125, 0x3f3: 0x126, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba,
+ 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba,
+ // Block 0x10, offset 0x400
+ 0x400: 0x128, 0x401: 0x129, 0x402: 0x12a, 0x403: 0x12b, 0x404: 0x12c, 0x405: 0x12d, 0x406: 0x12e, 0x407: 0x12f,
+ 0x408: 0x130, 0x409: 0xba, 0x40a: 0x131, 0x40b: 0x132, 0x40c: 0x5c, 0x40d: 0x5d, 0x40e: 0xba, 0x40f: 0xba,
+ 0x410: 0x133, 0x411: 0x134, 0x412: 0x135, 0x413: 0x136, 0x414: 0xba, 0x415: 0xba, 0x416: 0x137, 0x417: 0x138,
+ 0x418: 0x139, 0x419: 0x13a, 0x41a: 0x13b, 0x41b: 0x13c, 0x41c: 0x13d, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba,
+ 0x420: 0xba, 0x421: 0xba, 0x422: 0x13e, 0x423: 0x13f, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba,
+ 0x428: 0xba, 0x429: 0xba, 0x42a: 0xba, 0x42b: 0x140, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba,
+ 0x430: 0x141, 0x431: 0x142, 0x432: 0x143, 0x433: 0xba, 0x434: 0xba, 0x435: 0xba, 0x436: 0xba, 0x437: 0xba,
+ 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba,
+ // Block 0x11, offset 0x440
+ 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f,
+ 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x144, 0x44f: 0xba,
+ 0x450: 0x9b, 0x451: 0x145, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x146, 0x456: 0xba, 0x457: 0xba,
+ 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba,
+ 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba,
+ 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba,
+ 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba,
+ 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba,
+ // Block 0x12, offset 0x480
+ 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f,
+ 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f,
+ 0x490: 0x147, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba,
+ 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba,
+ 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba,
+ 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba,
+ 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba,
+ 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba,
+ // Block 0x13, offset 0x4c0
+ 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba,
+ 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba,
+ 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f,
+ 0x4d8: 0x9f, 0x4d9: 0x148, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba,
+ 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba,
+ 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba,
+ 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba,
+ 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba,
+ // Block 0x14, offset 0x500
+ 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba,
+ 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba,
+ 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba,
+ 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba,
+ 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f,
+ 0x528: 0x140, 0x529: 0x149, 0x52a: 0xba, 0x52b: 0x14a, 0x52c: 0x14b, 0x52d: 0x14c, 0x52e: 0x14d, 0x52f: 0xba,
+ 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba,
+ 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x14e, 0x53e: 0x14f, 0x53f: 0x150,
+ // Block 0x15, offset 0x540
+ 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f,
+ 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f,
+ 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f,
+ 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x151,
+ 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f,
+ 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x152, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba,
+ 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba,
+ 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba,
+ // Block 0x16, offset 0x580
+ 0x580: 0x153, 0x581: 0xba, 0x582: 0xba, 0x583: 0xba, 0x584: 0xba, 0x585: 0xba, 0x586: 0xba, 0x587: 0xba,
+ 0x588: 0xba, 0x589: 0xba, 0x58a: 0xba, 0x58b: 0xba, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba,
+ 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba,
+ 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba,
+ 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba,
+ 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba,
+ 0x5b0: 0x9f, 0x5b1: 0x154, 0x5b2: 0x155, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba,
+ 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba,
+ // Block 0x17, offset 0x5c0
+ 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x156, 0x5c4: 0x157, 0x5c5: 0x158, 0x5c6: 0x159, 0x5c7: 0x15a,
+ 0x5c8: 0x9b, 0x5c9: 0x15b, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x15c, 0x5ce: 0xba, 0x5cf: 0xba,
+ 0x5d0: 0x5e, 0x5d1: 0x5f, 0x5d2: 0x60, 0x5d3: 0x61, 0x5d4: 0x62, 0x5d5: 0x63, 0x5d6: 0x64, 0x5d7: 0x65,
+ 0x5d8: 0x66, 0x5d9: 0x67, 0x5da: 0x68, 0x5db: 0x69, 0x5dc: 0x6a, 0x5dd: 0x6b, 0x5de: 0x6c, 0x5df: 0x6d,
+ 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b,
+ 0x5e8: 0x15d, 0x5e9: 0x15e, 0x5ea: 0x15f, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba,
+ 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba,
+ 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba,
+ // Block 0x18, offset 0x600
+ 0x600: 0x160, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba,
+ 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba,
+ 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba,
+ 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba,
+ 0x620: 0x123, 0x621: 0x123, 0x622: 0x123, 0x623: 0x161, 0x624: 0x6e, 0x625: 0x162, 0x626: 0xba, 0x627: 0xba,
+ 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba,
+ 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba,
+ 0x638: 0x6f, 0x639: 0x70, 0x63a: 0x71, 0x63b: 0x163, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba,
+ // Block 0x19, offset 0x640
+ 0x640: 0x164, 0x641: 0x9b, 0x642: 0x165, 0x643: 0x166, 0x644: 0x72, 0x645: 0x73, 0x646: 0x167, 0x647: 0x168,
+ 0x648: 0x74, 0x649: 0x169, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b,
+ 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b,
+ 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x16a, 0x65c: 0x9b, 0x65d: 0x16b, 0x65e: 0x9b, 0x65f: 0x16c,
+ 0x660: 0x16d, 0x661: 0x16e, 0x662: 0x16f, 0x663: 0xba, 0x664: 0x170, 0x665: 0x171, 0x666: 0x172, 0x667: 0x173,
+ 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba,
+ 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba,
+ 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba,
+ // Block 0x1a, offset 0x680
+ 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f,
+ 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f,
+ 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f,
+ 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x174, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f,
+ 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f,
+ 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f,
+ 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f,
+ 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f,
+ // Block 0x1b, offset 0x6c0
+ 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f,
+ 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f,
+ 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f,
+ 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x175, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f,
+ 0x6e0: 0x176, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f,
+ 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f,
+ 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f,
+ 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f,
+ // Block 0x1c, offset 0x700
+ 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f,
+ 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f,
+ 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f,
+ 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f,
+ 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f,
+ 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f,
+ 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f,
+ 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x177, 0x73b: 0xba, 0x73c: 0xba, 0x73d: 0xba, 0x73e: 0xba, 0x73f: 0xba,
+ // Block 0x1d, offset 0x740
+ 0x740: 0xba, 0x741: 0xba, 0x742: 0xba, 0x743: 0xba, 0x744: 0xba, 0x745: 0xba, 0x746: 0xba, 0x747: 0xba,
+ 0x748: 0xba, 0x749: 0xba, 0x74a: 0xba, 0x74b: 0xba, 0x74c: 0xba, 0x74d: 0xba, 0x74e: 0xba, 0x74f: 0xba,
+ 0x750: 0xba, 0x751: 0xba, 0x752: 0xba, 0x753: 0xba, 0x754: 0xba, 0x755: 0xba, 0x756: 0xba, 0x757: 0xba,
+ 0x758: 0xba, 0x759: 0xba, 0x75a: 0xba, 0x75b: 0xba, 0x75c: 0xba, 0x75d: 0xba, 0x75e: 0xba, 0x75f: 0xba,
+ 0x760: 0x75, 0x761: 0x76, 0x762: 0x77, 0x763: 0x178, 0x764: 0x78, 0x765: 0x79, 0x766: 0x179, 0x767: 0x7a,
+ 0x768: 0x7b, 0x769: 0xba, 0x76a: 0xba, 0x76b: 0xba, 0x76c: 0xba, 0x76d: 0xba, 0x76e: 0xba, 0x76f: 0xba,
+ 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba,
+ 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba,
+ // Block 0x1e, offset 0x780
+ 0x790: 0x0d, 0x791: 0x0e, 0x792: 0x0f, 0x793: 0x10, 0x794: 0x11, 0x795: 0x0b, 0x796: 0x12, 0x797: 0x07,
+ 0x798: 0x13, 0x799: 0x0b, 0x79a: 0x0b, 0x79b: 0x14, 0x79c: 0x0b, 0x79d: 0x15, 0x79e: 0x16, 0x79f: 0x17,
+ 0x7a0: 0x07, 0x7a1: 0x07, 0x7a2: 0x07, 0x7a3: 0x07, 0x7a4: 0x07, 0x7a5: 0x07, 0x7a6: 0x07, 0x7a7: 0x07,
+ 0x7a8: 0x07, 0x7a9: 0x07, 0x7aa: 0x18, 0x7ab: 0x19, 0x7ac: 0x1a, 0x7ad: 0x0b, 0x7ae: 0x0b, 0x7af: 0x1b,
+ 0x7b0: 0x0b, 0x7b1: 0x0b, 0x7b2: 0x0b, 0x7b3: 0x0b, 0x7b4: 0x0b, 0x7b5: 0x0b, 0x7b6: 0x0b, 0x7b7: 0x0b,
+ 0x7b8: 0x0b, 0x7b9: 0x0b, 0x7ba: 0x0b, 0x7bb: 0x0b, 0x7bc: 0x0b, 0x7bd: 0x0b, 0x7be: 0x0b, 0x7bf: 0x0b,
+ // Block 0x1f, offset 0x7c0
+ 0x7c0: 0x0b, 0x7c1: 0x0b, 0x7c2: 0x0b, 0x7c3: 0x0b, 0x7c4: 0x0b, 0x7c5: 0x0b, 0x7c6: 0x0b, 0x7c7: 0x0b,
+ 0x7c8: 0x0b, 0x7c9: 0x0b, 0x7ca: 0x0b, 0x7cb: 0x0b, 0x7cc: 0x0b, 0x7cd: 0x0b, 0x7ce: 0x0b, 0x7cf: 0x0b,
+ 0x7d0: 0x0b, 0x7d1: 0x0b, 0x7d2: 0x0b, 0x7d3: 0x0b, 0x7d4: 0x0b, 0x7d5: 0x0b, 0x7d6: 0x0b, 0x7d7: 0x0b,
+ 0x7d8: 0x0b, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x0b, 0x7dc: 0x0b, 0x7dd: 0x0b, 0x7de: 0x0b, 0x7df: 0x0b,
+ 0x7e0: 0x0b, 0x7e1: 0x0b, 0x7e2: 0x0b, 0x7e3: 0x0b, 0x7e4: 0x0b, 0x7e5: 0x0b, 0x7e6: 0x0b, 0x7e7: 0x0b,
+ 0x7e8: 0x0b, 0x7e9: 0x0b, 0x7ea: 0x0b, 0x7eb: 0x0b, 0x7ec: 0x0b, 0x7ed: 0x0b, 0x7ee: 0x0b, 0x7ef: 0x0b,
+ 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b,
+ 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b,
+ // Block 0x20, offset 0x800
+ 0x800: 0x17a, 0x801: 0x17b, 0x802: 0xba, 0x803: 0xba, 0x804: 0x17c, 0x805: 0x17c, 0x806: 0x17c, 0x807: 0x17d,
+ 0x808: 0xba, 0x809: 0xba, 0x80a: 0xba, 0x80b: 0xba, 0x80c: 0xba, 0x80d: 0xba, 0x80e: 0xba, 0x80f: 0xba,
+ 0x810: 0xba, 0x811: 0xba, 0x812: 0xba, 0x813: 0xba, 0x814: 0xba, 0x815: 0xba, 0x816: 0xba, 0x817: 0xba,
+ 0x818: 0xba, 0x819: 0xba, 0x81a: 0xba, 0x81b: 0xba, 0x81c: 0xba, 0x81d: 0xba, 0x81e: 0xba, 0x81f: 0xba,
+ 0x820: 0xba, 0x821: 0xba, 0x822: 0xba, 0x823: 0xba, 0x824: 0xba, 0x825: 0xba, 0x826: 0xba, 0x827: 0xba,
+ 0x828: 0xba, 0x829: 0xba, 0x82a: 0xba, 0x82b: 0xba, 0x82c: 0xba, 0x82d: 0xba, 0x82e: 0xba, 0x82f: 0xba,
+ 0x830: 0xba, 0x831: 0xba, 0x832: 0xba, 0x833: 0xba, 0x834: 0xba, 0x835: 0xba, 0x836: 0xba, 0x837: 0xba,
+ 0x838: 0xba, 0x839: 0xba, 0x83a: 0xba, 0x83b: 0xba, 0x83c: 0xba, 0x83d: 0xba, 0x83e: 0xba, 0x83f: 0xba,
+ // Block 0x21, offset 0x840
+ 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b,
+ 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b,
+ 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b,
+ 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b,
+ 0x860: 0x1e, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b,
+ 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b,
+ 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b,
+ 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b,
+ // Block 0x22, offset 0x880
+ 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b,
+ 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b,
+}
+
+// idnaSparseOffset: 258 entries, 516 bytes
+var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x93, 0x98, 0xa1, 0xb1, 0xbf, 0xcc, 0xd8, 0xe9, 0xf3, 0xfa, 0x107, 0x118, 0x11f, 0x12a, 0x139, 0x147, 0x151, 0x153, 0x158, 0x15b, 0x15e, 0x160, 0x16c, 0x177, 0x17f, 0x185, 0x18b, 0x190, 0x195, 0x198, 0x19c, 0x1a2, 0x1a7, 0x1b3, 0x1bd, 0x1c3, 0x1d4, 0x1de, 0x1e1, 0x1e9, 0x1ec, 0x1f9, 0x201, 0x205, 0x20c, 0x214, 0x224, 0x230, 0x232, 0x23c, 0x248, 0x254, 0x260, 0x268, 0x26d, 0x277, 0x288, 0x28c, 0x297, 0x29b, 0x2a4, 0x2ac, 0x2b2, 0x2b7, 0x2ba, 0x2bd, 0x2c1, 0x2c7, 0x2cb, 0x2cf, 0x2d5, 0x2dc, 0x2e2, 0x2ea, 0x2f1, 0x2fc, 0x306, 0x30a, 0x30d, 0x313, 0x317, 0x319, 0x31c, 0x31e, 0x321, 0x32b, 0x32e, 0x33d, 0x341, 0x346, 0x349, 0x34d, 0x352, 0x357, 0x35d, 0x363, 0x372, 0x378, 0x37c, 0x38b, 0x390, 0x398, 0x3a2, 0x3ad, 0x3b5, 0x3c6, 0x3cf, 0x3df, 0x3ec, 0x3f6, 0x3fb, 0x408, 0x40c, 0x411, 0x413, 0x417, 0x419, 0x41d, 0x426, 0x42c, 0x430, 0x440, 0x44a, 0x44f, 0x452, 0x458, 0x45f, 0x464, 0x468, 0x46e, 0x473, 0x47c, 0x481, 0x487, 0x48e, 0x495, 0x49c, 0x4a0, 0x4a5, 0x4a8, 0x4ad, 0x4b9, 0x4bf, 0x4c4, 0x4cb, 0x4d3, 0x4d8, 0x4dc, 0x4ec, 0x4f3, 0x4f7, 0x4fb, 0x502, 0x504, 0x507, 0x50a, 0x50e, 0x512, 0x518, 0x521, 0x52d, 0x534, 0x53d, 0x545, 0x54c, 0x55a, 0x567, 0x574, 0x57d, 0x581, 0x58f, 0x597, 0x5a2, 0x5ab, 0x5b1, 0x5b9, 0x5c2, 0x5cc, 0x5cf, 0x5db, 0x5de, 0x5e3, 0x5e6, 0x5f0, 0x5f9, 0x605, 0x608, 0x60d, 0x610, 0x613, 0x616, 0x61d, 0x624, 0x628, 0x633, 0x636, 0x63c, 0x641, 0x645, 0x648, 0x64b, 0x64e, 0x653, 0x65d, 0x660, 0x664, 0x673, 0x67f, 0x683, 0x688, 0x68d, 0x691, 0x696, 0x69f, 0x6aa, 0x6b0, 0x6b8, 0x6bc, 0x6c0, 0x6c6, 0x6cc, 0x6d1, 0x6d4, 0x6e2, 0x6e9, 0x6ec, 0x6ef, 0x6f3, 0x6f9, 0x6fe, 0x708, 0x70d, 0x710, 0x713, 0x716, 0x719, 0x71d, 0x720, 0x730, 0x741, 0x746, 0x748, 0x74a}
+
+// idnaSparseValues: 1869 entries, 7476 bytes
+var idnaSparseValues = [1869]valueRange{
+ // Block 0x0, offset 0x0
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe105, lo: 0x80, hi: 0x96},
+ {value: 0x0018, lo: 0x97, hi: 0x97},
+ {value: 0xe105, lo: 0x98, hi: 0x9e},
+ {value: 0x001f, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbf},
+ // Block 0x1, offset 0x8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0xe01d, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0335, lo: 0x83, hi: 0x83},
+ {value: 0x034d, lo: 0x84, hi: 0x84},
+ {value: 0x0365, lo: 0x85, hi: 0x85},
+ {value: 0xe00d, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0xe00d, lo: 0x88, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x89},
+ {value: 0xe00d, lo: 0x8a, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe00d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0x8d},
+ {value: 0xe00d, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0xbf},
+ // Block 0x2, offset 0x19
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x0249, lo: 0xb0, hi: 0xb0},
+ {value: 0x037d, lo: 0xb1, hi: 0xb1},
+ {value: 0x0259, lo: 0xb2, hi: 0xb2},
+ {value: 0x0269, lo: 0xb3, hi: 0xb3},
+ {value: 0x034d, lo: 0xb4, hi: 0xb4},
+ {value: 0x0395, lo: 0xb5, hi: 0xb5},
+ {value: 0xe1bd, lo: 0xb6, hi: 0xb6},
+ {value: 0x0279, lo: 0xb7, hi: 0xb7},
+ {value: 0x0289, lo: 0xb8, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbf},
+ // Block 0x3, offset 0x25
+ {value: 0x0000, lo: 0x01},
+ {value: 0x3308, lo: 0x80, hi: 0xbf},
+ // Block 0x4, offset 0x27
+ {value: 0x0000, lo: 0x04},
+ {value: 0x03f5, lo: 0x80, hi: 0x8f},
+ {value: 0xe105, lo: 0x90, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x5, offset 0x2c
+ {value: 0x0000, lo: 0x07},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x0545, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x0008, lo: 0x99, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x6, offset 0x34
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0401, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x88},
+ {value: 0x0018, lo: 0x89, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x3308, lo: 0x91, hi: 0xbd},
+ {value: 0x0818, lo: 0xbe, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x7, offset 0x3f
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0818, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x82},
+ {value: 0x0818, lo: 0x83, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x85},
+ {value: 0x0818, lo: 0x86, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0808, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x8, offset 0x4b
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0a08, lo: 0x80, hi: 0x87},
+ {value: 0x0c08, lo: 0x88, hi: 0x99},
+ {value: 0x0a08, lo: 0x9a, hi: 0xbf},
+ // Block 0x9, offset 0x4f
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3308, lo: 0x80, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8c},
+ {value: 0x0c08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0a08, lo: 0x8e, hi: 0x98},
+ {value: 0x0c08, lo: 0x99, hi: 0x9b},
+ {value: 0x0a08, lo: 0x9c, hi: 0xaa},
+ {value: 0x0c08, lo: 0xab, hi: 0xac},
+ {value: 0x0a08, lo: 0xad, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb1},
+ {value: 0x0a08, lo: 0xb2, hi: 0xb2},
+ {value: 0x0c08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0a08, lo: 0xb5, hi: 0xb7},
+ {value: 0x0c08, lo: 0xb8, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbf},
+ // Block 0xa, offset 0x5e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xb0},
+ {value: 0x0808, lo: 0xb1, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xb, offset 0x63
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0xc, offset 0x6b
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x99},
+ {value: 0x0808, lo: 0x9a, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa3},
+ {value: 0x0808, lo: 0xa4, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa7},
+ {value: 0x0808, lo: 0xa8, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0818, lo: 0xb0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd, offset 0x77
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0c08, lo: 0x80, hi: 0x80},
+ {value: 0x0a08, lo: 0x81, hi: 0x85},
+ {value: 0x0c08, lo: 0x86, hi: 0x87},
+ {value: 0x0a08, lo: 0x88, hi: 0x88},
+ {value: 0x0c08, lo: 0x89, hi: 0x89},
+ {value: 0x0a08, lo: 0x8a, hi: 0x93},
+ {value: 0x0c08, lo: 0x94, hi: 0x94},
+ {value: 0x0a08, lo: 0x95, hi: 0x95},
+ {value: 0x0808, lo: 0x96, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xe, offset 0x85
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0a08, lo: 0xa0, hi: 0xa9},
+ {value: 0x0c08, lo: 0xaa, hi: 0xac},
+ {value: 0x0808, lo: 0xad, hi: 0xad},
+ {value: 0x0c08, lo: 0xae, hi: 0xae},
+ {value: 0x0a08, lo: 0xaf, hi: 0xb0},
+ {value: 0x0c08, lo: 0xb1, hi: 0xb2},
+ {value: 0x0a08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0a08, lo: 0xb6, hi: 0xb8},
+ {value: 0x0c08, lo: 0xb9, hi: 0xb9},
+ {value: 0x0a08, lo: 0xba, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0xf, offset 0x93
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa1},
+ {value: 0x0840, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xbf},
+ // Block 0x10, offset 0x98
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x11, offset 0xa1
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x85},
+ {value: 0x3008, lo: 0x86, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x3008, lo: 0x8a, hi: 0x8c},
+ {value: 0x3b08, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x12, offset 0xb1
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xa9},
+ {value: 0x0008, lo: 0xaa, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x13, offset 0xbf
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x14, offset 0xcc
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0040, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x15, offset 0xd8
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x89},
+ {value: 0x3b08, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x3008, lo: 0x98, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x16, offset 0xe9
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb2},
+ {value: 0x08f1, lo: 0xb3, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb9},
+ {value: 0x3b08, lo: 0xba, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x0018, lo: 0xbf, hi: 0xbf},
+ // Block 0x17, offset 0xf3
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x8e},
+ {value: 0x0018, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0xbf},
+ // Block 0x18, offset 0xfa
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0961, lo: 0x9c, hi: 0x9c},
+ {value: 0x0999, lo: 0x9d, hi: 0x9d},
+ {value: 0x0008, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x19, offset 0x107
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8a},
+ {value: 0x0008, lo: 0x8b, hi: 0x8b},
+ {value: 0xe03d, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x1a, offset 0x118
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0xbf},
+ // Block 0x1b, offset 0x11f
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x1c, offset 0x12a
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x3008, lo: 0x96, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x3308, lo: 0x9e, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3008, lo: 0xa2, hi: 0xa4},
+ {value: 0x0008, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xbf},
+ // Block 0x1d, offset 0x139
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x8c},
+ {value: 0x3308, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x8e},
+ {value: 0x3008, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x3008, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0x1e, offset 0x147
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x86},
+ {value: 0x055d, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8c},
+ {value: 0x055d, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbb},
+ {value: 0xe105, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0x1f, offset 0x151
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0018, lo: 0x80, hi: 0xbf},
+ // Block 0x20, offset 0x153
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa0},
+ {value: 0x2018, lo: 0xa1, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x21, offset 0x158
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa7},
+ {value: 0x2018, lo: 0xa8, hi: 0xbf},
+ // Block 0x22, offset 0x15b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x2018, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0xbf},
+ // Block 0x23, offset 0x15e
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0008, lo: 0x80, hi: 0xbf},
+ // Block 0x24, offset 0x160
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x99},
+ {value: 0x0008, lo: 0x9a, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x25, offset 0x16c
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x26, offset 0x177
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x27, offset 0x17f
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0x0008, lo: 0x92, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbf},
+ // Block 0x28, offset 0x185
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x29, offset 0x18b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2a, offset 0x190
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x2b, offset 0x195
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x2c, offset 0x198
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xbf},
+ // Block 0x2d, offset 0x19c
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x2e, offset 0x1a2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0x2f, offset 0x1a7
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8d},
+ {value: 0x0008, lo: 0x8e, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x3b08, lo: 0x94, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3b08, lo: 0xb4, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x30, offset 0x1b3
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x31, offset 0x1bd
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xb3},
+ {value: 0x3340, lo: 0xb4, hi: 0xb5},
+ {value: 0x3008, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbf},
+ // Block 0x32, offset 0x1c3
+ {value: 0x0000, lo: 0x10},
+ {value: 0x3008, lo: 0x80, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x3008, lo: 0x87, hi: 0x88},
+ {value: 0x3308, lo: 0x89, hi: 0x91},
+ {value: 0x3b08, lo: 0x92, hi: 0x92},
+ {value: 0x3308, lo: 0x93, hi: 0x93},
+ {value: 0x0018, lo: 0x94, hi: 0x96},
+ {value: 0x0008, lo: 0x97, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x33, offset 0x1d4
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0018, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x86},
+ {value: 0x0218, lo: 0x87, hi: 0x87},
+ {value: 0x0018, lo: 0x88, hi: 0x8a},
+ {value: 0x33c0, lo: 0x8b, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0208, lo: 0xa0, hi: 0xbf},
+ // Block 0x34, offset 0x1de
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0208, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x35, offset 0x1e1
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0208, lo: 0x87, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xa9},
+ {value: 0x0208, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x36, offset 0x1e9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0x37, offset 0x1ec
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb8},
+ {value: 0x3308, lo: 0xb9, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x38, offset 0x1f9
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0008, lo: 0x86, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0x39, offset 0x201
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x3a, offset 0x205
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0028, lo: 0x9a, hi: 0x9a},
+ {value: 0x0040, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0xbf},
+ // Block 0x3b, offset 0x20c
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x3308, lo: 0x97, hi: 0x98},
+ {value: 0x3008, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x3c, offset 0x214
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x94},
+ {value: 0x3008, lo: 0x95, hi: 0x95},
+ {value: 0x3308, lo: 0x96, hi: 0x96},
+ {value: 0x3008, lo: 0x97, hi: 0x97},
+ {value: 0x3308, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3b08, lo: 0xa0, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xac},
+ {value: 0x3008, lo: 0xad, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0x3d, offset 0x224
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xbd},
+ {value: 0x3318, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x3e, offset 0x230
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0040, lo: 0x80, hi: 0xbf},
+ // Block 0x3f, offset 0x232
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x83},
+ {value: 0x3008, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x40, offset 0x23c
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x3808, lo: 0x84, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x41, offset 0x248
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3808, lo: 0xaa, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xbf},
+ // Block 0x42, offset 0x254
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa9},
+ {value: 0x3008, lo: 0xaa, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3808, lo: 0xb2, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbf},
+ // Block 0x43, offset 0x260
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x3008, lo: 0xa4, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbf},
+ // Block 0x44, offset 0x268
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x45, offset 0x26d
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0e29, lo: 0x80, hi: 0x80},
+ {value: 0x0e41, lo: 0x81, hi: 0x81},
+ {value: 0x0e59, lo: 0x82, hi: 0x82},
+ {value: 0x0e71, lo: 0x83, hi: 0x83},
+ {value: 0x0e89, lo: 0x84, hi: 0x85},
+ {value: 0x0ea1, lo: 0x86, hi: 0x86},
+ {value: 0x0eb9, lo: 0x87, hi: 0x87},
+ {value: 0x057d, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0x46, offset 0x277
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x92},
+ {value: 0x0018, lo: 0x93, hi: 0x93},
+ {value: 0x3308, lo: 0x94, hi: 0xa0},
+ {value: 0x3008, lo: 0xa1, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa8},
+ {value: 0x0008, lo: 0xa9, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x0008, lo: 0xae, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x47, offset 0x288
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0x48, offset 0x28c
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x87},
+ {value: 0xe045, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0xe045, lo: 0x98, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0xe045, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb7},
+ {value: 0xe045, lo: 0xb8, hi: 0xbf},
+ // Block 0x49, offset 0x297
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x3318, lo: 0x90, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbf},
+ // Block 0x4a, offset 0x29b
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x88},
+ {value: 0x24c1, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x4b, offset 0x2a4
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x24f1, lo: 0xac, hi: 0xac},
+ {value: 0x2529, lo: 0xad, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xae},
+ {value: 0x2579, lo: 0xaf, hi: 0xaf},
+ {value: 0x25b1, lo: 0xb0, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0x4c, offset 0x2ac
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x9f},
+ {value: 0x0080, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xad},
+ {value: 0x0080, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x4d, offset 0x2b2
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xa8},
+ {value: 0x09c5, lo: 0xa9, hi: 0xa9},
+ {value: 0x09e5, lo: 0xaa, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xbf},
+ // Block 0x4e, offset 0x2b7
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x4f, offset 0x2ba
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xbf},
+ // Block 0x50, offset 0x2bd
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x28c1, lo: 0x8c, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0xbf},
+ // Block 0x51, offset 0x2c1
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0e66, lo: 0xb4, hi: 0xb4},
+ {value: 0x292a, lo: 0xb5, hi: 0xb5},
+ {value: 0x0e86, lo: 0xb6, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x52, offset 0x2c7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x9b},
+ {value: 0x2941, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0xbf},
+ // Block 0x53, offset 0x2cb
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0x54, offset 0x2cf
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0018, lo: 0x98, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbc},
+ {value: 0x0018, lo: 0xbd, hi: 0xbf},
+ // Block 0x55, offset 0x2d5
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0xab},
+ {value: 0x0018, lo: 0xac, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x56, offset 0x2dc
+ {value: 0x0000, lo: 0x05},
+ {value: 0xe185, lo: 0x80, hi: 0x8f},
+ {value: 0x03f5, lo: 0x90, hi: 0x9f},
+ {value: 0x0ea5, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x57, offset 0x2e2
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xa6},
+ {value: 0x0008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xac},
+ {value: 0x0008, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x58, offset 0x2ea
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xae},
+ {value: 0xe075, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0x59, offset 0x2f1
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x0008, lo: 0xb8, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x5a, offset 0x2fc
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xbf},
+ // Block 0x5b, offset 0x306
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0008, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x5c, offset 0x30a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0xbf},
+ // Block 0x5d, offset 0x30d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9e},
+ {value: 0x0edd, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0x5e, offset 0x313
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xb2},
+ {value: 0x0efd, lo: 0xb3, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0x5f, offset 0x317
+ {value: 0x0020, lo: 0x01},
+ {value: 0x0f1d, lo: 0x80, hi: 0xbf},
+ // Block 0x60, offset 0x319
+ {value: 0x0020, lo: 0x02},
+ {value: 0x171d, lo: 0x80, hi: 0x8f},
+ {value: 0x18fd, lo: 0x90, hi: 0xbf},
+ // Block 0x61, offset 0x31c
+ {value: 0x0020, lo: 0x01},
+ {value: 0x1efd, lo: 0x80, hi: 0xbf},
+ // Block 0x62, offset 0x31e
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0xbf},
+ // Block 0x63, offset 0x321
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x98},
+ {value: 0x3308, lo: 0x99, hi: 0x9a},
+ {value: 0x29e2, lo: 0x9b, hi: 0x9b},
+ {value: 0x2a0a, lo: 0x9c, hi: 0x9c},
+ {value: 0x0008, lo: 0x9d, hi: 0x9e},
+ {value: 0x2a31, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0008, lo: 0xa1, hi: 0xbf},
+ // Block 0x64, offset 0x32b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xbe},
+ {value: 0x2a69, lo: 0xbf, hi: 0xbf},
+ // Block 0x65, offset 0x32e
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0040, lo: 0x80, hi: 0x84},
+ {value: 0x0008, lo: 0x85, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xb0},
+ {value: 0x2a1d, lo: 0xb1, hi: 0xb1},
+ {value: 0x2a3d, lo: 0xb2, hi: 0xb2},
+ {value: 0x2a5d, lo: 0xb3, hi: 0xb3},
+ {value: 0x2a7d, lo: 0xb4, hi: 0xb4},
+ {value: 0x2a5d, lo: 0xb5, hi: 0xb5},
+ {value: 0x2a9d, lo: 0xb6, hi: 0xb6},
+ {value: 0x2abd, lo: 0xb7, hi: 0xb7},
+ {value: 0x2add, lo: 0xb8, hi: 0xb9},
+ {value: 0x2afd, lo: 0xba, hi: 0xbb},
+ {value: 0x2b1d, lo: 0xbc, hi: 0xbd},
+ {value: 0x2afd, lo: 0xbe, hi: 0xbf},
+ // Block 0x66, offset 0x33d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x67, offset 0x341
+ {value: 0x0030, lo: 0x04},
+ {value: 0x2aa2, lo: 0x80, hi: 0x9d},
+ {value: 0x305a, lo: 0x9e, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x30a2, lo: 0xa0, hi: 0xbf},
+ // Block 0x68, offset 0x346
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x69, offset 0x349
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0040, lo: 0x8d, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0x6a, offset 0x34d
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0x6b, offset 0x352
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xbf},
+ // Block 0x6c, offset 0x357
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb1},
+ {value: 0x0018, lo: 0xb2, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x6d, offset 0x35d
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0xb6},
+ {value: 0x0008, lo: 0xb7, hi: 0xb7},
+ {value: 0x2009, lo: 0xb8, hi: 0xb8},
+ {value: 0x6e89, lo: 0xb9, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xbf},
+ // Block 0x6e, offset 0x363
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x3308, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x3308, lo: 0x8b, hi: 0x8b},
+ {value: 0x0008, lo: 0x8c, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa6},
+ {value: 0x3008, lo: 0xa7, hi: 0xa7},
+ {value: 0x0018, lo: 0xa8, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x6f, offset 0x372
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0208, lo: 0x80, hi: 0xb1},
+ {value: 0x0108, lo: 0xb2, hi: 0xb2},
+ {value: 0x0008, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0x70, offset 0x378
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xbf},
+ // Block 0x71, offset 0x37c
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x3008, lo: 0x80, hi: 0x83},
+ {value: 0x3b08, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8d},
+ {value: 0x0018, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xba},
+ {value: 0x0008, lo: 0xbb, hi: 0xbb},
+ {value: 0x0018, lo: 0xbc, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x72, offset 0x38b
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x73, offset 0x390
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x3308, lo: 0x87, hi: 0x91},
+ {value: 0x3008, lo: 0x92, hi: 0x92},
+ {value: 0x3808, lo: 0x93, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0x74, offset 0x398
+ {value: 0x0000, lo: 0x09},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x3008, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb9},
+ {value: 0x3008, lo: 0xba, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbf},
+ // Block 0x75, offset 0x3a2
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0x76, offset 0x3ad
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xa8},
+ {value: 0x3308, lo: 0xa9, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb0},
+ {value: 0x3308, lo: 0xb1, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x77, offset 0x3b5
+ {value: 0x0000, lo: 0x10},
+ {value: 0x0008, lo: 0x80, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8c},
+ {value: 0x3008, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xb9},
+ {value: 0x0008, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbc},
+ {value: 0x3008, lo: 0xbd, hi: 0xbd},
+ {value: 0x0008, lo: 0xbe, hi: 0xbf},
+ // Block 0x78, offset 0x3c6
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb0},
+ {value: 0x0008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb4},
+ {value: 0x0008, lo: 0xb5, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb8},
+ {value: 0x0008, lo: 0xb9, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbf},
+ // Block 0x79, offset 0x3cf
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x9a},
+ {value: 0x0008, lo: 0x9b, hi: 0x9d},
+ {value: 0x0018, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xaa},
+ {value: 0x3008, lo: 0xab, hi: 0xab},
+ {value: 0x3308, lo: 0xac, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb5},
+ {value: 0x3b08, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x7a, offset 0x3df
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x88},
+ {value: 0x0008, lo: 0x89, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x90},
+ {value: 0x0008, lo: 0x91, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x7b, offset 0x3ec
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x4465, lo: 0x9c, hi: 0x9c},
+ {value: 0x447d, lo: 0x9d, hi: 0x9d},
+ {value: 0x2971, lo: 0x9e, hi: 0x9e},
+ {value: 0xe06d, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa5},
+ {value: 0x0040, lo: 0xa6, hi: 0xaf},
+ {value: 0x4495, lo: 0xb0, hi: 0xbf},
+ // Block 0x7c, offset 0x3f6
+ {value: 0x0000, lo: 0x04},
+ {value: 0x44b5, lo: 0x80, hi: 0x8f},
+ {value: 0x44d5, lo: 0x90, hi: 0x9f},
+ {value: 0x44f5, lo: 0xa0, hi: 0xaf},
+ {value: 0x44d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x7d, offset 0x3fb
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0xa2},
+ {value: 0x3008, lo: 0xa3, hi: 0xa4},
+ {value: 0x3308, lo: 0xa5, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa7},
+ {value: 0x3308, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xaa},
+ {value: 0x0018, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3b08, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0x7e, offset 0x408
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0x7f, offset 0x40c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x80, offset 0x411
+ {value: 0x0020, lo: 0x01},
+ {value: 0x4515, lo: 0x80, hi: 0xbf},
+ // Block 0x81, offset 0x413
+ {value: 0x0020, lo: 0x03},
+ {value: 0x4d15, lo: 0x80, hi: 0x94},
+ {value: 0x4ad5, lo: 0x95, hi: 0x95},
+ {value: 0x4fb5, lo: 0x96, hi: 0xbf},
+ // Block 0x82, offset 0x417
+ {value: 0x0020, lo: 0x01},
+ {value: 0x54f5, lo: 0x80, hi: 0xbf},
+ // Block 0x83, offset 0x419
+ {value: 0x0020, lo: 0x03},
+ {value: 0x5cf5, lo: 0x80, hi: 0x84},
+ {value: 0x5655, lo: 0x85, hi: 0x85},
+ {value: 0x5d95, lo: 0x86, hi: 0xbf},
+ // Block 0x84, offset 0x41d
+ {value: 0x0020, lo: 0x08},
+ {value: 0x6b55, lo: 0x80, hi: 0x8f},
+ {value: 0x6d15, lo: 0x90, hi: 0x90},
+ {value: 0x6d55, lo: 0x91, hi: 0xab},
+ {value: 0x6ea1, lo: 0xac, hi: 0xac},
+ {value: 0x70b5, lo: 0xad, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x70d5, lo: 0xb0, hi: 0xbf},
+ // Block 0x85, offset 0x426
+ {value: 0x0020, lo: 0x05},
+ {value: 0x72d5, lo: 0x80, hi: 0xad},
+ {value: 0x6535, lo: 0xae, hi: 0xae},
+ {value: 0x7895, lo: 0xaf, hi: 0xb5},
+ {value: 0x6f55, lo: 0xb6, hi: 0xb6},
+ {value: 0x7975, lo: 0xb7, hi: 0xbf},
+ // Block 0x86, offset 0x42c
+ {value: 0x0028, lo: 0x03},
+ {value: 0x7c21, lo: 0x80, hi: 0x82},
+ {value: 0x7be1, lo: 0x83, hi: 0x83},
+ {value: 0x7c99, lo: 0x84, hi: 0xbf},
+ // Block 0x87, offset 0x430
+ {value: 0x0038, lo: 0x0f},
+ {value: 0x9db1, lo: 0x80, hi: 0x83},
+ {value: 0x9e59, lo: 0x84, hi: 0x85},
+ {value: 0x9e91, lo: 0x86, hi: 0x87},
+ {value: 0x9ec9, lo: 0x88, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x91},
+ {value: 0xa089, lo: 0x92, hi: 0x97},
+ {value: 0xa1a1, lo: 0x98, hi: 0x9c},
+ {value: 0xa281, lo: 0x9d, hi: 0xb3},
+ {value: 0x9d41, lo: 0xb4, hi: 0xb4},
+ {value: 0x9db1, lo: 0xb5, hi: 0xb5},
+ {value: 0xa789, lo: 0xb6, hi: 0xbb},
+ {value: 0xa869, lo: 0xbc, hi: 0xbc},
+ {value: 0xa7f9, lo: 0xbd, hi: 0xbd},
+ {value: 0xa8d9, lo: 0xbe, hi: 0xbf},
+ // Block 0x88, offset 0x440
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8c},
+ {value: 0x0008, lo: 0x8d, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbb},
+ {value: 0x0008, lo: 0xbc, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0x89, offset 0x44a
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0x8a, offset 0x44f
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x8b, offset 0x452
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x82},
+ {value: 0x0040, lo: 0x83, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0x8c, offset 0x458
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x8e},
+ {value: 0x0040, lo: 0x8f, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0x8d, offset 0x45f
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x0040, lo: 0xbe, hi: 0xbf},
+ // Block 0x8e, offset 0x464
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9c},
+ {value: 0x0040, lo: 0x9d, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x8f, offset 0x468
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x90},
+ {value: 0x0040, lo: 0x91, hi: 0x9f},
+ {value: 0x3308, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x90, offset 0x46e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x91, offset 0x473
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x81},
+ {value: 0x0008, lo: 0x82, hi: 0x89},
+ {value: 0x0018, lo: 0x8a, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbf},
+ // Block 0x92, offset 0x47c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0x93, offset 0x481
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0xbf},
+ // Block 0x94, offset 0x487
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x97},
+ {value: 0x8ad5, lo: 0x98, hi: 0x9f},
+ {value: 0x8aed, lo: 0xa0, hi: 0xa7},
+ {value: 0x0008, lo: 0xa8, hi: 0xbf},
+ // Block 0x95, offset 0x48e
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x8aed, lo: 0xb0, hi: 0xb7},
+ {value: 0x8ad5, lo: 0xb8, hi: 0xbf},
+ // Block 0x96, offset 0x495
+ {value: 0x0000, lo: 0x06},
+ {value: 0xe145, lo: 0x80, hi: 0x87},
+ {value: 0xe1c5, lo: 0x88, hi: 0x8f},
+ {value: 0xe145, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0xbb},
+ {value: 0x0040, lo: 0xbc, hi: 0xbf},
+ // Block 0x97, offset 0x49c
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0x98, offset 0x4a0
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xae},
+ {value: 0x0018, lo: 0xaf, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x99, offset 0x4a5
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0x9a, offset 0x4a8
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xbf},
+ // Block 0x9b, offset 0x4ad
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0808, lo: 0x80, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x87},
+ {value: 0x0808, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0808, lo: 0x8a, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb6},
+ {value: 0x0808, lo: 0xb7, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbb},
+ {value: 0x0808, lo: 0xbc, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbe},
+ {value: 0x0808, lo: 0xbf, hi: 0xbf},
+ // Block 0x9c, offset 0x4b9
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x96},
+ {value: 0x0818, lo: 0x97, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb6},
+ {value: 0x0818, lo: 0xb7, hi: 0xbf},
+ // Block 0x9d, offset 0x4bf
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xa6},
+ {value: 0x0818, lo: 0xa7, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0x9e, offset 0x4c4
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb3},
+ {value: 0x0808, lo: 0xb4, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xba},
+ {value: 0x0818, lo: 0xbb, hi: 0xbf},
+ // Block 0x9f, offset 0x4cb
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0818, lo: 0x96, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbe},
+ {value: 0x0818, lo: 0xbf, hi: 0xbf},
+ // Block 0xa0, offset 0x4d3
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0808, lo: 0x80, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbb},
+ {value: 0x0818, lo: 0xbc, hi: 0xbd},
+ {value: 0x0808, lo: 0xbe, hi: 0xbf},
+ // Block 0xa1, offset 0x4d8
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0818, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x0818, lo: 0x92, hi: 0xbf},
+ // Block 0xa2, offset 0x4dc
+ {value: 0x0000, lo: 0x0f},
+ {value: 0x0808, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x84},
+ {value: 0x3308, lo: 0x85, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x8b},
+ {value: 0x3308, lo: 0x8c, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x94},
+ {value: 0x0808, lo: 0x95, hi: 0x97},
+ {value: 0x0040, lo: 0x98, hi: 0x98},
+ {value: 0x0808, lo: 0x99, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xba},
+ {value: 0x0040, lo: 0xbb, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xa3, offset 0x4ec
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0818, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0818, lo: 0x90, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xbc},
+ {value: 0x0818, lo: 0xbd, hi: 0xbf},
+ // Block 0xa4, offset 0x4f3
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0x9c},
+ {value: 0x0818, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xa5, offset 0x4f7
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb8},
+ {value: 0x0018, lo: 0xb9, hi: 0xbf},
+ // Block 0xa6, offset 0x4fb
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0808, lo: 0x80, hi: 0x95},
+ {value: 0x0040, lo: 0x96, hi: 0x97},
+ {value: 0x0818, lo: 0x98, hi: 0x9f},
+ {value: 0x0808, lo: 0xa0, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb7},
+ {value: 0x0818, lo: 0xb8, hi: 0xbf},
+ // Block 0xa7, offset 0x502
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0808, lo: 0x80, hi: 0xbf},
+ // Block 0xa8, offset 0x504
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0808, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0xbf},
+ // Block 0xa9, offset 0x507
+ {value: 0x0000, lo: 0x02},
+ {value: 0x03dd, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xaa, offset 0x50a
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0808, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xb9},
+ {value: 0x0818, lo: 0xba, hi: 0xbf},
+ // Block 0xab, offset 0x50e
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0818, lo: 0xa0, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xac, offset 0x512
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3008, lo: 0x80, hi: 0x80},
+ {value: 0x3308, lo: 0x81, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xad, offset 0x518
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x85},
+ {value: 0x3b08, lo: 0x86, hi: 0x86},
+ {value: 0x0018, lo: 0x87, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x91},
+ {value: 0x0018, lo: 0x92, hi: 0xa5},
+ {value: 0x0008, lo: 0xa6, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xae, offset 0x521
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb6},
+ {value: 0x3008, lo: 0xb7, hi: 0xb8},
+ {value: 0x3b08, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x0018, lo: 0xbb, hi: 0xbc},
+ {value: 0x0340, lo: 0xbd, hi: 0xbd},
+ {value: 0x0018, lo: 0xbe, hi: 0xbf},
+ // Block 0xaf, offset 0x52d
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb0, offset 0x534
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xb2},
+ {value: 0x3b08, lo: 0xb3, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xbf},
+ // Block 0xb1, offset 0x53d
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb3},
+ {value: 0x0018, lo: 0xb4, hi: 0xb5},
+ {value: 0x0008, lo: 0xb6, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xb2, offset 0x545
+ {value: 0x0000, lo: 0x06},
+ {value: 0x3308, lo: 0x80, hi: 0x81},
+ {value: 0x3008, lo: 0x82, hi: 0x82},
+ {value: 0x0008, lo: 0x83, hi: 0xb2},
+ {value: 0x3008, lo: 0xb3, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xbe},
+ {value: 0x3008, lo: 0xbf, hi: 0xbf},
+ // Block 0xb3, offset 0x54c
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3808, lo: 0x80, hi: 0x80},
+ {value: 0x0008, lo: 0x81, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x89},
+ {value: 0x3308, lo: 0x8a, hi: 0x8c},
+ {value: 0x0018, lo: 0x8d, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0008, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x0018, lo: 0xa1, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xb4, offset 0x55a
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xae},
+ {value: 0x3308, lo: 0xaf, hi: 0xb1},
+ {value: 0x3008, lo: 0xb2, hi: 0xb3},
+ {value: 0x3308, lo: 0xb4, hi: 0xb4},
+ {value: 0x3808, lo: 0xb5, hi: 0xb5},
+ {value: 0x3308, lo: 0xb6, hi: 0xb7},
+ {value: 0x0018, lo: 0xb8, hi: 0xbd},
+ {value: 0x3308, lo: 0xbe, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xb5, offset 0x567
+ {value: 0x0000, lo: 0x0c},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x0008, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0x8d},
+ {value: 0x0040, lo: 0x8e, hi: 0x8e},
+ {value: 0x0008, lo: 0x8f, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9e},
+ {value: 0x0008, lo: 0x9f, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbf},
+ // Block 0xb6, offset 0x574
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x3308, lo: 0x9f, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa9},
+ {value: 0x3b08, lo: 0xaa, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0040, lo: 0xba, hi: 0xbf},
+ // Block 0xb7, offset 0x57d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x3008, lo: 0xb5, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbf},
+ // Block 0xb8, offset 0x581
+ {value: 0x0000, lo: 0x0d},
+ {value: 0x3008, lo: 0x80, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x84},
+ {value: 0x3008, lo: 0x85, hi: 0x85},
+ {value: 0x3308, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x8a},
+ {value: 0x0018, lo: 0x8b, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0x9b},
+ {value: 0x0040, lo: 0x9c, hi: 0x9c},
+ {value: 0x0018, lo: 0x9d, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xb9, offset 0x58f
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xb8},
+ {value: 0x3008, lo: 0xb9, hi: 0xb9},
+ {value: 0x3308, lo: 0xba, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbe},
+ {value: 0x3308, lo: 0xbf, hi: 0xbf},
+ // Block 0xba, offset 0x597
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x3008, lo: 0x81, hi: 0x81},
+ {value: 0x3b08, lo: 0x82, hi: 0x82},
+ {value: 0x3308, lo: 0x83, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x85},
+ {value: 0x0018, lo: 0x86, hi: 0x86},
+ {value: 0x0008, lo: 0x87, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xbb, offset 0x5a2
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xb7},
+ {value: 0x3008, lo: 0xb8, hi: 0xbb},
+ {value: 0x3308, lo: 0xbc, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbc, offset 0x5ab
+ {value: 0x0000, lo: 0x05},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x97},
+ {value: 0x0008, lo: 0x98, hi: 0x9b},
+ {value: 0x3308, lo: 0x9c, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0xbf},
+ // Block 0xbd, offset 0x5b1
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3008, lo: 0xb0, hi: 0xb2},
+ {value: 0x3308, lo: 0xb3, hi: 0xba},
+ {value: 0x3008, lo: 0xbb, hi: 0xbc},
+ {value: 0x3308, lo: 0xbd, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xbe, offset 0x5b9
+ {value: 0x0000, lo: 0x08},
+ {value: 0x3308, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x83},
+ {value: 0x0008, lo: 0x84, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xbf, offset 0x5c2
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x3308, lo: 0xab, hi: 0xab},
+ {value: 0x3008, lo: 0xac, hi: 0xac},
+ {value: 0x3308, lo: 0xad, hi: 0xad},
+ {value: 0x3008, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb5},
+ {value: 0x3808, lo: 0xb6, hi: 0xb6},
+ {value: 0x3308, lo: 0xb7, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbf},
+ // Block 0xc0, offset 0x5cc
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x89},
+ {value: 0x0040, lo: 0x8a, hi: 0xbf},
+ // Block 0xc1, offset 0x5cf
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9f},
+ {value: 0x3008, lo: 0xa0, hi: 0xa1},
+ {value: 0x3308, lo: 0xa2, hi: 0xa5},
+ {value: 0x3008, lo: 0xa6, hi: 0xa6},
+ {value: 0x3308, lo: 0xa7, hi: 0xaa},
+ {value: 0x3b08, lo: 0xab, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xb9},
+ {value: 0x0018, lo: 0xba, hi: 0xbf},
+ // Block 0xc2, offset 0x5db
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x049d, lo: 0xa0, hi: 0xbf},
+ // Block 0xc3, offset 0x5de
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbe},
+ {value: 0x0008, lo: 0xbf, hi: 0xbf},
+ // Block 0xc4, offset 0x5e3
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb8},
+ {value: 0x0040, lo: 0xb9, hi: 0xbf},
+ // Block 0xc5, offset 0x5e6
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x89},
+ {value: 0x0008, lo: 0x8a, hi: 0xae},
+ {value: 0x3008, lo: 0xaf, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xb7},
+ {value: 0x3308, lo: 0xb8, hi: 0xbd},
+ {value: 0x3008, lo: 0xbe, hi: 0xbe},
+ {value: 0x3b08, lo: 0xbf, hi: 0xbf},
+ // Block 0xc6, offset 0x5f0
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0008, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0018, lo: 0x9a, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0008, lo: 0xb2, hi: 0xbf},
+ // Block 0xc7, offset 0x5f9
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x91},
+ {value: 0x3308, lo: 0x92, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xa8},
+ {value: 0x3008, lo: 0xa9, hi: 0xa9},
+ {value: 0x3308, lo: 0xaa, hi: 0xb0},
+ {value: 0x3008, lo: 0xb1, hi: 0xb1},
+ {value: 0x3308, lo: 0xb2, hi: 0xb3},
+ {value: 0x3008, lo: 0xb4, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xc8, offset 0x605
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0xbf},
+ // Block 0xc9, offset 0x608
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xca, offset 0x60d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0040, lo: 0x84, hi: 0xbf},
+ // Block 0xcb, offset 0x610
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xbf},
+ // Block 0xcc, offset 0x613
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0xbf},
+ // Block 0xcd, offset 0x616
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0008, lo: 0x80, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa9},
+ {value: 0x0040, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xce, offset 0x61d
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb4},
+ {value: 0x0018, lo: 0xb5, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xcf, offset 0x624
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0xaf},
+ {value: 0x3308, lo: 0xb0, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xbf},
+ // Block 0xd0, offset 0x628
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x0008, lo: 0x80, hi: 0x83},
+ {value: 0x0018, lo: 0x84, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9a},
+ {value: 0x0018, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x0008, lo: 0xa3, hi: 0xb7},
+ {value: 0x0040, lo: 0xb8, hi: 0xbc},
+ {value: 0x0008, lo: 0xbd, hi: 0xbf},
+ // Block 0xd1, offset 0x633
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0xbf},
+ // Block 0xd2, offset 0x636
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0008, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x90},
+ {value: 0x3008, lo: 0x91, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xd3, offset 0x63c
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x8e},
+ {value: 0x3308, lo: 0x8f, hi: 0x92},
+ {value: 0x0008, lo: 0x93, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xd4, offset 0x641
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xa0},
+ {value: 0x0040, lo: 0xa1, hi: 0xbf},
+ // Block 0xd5, offset 0x645
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xd6, offset 0x648
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb2},
+ {value: 0x0040, lo: 0xb3, hi: 0xbf},
+ // Block 0xd7, offset 0x64b
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0xbf},
+ // Block 0xd8, offset 0x64e
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0008, lo: 0x80, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xaf},
+ {value: 0x0008, lo: 0xb0, hi: 0xbc},
+ {value: 0x0040, lo: 0xbd, hi: 0xbf},
+ // Block 0xd9, offset 0x653
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0008, lo: 0x80, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0x0008, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9b},
+ {value: 0x0018, lo: 0x9c, hi: 0x9c},
+ {value: 0x3308, lo: 0x9d, hi: 0x9e},
+ {value: 0x0018, lo: 0x9f, hi: 0x9f},
+ {value: 0x03c0, lo: 0xa0, hi: 0xa3},
+ {value: 0x0040, lo: 0xa4, hi: 0xbf},
+ // Block 0xda, offset 0x65d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xdb, offset 0x660
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xa6},
+ {value: 0x0040, lo: 0xa7, hi: 0xa8},
+ {value: 0x0018, lo: 0xa9, hi: 0xbf},
+ // Block 0xdc, offset 0x664
+ {value: 0x0000, lo: 0x0e},
+ {value: 0x0018, lo: 0x80, hi: 0x9d},
+ {value: 0xb5b9, lo: 0x9e, hi: 0x9e},
+ {value: 0xb601, lo: 0x9f, hi: 0x9f},
+ {value: 0xb649, lo: 0xa0, hi: 0xa0},
+ {value: 0xb6b1, lo: 0xa1, hi: 0xa1},
+ {value: 0xb719, lo: 0xa2, hi: 0xa2},
+ {value: 0xb781, lo: 0xa3, hi: 0xa3},
+ {value: 0xb7e9, lo: 0xa4, hi: 0xa4},
+ {value: 0x3018, lo: 0xa5, hi: 0xa6},
+ {value: 0x3318, lo: 0xa7, hi: 0xa9},
+ {value: 0x0018, lo: 0xaa, hi: 0xac},
+ {value: 0x3018, lo: 0xad, hi: 0xb2},
+ {value: 0x0340, lo: 0xb3, hi: 0xba},
+ {value: 0x3318, lo: 0xbb, hi: 0xbf},
+ // Block 0xdd, offset 0x673
+ {value: 0x0000, lo: 0x0b},
+ {value: 0x3318, lo: 0x80, hi: 0x82},
+ {value: 0x0018, lo: 0x83, hi: 0x84},
+ {value: 0x3318, lo: 0x85, hi: 0x8b},
+ {value: 0x0018, lo: 0x8c, hi: 0xa9},
+ {value: 0x3318, lo: 0xaa, hi: 0xad},
+ {value: 0x0018, lo: 0xae, hi: 0xba},
+ {value: 0xb851, lo: 0xbb, hi: 0xbb},
+ {value: 0xb899, lo: 0xbc, hi: 0xbc},
+ {value: 0xb8e1, lo: 0xbd, hi: 0xbd},
+ {value: 0xb949, lo: 0xbe, hi: 0xbe},
+ {value: 0xb9b1, lo: 0xbf, hi: 0xbf},
+ // Block 0xde, offset 0x67f
+ {value: 0x0000, lo: 0x03},
+ {value: 0xba19, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0xa8},
+ {value: 0x0040, lo: 0xa9, hi: 0xbf},
+ // Block 0xdf, offset 0x683
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x81},
+ {value: 0x3318, lo: 0x82, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x85},
+ {value: 0x0040, lo: 0x86, hi: 0xbf},
+ // Block 0xe0, offset 0x688
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xe1, offset 0x68d
+ {value: 0x0000, lo: 0x03},
+ {value: 0x3308, lo: 0x80, hi: 0xb6},
+ {value: 0x0018, lo: 0xb7, hi: 0xba},
+ {value: 0x3308, lo: 0xbb, hi: 0xbf},
+ // Block 0xe2, offset 0x691
+ {value: 0x0000, lo: 0x04},
+ {value: 0x3308, lo: 0x80, hi: 0xac},
+ {value: 0x0018, lo: 0xad, hi: 0xb4},
+ {value: 0x3308, lo: 0xb5, hi: 0xb5},
+ {value: 0x0018, lo: 0xb6, hi: 0xbf},
+ // Block 0xe3, offset 0x696
+ {value: 0x0000, lo: 0x08},
+ {value: 0x0018, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x84},
+ {value: 0x0018, lo: 0x85, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xa0},
+ {value: 0x3308, lo: 0xa1, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+ // Block 0xe4, offset 0x69f
+ {value: 0x0000, lo: 0x0a},
+ {value: 0x3308, lo: 0x80, hi: 0x86},
+ {value: 0x0040, lo: 0x87, hi: 0x87},
+ {value: 0x3308, lo: 0x88, hi: 0x98},
+ {value: 0x0040, lo: 0x99, hi: 0x9a},
+ {value: 0x3308, lo: 0x9b, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xa2},
+ {value: 0x3308, lo: 0xa3, hi: 0xa4},
+ {value: 0x0040, lo: 0xa5, hi: 0xa5},
+ {value: 0x3308, lo: 0xa6, hi: 0xaa},
+ {value: 0x0040, lo: 0xab, hi: 0xbf},
+ // Block 0xe5, offset 0x6aa
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0808, lo: 0x80, hi: 0x84},
+ {value: 0x0040, lo: 0x85, hi: 0x86},
+ {value: 0x0818, lo: 0x87, hi: 0x8f},
+ {value: 0x3308, lo: 0x90, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xe6, offset 0x6b0
+ {value: 0x0000, lo: 0x07},
+ {value: 0x0a08, lo: 0x80, hi: 0x83},
+ {value: 0x3308, lo: 0x84, hi: 0x8a},
+ {value: 0x0040, lo: 0x8b, hi: 0x8f},
+ {value: 0x0808, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9d},
+ {value: 0x0818, lo: 0x9e, hi: 0x9f},
+ {value: 0x0040, lo: 0xa0, hi: 0xbf},
+ // Block 0xe7, offset 0x6b8
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0040, lo: 0x80, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb1},
+ {value: 0x0040, lo: 0xb2, hi: 0xbf},
+ // Block 0xe8, offset 0x6bc
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0xab},
+ {value: 0x0040, lo: 0xac, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xbf},
+ // Block 0xe9, offset 0x6c0
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x93},
+ {value: 0x0040, lo: 0x94, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xae},
+ {value: 0x0040, lo: 0xaf, hi: 0xb0},
+ {value: 0x0018, lo: 0xb1, hi: 0xbf},
+ // Block 0xea, offset 0x6c6
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0018, lo: 0x81, hi: 0x8f},
+ {value: 0x0040, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xb5},
+ {value: 0x0040, lo: 0xb6, hi: 0xbf},
+ // Block 0xeb, offset 0x6cc
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8f},
+ {value: 0xc1c1, lo: 0x90, hi: 0x90},
+ {value: 0x0018, lo: 0x91, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xbf},
+ // Block 0xec, offset 0x6d1
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0040, lo: 0x80, hi: 0xa5},
+ {value: 0x0018, lo: 0xa6, hi: 0xbf},
+ // Block 0xed, offset 0x6d4
+ {value: 0x0000, lo: 0x0d},
+ {value: 0xc7e9, lo: 0x80, hi: 0x80},
+ {value: 0xc839, lo: 0x81, hi: 0x81},
+ {value: 0xc889, lo: 0x82, hi: 0x82},
+ {value: 0xc8d9, lo: 0x83, hi: 0x83},
+ {value: 0xc929, lo: 0x84, hi: 0x84},
+ {value: 0xc979, lo: 0x85, hi: 0x85},
+ {value: 0xc9c9, lo: 0x86, hi: 0x86},
+ {value: 0xca19, lo: 0x87, hi: 0x87},
+ {value: 0xca69, lo: 0x88, hi: 0x88},
+ {value: 0x0040, lo: 0x89, hi: 0x8f},
+ {value: 0xcab9, lo: 0x90, hi: 0x90},
+ {value: 0xcad9, lo: 0x91, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0xbf},
+ // Block 0xee, offset 0x6e2
+ {value: 0x0000, lo: 0x06},
+ {value: 0x0018, lo: 0x80, hi: 0x92},
+ {value: 0x0040, lo: 0x93, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xac},
+ {value: 0x0040, lo: 0xad, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb6},
+ {value: 0x0040, lo: 0xb7, hi: 0xbf},
+ // Block 0xef, offset 0x6e9
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0xb3},
+ {value: 0x0040, lo: 0xb4, hi: 0xbf},
+ // Block 0xf0, offset 0x6ec
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x94},
+ {value: 0x0040, lo: 0x95, hi: 0xbf},
+ // Block 0xf1, offset 0x6ef
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xbf},
+ // Block 0xf2, offset 0x6f3
+ {value: 0x0000, lo: 0x05},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x99},
+ {value: 0x0040, lo: 0x9a, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xbf},
+ // Block 0xf3, offset 0x6f9
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x87},
+ {value: 0x0040, lo: 0x88, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0xad},
+ {value: 0x0040, lo: 0xae, hi: 0xbf},
+ // Block 0xf4, offset 0x6fe
+ {value: 0x0000, lo: 0x09},
+ {value: 0x0040, lo: 0x80, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0x9f},
+ {value: 0x0018, lo: 0xa0, hi: 0xa7},
+ {value: 0x0040, lo: 0xa8, hi: 0xaf},
+ {value: 0x0018, lo: 0xb0, hi: 0xb0},
+ {value: 0x0040, lo: 0xb1, hi: 0xb2},
+ {value: 0x0018, lo: 0xb3, hi: 0xbe},
+ {value: 0x0040, lo: 0xbf, hi: 0xbf},
+ // Block 0xf5, offset 0x708
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0018, lo: 0x80, hi: 0x8b},
+ {value: 0x0040, lo: 0x8c, hi: 0x8f},
+ {value: 0x0018, lo: 0x90, hi: 0x9e},
+ {value: 0x0040, lo: 0x9f, hi: 0xbf},
+ // Block 0xf6, offset 0x70d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x91},
+ {value: 0x0040, lo: 0x92, hi: 0xbf},
+ // Block 0xf7, offset 0x710
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0018, lo: 0x80, hi: 0x80},
+ {value: 0x0040, lo: 0x81, hi: 0xbf},
+ // Block 0xf8, offset 0x713
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0x96},
+ {value: 0x0040, lo: 0x97, hi: 0xbf},
+ // Block 0xf9, offset 0x716
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xb4},
+ {value: 0x0040, lo: 0xb5, hi: 0xbf},
+ // Block 0xfa, offset 0x719
+ {value: 0x0000, lo: 0x03},
+ {value: 0x0008, lo: 0x80, hi: 0x9d},
+ {value: 0x0040, lo: 0x9e, hi: 0x9f},
+ {value: 0x0008, lo: 0xa0, hi: 0xbf},
+ // Block 0xfb, offset 0x71d
+ {value: 0x0000, lo: 0x02},
+ {value: 0x0008, lo: 0x80, hi: 0xa1},
+ {value: 0x0040, lo: 0xa2, hi: 0xbf},
+ // Block 0xfc, offset 0x720
+ {value: 0x0020, lo: 0x0f},
+ {value: 0xdeb9, lo: 0x80, hi: 0x89},
+ {value: 0x8dfd, lo: 0x8a, hi: 0x8a},
+ {value: 0xdff9, lo: 0x8b, hi: 0x9c},
+ {value: 0x8e1d, lo: 0x9d, hi: 0x9d},
+ {value: 0xe239, lo: 0x9e, hi: 0xa2},
+ {value: 0x8e3d, lo: 0xa3, hi: 0xa3},
+ {value: 0xe2d9, lo: 0xa4, hi: 0xab},
+ {value: 0x7ed5, lo: 0xac, hi: 0xac},
+ {value: 0xe3d9, lo: 0xad, hi: 0xaf},
+ {value: 0x8e5d, lo: 0xb0, hi: 0xb0},
+ {value: 0xe439, lo: 0xb1, hi: 0xb6},
+ {value: 0x8e7d, lo: 0xb7, hi: 0xb9},
+ {value: 0xe4f9, lo: 0xba, hi: 0xba},
+ {value: 0x8edd, lo: 0xbb, hi: 0xbb},
+ {value: 0xe519, lo: 0xbc, hi: 0xbf},
+ // Block 0xfd, offset 0x730
+ {value: 0x0020, lo: 0x10},
+ {value: 0x937d, lo: 0x80, hi: 0x80},
+ {value: 0xf099, lo: 0x81, hi: 0x86},
+ {value: 0x939d, lo: 0x87, hi: 0x8a},
+ {value: 0xd9f9, lo: 0x8b, hi: 0x8b},
+ {value: 0xf159, lo: 0x8c, hi: 0x96},
+ {value: 0x941d, lo: 0x97, hi: 0x97},
+ {value: 0xf2b9, lo: 0x98, hi: 0xa3},
+ {value: 0x943d, lo: 0xa4, hi: 0xa6},
+ {value: 0xf439, lo: 0xa7, hi: 0xaa},
+ {value: 0x949d, lo: 0xab, hi: 0xab},
+ {value: 0xf4b9, lo: 0xac, hi: 0xac},
+ {value: 0x94bd, lo: 0xad, hi: 0xad},
+ {value: 0xf4d9, lo: 0xae, hi: 0xaf},
+ {value: 0x94dd, lo: 0xb0, hi: 0xb1},
+ {value: 0xf519, lo: 0xb2, hi: 0xbe},
+ {value: 0x2040, lo: 0xbf, hi: 0xbf},
+ // Block 0xfe, offset 0x741
+ {value: 0x0000, lo: 0x04},
+ {value: 0x0040, lo: 0x80, hi: 0x80},
+ {value: 0x0340, lo: 0x81, hi: 0x81},
+ {value: 0x0040, lo: 0x82, hi: 0x9f},
+ {value: 0x0340, lo: 0xa0, hi: 0xbf},
+ // Block 0xff, offset 0x746
+ {value: 0x0000, lo: 0x01},
+ {value: 0x0340, lo: 0x80, hi: 0xbf},
+ // Block 0x100, offset 0x748
+ {value: 0x0000, lo: 0x01},
+ {value: 0x33c0, lo: 0x80, hi: 0xbf},
+ // Block 0x101, offset 0x74a
+ {value: 0x0000, lo: 0x02},
+ {value: 0x33c0, lo: 0x80, hi: 0xaf},
+ {value: 0x0040, lo: 0xb0, hi: 0xbf},
+}
+
+// Total table size 41662 bytes (40KiB); checksum: 355A58A4
diff --git a/src/vendor/golang.org/x/net/idna/trie.go b/src/vendor/golang.org/x/net/idna/trie.go
new file mode 100644
index 000000000..c4ef847e7
--- /dev/null
+++ b/src/vendor/golang.org/x/net/idna/trie.go
@@ -0,0 +1,72 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package idna
+
+// appendMapping appends the mapping for the respective rune. isMapped must be
+// true. A mapping is a categorization of a rune as defined in UTS #46.
+func (c info) appendMapping(b []byte, s string) []byte {
+ index := int(c >> indexShift)
+ if c&xorBit == 0 {
+ s := mappings[index:]
+ return append(b, s[1:s[0]+1]...)
+ }
+ b = append(b, s...)
+ if c&inlineXOR == inlineXOR {
+ // TODO: support and handle two-byte inline masks
+ b[len(b)-1] ^= byte(index)
+ } else {
+ for p := len(b) - int(xorData[index]); p < len(b); p++ {
+ index++
+ b[p] ^= xorData[index]
+ }
+ }
+ return b
+}
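
As an aside on the XOR trick used by appendMapping above: rather than storing a mapped string, the table can store a byte pattern that is XORed onto the UTF-8 encoding of the input rune, which keeps the mapping table small. A minimal standalone sketch (the one-byte pattern here is a hypothetical table entry; ASCII case folding is the classic instance):

```go
package main

import "fmt"

func main() {
	in := []byte("A")   // UTF-8 bytes of the input rune
	xor := []byte{0x20} // stored XOR pattern (hypothetical table entry)

	// Apply the pattern to the tail of the output, as the non-inline
	// branch of appendMapping does with xorData.
	out := append([]byte{}, in...)
	for i := range xor {
		out[len(out)-len(xor)+i] ^= xor[i]
	}
	fmt.Println(string(out)) // "a": one shared pattern can cover many runes
}
```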
+
+// Sparse block handling code.
+
+type valueRange struct {
+ value uint16 // header: value:stride
+ lo, hi byte // header: lo:n
+}
+
+type sparseBlocks struct {
+ values []valueRange
+ offset []uint16
+}
+
+var idnaSparse = sparseBlocks{
+ values: idnaSparseValues[:],
+ offset: idnaSparseOffset[:],
+}
+
+// Don't use newIdnaTrie, to avoid unconditionally linking in the table.
+var trie = &idnaTrie{}
+
+// lookup determines the type of block n and looks up the value for b.
+// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
+// is a list of ranges with an accompanying value. Given a matching range r,
+// the value for b is given by r.value + (b - r.lo) * stride.
+func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
+ offset := t.offset[n]
+ header := t.values[offset]
+ lo := offset + 1
+ hi := lo + uint16(header.lo)
+ for lo < hi {
+ m := lo + (hi-lo)/2
+ r := t.values[m]
+ if r.lo <= b && b <= r.hi {
+ return r.value + uint16(b-r.lo)*header.value
+ }
+ if b < r.lo {
+ hi = m
+ } else {
+ lo = m + 1
+ }
+ }
+ return 0
+}
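
To make the sparse-block layout concrete, here is a toy, self-contained re-creation of the binary search in sparseBlocks.lookup. The header entry's value field is the stride and its lo field is the number of ranges that follow; the table values below are made up for illustration:

```go
package main

import "fmt"

type valueRange struct {
	value  uint16 // in a header entry: the stride
	lo, hi byte   // in a header entry: lo is the number of ranges
}

// lookup mirrors sparseBlocks.lookup for a single block starting at offset.
func lookup(values []valueRange, offset uint16, b byte) uint16 {
	header := values[offset]
	lo := offset + 1
	hi := lo + uint16(header.lo)
	for lo < hi {
		m := lo + (hi-lo)/2
		r := values[m]
		if r.lo <= b && b <= r.hi {
			return r.value + uint16(b-r.lo)*header.value
		}
		if b < r.lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	return 0
}

func main() {
	// One block shaped like the generated tables above: stride 1, two ranges.
	values := []valueRange{
		{value: 0x0001, lo: 0x02},           // header
		{value: 0x0008, lo: 0x80, hi: 0x9f}, // bytes 0x80..0x9f -> 0x08, 0x09, ...
		{value: 0x0040, lo: 0xa0, hi: 0xbf}, // bytes 0xa0..0xbf -> 0x40, 0x41, ...
	}
	fmt.Println(lookup(values, 0, 0x81)) // 9: 0x0008 + (0x81-0x80)*1
	fmt.Println(lookup(values, 0, 0xa2)) // 66: 0x0040 + (0xa2-0xa0)*1
	fmt.Println(lookup(values, 0, 0x70)) // 0: no range matches
}
```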
diff --git a/src/vendor/golang.org/x/net/idna/trieval.go b/src/vendor/golang.org/x/net/idna/trieval.go
new file mode 100644
index 000000000..7a8cf889b
--- /dev/null
+++ b/src/vendor/golang.org/x/net/idna/trieval.go
@@ -0,0 +1,119 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package idna
+
+// This file contains definitions for interpreting the trie value of the idna
+// trie generated by "go run gen*.go". It is shared by both the generator
+// program and the resultant package. Sharing is achieved by the generator
+// copying gen_trieval.go to trieval.go and changing what's above this comment.
+
+// info holds information from the IDNA mapping table for a single rune. It is
+// the value returned by a trie lookup. In most cases, all information fits in
+// a 16-bit value. For mappings, this value may contain an index into a slice
+// with the mapped string. Such mappings can consist of the actual mapped value
+// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
+// input rune. This technique is used by the cases packages and reduces the
+// table size significantly.
+//
+// The per-rune values have the following format:
+//
+// if mapped {
+// if inlinedXOR {
+// 15..13 inline XOR marker
+// 12..11 unused
+// 10..3 inline XOR mask
+// } else {
+// 15..3 index into xor or mapping table
+// }
+// } else {
+// 15..14 unused
+// 13 mayNeedNorm
+// 12..11 attributes
+// 10..8 joining type
+// 7..3 category type
+// }
+// 2 use xor pattern
+// 1..0 mapped category
+//
+// See the definitions below for a more detailed description of the various
+// bits.
+type info uint16
+
+const (
+ catSmallMask = 0x3
+ catBigMask = 0xF8
+ indexShift = 3
+ xorBit = 0x4 // interpret the index as an xor pattern
+ inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined.
+
+ joinShift = 8
+ joinMask = 0x07
+
+ // Attributes
+ attributesMask = 0x1800
+ viramaModifier = 0x1800
+ modifier = 0x1000
+ rtl = 0x0800
+
+ mayNeedNorm = 0x2000
+)
+
+// A category corresponds to a category defined in the IDNA mapping table.
+type category uint16
+
+const (
+ unknown category = 0 // not currently defined in Unicode.
+ mapped category = 1
+ disallowedSTD3Mapped category = 2
+ deviation category = 3
+)
+
+const (
+ valid category = 0x08
+ validNV8 category = 0x18
+ validXV8 category = 0x28
+ disallowed category = 0x40
+ disallowedSTD3Valid category = 0x80
+ ignored category = 0xC0
+)
+
+// join types and additional rune information
+const (
+ joiningL = (iota + 1)
+ joiningD
+ joiningT
+ joiningR
+
+ // the following types are derived during processing
+ joinZWJ
+ joinZWNJ
+ joinVirama
+ numJoinTypes
+)
+
+func (c info) isMapped() bool {
+ return c&0x3 != 0
+}
+
+func (c info) category() category {
+ small := c & catSmallMask
+ if small != 0 {
+ return category(small)
+ }
+ return category(c & catBigMask)
+}
+
+func (c info) joinType() info {
+ if c.isMapped() {
+ return 0
+ }
+ return (c >> joinShift) & joinMask
+}
+
+func (c info) isModifier() bool {
+ return c&(modifier|catSmallMask) == modifier
+}
+
+func (c info) isViramaModifier() bool {
+ return c&(attributesMask|catSmallMask) == viramaModifier
+}
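
A small, standalone sketch of decoding one 16-bit info word the way the accessors above do; the masks are copied from the constants in this file, and the sample word is made up:

```go
package main

import "fmt"

const (
	catSmallMask = 0x3
	catBigMask   = 0xF8
	joinShift    = 8
	joinMask     = 0x07
)

type info uint16

func main() {
	// Hypothetical word: the "valid" category (0x08 in bits 7..3) plus
	// joining type 2 (joiningD) in bits 10..8; low two bits zero (unmapped).
	w := info(0x08 | 2<<joinShift)

	fmt.Println(w&catSmallMask != 0)         // false: w is not a mapped rune
	fmt.Println(uint16(w) & catBigMask)      // 8: the "valid" category
	fmt.Println((w >> joinShift) & joinMask) // 2: joiningD
}
```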
diff --git a/src/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/src/vendor/golang.org/x/oauth2/CONTRIBUTING.md
index 46aa2b12d..dfbed62cf 100644
--- a/src/vendor/golang.org/x/oauth2/CONTRIBUTING.md
+++ b/src/vendor/golang.org/x/oauth2/CONTRIBUTING.md
@@ -4,16 +4,15 @@ Go is an open source project.
It is the work of hundreds of contributors. We appreciate your help!
-
## Filing issues
When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
-1. What version of Go are you using (`go version`)?
-2. What operating system and processor architecture are you using?
-3. What did you do?
-4. What did you expect to see?
-5. What did you see instead?
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.
@@ -23,9 +22,5 @@ The gophers there will answer or ask you to file an issue if you've tripped over
Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.
-**We do not accept GitHub pull requests**
-(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
-
Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.
-
diff --git a/src/vendor/golang.org/x/oauth2/README.md b/src/vendor/golang.org/x/oauth2/README.md
index eb8dcee17..0f443e693 100644
--- a/src/vendor/golang.org/x/oauth2/README.md
+++ b/src/vendor/golang.org/x/oauth2/README.md
@@ -19,54 +19,12 @@ See godoc for further documentation and examples.
* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+## Policy for new packages
-## App Engine
-
-In change 96e89be (March 2015), we removed the `oauth2.Context2` type in favor
-of the [`context.Context`](https://golang.org/x/net/context#Context) type from
-the `golang.org/x/net/context` package
-
-This means it's no longer possible to use the "Classic App Engine"
-`appengine.Context` type with the `oauth2` package. (You're using
-Classic App Engine if you import the package `"appengine"`.)
-
-To work around this, you may use the new `"google.golang.org/appengine"`
-package. This package has almost the same API as the `"appengine"` package,
-but it can be fetched with `go get` and used on "Managed VMs" and well as
-Classic App Engine.
-
-See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
-for information on updating your app.
-
-If you don't want to update your entire app to use the new App Engine packages,
-you may use both sets of packages in parallel, using only the new packages
-with the `oauth2` package.
-
-```go
-import (
- "golang.org/x/net/context"
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/google"
- newappengine "google.golang.org/appengine"
- newurlfetch "google.golang.org/appengine/urlfetch"
-
- "appengine"
-)
-
-func handler(w http.ResponseWriter, r *http.Request) {
- var c appengine.Context = appengine.NewContext(r)
- c.Infof("Logging a message with the old package")
-
- var ctx context.Context = newappengine.NewContext(r)
- client := &http.Client{
- Transport: &oauth2.Transport{
- Source: google.AppEngineTokenSource(ctx, "scope"),
- Base: &newurlfetch.Transport{Context: ctx},
- },
- }
- client.Get("...")
-}
-```
+We no longer accept new provider-specific packages in this repo. For
+defining provider endpoints and provider-specific OAuth2 behavior, we
+encourage you to create packages elsewhere. We'll keep the existing
+packages for compatibility.
## Report Issues / Send Patches
diff --git a/src/vendor/golang.org/x/oauth2/client_appengine.go b/src/vendor/golang.org/x/oauth2/client_appengine.go
deleted file mode 100644
index 8962c49d1..000000000
--- a/src/vendor/golang.org/x/oauth2/client_appengine.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-// App Engine hooks.
-
-package oauth2
-
-import (
- "net/http"
-
- "golang.org/x/net/context"
- "golang.org/x/oauth2/internal"
- "google.golang.org/appengine/urlfetch"
-)
-
-func init() {
- internal.RegisterContextClientFunc(contextClientAppEngine)
-}
-
-func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
- return urlfetch.Client(ctx), nil
-}
diff --git a/src/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/src/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go
index 53a96b6d6..7a0b9ed10 100644
--- a/src/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go
+++ b/src/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go
@@ -14,12 +14,12 @@
package clientcredentials // import "golang.org/x/oauth2/clientcredentials"
import (
+ "context"
"fmt"
"net/http"
"net/url"
"strings"
- "golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/internal"
)
@@ -42,19 +42,27 @@ type Config struct {
// EndpointParams specifies additional parameters for requests to the token endpoint.
EndpointParams url.Values
+
+ // AuthStyle optionally specifies how the endpoint wants the
+ // client ID & client secret sent. The zero value means to
+ // auto-detect.
+ AuthStyle oauth2.AuthStyle
}
// Token uses client credentials to retrieve a token.
-// The HTTP client to use is derived from the context.
-// If nil, http.DefaultClient is used.
+//
+// The provided context optionally controls which HTTP client is used. See the oauth2.HTTPClient variable.
func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) {
return c.TokenSource(ctx).Token()
}
// Client returns an HTTP client using the provided token.
-// The token will auto-refresh as necessary. The underlying
-// HTTP transport will be obtained using the provided context.
-// The returned client and its Transport should not be modified.
+// The token will auto-refresh as necessary.
+//
+// The provided context optionally controls which HTTP client
+// is returned. See the oauth2.HTTPClient variable.
+//
+// The returned Client and its Transport should not be modified.
func (c *Config) Client(ctx context.Context) *http.Client {
return oauth2.NewClient(ctx, c.TokenSource(ctx))
}
@@ -82,16 +90,24 @@ type tokenSource struct {
func (c *tokenSource) Token() (*oauth2.Token, error) {
v := url.Values{
"grant_type": {"client_credentials"},
- "scope": internal.CondVal(strings.Join(c.conf.Scopes, " ")),
+ }
+ if len(c.conf.Scopes) > 0 {
+ v.Set("scope", strings.Join(c.conf.Scopes, " "))
}
for k, p := range c.conf.EndpointParams {
- if _, ok := v[k]; ok {
+ // Allow grant_type to be overridden, for interoperability with
+ // non-compliant implementations.
+ if _, ok := v[k]; ok && k != "grant_type" {
return nil, fmt.Errorf("oauth2: cannot overwrite parameter %q", k)
}
v[k] = p
}
- tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v)
+
+ tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v, internal.AuthStyle(c.conf.AuthStyle))
if err != nil {
+ if rErr, ok := err.(*internal.RetrieveError); ok {
+ return nil, (*oauth2.RetrieveError)(rErr)
+ }
return nil, err
}
t := &oauth2.Token{
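
For orientation, a hedged usage sketch of the two-legged client-credentials flow after these changes; the endpoint URL, credentials, and the audience parameter are hypothetical placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	conf := &clientcredentials.Config{
		ClientID:     "my-client", // placeholder
		ClientSecret: "my-secret", // placeholder
		TokenURL:     "https://auth.example.com/oauth/token",
		Scopes:       []string{"read"},
		// Extra parameters are merged into the token request; per the change
		// above, grant_type itself may now be overridden for non-compliant servers.
		EndpointParams: url.Values{"audience": {"https://api.example.com"}},
	}

	tok, err := conf.Token(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access token:", tok.AccessToken)

	// Client returns an *http.Client that attaches and auto-refreshes the token.
	client := conf.Client(context.Background())
	_, _ = client.Get("https://api.example.com/resource")
}
```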
diff --git a/src/vendor/golang.org/x/oauth2/go.mod b/src/vendor/golang.org/x/oauth2/go.mod
new file mode 100644
index 000000000..b34578155
--- /dev/null
+++ b/src/vendor/golang.org/x/oauth2/go.mod
@@ -0,0 +1,10 @@
+module golang.org/x/oauth2
+
+go 1.11
+
+require (
+ cloud.google.com/go v0.34.0
+ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e
+ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
+ google.golang.org/appengine v1.4.0
+)
diff --git a/src/vendor/golang.org/x/oauth2/go.sum b/src/vendor/golang.org/x/oauth2/go.sum
new file mode 100644
index 000000000..6f0079b0d
--- /dev/null
+++ b/src/vendor/golang.org/x/oauth2/go.sum
@@ -0,0 +1,12 @@
+cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
diff --git a/src/vendor/golang.org/x/oauth2/internal/client_appengine.go b/src/vendor/golang.org/x/oauth2/internal/client_appengine.go
new file mode 100644
index 000000000..743487188
--- /dev/null
+++ b/src/vendor/golang.org/x/oauth2/internal/client_appengine.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import "google.golang.org/appengine/urlfetch"
+
+func init() {
+ appengineClientHook = urlfetch.Client
+}
diff --git a/src/vendor/golang.org/x/oauth2/internal/oauth2.go b/src/vendor/golang.org/x/oauth2/internal/oauth2.go
index 6978192a9..c0ab196cf 100644
--- a/src/vendor/golang.org/x/oauth2/internal/oauth2.go
+++ b/src/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -5,14 +5,11 @@
package internal
import (
- "bufio"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
- "io"
- "strings"
)
// ParseKey converts the binary contents of a private key file
@@ -29,7 +26,7 @@ func ParseKey(key []byte) (*rsa.PrivateKey, error) {
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
if err != nil {
- return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err)
+ return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
}
}
parsed, ok := parsedKey.(*rsa.PrivateKey)
@@ -38,38 +35,3 @@ func ParseKey(key []byte) (*rsa.PrivateKey, error) {
}
return parsed, nil
}
-
-func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
- result := map[string]map[string]string{
- "": {}, // root section
- }
- scanner := bufio.NewScanner(ini)
- currentSection := ""
- for scanner.Scan() {
- line := strings.TrimSpace(scanner.Text())
- if strings.HasPrefix(line, ";") {
- // comment.
- continue
- }
- if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
- currentSection = strings.TrimSpace(line[1 : len(line)-1])
- result[currentSection] = map[string]string{}
- continue
- }
- parts := strings.SplitN(line, "=", 2)
- if len(parts) == 2 && parts[0] != "" {
- result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
- }
- }
- if err := scanner.Err(); err != nil {
- return nil, fmt.Errorf("error scanning ini: %v", err)
- }
- return result, nil
-}
-
-func CondVal(v string) []string {
- if v == "" {
- return nil
- }
- return []string{v}
-}
diff --git a/src/vendor/golang.org/x/oauth2/internal/token.go b/src/vendor/golang.org/x/oauth2/internal/token.go
index cf959ea69..83f7847e4 100644
--- a/src/vendor/golang.org/x/oauth2/internal/token.go
+++ b/src/vendor/golang.org/x/oauth2/internal/token.go
@@ -5,22 +5,25 @@
package internal
import (
+ "context"
"encoding/json"
+ "errors"
"fmt"
"io"
"io/ioutil"
+ "math"
"mime"
"net/http"
"net/url"
"strconv"
"strings"
+ "sync"
"time"
- "golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
-// Token represents the crendentials used to authorize
+// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
@@ -76,6 +79,9 @@ func (e *tokenJSON) expiry() (t time.Time) {
type expirationTime int32
func (e *expirationTime) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 || string(b) == "null" {
+ return nil
+ }
var n json.Number
err := json.Unmarshal(b, &n)
if err != nil {
@@ -85,95 +91,78 @@ func (e *expirationTime) UnmarshalJSON(b []byte) error {
if err != nil {
return err
}
+ if i > math.MaxInt32 {
+ i = math.MaxInt32
+ }
*e = expirationTime(i)
return nil
}
-var brokenAuthHeaderProviders = []string{
- "https://accounts.google.com/",
- "https://api.codeswholesale.com/oauth/token",
- "https://api.dropbox.com/",
- "https://api.dropboxapi.com/",
- "https://api.instagram.com/",
- "https://api.netatmo.net/",
- "https://api.odnoklassniki.ru/",
- "https://api.pushbullet.com/",
- "https://api.soundcloud.com/",
- "https://api.twitch.tv/",
- "https://app.box.com/",
- "https://connect.stripe.com/",
- "https://graph.facebook.com", // see https://github.com/golang/oauth2/issues/214
- "https://login.microsoftonline.com/",
- "https://login.salesforce.com/",
- "https://login.windows.net",
- "https://oauth.sandbox.trainingpeaks.com/",
- "https://oauth.trainingpeaks.com/",
- "https://oauth.vk.com/",
- "https://openapi.baidu.com/",
- "https://slack.com/",
- "https://test-sandbox.auth.corp.google.com",
- "https://test.salesforce.com/",
- "https://user.gini.net/",
- "https://www.douban.com/",
- "https://www.googleapis.com/",
- "https://www.linkedin.com/",
- "https://www.strava.com/oauth/",
- "https://www.wunderlist.com/oauth/",
- "https://api.patreon.com/",
- "https://sandbox.codeswholesale.com/oauth/token",
- "https://api.sipgate.com/v1/authorization/oauth",
+// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
+//
+// Deprecated: this function no longer does anything. Caller code that
+// wants to avoid potential extra HTTP requests made during
+// auto-probing of the provider's auth style should set
+// Endpoint.AuthStyle.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
+
+// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type.
+type AuthStyle int
+
+const (
+ AuthStyleUnknown AuthStyle = 0
+ AuthStyleInParams AuthStyle = 1
+ AuthStyleInHeader AuthStyle = 2
+)
+
+// authStyleCache is the set of tokenURLs we've successfully used via
+// RetrieveToken and which style auth we ended up using.
+// It's called a cache, but it doesn't (yet?) shrink. It's expected that
+// the set of OAuth2 servers a program contacts over time is fixed and
+// small.
+var authStyleCache struct {
+ sync.Mutex
+ m map[string]AuthStyle // keyed by tokenURL
}
-// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints.
-var brokenAuthHeaderDomains = []string{
- ".force.com",
- ".myshopify.com",
- ".okta.com",
- ".oktapreview.com",
+// ResetAuthCache resets the global authentication style cache used
+// for AuthStyleUnknown token requests.
+func ResetAuthCache() {
+ authStyleCache.Lock()
+ defer authStyleCache.Unlock()
+ authStyleCache.m = nil
}
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {
- brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
+// lookupAuthStyle reports which auth style we last used with tokenURL
+// when calling RetrieveToken and whether we have ever done so.
+func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) {
+ authStyleCache.Lock()
+ defer authStyleCache.Unlock()
+ style, ok = authStyleCache.m[tokenURL]
+ return
}
-// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
-// implements the OAuth2 spec correctly
-// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
-// In summary:
-// - Reddit only accepts client secret in the Authorization header
-// - Dropbox accepts either it in URL param or Auth header, but not both.
-// - Google only accepts URL param (not spec compliant?), not Auth header
-// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
-func providerAuthHeaderWorks(tokenURL string) bool {
- for _, s := range brokenAuthHeaderProviders {
- if strings.HasPrefix(tokenURL, s) {
- // Some sites fail to implement the OAuth2 spec fully.
- return false
- }
+// setAuthStyle adds an entry to authStyleCache, documented above.
+func setAuthStyle(tokenURL string, v AuthStyle) {
+ authStyleCache.Lock()
+ defer authStyleCache.Unlock()
+ if authStyleCache.m == nil {
+ authStyleCache.m = make(map[string]AuthStyle)
}
-
- if u, err := url.Parse(tokenURL); err == nil {
- for _, s := range brokenAuthHeaderDomains {
- if strings.HasSuffix(u.Host, s) {
- return false
- }
- }
- }
-
- // Assume the provider implements the spec properly
- // otherwise. We can add more exceptions as they're
- // discovered. We will _not_ be adding configurable hooks
- // to this package to let users select server bugs.
- return true
+ authStyleCache.m[tokenURL] = v
}
-func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) {
- hc, err := ContextClient(ctx)
- if err != nil {
- return nil, err
- }
- bustedAuth := !providerAuthHeaderWorks(tokenURL)
- if bustedAuth {
+// newTokenRequest returns a new *http.Request to retrieve a new token
+// from tokenURL using the provided clientID, clientSecret, and POST
+// body parameters.
+//
+// authStyle specifies how the clientID & clientSecret should be
+// encoded: AuthStyleInParams sends them in the POST body (along with
+// any values in v); AuthStyleInHeader sends them in the Authorization
+// header.
+func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) {
+ if authStyle == AuthStyleInParams {
+ v = cloneURLValues(v)
if clientID != "" {
v.Set("client_id", clientID)
}
@@ -186,20 +175,78 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- if !bustedAuth {
+ if authStyle == AuthStyleInHeader {
req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret))
}
- r, err := ctxhttp.Do(ctx, hc, req)
+ return req, nil
+}
+
+func cloneURLValues(v url.Values) url.Values {
+ v2 := make(url.Values, len(v))
+ for k, vv := range v {
+ v2[k] = append([]string(nil), vv...)
+ }
+ return v2
+}
+
+func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) {
+ needsAuthStyleProbe := authStyle == 0
+ if needsAuthStyleProbe {
+ if style, ok := lookupAuthStyle(tokenURL); ok {
+ authStyle = style
+ needsAuthStyleProbe = false
+ } else {
+ authStyle = AuthStyleInHeader // the first way we'll try
+ }
+ }
+ req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
+ if err != nil {
+ return nil, err
+ }
+ token, err := doTokenRoundTrip(ctx, req)
+ if err != nil && needsAuthStyleProbe {
+ // If we get an error, assume the server wants the
+ // clientID & clientSecret in a different form.
+ // See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+ // In summary:
+ // - Reddit only accepts client secret in the Authorization header
+ // - Dropbox accepts either it in URL param or Auth header, but not both.
+ // - Google only accepts URL param (not spec compliant?), not Auth header
+ // - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+ //
+ // We used to maintain a big table in this code of all the sites and which way
+ // they went, but maintaining it didn't scale & got annoying.
+ // So just try both ways.
+ authStyle = AuthStyleInParams // the second way we'll try
+ req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle)
+ token, err = doTokenRoundTrip(ctx, req)
+ }
+ if needsAuthStyleProbe && err == nil {
+ setAuthStyle(tokenURL, authStyle)
+ }
+ // Don't overwrite `RefreshToken` with an empty value
+ // if this was a token refreshing request.
+ if token != nil && token.RefreshToken == "" {
+ token.RefreshToken = v.Get("refresh_token")
+ }
+ return token, err
+}
+
+func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
+ r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
if err != nil {
return nil, err
}
- defer r.Body.Close()
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+ r.Body.Close()
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
if code := r.StatusCode; code < 200 || code > 299 {
- return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
+ return nil, &RetrieveError{
+ Response: r,
+ Body: body,
+ }
}
var token *Token
@@ -217,7 +264,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
Raw: vals,
}
e := vals.Get("expires_in")
- if e == "" {
+ if e == "" || e == "null" {
// TODO(jbd): Facebook's OAuth2 implementation is broken and
// returns expires_in field in expires. Remove the fallback to expires,
// when Facebook fixes their implementation.
@@ -241,10 +288,17 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
}
json.Unmarshal(body, &token.Raw) // no error checks for optional fields
}
- // Don't overwrite `RefreshToken` with an empty value
- // if this was a token refreshing request.
- if token.RefreshToken == "" {
- token.RefreshToken = v.Get("refresh_token")
+ if token.AccessToken == "" {
+ return nil, errors.New("oauth2: server response missing access_token")
}
return token, nil
}
+
+type RetrieveError struct {
+ Response *http.Response
+ Body []byte
+}
+
+func (r *RetrieveError) Error() string {
+ return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
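With the hard-coded provider tables removed, RetrieveToken above probes AuthStyleInHeader first, retries once with AuthStyleInParams, and caches whichever style succeeded per tokenURL. A standalone sketch of that probe-and-cache strategy; tryToken is a hypothetical stand-in for one token round trip, not the vendored API:

package tokenprobe

import "sync"

type authStyle int

const (
	styleInHeader authStyle = iota
	styleInParams
)

var cache = struct {
	sync.Mutex
	m map[string]authStyle
}{m: map[string]authStyle{}}

// tryToken stands in for one token-endpoint round trip with the given style.
func tryToken(tokenURL string, s authStyle) error { return nil }

// fetch tries the header style first, falls back to body parameters,
// and remembers whichever style succeeded for this tokenURL.
func fetch(tokenURL string) error {
	cache.Lock()
	s, ok := cache.m[tokenURL]
	cache.Unlock()
	if ok {
		return tryToken(tokenURL, s)
	}
	s = styleInHeader
	err := tryToken(tokenURL, s)
	if err != nil {
		s = styleInParams
		err = tryToken(tokenURL, s)
	}
	if err == nil {
		cache.Lock()
		cache.m[tokenURL] = s
		cache.Unlock()
	}
	return err
}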
diff --git a/src/vendor/golang.org/x/oauth2/internal/transport.go b/src/vendor/golang.org/x/oauth2/internal/transport.go
index 783bd98c8..572074a63 100644
--- a/src/vendor/golang.org/x/oauth2/internal/transport.go
+++ b/src/vendor/golang.org/x/oauth2/internal/transport.go
@@ -5,9 +5,8 @@
package internal
import (
+ "context"
"net/http"
-
- "golang.org/x/net/context"
)
// HTTPClient is the context key to use with golang.org/x/net/context's
@@ -19,50 +18,16 @@ var HTTPClient ContextKey
// because nobody else can create a ContextKey, being unexported.
type ContextKey struct{}
-// ContextClientFunc is a func which tries to return an *http.Client
-// given a Context value. If it returns an error, the search stops
-// with that error. If it returns (nil, nil), the search continues
-// down the list of registered funcs.
-type ContextClientFunc func(context.Context) (*http.Client, error)
+var appengineClientHook func(context.Context) *http.Client
-var contextClientFuncs []ContextClientFunc
-
-func RegisterContextClientFunc(fn ContextClientFunc) {
- contextClientFuncs = append(contextClientFuncs, fn)
-}
-
-func ContextClient(ctx context.Context) (*http.Client, error) {
+func ContextClient(ctx context.Context) *http.Client {
if ctx != nil {
if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
- return hc, nil
+ return hc
}
}
- for _, fn := range contextClientFuncs {
- c, err := fn(ctx)
- if err != nil {
- return nil, err
- }
- if c != nil {
- return c, nil
- }
+ if appengineClientHook != nil {
+ return appengineClientHook(ctx)
}
- return http.DefaultClient, nil
-}
-
-func ContextTransport(ctx context.Context) http.RoundTripper {
- hc, err := ContextClient(ctx)
- // This is a rare error case (somebody using nil on App Engine).
- if err != nil {
- return ErrorTransport{err}
- }
- return hc.Transport
-}
-
-// ErrorTransport returns the specified error on RoundTrip.
-// This RoundTripper should be used in rare error cases where
-// error handling can be postponed to response handling time.
-type ErrorTransport struct{ Err error }
-
-func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
- return nil, t.Err
+ return http.DefaultClient
}
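ContextClient now resolves the HTTP client from the context value, then the App Engine hook, then http.DefaultClient, instead of walking a list of registered funcs. From the public package, callers choose the client used for token requests by storing it under the oauth2.HTTPClient key; a short sketch:

package main

import (
	"context"
	"net/http"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// Route all token-endpoint traffic through a client with a timeout.
	hc := &http.Client{Timeout: 10 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, hc)
	_ = ctx // pass ctx to Exchange, PasswordCredentialsToken, etc.
}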
diff --git a/src/vendor/golang.org/x/oauth2/oauth2.go b/src/vendor/golang.org/x/oauth2/oauth2.go
index 4bafe873d..428283f0b 100644
--- a/src/vendor/golang.org/x/oauth2/oauth2.go
+++ b/src/vendor/golang.org/x/oauth2/oauth2.go
@@ -3,19 +3,20 @@
// license that can be found in the LICENSE file.
// Package oauth2 provides support for making
-// OAuth2 authorized and authenticated HTTP requests.
+// OAuth2 authorized and authenticated HTTP requests,
+// as specified in RFC 6749.
// It can additionally grant authorization with Bearer JWT.
package oauth2 // import "golang.org/x/oauth2"
import (
"bytes"
+ "context"
"errors"
"net/http"
"net/url"
"strings"
"sync"
- "golang.org/x/net/context"
"golang.org/x/oauth2/internal"
)
@@ -25,17 +26,13 @@ import (
// Deprecated: Use context.Background() or context.TODO() instead.
var NoContext = context.TODO()
-// RegisterBrokenAuthHeaderProvider registers an OAuth2 server
-// identified by the tokenURL prefix as an OAuth2 implementation
-// which doesn't support the HTTP Basic authentication
-// scheme to authenticate with the authorization server.
-// Once a server is registered, credentials (client_id and client_secret)
-// will be passed as query parameters rather than being present
-// in the Authorization header.
-// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
-func RegisterBrokenAuthHeaderProvider(tokenURL string) {
- internal.RegisterBrokenAuthHeaderProvider(tokenURL)
-}
+// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op.
+//
+// Deprecated: this function no longer does anything. Caller code that
+// wants to avoid potential extra HTTP requests made during
+// auto-probing of the provider's auth style should set
+// Endpoint.AuthStyle.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
// Config describes a typical 3-legged OAuth2 flow, with both the
// client application information and the server's endpoint URLs.
@@ -70,13 +67,38 @@ type TokenSource interface {
Token() (*Token, error)
}
-// Endpoint contains the OAuth 2.0 provider's authorization and token
+// Endpoint represents an OAuth 2.0 provider's authorization and token
// endpoint URLs.
type Endpoint struct {
AuthURL string
TokenURL string
+
+ // AuthStyle optionally specifies how the endpoint wants the
+ // client ID & client secret sent. The zero value means to
+ // auto-detect.
+ AuthStyle AuthStyle
}
+// AuthStyle represents how requests for tokens are authenticated
+// to the server.
+type AuthStyle int
+
+const (
+ // AuthStyleAutoDetect means to auto-detect which authentication
+ // style the provider wants by trying both ways and caching
+ // the successful way for the future.
+ AuthStyleAutoDetect AuthStyle = 0
+
+ // AuthStyleInParams sends the "client_id" and "client_secret"
+ // in the POST body as application/x-www-form-urlencoded parameters.
+ AuthStyleInParams AuthStyle = 1
+
+ // AuthStyleInHeader sends the client_id and client_password
+ // using HTTP Basic Authorization. This is an optional style
+ // described in the OAuth2 RFC 6749 section 2.3.1.
+ AuthStyleInHeader AuthStyle = 2
+)
+
var (
// AccessTypeOnline and AccessTypeOffline are options passed
// to the Options.AuthCodeURL method. They modify the
@@ -117,21 +139,30 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
// that asks for permissions for the required scopes explicitly.
//
// State is a token to protect the user from CSRF attacks. You must
-// always provide a non-zero string and validate that it matches the
+// always provide a non-empty string and validate that it matches the
// state query parameter on your redirect callback.
// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
//
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
// as ApprovalForce.
+// It can also be used to pass the PKCE challenge.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
var buf bytes.Buffer
buf.WriteString(c.Endpoint.AuthURL)
v := url.Values{
"response_type": {"code"},
"client_id": {c.ClientID},
- "redirect_uri": internal.CondVal(c.RedirectURL),
- "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
- "state": internal.CondVal(state),
+ }
+ if c.RedirectURL != "" {
+ v.Set("redirect_uri", c.RedirectURL)
+ }
+ if len(c.Scopes) > 0 {
+ v.Set("scope", strings.Join(c.Scopes, " "))
+ }
+ if state != "" {
+ // TODO(light): Docs say never to omit state; don't allow empty.
+ v.Set("state", state)
}
for _, opt := range opts {
opt.setValue(v)
@@ -154,15 +185,17 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
// and when other authorization grant types are not available."
// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
//
-// The HTTP client to use is derived from the context.
-// If nil, http.DefaultClient is used.
+// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
- return retrieveToken(ctx, c, url.Values{
+ v := url.Values{
"grant_type": {"password"},
"username": {username},
"password": {password},
- "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
- })
+ }
+ if len(c.Scopes) > 0 {
+ v.Set("scope", strings.Join(c.Scopes, " "))
+ }
+ return retrieveToken(ctx, c, v)
}
// Exchange converts an authorization code into a token.
@@ -170,17 +203,25 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor
// It is used after a resource provider redirects the user back
// to the Redirect URI (the URL obtained from AuthCodeURL).
//
-// The HTTP client to use is derived from the context.
-// If a client is not provided via the context, http.DefaultClient is used.
+// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
//
// The code will be in the *http.Request.FormValue("code"). Before
// calling Exchange, be sure to validate FormValue("state").
-func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
- return retrieveToken(ctx, c, url.Values{
- "grant_type": {"authorization_code"},
- "code": {code},
- "redirect_uri": internal.CondVal(c.RedirectURL),
- })
+//
+// Opts may include the PKCE verifier code if previously used in AuthCodeURL.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) {
+ v := url.Values{
+ "grant_type": {"authorization_code"},
+ "code": {code},
+ }
+ if c.RedirectURL != "" {
+ v.Set("redirect_uri", c.RedirectURL)
+ }
+ for _, opt := range opts {
+ opt.setValue(v)
+ }
+ return retrieveToken(ctx, c, v)
}
// Client returns an HTTP client using the provided token.
@@ -300,15 +341,11 @@ var HTTPClient internal.ContextKey
// packages.
func NewClient(ctx context.Context, src TokenSource) *http.Client {
if src == nil {
- c, err := internal.ContextClient(ctx)
- if err != nil {
- return &http.Client{Transport: internal.ErrorTransport{Err: err}}
- }
- return c
+ return internal.ContextClient(ctx)
}
return &http.Client{
Transport: &Transport{
- Base: internal.ContextTransport(ctx),
+ Base: internal.ContextClient(ctx).Transport,
Source: ReuseTokenSource(nil, src),
},
}
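The public surface above gains Endpoint.AuthStyle and variadic options on Exchange, with PKCE called out in the docs. A sketch of both together; the URLs, state, code, and PKCE values are hypothetical, and since this oauth2 revision ships no PKCE helpers the parameters are set by hand:

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:    "my-client",
		RedirectURL: "https://app.example.com/callback", // hypothetical
		Scopes:      []string{"profile"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/auth",
			TokenURL: "https://provider.example.com/token",
			// A known style skips the auto-detect probe (and its
			// possible extra round trip).
			AuthStyle: oauth2.AuthStyleInParams,
		},
	}

	url := conf.AuthCodeURL("state-token",
		oauth2.SetAuthURLParam("code_challenge", "challenge-value"),
		oauth2.SetAuthURLParam("code_challenge_method", "S256"))
	fmt.Println("visit:", url)

	tok, err := conf.Exchange(context.Background(), "auth-code",
		oauth2.SetAuthURLParam("code_verifier", "verifier-value"))
	if err != nil {
		fmt.Println("exchange failed:", err)
		return
	}
	fmt.Println("token:", tok.AccessToken)
}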
diff --git a/src/vendor/golang.org/x/oauth2/token.go b/src/vendor/golang.org/x/oauth2/token.go
index 7a3167f15..822720341 100644
--- a/src/vendor/golang.org/x/oauth2/token.go
+++ b/src/vendor/golang.org/x/oauth2/token.go
@@ -5,13 +5,14 @@
package oauth2
import (
+ "context"
+ "fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
- "golang.org/x/net/context"
"golang.org/x/oauth2/internal"
)
@@ -20,7 +21,7 @@ import (
// expirations due to client-server time mismatches.
const expiryDelta = 10 * time.Second
-// Token represents the crendentials used to authorize
+// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
@@ -117,13 +118,16 @@ func (t *Token) Extra(key string) interface{} {
return v
}
+// timeNow is time.Now but pulled out as a variable for tests.
+var timeNow = time.Now
+
// expired reports whether the token is expired.
// t must be non-nil.
func (t *Token) expired() bool {
if t.Expiry.IsZero() {
return false
}
- return t.Expiry.Add(-expiryDelta).Before(time.Now())
+ return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
}
// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
@@ -150,9 +154,25 @@ func tokenFromInternal(t *internal.Token) *Token {
// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
// with an error.
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
- tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+ tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle))
if err != nil {
+ if rErr, ok := err.(*internal.RetrieveError); ok {
+ return nil, (*RetrieveError)(rErr)
+ }
return nil, err
}
return tokenFromInternal(tk), nil
}
+
+// RetrieveError is the error returned when the token endpoint returns a
+// non-2XX HTTP status code.
+type RetrieveError struct {
+ Response *http.Response
+ // Body is the body that was consumed by reading Response.Body.
+ // It may be truncated.
+ Body []byte
+}
+
+func (r *RetrieveError) Error() string {
+ return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
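Failed token fetches now surface as *oauth2.RetrieveError rather than a flat formatted string, so callers can inspect the status and body. A sketch using a plain type assertion (matching this revision's pre-errors.As vintage); the error fed to the helper in main is only a placeholder:

package main

import (
	"fmt"

	"golang.org/x/oauth2"
)

// inspectTokenErr recovers the HTTP status and body when the token
// endpoint answered with a non-2xx code.
func inspectTokenErr(err error) {
	if rErr, ok := err.(*oauth2.RetrieveError); ok {
		fmt.Println("status:", rErr.Response.Status)
		fmt.Println("body:", string(rErr.Body))
		return
	}
	fmt.Println("transport or decode error:", err)
}

func main() {
	// In real code err would come from conf.Exchange or TokenSource.Token.
	inspectTokenErr(fmt.Errorf("placeholder"))
}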
diff --git a/src/vendor/golang.org/x/oauth2/transport.go b/src/vendor/golang.org/x/oauth2/transport.go
index 92ac7e253..aa0d34f1e 100644
--- a/src/vendor/golang.org/x/oauth2/transport.go
+++ b/src/vendor/golang.org/x/oauth2/transport.go
@@ -31,9 +31,17 @@ type Transport struct {
}
// RoundTrip authorizes and authenticates the request with an
-// access token. If no token exists or token is expired,
-// tries to refresh/fetch a new token.
+// access token from Transport's Source.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ reqBodyClosed := false
+ if req.Body != nil {
+ defer func() {
+ if !reqBodyClosed {
+ req.Body.Close()
+ }
+ }()
+ }
+
if t.Source == nil {
return nil, errors.New("oauth2: Transport's Source is nil")
}
@@ -46,6 +54,10 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
token.SetAuthHeader(req2)
t.setModReq(req, req2)
res, err := t.base().RoundTrip(req2)
+
+ // req.Body is assumed to have been closed by the base RoundTripper.
+ reqBodyClosed = true
+
if err != nil {
t.setModReq(req, nil)
return nil, err
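RoundTrip now honors the http.RoundTripper contract that the outgoing request body is always closed, even when token retrieval fails before the base transport runs. Typical wiring of the Transport with a static token; the API URL is hypothetical:

package main

import (
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "my-token"})
	client := &http.Client{
		// Base is left nil, so the default transport runs underneath.
		Transport: &oauth2.Transport{Source: src},
	}
	resp, err := client.Get("https://api.example.com/resource") // hypothetical
	if err != nil {
		return
	}
	resp.Body.Close()
}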
diff --git a/src/vendor/golang.org/x/sys/unix/README.md b/src/vendor/golang.org/x/sys/unix/README.md
index bc6f6031f..eb2f78ae2 100644
--- a/src/vendor/golang.org/x/sys/unix/README.md
+++ b/src/vendor/golang.org/x/sys/unix/README.md
@@ -14,7 +14,7 @@ migrating the build system to use containers so the builds are reproducible.
This is being done on an OS-by-OS basis. Please update this documentation as
components of the build system change.
-### Old Build System (currently for `GOOS != "Linux" || GOARCH == "sparc64"`)
+### Old Build System (currently for `GOOS != "linux"`)
The old build system generates the Go files based on the C header files
present on your system. This means that files
@@ -32,9 +32,9 @@ To build the files for your current OS and architecture, make sure GOOS and
GOARCH are set correctly and run `mkall.sh`. This will generate the files for
your specific system. Running `mkall.sh -n` shows the commands that will be run.
-Requirements: bash, perl, go
+Requirements: bash, go
-### New Build System (currently for `GOOS == "Linux" && GOARCH != "sparc64"`)
+### New Build System (currently for `GOOS == "linux"`)
The new build system uses a Docker container to generate the go files directly
from source checkouts of the kernel and various system libraries. This means
@@ -52,14 +52,14 @@ system and have your GOOS and GOARCH set accordingly. Running `mkall.sh` will
then generate all of the files for all of the GOOS/GOARCH pairs in the new build
system. Running `mkall.sh -n` shows the commands that will be run.
-Requirements: bash, perl, go, docker
+Requirements: bash, go, docker
## Component files
This section describes the various files used in the code generation process.
It also contains instructions on how to modify these files to add a new
architecture/OS or to add additional syscalls, types, or constants. Note that
-if you are using the new build system, the scripts cannot be called normally.
+if you are using the new build system, the scripts/programs cannot be called normally.
They must be called from within the docker container.
### asm files
@@ -81,8 +81,8 @@ each GOOS/GOARCH pair.
### mksysnum
-Mksysnum is a script located at `${GOOS}/mksysnum.pl` (or `mksysnum_${GOOS}.pl`
-for the old system). This script takes in a list of header files containing the
+Mksysnum is a Go program located at `${GOOS}/mksysnum.go` (or `mksysnum_${GOOS}.go`
+for the old system). This program takes in a list of header files containing the
syscall number declarations and parses them to produce the corresponding list of
Go numeric constants. See `zsysnum_${GOOS}_${GOARCH}.go` for the generated
constants.
@@ -92,14 +92,14 @@ new installation of the target OS (or updating the source checkouts for the
new build system). However, depending on the OS, you may need to update the
parsing in mksysnum.
-### mksyscall.pl
+### mksyscall.go
The `syscall.go`, `syscall_${GOOS}.go`, `syscall_${GOOS}_${GOARCH}.go` are
hand-written Go files which implement system calls (for unix, the specific OS,
or the specific OS/Architecture pair respectively) that need special handling
and list `//sys` comments giving prototypes for ones that can be generated.
-The mksyscall.pl script takes the `//sys` and `//sysnb` comments and converts
+The mksyscall.go program takes the `//sys` and `//sysnb` comments and converts
them into syscalls. This requires the name of the prototype in the comment to
match a syscall number in the `zsysnum_${GOOS}_${GOARCH}.go` file. The function
prototype can be exported (capitalized) or not.
@@ -160,7 +160,7 @@ signal numbers, and constants. Generated by `mkerrors.sh` (see above).
### `zsyscall_${GOOS}_${GOARCH}.go`
A file containing all the generated syscalls for a specific GOOS and GOARCH.
-Generated by `mksyscall.pl` (see above).
+Generated by `mksyscall.go` (see above).
### `zsysnum_${GOOS}_${GOARCH}.go`
diff --git a/src/vendor/golang.org/x/sys/unix/affinity_linux.go b/src/vendor/golang.org/x/sys/unix/affinity_linux.go
new file mode 100644
index 000000000..72afe3338
--- /dev/null
+++ b/src/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -0,0 +1,124 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// CPU affinity functions
+
+package unix
+
+import (
+ "unsafe"
+)
+
+const cpuSetSize = _CPU_SETSIZE / _NCPUBITS
+
+// CPUSet represents a CPU affinity mask.
+type CPUSet [cpuSetSize]cpuMask
+
+func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
+ _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
+ if e != 0 {
+ return errnoErr(e)
+ }
+ return nil
+}
+
+// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid.
+// If pid is 0 the calling thread is used.
+func SchedGetaffinity(pid int, set *CPUSet) error {
+ return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set)
+}
+
+// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid.
+// If pid is 0 the calling thread is used.
+func SchedSetaffinity(pid int, set *CPUSet) error {
+ return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set)
+}
+
+// Zero clears the set s, so that it contains no CPUs.
+func (s *CPUSet) Zero() {
+ for i := range s {
+ s[i] = 0
+ }
+}
+
+func cpuBitsIndex(cpu int) int {
+ return cpu / _NCPUBITS
+}
+
+func cpuBitsMask(cpu int) cpuMask {
+ return cpuMask(1 << (uint(cpu) % _NCPUBITS))
+}
+
+// Set adds cpu to the set s.
+func (s *CPUSet) Set(cpu int) {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ s[i] |= cpuBitsMask(cpu)
+ }
+}
+
+// Clear removes cpu from the set s.
+func (s *CPUSet) Clear(cpu int) {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ s[i] &^= cpuBitsMask(cpu)
+ }
+}
+
+// IsSet reports whether cpu is in the set s.
+func (s *CPUSet) IsSet(cpu int) bool {
+ i := cpuBitsIndex(cpu)
+ if i < len(s) {
+ return s[i]&cpuBitsMask(cpu) != 0
+ }
+ return false
+}
+
+// Count returns the number of CPUs in the set s.
+func (s *CPUSet) Count() int {
+ c := 0
+ for _, b := range s {
+ c += onesCount64(uint64(b))
+ }
+ return c
+}
+
+// onesCount64 is a copy of Go 1.9's math/bits.OnesCount64.
+// Once this package can require Go 1.9, we can delete this
+// and update the caller to use bits.OnesCount64.
+func onesCount64(x uint64) int {
+ const m0 = 0x5555555555555555 // 01010101 ...
+ const m1 = 0x3333333333333333 // 00110011 ...
+ const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ...
+ const m3 = 0x00ff00ff00ff00ff // etc.
+ const m4 = 0x0000ffff0000ffff
+
+ // Implementation: Parallel summing of adjacent bits.
+ // See "Hacker's Delight", Chap. 5: Counting Bits.
+ // The following pattern shows the general approach:
+ //
+ // x = x>>1&(m0&m) + x&(m0&m)
+ // x = x>>2&(m1&m) + x&(m1&m)
+ // x = x>>4&(m2&m) + x&(m2&m)
+ // x = x>>8&(m3&m) + x&(m3&m)
+ // x = x>>16&(m4&m) + x&(m4&m)
+ // x = x>>32&(m5&m) + x&(m5&m)
+ // return int(x)
+ //
+ // Masking (& operations) can be left away when there's no
+ // danger that a field's sum will carry over into the next
+ // field: Since the result cannot be > 64, 8 bits is enough
+ // and we can ignore the masks for the shifts by 8 and up.
+ // Per "Hacker's Delight", the first line can be simplified
+ // more, but it saves at best one instruction, so we leave
+ // it alone for clarity.
+ const m = 1<<64 - 1
+ x = x>>1&(m0&m) + x&(m0&m)
+ x = x>>2&(m1&m) + x&(m1&m)
+ x = (x>>4 + x) & (m2 & m)
+ x += x >> 8
+ x += x >> 16
+ x += x >> 32
+ return int(x) & (1<<7 - 1)
+}
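The new affinity API above wraps sched_getaffinity(2)/sched_setaffinity(2) with a fixed-size bit-set type. A Linux-only sketch that reads the calling thread's mask and then pins it to CPU 0; pid 0 means the calling thread, so the goroutine is locked to its OS thread first:

package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sys/unix"
)

func main() {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	var set unix.CPUSet
	if err := unix.SchedGetaffinity(0, &set); err != nil {
		fmt.Println("getaffinity:", err)
		return
	}
	fmt.Println("usable CPUs:", set.Count())

	// Restrict this thread to CPU 0 only.
	set.Zero()
	set.Set(0)
	if err := unix.SchedSetaffinity(0, &set); err != nil {
		fmt.Println("setaffinity:", err)
	}
}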
diff --git a/src/vendor/golang.org/x/sys/unix/aliases.go b/src/vendor/golang.org/x/sys/unix/aliases.go
new file mode 100644
index 000000000..951fce4d0
--- /dev/null
+++ b/src/vendor/golang.org/x/sys/unix/aliases.go
@@ -0,0 +1,14 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build go1.9
+
+package unix
+
+import "syscall"
+
+type Signal = syscall.Signal
+type Errno = syscall.Errno
+type SysProcAttr = syscall.SysProcAttr
diff --git a/src/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s b/src/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
new file mode 100644
index 000000000..06f84b855
--- /dev/null
+++ b/src/vendor/golang.org/x/sys/unix/asm_aix_ppc64.s
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
+//
+
+TEXT ·syscall6(SB),NOSPLIT,$0-88
+ JMP syscall·syscall6(SB)
+
+TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
+ JMP syscall·rawSyscall6(SB)
diff --git a/src/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/src/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
index d5ed6726c..603dd5728 100644
--- a/src/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
+++ b/src/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s
@@ -13,17 +13,17 @@
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-64
+TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
-TEXT ·Syscall6(SB),NOSPLIT,$0-88
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
-TEXT ·Syscall9(SB),NOSPLIT,$0-112
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
JMP syscall·Syscall9(SB)
-TEXT ·RawSyscall(SB),NOSPLIT,$0-64
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-88
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
diff --git a/src/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s b/src/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s
new file mode 100644
index 000000000..d9318cbf0
--- /dev/null
+++ b/src/vendor/golang.org/x/sys/unix/asm_freebsd_arm64.s
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for ARM64, FreeBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ JMP syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ JMP syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ JMP syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ JMP syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ JMP syscall·RawSyscall6(SB)
diff --git a/src/vendor/golang.org/x/sys/unix/asm_linux_386.s b/src/vendor/golang.org/x/sys/unix/asm_linux_386.s
index 4db290932..448bebbb5 100644
--- a/src/vendor/golang.org/x/sys/unix/asm_linux_386.s
+++ b/src/vendor/golang.org/x/sys/unix/asm_linux_386.s
@@ -10,21 +10,51 @@
// System calls for 386, Linux
//
+// See ../runtime/sys_linux_386.s for the reason why we always use int 0x80
+// instead of the glibc-specific "CALL 0x10(GS)".
+#define INVOKE_SYSCALL INT $0x80
+
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-28
+TEXT ·Syscall(SB),NOSPLIT,$0-28
JMP syscall·Syscall(SB)
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
JMP syscall·Syscall6(SB)
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ CALL runtime·entersyscall(SB)
+ MOVL trap+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL $0, SI
+ MOVL $0, DI
+ INVOKE_SYSCALL
+ MOVL AX, r1+16(FP)
+ MOVL DX, r2+20(FP)
+ CALL runtime·exitsyscall(SB)
+ RET
+
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVL trap+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL a2+8(FP), CX
+ MOVL a3+12(FP), DX
+ MOVL $0, SI
+ MOVL $0, DI
+ INVOKE_SYSCALL
+ MOVL AX, r1+16(FP)
+ MOVL DX, r2+20(FP)
+ RET
+
TEXT ·socketcall(SB),NOSPLIT,$0-36
JMP syscall·socketcall(SB)
diff --git a/src/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/src/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
index 44e25c62f..c6468a958 100644
--- a/src/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
+++ b/src/vendor/golang.org/x/sys/unix/asm_linux_amd64.s
@@ -13,17 +13,45 @@
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-56
+TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ CALL runtime·entersyscall(SB)
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ MOVQ $0, R9
+ MOVQ trap+0(FP), AX // syscall entry
+ SYSCALL
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ CALL runtime·exitsyscall(SB)
+ RET
+
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVQ a1+8(FP), DI
+ MOVQ a2+16(FP), SI
+ MOVQ a3+24(FP), DX
+ MOVQ $0, R10
+ MOVQ $0, R8
+ MOVQ $0, R9
+ MOVQ trap+0(FP), AX // syscall entry
+ SYSCALL
+ MOVQ AX, r1+32(FP)
+ MOVQ DX, r2+40(FP)
+ RET
+
TEXT ·gettimeofday(SB),NOSPLIT,$0-16
JMP syscall·gettimeofday(SB)
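The new SyscallNoError/RawSyscallNoError stubs back generated wrappers for Linux syscalls that cannot fail, skipping errno handling entirely. A direct (Linux-only) sketch with getpid(2), which never returns an error:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	pid, _ := unix.RawSyscallNoError(unix.SYS_GETPID, 0, 0, 0)
	fmt.Println("pid:", pid)
}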
diff --git a/src/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/src/vendor/golang.org/x/sys/unix/asm_linux_arm.s
index cf0b57465..cf0f3575c 100644
--- a/src/vendor/golang.org/x/sys/unix/asm_linux_arm.s
+++ b/src/vendor/golang.org/x/sys/unix/asm_linux_arm.s
@@ -13,17 +13,44 @@
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-28
+TEXT ·Syscall(SB),NOSPLIT,$0-28
B syscall·Syscall(SB)
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
B syscall·Syscall6(SB)
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ BL runtime·entersyscall(SB)
+ MOVW trap+0(FP), R7
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ MOVW $0, R3
+ MOVW $0, R4
+ MOVW $0, R5
+ SWI $0
+ MOVW R0, r1+16(FP)
+ MOVW $0, R0
+ MOVW R0, r2+20(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
B syscall·RawSyscall(SB)
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
B syscall·RawSyscall6(SB)
-TEXT ·seek(SB),NOSPLIT,$0-32
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVW trap+0(FP), R7 // syscall entry
+ MOVW a1+4(FP), R0
+ MOVW a2+8(FP), R1
+ MOVW a3+12(FP), R2
+ SWI $0
+ MOVW R0, r1+16(FP)
+ MOVW $0, R0
+ MOVW R0, r2+20(FP)
+ RET
+
+TEXT ·seek(SB),NOSPLIT,$0-28
B syscall·seek(SB)
diff --git a/src/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/src/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
index 4be9bfede..afe6fdf6b 100644
--- a/src/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
+++ b/src/vendor/golang.org/x/sys/unix/asm_linux_arm64.s
@@ -11,14 +11,42 @@
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-56
+TEXT ·Syscall(SB),NOSPLIT,$0-56
B syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-80
B syscall·Syscall6(SB)
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD $0, R3
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD trap+0(FP), R8 // syscall entry
+ SVC
+ MOVD R0, r1+32(FP) // r1
+ MOVD R1, r2+40(FP) // r2
+ BL runtime·exitsyscall(SB)
+ RET
+
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
B syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
B syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R0
+ MOVD a2+16(FP), R1
+ MOVD a3+24(FP), R2
+ MOVD $0, R3
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD trap+0(FP), R8 // syscall entry
+ SVC
+ MOVD R0, r1+32(FP)
+ MOVD R1, r2+40(FP)
+ RET
diff --git a/src/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s b/src/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
index 724e580c4..ab9d63831 100644
--- a/src/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
+++ b/src/vendor/golang.org/x/sys/unix/asm_linux_mips64x.s
@@ -15,14 +15,42 @@
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-56
+TEXT ·Syscall(SB),NOSPLIT,$0-56
JMP syscall·Syscall(SB)
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
JMP syscall·Syscall6(SB)
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ JAL runtime·entersyscall(SB)
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVV R2, r1+32(FP)
+ MOVV R3, r2+40(FP)
+ JAL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
JMP syscall·RawSyscall(SB)
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVV a1+8(FP), R4
+ MOVV a2+16(FP), R5
+ MOVV a3+24(FP), R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVV R2, r1+32(FP)
+ MOVV R3, r2+40(FP)
+ RET
diff --git a/src/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s b/src/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
index 2ea425755..99e539904 100644
--- a/src/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
+++ b/src/vendor/golang.org/x/sys/unix/asm_linux_mipsx.s
@@ -15,17 +15,40 @@
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-28
+TEXT ·Syscall(SB),NOSPLIT,$0-28
JMP syscall·Syscall(SB)
-TEXT ·Syscall6(SB),NOSPLIT,$0-40
+TEXT ·Syscall6(SB),NOSPLIT,$0-40
JMP syscall·Syscall6(SB)
-TEXT ·Syscall9(SB),NOSPLIT,$0-52
+TEXT ·Syscall9(SB),NOSPLIT,$0-52
JMP syscall·Syscall9(SB)
-TEXT ·RawSyscall(SB),NOSPLIT,$0-28
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
+ JAL runtime·entersyscall(SB)
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW R0, R7
+ MOVW trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVW R2, r1+16(FP) // r1
+ MOVW R3, r2+20(FP) // r2
+ JAL runtime·exitsyscall(SB)
+ RET
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
+ MOVW a1+4(FP), R4
+ MOVW a2+8(FP), R5
+ MOVW a3+12(FP), R6
+ MOVW trap+0(FP), R2 // syscall entry
+ SYSCALL
+ MOVW R2, r1+16(FP)
+ MOVW R3, r2+20(FP)
+ RET
diff --git a/src/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/src/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
index 8d231feb4..88f712557 100644
--- a/src/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
+++ b/src/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s
@@ -15,14 +15,30 @@
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
-TEXT ·Syscall(SB),NOSPLIT,$0-56
- BR syscall·Syscall(SB)
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD R0, R6
+ MOVD R0, R7
+ MOVD R0, R8
+ MOVD trap+0(FP), R9 // syscall entry
+ SYSCALL R9
+ MOVD R3, r1+32(FP)
+ MOVD R4, r2+40(FP)
+ BL runtime·exitsyscall(SB)
+ RET
-TEXT ·Syscall6(SB),NOSPLIT,$0-80
- BR syscall·Syscall6(SB)
-
-TEXT ·RawSyscall(SB),NOSPLIT,$0-56
- BR syscall·RawSyscall(SB)
-
-TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
- BR syscall·RawSyscall6(SB)
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R3
+ MOVD a2+16(FP), R4
+ MOVD a3+24(FP), R5
+ MOVD R0, R6
+ MOVD R0, R7
+ MOVD R0, R8
+ MOVD trap+0(FP), R9 // syscall entry
+ SYSCALL R9
+ MOVD R3, r1+32(FP)
+ MOVD R4, r2+40(FP)
+ RET
diff --git a/src/vendor/golang.org/x/sys/unix/asm_linux_s390x.s b/src/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
index 11889859f..a5a863c6b 100644
--- a/src/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
+++ b/src/vendor/golang.org/x/sys/unix/asm_linux_s390x.s
@@ -21,8 +21,36 @@ TEXT ·Syscall(SB),NOSPLIT,$0-56
TEXT ·Syscall6(SB),NOSPLIT,$0-80
BR syscall·Syscall6(SB)
+TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+
TEXT ·RawSyscall(SB),NOSPLIT,$0-56
BR syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
BR syscall·RawSyscall6(SB)
+
+TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ RET
diff --git a/src/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s b/src/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s
new file mode 100644
index 000000000..6f98ba5a3
--- /dev/null
+++ b/src/vendor/golang.org/x/sys/unix/asm_netbsd_arm64.s
@@ -0,0 +1,29 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System call support for ARM64, NetBSD
+//
+
+// Just jump to package syscall's implementation for all these functions.
+// The runtime may know about them.
+
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ B syscall·Syscall(SB)
+
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ B syscall·Syscall6(SB)
+
+TEXT ·Syscall9(SB),NOSPLIT,$0-104
+ B syscall·Syscall9(SB)
+
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ B syscall·RawSyscall(SB)
+
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ B syscall·RawSyscall6(SB)
diff --git a/src/vendor/golang.org/x/sys/unix/cap_freebsd.go b/src/vendor/golang.org/x/sys/unix/cap_freebsd.go
index 83b6bceab..df5204877 100644
--- a/src/vendor/golang.org/x/sys/unix/cap_freebsd.go
+++ b/src/vendor/golang.org/x/sys/unix/cap_freebsd.go
@@ -7,7 +7,7 @@
package unix
import (
- errorspkg "errors"
+ "errors"
"fmt"
)
@@ -60,26 +60,26 @@ func CapRightsSet(rights *CapRights, setrights []uint64) error {
n := caparsize(rights)
if n < capArSizeMin || n > capArSizeMax {
- return errorspkg.New("bad rights size")
+ return errors.New("bad rights size")
}
for _, right := range setrights {
if caprver(right) != CAP_RIGHTS_VERSION_00 {
- return errorspkg.New("bad right version")
+ return errors.New("bad right version")
}
i, err := rightToIndex(right)
if err != nil {
return err
}
if i >= n {
- return errorspkg.New("index overflow")
+ return errors.New("index overflow")
}
if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errorspkg.New("index mismatch")
+ return errors.New("index mismatch")
}
rights.Rights[i] |= right
if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errorspkg.New("index mismatch (after assign)")
+ return errors.New("index mismatch (after assign)")
}
}
@@ -95,26 +95,26 @@ func CapRightsClear(rights *CapRights, clearrights []uint64) error {
n := caparsize(rights)
if n < capArSizeMin || n > capArSizeMax {
- return errorspkg.New("bad rights size")
+ return errors.New("bad rights size")
}
for _, right := range clearrights {
if caprver(right) != CAP_RIGHTS_VERSION_00 {
- return errorspkg.New("bad right version")
+ return errors.New("bad right version")
}
i, err := rightToIndex(right)
if err != nil {
return err
}
if i >= n {
- return errorspkg.New("index overflow")
+ return errors.New("index overflow")
}
if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errorspkg.New("index mismatch")
+ return errors.New("index mismatch")
}
rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF)
if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return errorspkg.New("index mismatch (after assign)")
+ return errors.New("index mismatch (after assign)")
}
}
@@ -130,22 +130,22 @@ func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) {
n := caparsize(rights)
if n < capArSizeMin || n > capArSizeMax {
- return false, errorspkg.New("bad rights size")
+ return false, errors.New("bad rights size")
}
for _, right := range setrights {
if caprver(right) != CAP_RIGHTS_VERSION_00 {
- return false, errorspkg.New("bad right version")
+ return false, errors.New("bad right version")
}
i, err := rightToIndex(right)
if err != nil {
return false, err
}
if i >= n {
- return false, errorspkg.New("index overflow")
+ return false, errors.New("index overflow")
}
if capidxbit(rights.Rights[i]) != capidxbit(right) {
- return false, errorspkg.New("index mismatch")
+ return false, errors.New("index mismatch")
}
if (rights.Rights[i] & right) != right {
return false, nil
diff --git a/src/vendor/golang.org/x/sys/unix/constants.go b/src/vendor/golang.org/x/sys/unix/constants.go
index a96f0ebc2..3a6ac648d 100644
--- a/src/vendor/golang.org/x/sys/unix/constants.go
+++ b/src/vendor/golang.org/x/sys/unix/constants.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
package unix
diff --git a/src/vendor/golang.org/x/sys/unix/dev_aix_ppc.go b/src/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
new file mode 100644
index 000000000..5e5fb4510
--- /dev/null
+++ b/src/vendor/golang.org/x/sys/unix/dev_aix_ppc.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix
+// +build ppc
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by AIX.
+
+package unix
+
+// Major returns the major component of an AIX device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev >> 16) & 0xffff)
+}
+
+// Minor returns the minor component of an AIX device number.
+func Minor(dev uint64) uint32 {
+ return uint32(dev & 0xffff)
+}
+
+// Mkdev returns an AIX device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ return uint64(((major) << 16) | (minor))
+}
diff --git a/src/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go b/src/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
new file mode 100644
index 000000000..8b401244c
--- /dev/null
+++ b/src/vendor/golang.org/x/sys/unix/dev_aix_ppc64.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix
+// +build ppc64
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used by AIX.
+
+package unix
+
+// Major returns the major component of an AIX device number.
+func Major(dev uint64) uint32 {
+ return uint32((dev & 0x3fffffff00000000) >> 32)
+}
+
+// Minor returns the minor component of an AIX device number.
+func Minor(dev uint64) uint32 {
+ return uint32((dev & 0x00000000ffffffff) >> 0)
+}
+
+// Mkdev returns an AIX device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+ var DEVNO64 uint64
+ DEVNO64 = 0x8000000000000000
+ return ((uint64(major) << 32) | (uint64(minor) & 0x00000000FFFFFFFF) | DEVNO64)
+}
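On aix/ppc64 the major number sits in the upper word and the minor in the lower word, with Mkdev also setting the DEVNO64 flag bit that Major masks back out. A roundtrip sketch; the numbers are arbitrary, and only an aix/ppc64 build uses this particular encoding:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	dev := unix.Mkdev(8, 1)
	fmt.Println(unix.Major(dev), unix.Minor(dev)) // prints: 8 1
}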
diff --git a/src/vendor/golang.org/x/sys/unix/dirent.go b/src/vendor/golang.org/x/sys/unix/dirent.go
index bd475812b..4407c505a 100644
--- a/src/vendor/golang.org/x/sys/unix/dirent.go
+++ b/src/vendor/golang.org/x/sys/unix/dirent.go
@@ -2,101 +2,16 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris
package unix
-import "unsafe"
-
-// readInt returns the size-bytes unsigned integer in native byte order at offset off.
-func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
- if len(b) < int(off+size) {
- return 0, false
- }
- if isBigEndian {
- return readIntBE(b[off:], size), true
- }
- return readIntLE(b[off:], size), true
-}
-
-func readIntBE(b []byte, size uintptr) uint64 {
- switch size {
- case 1:
- return uint64(b[0])
- case 2:
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[1]) | uint64(b[0])<<8
- case 4:
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
- case 8:
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
- uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
- default:
- panic("syscall: readInt with unsupported size")
- }
-}
-
-func readIntLE(b []byte, size uintptr) uint64 {
- switch size {
- case 1:
- return uint64(b[0])
- case 2:
- _ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8
- case 4:
- _ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
- case 8:
- _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
- return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
- uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
- default:
- panic("syscall: readInt with unsupported size")
- }
-}
+import "syscall"
// ParseDirent parses up to max directory entries in buf,
// appending the names to names. It returns the number of
// bytes consumed from buf, the number of entries added
// to names, and the new names slice.
func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
- origlen := len(buf)
- count = 0
- for max != 0 && len(buf) > 0 {
- reclen, ok := direntReclen(buf)
- if !ok || reclen > uint64(len(buf)) {
- return origlen, count, names
- }
- rec := buf[:reclen]
- buf = buf[reclen:]
- ino, ok := direntIno(rec)
- if !ok {
- break
- }
- if ino == 0 { // File absent in directory.
- continue
- }
- const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
- namlen, ok := direntNamlen(rec)
- if !ok || namoff+namlen > uint64(len(rec)) {
- break
- }
- name := rec[namoff : namoff+namlen]
- for i, c := range name {
- if c == 0 {
- name = name[:i]
- break
- }
- }
- // Check for useless names before allocating a string.
- if string(name) == "." || string(name) == ".." {
- continue
- }
- max--
- count++
- names = append(names, string(name))
- }
- return origlen - len(buf), count, names
+ return syscall.ParseDirent(buf, max, names)
}
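ParseDirent is now a thin wrapper over syscall.ParseDirent, dropping the local byte-order helpers. It still pairs with ReadDirent to list a directory by hand; a Linux sketch over the current directory:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open(".", unix.O_RDONLY, 0)
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer unix.Close(fd)

	buf := make([]byte, 4096)
	var names []string
	for {
		n, err := unix.ReadDirent(fd, buf)
		if err != nil || n <= 0 {
			break
		}
		// max = -1 parses every entry in the buffer.
		_, _, names = unix.ParseDirent(buf[:n], -1, names)
	}
	fmt.Println(names)
}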
diff --git a/src/vendor/golang.org/x/sys/unix/env_unix.go b/src/vendor/golang.org/x/sys/unix/env_unix.go
index 2e06b33f2..84178b0a1 100644
--- a/src/vendor/golang.org/x/sys/unix/env_unix.go
+++ b/src/vendor/golang.org/x/sys/unix/env_unix.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
// Unix environment variables.
@@ -25,3 +25,7 @@ func Clearenv() {
func Environ() []string {
return syscall.Environ()
}
+
+func Unsetenv(key string) error {
+ return syscall.Unsetenv(key)
+}
diff --git a/src/vendor/golang.org/x/sys/unix/env_unset.go b/src/vendor/golang.org/x/sys/unix/env_unset.go
deleted file mode 100644
index c44fdc4af..000000000
--- a/src/vendor/golang.org/x/sys/unix/env_unset.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build go1.4
-
-package unix
-
-import "syscall"
-
-func Unsetenv(key string) error {
- // This was added in Go 1.4.
- return syscall.Unsetenv(key)
-}
diff --git a/src/vendor/golang.org/x/sys/unix/flock.go b/src/vendor/golang.org/x/sys/unix/fcntl.go
similarity index 65%
rename from src/vendor/golang.org/x/sys/unix/flock.go
rename to src/vendor/golang.org/x/sys/unix/fcntl.go
index 2994ce75f..39c03f1ef 100644
--- a/src/vendor/golang.org/x/sys/unix/flock.go
+++ b/src/vendor/golang.org/x/sys/unix/fcntl.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build darwin dragonfly freebsd linux netbsd openbsd
+// +build dragonfly freebsd linux netbsd openbsd
package unix
@@ -12,6 +12,16 @@ import "unsafe"
// systems by flock_linux_32bit.go to be SYS_FCNTL64.
var fcntl64Syscall uintptr = SYS_FCNTL
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ valptr, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg))
+ var err error
+ if errno != 0 {
+ err = errno
+ }
+ return int(valptr), err
+}
+
// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
_, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
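
Reviewer note: the new FcntlInt covers integer-argument fcntl commands on the non-Darwin Unixes (Darwin gets a libc-backed version in the next file). A small usage sketch, assuming the usual F_GETFL/F_SETFL flag dance:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// setNonblock reads the descriptor's flags and ORs in O_NONBLOCK.
func setNonblock(fd uintptr) error {
	flags, err := unix.FcntlInt(fd, unix.F_GETFL, 0)
	if err != nil {
		return err
	}
	_, err = unix.FcntlInt(fd, unix.F_SETFL, flags|unix.O_NONBLOCK)
	return err
}

func main() {
	fmt.Println(setNonblock(0)) // stdin; expect <nil> on a normal terminal
}
```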
diff --git a/src/vendor/golang.org/x/sys/unix/fcntl_darwin.go b/src/vendor/golang.org/x/sys/unix/fcntl_darwin.go
new file mode 100644
index 000000000..5868a4a47
--- /dev/null
+++ b/src/vendor/golang.org/x/sys/unix/fcntl_darwin.go
@@ -0,0 +1,18 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+import "unsafe"
+
+// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
+func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
+ return fcntl(int(fd), cmd, arg)
+}
+
+// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
+func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
+ _, err := fcntl(int(fd), cmd, int(uintptr(unsafe.Pointer(lk))))
+ return err
+}
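
Reviewer note: the Darwin variants route through the internal libc fcntl wrapper instead of a raw Syscall, in line with the go1.12 libSystem work elsewhere in this patch. A hedged lock/unlock sketch with FcntlFlock; the call shape is identical on the other Unixes:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.CreateTemp("", "lockdemo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	// Non-blocking write lock over the whole file (Len: 0 = to EOF).
	lk := unix.Flock_t{Type: unix.F_WRLCK, Whence: 0, Start: 0, Len: 0}
	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLK, &lk); err != nil {
		fmt.Println("lock busy:", err)
		return
	}
	lk.Type = unix.F_UNLCK
	fmt.Println(unix.FcntlFlock(f.Fd(), unix.F_SETLK, &lk)) // <nil>
}
```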
diff --git a/src/vendor/golang.org/x/sys/unix/flock_linux_32bit.go b/src/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
similarity index 100%
rename from src/vendor/golang.org/x/sys/unix/flock_linux_32bit.go
rename to src/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go
diff --git a/src/vendor/golang.org/x/sys/unix/gccgo.go b/src/vendor/golang.org/x/sys/unix/gccgo.go
index 40bed3fa8..cd6f5a613 100644
--- a/src/vendor/golang.org/x/sys/unix/gccgo.go
+++ b/src/vendor/golang.org/x/sys/unix/gccgo.go
@@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
// +build gccgo
+// +build !aix
package unix
@@ -11,9 +12,19 @@ import "syscall"
// We can't use the gc-syntax .s files for gccgo. On the plus side
// much of the functionality can be written directly in Go.
+//extern gccgoRealSyscallNoError
+func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr)
+
//extern gccgoRealSyscall
func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
+func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
+ syscall.Entersyscall()
+ r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ syscall.Exitsyscall()
+ return r, 0
+}
+
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
syscall.Entersyscall()
r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
@@ -35,6 +46,11 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
return r, 0, syscall.Errno(errno)
}
+func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
+ r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
+ return r, 0
+}
+
func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
return r, 0, syscall.Errno(errno)
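
Reviewer note: the NoError variants skip the errno read for syscalls that cannot fail, mirroring what the gc toolchain implements in assembly (SyscallNoError/RawSyscallNoError exist on Linux with gc, and via this file under gccgo). Illustrative sketch only; the real callers are the generated zsyscall files:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// getpid mimics a generated wrapper: getpid(2) never fails, so the
// cheaper RawSyscallNoError suffices. Linux-only sketch.
func getpid() int {
	r0, _ := unix.RawSyscallNoError(unix.SYS_GETPID, 0, 0, 0)
	return int(r0)
}

func main() {
	fmt.Println(getpid() == unix.Getpid()) // true
}
```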
diff --git a/src/vendor/golang.org/x/sys/unix/gccgo_c.c b/src/vendor/golang.org/x/sys/unix/gccgo_c.c
index 99a774f2b..c44730c5e 100644
--- a/src/vendor/golang.org/x/sys/unix/gccgo_c.c
+++ b/src/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -3,6 +3,7 @@
// license that can be found in the LICENSE file.
// +build gccgo
+// +build !aix
#include <errno.h>
#include <stdint.h>
@@ -31,11 +32,8 @@ gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintp
return r;
}
-// Define the use function in C so that it is not inlined.
-
-extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline));
-
-void
-use(void *p __attribute__ ((unused)))
+uintptr_t
+gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
{
+ return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
}
diff --git a/src/vendor/golang.org/x/sys/unix/ioctl.go b/src/vendor/golang.org/x/sys/unix/ioctl.go
new file mode 100644
index 000000000..f121a8d64
--- /dev/null
+++ b/src/vendor/golang.org/x/sys/unix/ioctl.go
@@ -0,0 +1,30 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+import "runtime"
+
+// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
+//
+// To change fd's window size, the req argument should be TIOCSWINSZ.
+func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
+ // TODO: if we get the chance, remove the req parameter and
+ // hardcode TIOCSWINSZ.
+ err := ioctlSetWinsize(fd, req, value)
+ runtime.KeepAlive(value)
+ return err
+}
+
+// IoctlSetTermios performs an ioctl on fd with a *Termios.
+//
+// The req value will usually be TCSETA or TIOCSETA.
+func IoctlSetTermios(fd int, req uint, value *Termios) error {
+ // TODO: if we get the chance, remove the req parameter.
+ err := ioctlSetTermios(fd, req, value)
+ runtime.KeepAlive(value)
+ return err
+}
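
Reviewer note: ioctl.go centralizes the Winsize/Termios setters for every Unix target, and the runtime.KeepAlive calls pin the argument until the kernel is done with the pointer. Hedged usage sketch (assumes fd 0 is a terminal; IoctlGetWinsize lives in the per-OS files):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	ws, err := unix.IoctlGetWinsize(0, unix.TIOCGWINSZ)
	if err != nil {
		fmt.Println("not a terminal:", err)
		return
	}
	ws.Row, ws.Col = 40, 120
	// KeepAlive inside IoctlSetWinsize keeps ws live until the ioctl returns.
	fmt.Println(unix.IoctlSetWinsize(0, unix.TIOCSWINSZ, ws))
}
```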
diff --git a/src/vendor/golang.org/x/sys/unix/mkall.sh b/src/vendor/golang.org/x/sys/unix/mkall.sh
old mode 100755
new mode 100644
index 1715122bd..75152f99b
--- a/src/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/src/vendor/golang.org/x/sys/unix/mkall.sh
@@ -10,13 +10,14 @@
GOOSARCH="${GOOS}_${GOARCH}"
# defaults
-mksyscall="./mksyscall.pl"
+mksyscall="go run mksyscall.go"
mkerrors="./mkerrors.sh"
zerrors="zerrors_$GOOSARCH.go"
mksysctl=""
zsysctl="zsysctl_$GOOSARCH.go"
mksysnum=
mktypes=
+mkasm=
run="sh"
cmd=""
@@ -45,8 +46,8 @@ case "$#" in
exit 2
esac
-if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then
- # Use then new build system
+if [[ "$GOOS" = "linux" ]]; then
+ # Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctl-C the build or run)
$cmd docker build --tag generate:$GOOS $GOOS
$cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS
@@ -59,104 +60,117 @@ _* | *_ | _)
echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
exit 1
;;
+aix_ppc)
+ mkerrors="$mkerrors -maix32"
+ mksyscall="go run mksyscall_aix_ppc.go -aix"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+aix_ppc64)
+ mkerrors="$mkerrors -maix64"
+ mksyscall="go run mksyscall_aix_ppc64.go -aix"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
darwin_386)
mkerrors="$mkerrors -m32"
- mksyscall="./mksyscall.pl -l32"
- mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
+ mksyscall="go run mksyscall.go -l32"
+ mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
;;
darwin_amd64)
mkerrors="$mkerrors -m64"
- mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
+ mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk macosx)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
;;
darwin_arm)
mkerrors="$mkerrors"
- mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
+ mksyscall="go run mksyscall.go -l32"
+ mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
;;
darwin_arm64)
mkerrors="$mkerrors -m64"
- mksysnum="./mksysnum_darwin.pl $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
+ mksysnum="go run mksysnum.go $(xcrun --show-sdk-path --sdk iphoneos)/usr/include/sys/syscall.h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ mkasm="go run mkasm_darwin.go"
;;
dragonfly_amd64)
mkerrors="$mkerrors -m64"
- mksyscall="./mksyscall.pl -dragonfly"
- mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
+ mksyscall="go run mksyscall.go -dragonfly"
+ mksysnum="go run mksysnum.go 'https://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_386)
mkerrors="$mkerrors -m32"
- mksyscall="./mksyscall.pl -l32"
- mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
+ mksyscall="go run mksyscall.go -l32"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_amd64)
mkerrors="$mkerrors -m64"
- mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
freebsd_arm)
mkerrors="$mkerrors"
- mksyscall="./mksyscall.pl -l32 -arm"
- mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
+ mksyscall="go run mksyscall.go -l32 -arm"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
-linux_sparc64)
- GOOSARCH_in=syscall_linux_sparc64.go
- unistd_h=/usr/include/sparc64-linux-gnu/asm/unistd.h
+freebsd_arm64)
mkerrors="$mkerrors -m64"
- mksysnum="./mksysnum_linux.pl $unistd_h"
+ mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
netbsd_386)
mkerrors="$mkerrors -m32"
- mksyscall="./mksyscall.pl -l32 -netbsd"
- mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
+ mksyscall="go run mksyscall.go -l32 -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
netbsd_amd64)
mkerrors="$mkerrors -m64"
- mksyscall="./mksyscall.pl -netbsd"
- mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
+ mksyscall="go run mksyscall.go -netbsd"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
netbsd_arm)
mkerrors="$mkerrors"
- mksyscall="./mksyscall.pl -l32 -netbsd -arm"
- mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
+ mksyscall="go run mksyscall.go -l32 -netbsd -arm"
+ mksysnum="go run mksysnum.go 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
openbsd_386)
mkerrors="$mkerrors -m32"
- mksyscall="./mksyscall.pl -l32 -openbsd"
+ mksyscall="go run mksyscall.go -l32 -openbsd"
mksysctl="./mksysctl_openbsd.pl"
- mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_amd64)
mkerrors="$mkerrors -m64"
- mksyscall="./mksyscall.pl -openbsd"
+ mksyscall="go run mksyscall.go -openbsd"
mksysctl="./mksysctl_openbsd.pl"
- mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_arm)
mkerrors="$mkerrors"
- mksyscall="./mksyscall.pl -l32 -openbsd -arm"
+ mksyscall="go run mksyscall.go -l32 -openbsd -arm"
mksysctl="./mksysctl_openbsd.pl"
- mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
+ mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
solaris_amd64)
- mksyscall="./mksyscall_solaris.pl"
+ mksyscall="go run mksyscall_solaris.go"
mkerrors="$mkerrors -m64"
mksysnum=
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
@@ -177,12 +191,24 @@ esac
syscall_goos="syscall_bsd.go $syscall_goos"
;;
esac
- if [ -n "$mksyscall" ]; then echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi
- ;;
+ if [ -n "$mksyscall" ]; then
+ if [ "$GOOSARCH" == "aix_ppc64" ]; then
+ # aix/ppc64 script generates files instead of writing to stdin.
+ echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
+ elif [ "$GOOS" == "darwin" ]; then
+ # pre-1.12, direct syscalls
+ echo "$mksyscall -tags $GOOS,$GOARCH,!go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.1_11.go";
+ # 1.12 and later, syscalls via libSystem
+ echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
+ else
+ echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
+ fi
+ fi
esac
if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
if [ -n "$mktypes" ]; then
echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go";
+ if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi
fi
) | $run
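
Reviewer note: the darwin case now emits two generation commands per arch: a `!go1.12` pass producing direct-syscall wrappers into zsyscall_$GOOSARCH.1_11.go, and a `go1.12` pass whose wrappers go through libSystem stubs (paired with mkasm below). A tiny Go re-expression of that decision, for illustration only:

```go
package main

import "fmt"

func main() {
	goos, goarch := "darwin", "amd64"
	runs := []struct{ tags, out string }{
		// Pre-1.12 toolchains keep trapping straight into the kernel.
		{goos + "," + goarch + ",!go1.12", "zsyscall_" + goos + "_" + goarch + ".1_11.go"},
		// 1.12+ wrappers call through libSystem so macOS sees supported libc entry points.
		{goos + "," + goarch + ",go1.12", "zsyscall_" + goos + "_" + goarch + ".go"},
	}
	for _, r := range runs {
		fmt.Printf("go run mksyscall.go -tags %s ... | gofmt > %s\n", r.tags, r.out)
	}
}
```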
diff --git a/src/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/src/vendor/golang.org/x/sys/unix/mkasm_darwin.go
new file mode 100644
index 000000000..4548b993d
--- /dev/null
+++ b/src/vendor/golang.org/x/sys/unix/mkasm_darwin.go
@@ -0,0 +1,61 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go.
+//This program must be run after mksyscall.go.
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+)
+
+func main() {
+ in1, err := ioutil.ReadFile("syscall_darwin.go")
+ if err != nil {
+ log.Fatalf("can't open syscall_darwin.go: %s", err)
+ }
+ arch := os.Args[1]
+ in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch))
+ if err != nil {
+ log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err)
+ }
+ in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch))
+ if err != nil {
+ log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err)
+ }
+ in := string(in1) + string(in2) + string(in3)
+
+ trampolines := map[string]bool{}
+
+ var out bytes.Buffer
+
+ fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " "))
+ fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n")
+ fmt.Fprintf(&out, "\n")
+ fmt.Fprintf(&out, "// +build go1.12\n")
+ fmt.Fprintf(&out, "\n")
+ fmt.Fprintf(&out, "#include \"textflag.h\"\n")
+ for _, line := range strings.Split(in, "\n") {
+ if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") {
+ continue
+ }
+ fn := line[5 : len(line)-13]
+ if !trampolines[fn] {
+ trampolines[fn] = true
+ fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn)
+ fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn)
+ }
+ }
+ err = ioutil.WriteFile(fmt.Sprintf("zsyscall_darwin_%s.s", arch), out.Bytes(), 0644)
+ if err != nil {
+ log.Fatalf("can't write zsyscall_darwin_%s.s: %s", arch, err)
+ }
+}
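
Reviewer note: the generator concatenates the three darwin source files and emits one NOSPLIT trampoline per unique `func NAME_trampoline()` declaration; `line[5 : len(line)-13]` is just trimming the `func ` prefix and `_trampoline()` suffix. A self-contained sketch of that matching step:

```go
package main

import (
	"fmt"
	"strings"
)

// extractTrampoline mirrors the matching logic in mkasm_darwin.go:
// a line of the form "func NAME_trampoline()" yields NAME.
func extractTrampoline(line string) (string, bool) {
	if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") {
		return "", false
	}
	return line[len("func ") : len(line)-len("_trampoline()")], true
}

func main() {
	name, ok := extractTrampoline("func libc_getpid_trampoline()")
	fmt.Println(name, ok) // libc_getpid true
}
```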
diff --git a/src/vendor/golang.org/x/sys/unix/mkerrors.sh b/src/vendor/golang.org/x/sys/unix/mkerrors.sh
old mode 100755
new mode 100644
index 2a44da57d..6a23484e5
--- a/src/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/src/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -17,15 +17,17 @@ if test -z "$GOARCH" -o -z "$GOOS"; then
fi
# Check that we are using the new build system if we should
-if [[ "$GOOS" = "linux" ]] && [[ "$GOARCH" != "sparc64" ]]; then
- if [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then
- echo 1>&2 "In the new build system, mkerrors should not be called directly."
- echo 1>&2 "See README.md"
- exit 1
- fi
+if [[ "$GOOS" = "linux" ]] && [[ "$GOLANG_SYS_BUILD" != "docker" ]]; then
+ echo 1>&2 "In the Docker based build system, mkerrors should not be called directly."
+ echo 1>&2 "See README.md"
+ exit 1
fi
-CC=${CC:-cc}
+if [[ "$GOOS" = "aix" ]]; then
+ CC=${CC:-gcc}
+else
+ CC=${CC:-cc}
+fi
if [[ "$GOOS" = "solaris" ]]; then
# Assumes GNU versions of utilities in PATH.
@@ -34,6 +36,21 @@ fi
uname=$(uname)
+includes_AIX='
+#include <net/if.h>
+#include <net/netopt.h>
+#include <netinet/ip_mroute.h>
+#include <sys/protosw.h>
+#include <sys/stropts.h>
+#include <sys/mman.h>
+#include <sys/poll.h>
+#include <sys/termio.h>
+#include <termios.h>
+#include <fcntl.h>
+
+#define AF_LOCAL AF_UNIX
+'
+
includes_Darwin='
#define _DARWIN_C_SOURCE
#define KERNEL
@@ -50,6 +67,7 @@ includes_Darwin='
#include
#include
#include
+#include
#include
#include
#include
@@ -64,8 +82,10 @@ includes_DragonFly='
#include
#include
#include
+#include
#include
#include
+#include
#include
#include
#include
@@ -79,12 +99,13 @@ includes_DragonFly='
'
includes_FreeBSD='
-#include <sys/capability.h>
+#include <sys/capsicum.h>
#include <sys/param.h>