From f859348ac6230bda564f608e934f67359233a4b3 Mon Sep 17 00:00:00 2001 From: Tan Jiang Date: Mon, 1 Feb 2016 19:59:10 +0800 Subject: [PATCH] Initial commit --- .gitignore | 6 + Deploy/config/nginx/nginx.conf | 68 + Deploy/config/registry/root.crt | 15 + Deploy/db/Dockerfile | 8 + Deploy/db/docker-entrypoint.sh | 43 + Deploy/db/registry.sql | 104 + Deploy/docker-compose.yml | 61 + Deploy/log/Dockerfile | 28 + Deploy/log/logrotate_docker.conf | 7 + Deploy/log/rsyslog_docker.conf | 7 + Deploy/prepare | 72 + Deploy/templates/registry/config.yml | 33 + Deploy/templates/ui/app.conf | 16 + Deploy/templates/ui/env | 10 + Dockerfile | 30 + Dockerfile.offline | 25 + Godeps/Godeps.json | 71 + Godeps/Readme | 5 + Godeps/_workspace/.gitignore | 2 + .../src/github.com/Sirupsen/logrus/.gitignore | 1 + .../github.com/Sirupsen/logrus/.travis.yml | 7 + .../github.com/Sirupsen/logrus/CHANGELOG.md | 54 + .../src/github.com/Sirupsen/logrus/LICENSE | 21 + .../src/github.com/Sirupsen/logrus/README.md | 357 ++++ .../src/github.com/Sirupsen/logrus/doc.go | 26 + .../src/github.com/Sirupsen/logrus/entry.go | 264 +++ .../github.com/Sirupsen/logrus/entry_test.go | 77 + .../Sirupsen/logrus/examples/basic/basic.go | 50 + .../Sirupsen/logrus/examples/hook/hook.go | 30 + .../github.com/Sirupsen/logrus/exported.go | 193 ++ .../github.com/Sirupsen/logrus/formatter.go | 48 + .../Sirupsen/logrus/formatter_bench_test.go | 98 + .../logrus/formatters/logstash/logstash.go | 56 + .../formatters/logstash/logstash_test.go | 52 + .../github.com/Sirupsen/logrus/hook_test.go | 122 ++ .../src/github.com/Sirupsen/logrus/hooks.go | 34 + .../Sirupsen/logrus/hooks/bugsnag/bugsnag.go | 68 + .../logrus/hooks/bugsnag/bugsnag_test.go | 64 + .../Sirupsen/logrus/hooks/syslog/README.md | 39 + .../Sirupsen/logrus/hooks/syslog/syslog.go | 59 + .../logrus/hooks/syslog/syslog_test.go | 26 + .../Sirupsen/logrus/json_formatter.go | 41 + .../Sirupsen/logrus/json_formatter_test.go | 120 ++ 
.../src/github.com/Sirupsen/logrus/logger.go | 206 ++ .../src/github.com/Sirupsen/logrus/logrus.go | 98 + .../github.com/Sirupsen/logrus/logrus_test.go | 301 +++ .../Sirupsen/logrus/terminal_bsd.go | 9 + .../Sirupsen/logrus/terminal_linux.go | 12 + .../Sirupsen/logrus/terminal_notwindows.go | 21 + .../Sirupsen/logrus/terminal_windows.go | 27 + .../Sirupsen/logrus/text_formatter.go | 161 ++ .../Sirupsen/logrus/text_formatter_test.go | 61 + .../src/github.com/Sirupsen/logrus/writer.go | 31 + .../github.com/Unknwon/goconfig/.gitignore | 3 + .../src/github.com/Unknwon/goconfig/LICENSE | 191 ++ .../src/github.com/Unknwon/goconfig/README.md | 67 + .../github.com/Unknwon/goconfig/README_ZH.md | 64 + .../src/github.com/Unknwon/goconfig/conf.go | 536 ++++++ .../Unknwon/goconfig/goconfig_test.go | 363 ++++ .../src/github.com/Unknwon/goconfig/read.go | 259 +++ .../Unknwon/goconfig/testdata/conf.ini | 46 + .../Unknwon/goconfig/testdata/conf2.ini | 37 + .../Unknwon/goconfig/testdata/conf_test.ini | 50 + .../src/github.com/Unknwon/goconfig/write.go | 108 ++ .../src/github.com/astaxie/beego/.gitignore | 5 + .../src/github.com/astaxie/beego/.travis.yml | 30 + .../github.com/astaxie/beego/CONTRIBUTING.md | 52 + .../src/github.com/astaxie/beego/LICENSE | 13 + .../src/github.com/astaxie/beego/README.md | 60 + .../src/github.com/astaxie/beego/admin.go | 424 +++++ .../src/github.com/astaxie/beego/adminui.go | 355 ++++ .../src/github.com/astaxie/beego/app.go | 362 ++++ .../src/github.com/astaxie/beego/beego.go | 107 ++ .../github.com/astaxie/beego/cache/README.md | 59 + .../github.com/astaxie/beego/cache/cache.go | 103 + .../github.com/astaxie/beego/cache/conv.go | 100 + .../github.com/astaxie/beego/cache/file.go | 274 +++ .../astaxie/beego/cache/memcache/memcache.go | 190 ++ .../github.com/astaxie/beego/cache/memory.go | 244 +++ .../astaxie/beego/cache/redis/redis.go | 240 +++ .../src/github.com/astaxie/beego/config.go | 392 ++++ .../github.com/astaxie/beego/config/config.go | 108 ++ 
.../github.com/astaxie/beego/config/fake.go | 130 ++ .../github.com/astaxie/beego/config/ini.go | 437 +++++ .../github.com/astaxie/beego/config/json.go | 265 +++ .../astaxie/beego/config/xml/xml.go | 229 +++ .../astaxie/beego/config/yaml/yaml.go | 266 +++ .../astaxie/beego/context/acceptencoder.go | 197 ++ .../astaxie/beego/context/context.go | 217 +++ .../github.com/astaxie/beego/context/input.go | 615 ++++++ .../astaxie/beego/context/output.go | 343 ++++ .../github.com/astaxie/beego/controller.go | 621 ++++++ .../src/github.com/astaxie/beego/doc.go | 17 + .../src/github.com/astaxie/beego/docs.go | 39 + .../src/github.com/astaxie/beego/error.go | 459 +++++ .../src/github.com/astaxie/beego/filter.go | 43 + .../src/github.com/astaxie/beego/flash.go | 110 ++ .../github.com/astaxie/beego/grace/conn.go | 28 + .../github.com/astaxie/beego/grace/grace.go | 158 ++ .../astaxie/beego/grace/listener.go | 62 + .../github.com/astaxie/beego/grace/server.go | 293 +++ .../src/github.com/astaxie/beego/hooks.go | 95 + .../astaxie/beego/httplib/README.md | 97 + .../astaxie/beego/httplib/httplib.go | 552 ++++++ .../src/github.com/astaxie/beego/log.go | 116 ++ .../github.com/astaxie/beego/logs/README.md | 63 + .../src/github.com/astaxie/beego/logs/conn.go | 116 ++ .../github.com/astaxie/beego/logs/console.go | 97 + .../github.com/astaxie/beego/logs/es/es.go | 80 + .../src/github.com/astaxie/beego/logs/file.go | 314 +++ .../src/github.com/astaxie/beego/logs/log.go | 364 ++++ .../src/github.com/astaxie/beego/logs/smtp.go | 160 ++ .../github.com/astaxie/beego/migration/ddl.go | 53 + .../astaxie/beego/migration/migration.go | 278 +++ .../src/github.com/astaxie/beego/mime.go | 556 ++++++ .../src/github.com/astaxie/beego/namespace.go | 390 ++++ .../github.com/astaxie/beego/orm/README.md | 156 ++ .../src/github.com/astaxie/beego/orm/cmd.go | 283 +++ .../github.com/astaxie/beego/orm/cmd_utils.go | 294 +++ .../src/github.com/astaxie/beego/orm/db.go | 1650 ++++++++++++++++ 
.../github.com/astaxie/beego/orm/db_alias.go | 297 +++ .../github.com/astaxie/beego/orm/db_mysql.go | 104 + .../github.com/astaxie/beego/orm/db_oracle.go | 29 + .../astaxie/beego/orm/db_postgres.go | 165 ++ .../github.com/astaxie/beego/orm/db_sqlite.go | 150 ++ .../github.com/astaxie/beego/orm/db_tables.go | 476 +++++ .../github.com/astaxie/beego/orm/db_tidb.go | 63 + .../github.com/astaxie/beego/orm/db_utils.go | 163 ++ .../github.com/astaxie/beego/orm/models.go | 120 ++ .../astaxie/beego/orm/models_boot.go | 328 ++++ .../astaxie/beego/orm/models_fields.go | 629 ++++++ .../astaxie/beego/orm/models_info_f.go | 452 +++++ .../astaxie/beego/orm/models_info_m.go | 146 ++ .../astaxie/beego/orm/models_utils.go | 207 ++ .../src/github.com/astaxie/beego/orm/orm.go | 534 ++++++ .../github.com/astaxie/beego/orm/orm_conds.go | 116 ++ .../github.com/astaxie/beego/orm/orm_log.go | 176 ++ .../astaxie/beego/orm/orm_object.go | 87 + .../astaxie/beego/orm/orm_querym2m.go | 144 ++ .../astaxie/beego/orm/orm_queryset.go | 261 +++ .../github.com/astaxie/beego/orm/orm_raw.go | 827 ++++++++ .../src/github.com/astaxie/beego/orm/qb.go | 61 + .../github.com/astaxie/beego/orm/qb_mysql.go | 179 ++ .../github.com/astaxie/beego/orm/qb_tidb.go | 176 ++ .../src/github.com/astaxie/beego/orm/types.go | 413 ++++ .../src/github.com/astaxie/beego/orm/utils.go | 266 +++ .../src/github.com/astaxie/beego/parser.go | 231 +++ .../astaxie/beego/plugins/apiauth/apiauth.go | 180 ++ .../astaxie/beego/plugins/auth/basic.go | 107 ++ .../astaxie/beego/plugins/cors/cors.go | 226 +++ .../src/github.com/astaxie/beego/router.go | 879 +++++++++ .../astaxie/beego/session/README.md | 114 ++ .../beego/session/couchbase/sess_couchbase.go | 243 +++ .../beego/session/ledis/ledis_session.go | 179 ++ .../beego/session/memcache/sess_memcache.go | 232 +++ .../astaxie/beego/session/mysql/sess_mysql.go | 233 +++ .../beego/session/postgres/sess_postgresql.go | 248 +++ .../astaxie/beego/session/redis/sess_redis.go | 256 +++ 
.../astaxie/beego/session/sess_cookie.go | 184 ++ .../astaxie/beego/session/sess_file.go | 283 +++ .../astaxie/beego/session/sess_mem.go | 196 ++ .../astaxie/beego/session/sess_utils.go | 207 ++ .../astaxie/beego/session/session.go | 296 +++ .../github.com/astaxie/beego/staticfile.go | 195 ++ .../astaxie/beego/swagger/docs_spec.go | 160 ++ .../src/github.com/astaxie/beego/template.go | 288 +++ .../github.com/astaxie/beego/templatefunc.go | 729 +++++++ .../astaxie/beego/testing/assertions.go | 15 + .../astaxie/beego/testing/client.go | 65 + .../astaxie/beego/toolbox/healthcheck.go | 48 + .../astaxie/beego/toolbox/profile.go | 184 ++ .../astaxie/beego/toolbox/statistics.go | 143 ++ .../github.com/astaxie/beego/toolbox/task.go | 607 ++++++ .../src/github.com/astaxie/beego/tree.go | 575 ++++++ .../github.com/astaxie/beego/utils/caller.go | 25 + .../astaxie/beego/utils/captcha/LICENSE | 19 + .../astaxie/beego/utils/captcha/README.md | 45 + .../astaxie/beego/utils/captcha/captcha.go | 269 +++ .../astaxie/beego/utils/captcha/image.go | 498 +++++ .../astaxie/beego/utils/captcha/siprng.go | 277 +++ .../github.com/astaxie/beego/utils/debug.go | 478 +++++ .../github.com/astaxie/beego/utils/file.go | 101 + .../github.com/astaxie/beego/utils/mail.go | 344 ++++ .../beego/utils/pagination/controller.go | 26 + .../astaxie/beego/utils/pagination/doc.go | 58 + .../beego/utils/pagination/paginator.go | 189 ++ .../astaxie/beego/utils/pagination/utils.go | 34 + .../github.com/astaxie/beego/utils/rand.go | 48 + .../github.com/astaxie/beego/utils/safemap.go | 86 + .../github.com/astaxie/beego/utils/slice.go | 170 ++ .../astaxie/beego/validation/README.md | 144 ++ .../astaxie/beego/validation/util.go | 268 +++ .../astaxie/beego/validation/validation.go | 374 ++++ .../astaxie/beego/validation/validators.go | 687 +++++++ .../src/github.com/beego/i18n/.gitignore | 22 + .../src/github.com/beego/i18n/LICENSE | 191 ++ .../src/github.com/beego/i18n/README.md | 6 + 
.../github.com/beego/i18n/beei18n/beei18n.go | 166 ++ .../src/github.com/beego/i18n/beei18n/sync.go | 86 + .../src/github.com/beego/i18n/i18n.go | 212 +++ .../docker/distribution/context/context.go | 85 + .../docker/distribution/context/doc.go | 89 + .../docker/distribution/context/http.go | 364 ++++ .../docker/distribution/context/http_test.go | 285 +++ .../docker/distribution/context/logger.go | 116 ++ .../docker/distribution/context/trace.go | 104 + .../docker/distribution/context/trace_test.go | 85 + .../docker/distribution/context/util.go | 32 + .../docker/distribution/context/version.go | 16 + .../distribution/context/version_test.go | 19 + .../docker/distribution/registry/auth/auth.go | 144 ++ .../registry/auth/htpasswd/access.go | 102 + .../registry/auth/htpasswd/access_test.go | 122 ++ .../registry/auth/htpasswd/htpasswd.go | 80 + .../registry/auth/htpasswd/htpasswd_test.go | 85 + .../registry/auth/silly/access.go | 97 + .../registry/auth/silly/access_test.go | 71 + .../registry/auth/token/accesscontroller.go | 268 +++ .../registry/auth/token/stringset.go | 35 + .../distribution/registry/auth/token/token.go | 343 ++++ .../registry/auth/token/token_test.go | 386 ++++ .../distribution/registry/auth/token/util.go | 58 + .../docker/distribution/uuid/uuid.go | 126 ++ .../docker/distribution/uuid/uuid_test.go | 48 + .../docker/libtrust/CONTRIBUTING.md | 13 + .../src/github.com/docker/libtrust/LICENSE | 191 ++ .../github.com/docker/libtrust/MAINTAINERS | 3 + .../src/github.com/docker/libtrust/README.md | 18 + .../docker/libtrust/certificates.go | 175 ++ .../docker/libtrust/certificates_test.go | 111 ++ .../src/github.com/docker/libtrust/doc.go | 9 + .../src/github.com/docker/libtrust/ec_key.go | 428 +++++ .../github.com/docker/libtrust/ec_key_test.go | 157 ++ .../src/github.com/docker/libtrust/filter.go | 50 + .../github.com/docker/libtrust/filter_test.go | 81 + .../src/github.com/docker/libtrust/hash.go | 56 + .../github.com/docker/libtrust/jsonsign.go | 657 
+++++++ .../docker/libtrust/jsonsign_test.go | 380 ++++ .../src/github.com/docker/libtrust/key.go | 253 +++ .../github.com/docker/libtrust/key_files.go | 255 +++ .../docker/libtrust/key_files_test.go | 220 +++ .../github.com/docker/libtrust/key_manager.go | 175 ++ .../github.com/docker/libtrust/key_test.go | 80 + .../src/github.com/docker/libtrust/rsa_key.go | 427 +++++ .../docker/libtrust/rsa_key_test.go | 157 ++ .../docker/libtrust/testutil/certificates.go | 94 + .../docker/libtrust/tlsdemo/README.md | 50 + .../docker/libtrust/tlsdemo/client.go | 89 + .../docker/libtrust/tlsdemo/gencert.go | 62 + .../docker/libtrust/tlsdemo/genkeys.go | 61 + .../docker/libtrust/tlsdemo/server.go | 80 + .../docker/libtrust/trustgraph/graph.go | 50 + .../libtrust/trustgraph/memory_graph.go | 133 ++ .../libtrust/trustgraph/memory_graph_test.go | 174 ++ .../docker/libtrust/trustgraph/statement.go | 227 +++ .../libtrust/trustgraph/statement_test.go | 417 ++++ .../src/github.com/docker/libtrust/util.go | 363 ++++ .../github.com/docker/libtrust/util_test.go | 45 + .../github.com/go-sql-driver/mysql/.gitignore | 8 + .../go-sql-driver/mysql/.travis.yml | 10 + .../github.com/go-sql-driver/mysql/AUTHORS | 44 + .../go-sql-driver/mysql/CHANGELOG.md | 92 + .../go-sql-driver/mysql/CONTRIBUTING.md | 40 + .../github.com/go-sql-driver/mysql/LICENSE | 373 ++++ .../github.com/go-sql-driver/mysql/README.md | 386 ++++ .../go-sql-driver/mysql/appengine.go | 19 + .../go-sql-driver/mysql/benchmark_test.go | 246 +++ .../github.com/go-sql-driver/mysql/buffer.go | 136 ++ .../go-sql-driver/mysql/collations.go | 250 +++ .../go-sql-driver/mysql/connection.go | 403 ++++ .../github.com/go-sql-driver/mysql/const.go | 162 ++ .../github.com/go-sql-driver/mysql/driver.go | 149 ++ .../go-sql-driver/mysql/driver_test.go | 1681 +++++++++++++++++ .../github.com/go-sql-driver/mysql/errors.go | 131 ++ .../go-sql-driver/mysql/errors_test.go | 42 + .../github.com/go-sql-driver/mysql/infile.go | 164 ++ 
.../github.com/go-sql-driver/mysql/packets.go | 1179 ++++++++++++ .../github.com/go-sql-driver/mysql/result.go | 22 + .../github.com/go-sql-driver/mysql/rows.go | 106 ++ .../go-sql-driver/mysql/statement.go | 150 ++ .../go-sql-driver/mysql/transaction.go | 31 + .../github.com/go-sql-driver/mysql/utils.go | 973 ++++++++++ .../go-sql-driver/mysql/utils_test.go | 346 ++++ .../github.com/gorilla/context/.travis.yml | 7 + .../src/github.com/gorilla/context/LICENSE | 27 + .../src/github.com/gorilla/context/README.md | 7 + .../src/github.com/gorilla/context/context.go | 143 ++ .../gorilla/context/context_test.go | 161 ++ .../src/github.com/gorilla/context/doc.go | 82 + .../src/github.com/gorilla/mux/.travis.yml | 7 + .../src/github.com/gorilla/mux/LICENSE | 27 + .../src/github.com/gorilla/mux/README.md | 7 + .../src/github.com/gorilla/mux/bench_test.go | 21 + .../src/github.com/gorilla/mux/doc.go | 199 ++ .../src/github.com/gorilla/mux/mux.go | 353 ++++ .../src/github.com/gorilla/mux/mux_test.go | 943 +++++++++ .../src/github.com/gorilla/mux/old_test.go | 714 +++++++ .../src/github.com/gorilla/mux/regexp.go | 276 +++ .../src/github.com/gorilla/mux/route.go | 524 +++++ .../src/github.com/mqu/openldap/LICENCE.txt | 24 + .../src/github.com/mqu/openldap/README.md | 82 + .../mqu/openldap/add-modify-delete.go | 230 +++ .../src/github.com/mqu/openldap/defines.go | 350 ++++ .../src/github.com/mqu/openldap/openldap.go | 454 +++++ .../github.com/mqu/openldap/options-errors.go | 118 ++ .../src/github.com/mqu/openldap/results.go | 306 +++ .../src/github.com/mqu/openldap/types.go | 76 + .../_workspace/src/golang.org/x/blog/LICENSE | 27 + .../_workspace/src/golang.org/x/blog/PATENTS | 22 + ...ird-party-libraries-goprotobuf-and.article | 26 + .../src/golang.org/x/crypto/LICENSE | 27 + .../src/golang.org/x/crypto/PATENTS | 22 + .../src/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 + .../_workspace/src/golang.org/x/exp/LICENSE | 27 + .../_workspace/src/golang.org/x/exp/PATENTS | 22 + 
.../_workspace/src/golang.org/x/image/LICENSE | 27 + .../_workspace/src/golang.org/x/image/PATENTS | 22 + .../src/golang.org/x/mobile/LICENSE | 27 + .../src/golang.org/x/mobile/PATENTS | 22 + .../_workspace/src/golang.org/x/net/LICENSE | 27 + .../_workspace/src/golang.org/x/net/PATENTS | 22 + .../src/golang.org/x/net/context/context.go | 432 +++++ .../_workspace/src/golang.org/x/sys/LICENSE | 27 + .../_workspace/src/golang.org/x/sys/PATENTS | 22 + .../_workspace/src/golang.org/x/talks/LICENSE | 27 + .../_workspace/src/golang.org/x/talks/PATENTS | 22 + .../_workspace/src/golang.org/x/text/LICENSE | 27 + .../_workspace/src/golang.org/x/text/PATENTS | 22 + .../_workspace/src/golang.org/x/tools/LICENSE | 27 + .../_workspace/src/golang.org/x/tools/PATENTS | 22 + LICENSE | 202 ++ README.md | 25 + api/base.go | 65 + api/project.go | 195 ++ api/project_member.go | 212 +++ api/repository.go | 175 ++ api/search.go | 103 + api/user.go | 132 ++ api/utils.go | 51 + conf/app.conf | 9 + conf/private_key.pem | 15 + controllers/base.go | 140 ++ controllers/item_detail.go | 109 ++ controllers/login.go | 70 + controllers/password.go | 202 ++ controllers/project.go | 24 + controllers/register.go | 75 + controllers/search.go | 25 + dao/access_log.go | 103 + dao/base.go | 119 ++ dao/item_detail.go | 50 + dao/project.go | 234 +++ dao/project_role.go | 89 + dao/register.go | 131 ++ dao/role.go | 63 + dao/user.go | 198 ++ main.go | 81 + models/access_log.go | 35 + models/auth_model.go | 20 + models/notification.go | 49 + models/project.go | 33 + models/project_role.go | 21 + models/repo.go | 25 + models/repo_item.go | 32 + models/role.go | 28 + models/tag.go | 20 + models/user.go | 31 + models/user_project_role.go | 21 + opt_auth/db/db.go | 35 + opt_auth/ldap/ldap.go | 126 ++ opt_auth/opt_auth.go | 53 + routers/router.go | 66 + service/auth.go | 84 + service/notification.go | 72 + service/utils/auth_utils.go | 190 ++ service/utils/cache.go | 69 + service/utils/registry_utils.go | 111 ++ 
static/i18n/locale_en-US.ini | 84 + static/i18n/locale_messages.js | 264 +++ static/i18n/locale_zh-CN.ini | 82 + static/resources/css/base.css | 39 + static/resources/css/sign-in.css | 27 + static/resources/js/change-password.js | 98 + static/resources/js/common.js | 160 ++ static/resources/js/forgot-password.js | 84 + static/resources/js/header-login.js | 32 + static/resources/js/item-detail.js | 452 +++++ static/resources/js/login.js | 31 + static/resources/js/project.js | 234 +++ static/resources/js/register.js | 74 + static/resources/js/reset-password.js | 82 + static/resources/js/search.js | 79 + static/resources/js/sign-in.js | 79 + static/resources/js/validate-options.js | 183 ++ .../css/bootstrap-datetimepicker.min.css | 5 + .../build/js/bootstrap-datetimepicker.min.js | 9 + .../moment/min/moment-with-locales.min.js | 74 + static/vendors/spinner.min.js | 2 + tests/conf/app.conf | 1 + tests/dao_test.go | 555 ++++++ tests/utils_test.go | 15 + utils/encrypt.go | 26 + utils/mail.go | 74 + utils/registry_utils.go | 145 ++ utils/utils.go | 62 + views/change-password.tpl | 50 + views/forgot-password.tpl | 40 + views/index.tpl | 37 + views/item-detail.tpl | 223 +++ views/mail.tpl | 7 + views/project.tpl | 115 ++ views/register.tpl | 73 + views/reset-password-mail.tpl | 21 + views/reset-password.tpl | 46 + views/search.tpl | 32 + views/segment/base-layout.tpl | 28 + views/segment/foot-content.tpl | 23 + views/segment/header-content.tpl | 75 + views/segment/header-include.tpl | 41 + views/segment/modal-dialog.tpl | 38 + views/sign-in.tpl | 40 + 421 files changed, 67391 insertions(+) create mode 100644 .gitignore create mode 100644 Deploy/config/nginx/nginx.conf create mode 100644 Deploy/config/registry/root.crt create mode 100644 Deploy/db/Dockerfile create mode 100644 Deploy/db/docker-entrypoint.sh create mode 100644 Deploy/db/registry.sql create mode 100644 Deploy/docker-compose.yml create mode 100644 Deploy/log/Dockerfile create mode 100644 
Deploy/log/logrotate_docker.conf create mode 100644 Deploy/log/rsyslog_docker.conf create mode 100755 Deploy/prepare create mode 100644 Deploy/templates/registry/config.yml create mode 100644 Deploy/templates/ui/app.conf create mode 100644 Deploy/templates/ui/env create mode 100644 Dockerfile create mode 100644 Dockerfile.offline create mode 100644 Godeps/Godeps.json create mode 100644 Godeps/Readme create mode 100644 Godeps/_workspace/.gitignore create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/doc.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go create mode 100644 
Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go create mode 100644 Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go create mode 100644 Godeps/_workspace/src/github.com/Unknwon/goconfig/.gitignore create mode 100644 Godeps/_workspace/src/github.com/Unknwon/goconfig/LICENSE create mode 100644 Godeps/_workspace/src/github.com/Unknwon/goconfig/README.md create mode 100644 Godeps/_workspace/src/github.com/Unknwon/goconfig/README_ZH.md create mode 100644 Godeps/_workspace/src/github.com/Unknwon/goconfig/conf.go create mode 100644 Godeps/_workspace/src/github.com/Unknwon/goconfig/goconfig_test.go create mode 100644 Godeps/_workspace/src/github.com/Unknwon/goconfig/read.go create mode 100644 
Godeps/_workspace/src/github.com/Unknwon/goconfig/testdata/conf.ini create mode 100644 Godeps/_workspace/src/github.com/Unknwon/goconfig/testdata/conf2.ini create mode 100644 Godeps/_workspace/src/github.com/Unknwon/goconfig/testdata/conf_test.ini create mode 100644 Godeps/_workspace/src/github.com/Unknwon/goconfig/write.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/.gitignore create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/LICENSE create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/README.md create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/admin.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/adminui.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/app.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/beego.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/cache/README.md create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/cache/cache.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/cache/conv.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/cache/file.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/cache/memcache/memcache.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/cache/memory.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/cache/redis/redis.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/config.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/config/config.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/config/fake.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/config/ini.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/config/json.go 
create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/config/xml/xml.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/config/yaml/yaml.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/context/acceptencoder.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/context/context.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/context/input.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/context/output.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/controller.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/doc.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/docs.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/error.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/filter.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/flash.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/grace/conn.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/grace/grace.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/grace/listener.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/grace/server.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/hooks.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/httplib/README.md create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/httplib/httplib.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/log.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/logs/README.md create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/logs/conn.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/logs/console.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/logs/es/es.go create mode 100644 
Godeps/_workspace/src/github.com/astaxie/beego/logs/file.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/logs/log.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/logs/smtp.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/migration/ddl.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/migration/migration.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/mime.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/namespace.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/README.md create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/cmd.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/cmd_utils.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/db.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/db_alias.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/db_mysql.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/db_oracle.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/db_postgres.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/db_sqlite.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/db_tables.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/db_tidb.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/db_utils.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/models.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/models_boot.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/models_fields.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/models_info_f.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/models_info_m.go create mode 100644 
Godeps/_workspace/src/github.com/astaxie/beego/orm/models_utils.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/orm.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_conds.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_log.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_object.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_querym2m.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_queryset.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_raw.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/qb.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/qb_mysql.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/qb_tidb.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/types.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/orm/utils.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/parser.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/plugins/apiauth/apiauth.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/plugins/auth/basic.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/plugins/cors/cors.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/router.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/session/README.md create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/session/couchbase/sess_couchbase.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/session/ledis/ledis_session.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/session/memcache/sess_memcache.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/session/mysql/sess_mysql.go create mode 100644 
Godeps/_workspace/src/github.com/astaxie/beego/session/postgres/sess_postgresql.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/session/redis/sess_redis.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/session/sess_cookie.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/session/sess_file.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/session/sess_mem.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/session/sess_utils.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/session/session.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/staticfile.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/swagger/docs_spec.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/template.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/templatefunc.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/testing/assertions.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/testing/client.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/toolbox/healthcheck.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/toolbox/profile.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/toolbox/statistics.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/toolbox/task.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/tree.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/caller.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/LICENSE create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/README.md create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/captcha.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/image.go create mode 100644 
Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/siprng.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/debug.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/file.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/mail.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/controller.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/doc.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/paginator.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/utils.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/rand.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/safemap.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/utils/slice.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/validation/README.md create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/validation/util.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/validation/validation.go create mode 100644 Godeps/_workspace/src/github.com/astaxie/beego/validation/validators.go create mode 100644 Godeps/_workspace/src/github.com/beego/i18n/.gitignore create mode 100644 Godeps/_workspace/src/github.com/beego/i18n/LICENSE create mode 100644 Godeps/_workspace/src/github.com/beego/i18n/README.md create mode 100644 Godeps/_workspace/src/github.com/beego/i18n/beei18n/beei18n.go create mode 100644 Godeps/_workspace/src/github.com/beego/i18n/beei18n/sync.go create mode 100644 Godeps/_workspace/src/github.com/beego/i18n/i18n.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/context.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/doc.go create mode 100644 
Godeps/_workspace/src/github.com/docker/distribution/context/http.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/logger.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/trace.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/util.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/version.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/context/version_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go create mode 100644 
Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go create mode 100644 Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/LICENSE create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/certificates.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/doc.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/filter.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/hash.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key_files.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/key_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/rsa_key.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go create mode 100644 
Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/util.go create mode 100644 Godeps/_workspace/src/github.com/docker/libtrust/util_test.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/README.md create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go create mode 100644 
Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go create mode 100644 Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/LICENSE create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/README.md create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/context.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/context_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/context/doc.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/LICENSE create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/README.md create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/doc.go 
create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/mux.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/old_test.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/regexp.go create mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/route.go create mode 100644 Godeps/_workspace/src/github.com/mqu/openldap/LICENCE.txt create mode 100644 Godeps/_workspace/src/github.com/mqu/openldap/README.md create mode 100644 Godeps/_workspace/src/github.com/mqu/openldap/add-modify-delete.go create mode 100644 Godeps/_workspace/src/github.com/mqu/openldap/defines.go create mode 100644 Godeps/_workspace/src/github.com/mqu/openldap/openldap.go create mode 100644 Godeps/_workspace/src/github.com/mqu/openldap/options-errors.go create mode 100644 Godeps/_workspace/src/github.com/mqu/openldap/results.go create mode 100644 Godeps/_workspace/src/github.com/mqu/openldap/types.go create mode 100644 Godeps/_workspace/src/golang.org/x/blog/LICENSE create mode 100644 Godeps/_workspace/src/golang.org/x/blog/PATENTS create mode 100644 Godeps/_workspace/src/golang.org/x/blog/content/third-party-libraries-goprotobuf-and.article create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/LICENSE create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/PATENTS create mode 100644 Godeps/_workspace/src/golang.org/x/crypto/pbkdf2/pbkdf2.go create mode 100644 Godeps/_workspace/src/golang.org/x/exp/LICENSE create mode 100644 Godeps/_workspace/src/golang.org/x/exp/PATENTS create mode 100644 Godeps/_workspace/src/golang.org/x/image/LICENSE create mode 100644 Godeps/_workspace/src/golang.org/x/image/PATENTS create mode 100644 Godeps/_workspace/src/golang.org/x/mobile/LICENSE create mode 100644 Godeps/_workspace/src/golang.org/x/mobile/PATENTS create mode 100644 Godeps/_workspace/src/golang.org/x/net/LICENSE create mode 100644 Godeps/_workspace/src/golang.org/x/net/PATENTS 
create mode 100644 Godeps/_workspace/src/golang.org/x/net/context/context.go create mode 100644 Godeps/_workspace/src/golang.org/x/sys/LICENSE create mode 100644 Godeps/_workspace/src/golang.org/x/sys/PATENTS create mode 100644 Godeps/_workspace/src/golang.org/x/talks/LICENSE create mode 100644 Godeps/_workspace/src/golang.org/x/talks/PATENTS create mode 100644 Godeps/_workspace/src/golang.org/x/text/LICENSE create mode 100644 Godeps/_workspace/src/golang.org/x/text/PATENTS create mode 100644 Godeps/_workspace/src/golang.org/x/tools/LICENSE create mode 100644 Godeps/_workspace/src/golang.org/x/tools/PATENTS create mode 100644 LICENSE create mode 100644 README.md create mode 100644 api/base.go create mode 100644 api/project.go create mode 100644 api/project_member.go create mode 100644 api/repository.go create mode 100644 api/search.go create mode 100644 api/user.go create mode 100644 api/utils.go create mode 100644 conf/app.conf create mode 100644 conf/private_key.pem create mode 100644 controllers/base.go create mode 100644 controllers/item_detail.go create mode 100644 controllers/login.go create mode 100644 controllers/password.go create mode 100644 controllers/project.go create mode 100644 controllers/register.go create mode 100644 controllers/search.go create mode 100644 dao/access_log.go create mode 100644 dao/base.go create mode 100644 dao/item_detail.go create mode 100644 dao/project.go create mode 100644 dao/project_role.go create mode 100644 dao/register.go create mode 100644 dao/role.go create mode 100644 dao/user.go create mode 100644 main.go create mode 100644 models/access_log.go create mode 100644 models/auth_model.go create mode 100644 models/notification.go create mode 100644 models/project.go create mode 100644 models/project_role.go create mode 100644 models/repo.go create mode 100644 models/repo_item.go create mode 100644 models/role.go create mode 100644 models/tag.go create mode 100644 models/user.go create mode 100644 
models/user_project_role.go create mode 100644 opt_auth/db/db.go create mode 100644 opt_auth/ldap/ldap.go create mode 100644 opt_auth/opt_auth.go create mode 100644 routers/router.go create mode 100644 service/auth.go create mode 100644 service/notification.go create mode 100644 service/utils/auth_utils.go create mode 100644 service/utils/cache.go create mode 100644 service/utils/registry_utils.go create mode 100644 static/i18n/locale_en-US.ini create mode 100644 static/i18n/locale_messages.js create mode 100644 static/i18n/locale_zh-CN.ini create mode 100644 static/resources/css/base.css create mode 100644 static/resources/css/sign-in.css create mode 100644 static/resources/js/change-password.js create mode 100644 static/resources/js/common.js create mode 100644 static/resources/js/forgot-password.js create mode 100644 static/resources/js/header-login.js create mode 100644 static/resources/js/item-detail.js create mode 100644 static/resources/js/login.js create mode 100644 static/resources/js/project.js create mode 100644 static/resources/js/register.js create mode 100644 static/resources/js/reset-password.js create mode 100644 static/resources/js/search.js create mode 100644 static/resources/js/sign-in.js create mode 100644 static/resources/js/validate-options.js create mode 100644 static/vendors/eonasdan-bootstrap-datetimepicker/build/css/bootstrap-datetimepicker.min.css create mode 100644 static/vendors/eonasdan-bootstrap-datetimepicker/build/js/bootstrap-datetimepicker.min.js create mode 100644 static/vendors/moment/min/moment-with-locales.min.js create mode 100644 static/vendors/spinner.min.js create mode 100644 tests/conf/app.conf create mode 100644 tests/dao_test.go create mode 100644 tests/utils_test.go create mode 100644 utils/encrypt.go create mode 100644 utils/mail.go create mode 100644 utils/registry_utils.go create mode 100644 utils/utils.go create mode 100644 views/change-password.tpl create mode 100644 views/forgot-password.tpl create mode 100644 
views/index.tpl create mode 100644 views/item-detail.tpl create mode 100644 views/mail.tpl create mode 100644 views/project.tpl create mode 100644 views/register.tpl create mode 100644 views/reset-password-mail.tpl create mode 100644 views/reset-password.tpl create mode 100644 views/search.tpl create mode 100644 views/segment/base-layout.tpl create mode 100644 views/segment/foot-content.tpl create mode 100644 views/segment/header-content.tpl create mode 100644 views/segment/header-include.tpl create mode 100644 views/segment/modal-dialog.tpl create mode 100644 views/sign-in.tpl diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..fb4bb6ae1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +harbor +my_start.sh +Deploy/config/registry/config.yml +Deploy/config/ui/env +Deploy/config/ui/app.conf +Deploy/prepare.my diff --git a/Deploy/config/nginx/nginx.conf b/Deploy/config/nginx/nginx.conf new file mode 100644 index 000000000..3941e94fa --- /dev/null +++ b/Deploy/config/nginx/nginx.conf @@ -0,0 +1,68 @@ +worker_processes auto; + +events { + worker_connections 1024; + use epoll; + multi_accept on; +} + +http { + tcp_nodelay on; + + # this is necessary for us to be able to disable request buffering in all cases + proxy_http_version 1.1; + + + upstream registry { + server registry:5000; +# check interval=2000 rise=1 fall=1 timeout=5000 type=tcp; + } + + upstream ui { + server ui:80; +# check interval=2000 rise=1 fall=1 timeout=5000 type=tcp; + } + + + server { + listen 80; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + location / { + proxy_pass http://ui/; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_buffering off; + proxy_request_buffering off; + } + + location /v1/ { + return 404; + } + + location /v2/ { + proxy_pass http://registry/v2/; + proxy_set_header 
Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_buffering off; + proxy_request_buffering off; + + } + + location /service/ { + proxy_pass http://ui/service/; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_buffering off; + proxy_request_buffering off; + } + } +} diff --git a/Deploy/config/registry/root.crt b/Deploy/config/registry/root.crt new file mode 100644 index 000000000..326d8080a --- /dev/null +++ b/Deploy/config/registry/root.crt @@ -0,0 +1,15 @@ +-----BEGIN CERTIFICATE----- +MIICWDCCAcGgAwIBAgIJAN1nLuloDeHNMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV +BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX +aWRnaXRzIFB0eSBMdGQwHhcNMTYwMTI3MDQyMDM1WhcNNDMwNjE0MDQyMDM1WjBF +MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50 +ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB +gQClak/4HO7EeLU0w/BhtVENPLOqU0AP2QjVUdg1qhNiDWVrbWx9KYHqz5Kn0n2+ +fxdZo3o7ZY5/2+hhgkKh1z6Kge9XGgune6z4fx2J/X2Se8WsGeQUTiND8ngSnsCA +NtYFwW50SbUZPtyf5XjAfKRofZem51OxbxzN3217L/ubKwIDAQABo1AwTjAdBgNV +HQ4EFgQU5EG2VrB3I6G/TudUpz+kBgQXSvYwHwYDVR0jBBgwFoAU5EG2VrB3I6G/ +TudUpz+kBgQXSvYwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOBgQAx+2eo +oOm0YNy9KQ81+7GQkKVWoPQXjAGGgZuZj8WCFepYqUSJ4q5qbuVCY8WbGcHVk2Rx +Jg1XDCmMjBgYP6S0ikezBRqSmNA3G6oFiydTKBfPs6RNalsB0C78Xk5l5+PIyd2R +jFKOKoMpkjwfeJv2j64WNGoBgqj7XRBoJ11a4g== +-----END CERTIFICATE----- diff --git a/Deploy/db/Dockerfile b/Deploy/db/Dockerfile new file mode 100644 index 000000000..c8555675a --- /dev/null +++ b/Deploy/db/Dockerfile @@ -0,0 +1,8 @@ +FROM mysql:5.6 + +WORKDIR /tmp + +ADD registry.sql r.sql + +ADD docker-entrypoint.sh /entrypoint.sh +RUN chmod u+x /entrypoint.sh diff --git a/Deploy/db/docker-entrypoint.sh b/Deploy/db/docker-entrypoint.sh new file mode 
100644 index 000000000..95c46e939 --- /dev/null +++ b/Deploy/db/docker-entrypoint.sh @@ -0,0 +1,43 @@ +#!/bin/bash +set -e + +if [ ! -d '/var/lib/mysql/mysql' -a "${1%_safe}" = 'mysqld' ]; then + if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" ]; then + echo >&2 'error: database is uninitialized and MYSQL_ROOT_PASSWORD not set' + echo >&2 ' Did you forget to add -e MYSQL_ROOT_PASSWORD=... ? v2' + exit 1 + fi + + mysql_install_db --user=mysql --datadir=/var/lib/mysql + + # These statements _must_ be on individual lines, and _must_ end with + # semicolons (no line breaks or comments are permitted). + # TODO proper SQL escaping on ALL the things D: + TEMP_FILE='/tmp/mysql-first-time.sql' + cat > "$TEMP_FILE" <<-EOSQL + DELETE FROM mysql.user ; + CREATE USER 'root'@'%' IDENTIFIED BY '${MYSQL_ROOT_PASSWORD}' ; + GRANT ALL ON *.* TO 'root'@'%' WITH GRANT OPTION ; + DROP DATABASE IF EXISTS test ; + EOSQL + + if [ "$MYSQL_DATABASE" ]; then + echo "CREATE DATABASE IF NOT EXISTS $MYSQL_DATABASE ;" >> "$TEMP_FILE" + fi + + if [ "$MYSQL_USER" -a "$MYSQL_PASSWORD" ]; then + echo "CREATE USER '$MYSQL_USER'@'%' IDENTIFIED BY '$MYSQL_PASSWORD' ;" >> "$TEMP_FILE" + + if [ "$MYSQL_DATABASE" ]; then + echo "GRANT ALL ON $MYSQL_DATABASE.* TO '$MYSQL_USER'@'%' ;" >> "$TEMP_FILE" + fi + fi + + echo 'FLUSH PRIVILEGES ;' >> "$TEMP_FILE" + cat /tmp/r.sql >> "$TEMP_FILE" + + set -- "$@" --init-file="$TEMP_FILE" +fi + +chown -R mysql:mysql /var/lib/mysql +exec "$@" diff --git a/Deploy/db/registry.sql b/Deploy/db/registry.sql new file mode 100644 index 000000000..4c9f03d28 --- /dev/null +++ b/Deploy/db/registry.sql @@ -0,0 +1,104 @@ +drop database if exists registry; +create database registry charset = utf8; + +use registry; + +create table access ( + access_id int NOT NULL AUTO_INCREMENT, + access_code char(1), + comment varchar (30), + primary key (access_id) +); + +insert into access values +( null, 'A', 'All access for the system'), +( null, 'M', 'Management access for 
project'), +( null, 'R', 'Read access for project'), +( null, 'W', 'Write access for project'), +( null, 'D', 'Delete access for project'), +( null, 'S', 'Search access for project'); + + +create table role ( + role_id int NOT NULL AUTO_INCREMENT, + role_code varchar(20), + name varchar (20), + primary key (role_id) +); + +insert into role values +( null, 'AMDRWS', 'sysAdmin'), +( null, 'MDRWS', 'projectAdmin'), +( null, 'RWS', 'developer'), +( null, 'RS', 'guest'); + + +create table user ( + user_id int NOT NULL AUTO_INCREMENT, + username varchar(15), + email varchar(30), + password varchar(40) NOT NULL, + realname varchar (20) NOT NULL, + comment varchar (30), + deleted tinyint (1) DEFAULT 0 NOT NULL, + reset_uuid varchar(40) DEFAULT NULL, + salt varchar(40) DEFAULT NULL, + primary key (user_id), + UNIQUE (username), + UNIQUE (email) +); + +insert into user values +(1, 'admin', 'admin@example.com', '', 'system admin', 'admin user',0, null, ''), +(2, 'anonymous', 'anonymous@example.com', '', 'anonymous user', 'anonymous user', 1, null, ''); + +create table project ( + project_id int NOT NULL AUTO_INCREMENT, + owner_id int NOT NULL, + name varchar (30) NOT NULL, + creation_time timestamp, + deleted tinyint (1) DEFAULT 0 NOT NULL, + public tinyint (1) DEFAULT 0 NOT NULL, + primary key (project_id), + FOREIGN KEY (owner_id) REFERENCES user(user_id) +); + +insert into project values +(null, 1, 'library', NOW(), 0, 1); + +create table project_role ( + pr_id int NOT NULL AUTO_INCREMENT, + project_id int NOT NULL, + role_id int NOT NULL, + primary key (pr_id), + FOREIGN KEY (role_id) REFERENCES role(role_id), + FOREIGN KEY (project_id) REFERENCES project (project_id) +); + +insert into project_role values +( 1,1,1 ); + +create table user_project_role ( + upr_id int NOT NULL AUTO_INCREMENT, + user_id int NOT NULL, + pr_id int NOT NULL, + primary key (upr_id), + FOREIGN KEY (user_id) REFERENCES user(user_id), + FOREIGN KEY (pr_id) REFERENCES project_role (pr_id) +); + 
+insert into user_project_role values +( 1,1,1 ); + +create table access_log ( + log_id int NOT NULL AUTO_INCREMENT, + user_id int NOT NULL, + project_id int NOT NULL, + repo_name varchar (40), + GUID varchar(64), + operation varchar(20) NOT NULL, + op_time timestamp, + primary key (log_id), + FOREIGN KEY (user_id) REFERENCES user(user_id), + FOREIGN KEY (project_id) REFERENCES project (project_id) +); diff --git a/Deploy/docker-compose.yml b/Deploy/docker-compose.yml new file mode 100644 index 000000000..e1ce3ca84 --- /dev/null +++ b/Deploy/docker-compose.yml @@ -0,0 +1,61 @@ +log: + build: ./log/ + volumes: + - /var/log/harbor/:/var/log/docker/ + ports: + - 1514:514 +registry: + image: library/registry:2.1.1 + volumes: + - /data/registry:/storage + - ./config/registry/:/etc/registry/ + ports: + - 5001:5001 + command: + /etc/registry/config.yml + links: + - log + log_driver: "syslog" + log_opt: + syslog-address: "tcp://127.0.0.1:1514" + tag: "{{.Name}}" +mysql: + build: ./db/ + volumes: + - /data/database:/var/lib/mysql + environment: + MYSQL_ROOT_PASSWORD: root + links: + - log + log_driver: "syslog" + log_opt: + syslog-address: "tcp://127.0.0.1:1514" + tag: "{{.Name}}" +ui: + build: ../ + env_file: + - ./config/ui/env + volumes: + - ./config/ui/app.conf:/etc/ui/app.conf + links: + - registry:registry + - mysql:mysql + - log + log_driver: "syslog" + log_opt: + syslog-address: "tcp://127.0.0.1:1514" + tag: "{{.Name}}" +proxy: + image: library/nginx:1.9 + volumes: + - ./config/nginx/nginx.conf:/etc/nginx/nginx.conf + links: + - ui:ui + - registry:registry + - log + ports: + - 80:80 + log_driver: "syslog" + log_opt: + syslog-address: "tcp://127.0.0.1:1514" + tag: "{{.Name}}" diff --git a/Deploy/log/Dockerfile b/Deploy/log/Dockerfile new file mode 100644 index 000000000..b7aaa2fc8 --- /dev/null +++ b/Deploy/log/Dockerfile @@ -0,0 +1,28 @@ +FROM library/ubuntu:14.04 + +# run logrotate hourly +RUN mv /etc/cron.daily/logrotate /etc/cron.hourly/ + +# logrotate 
configuration file for docker +ADD logrotate_docker.conf /etc/logrotate.d/ + +#disable imklog module +RUN sed 's/$ModLoad imklog/#$ModLoad imklog/' -i /etc/rsyslog.conf +RUN sed 's/$KLogPermitNonKernelFacility on/#$KLogPermitNonKernelFacility on/' -i /etc/rsyslog.conf + +# provides TCP/UDP syslog reception +RUN sed 's/#$ModLoad imudp/$ModLoad imudp/' -i /etc/rsyslog.conf +RUN sed 's/#$UDPServerRun 514/$UDPServerRun 514/' -i /etc/rsyslog.conf +RUN sed 's/#$ModLoad imtcp/$ModLoad imtcp/' -i /etc/rsyslog.conf +RUN sed 's/#$InputTCPServerRun 514/$InputTCPServerRun 514/' -i /etc/rsyslog.conf + +RUN rm /etc/rsyslog.d/* + +# rsyslog configuration file for docker +ADD rsyslog_docker.conf /etc/rsyslog.d/ + +VOLUME /var/log/docker/ + +EXPOSE 514 + +CMD cron && chown -R syslog:syslog /var/log/docker/ && rsyslogd -n diff --git a/Deploy/log/logrotate_docker.conf b/Deploy/log/logrotate_docker.conf new file mode 100644 index 000000000..6a953d4c3 --- /dev/null +++ b/Deploy/log/logrotate_docker.conf @@ -0,0 +1,7 @@ +# Logrotate configuration file for docker. + +/var/log/docker/*/*.log { + rotate 100 + size 10M + copytruncate +} diff --git a/Deploy/log/rsyslog_docker.conf b/Deploy/log/rsyslog_docker.conf new file mode 100644 index 000000000..4c417b054 --- /dev/null +++ b/Deploy/log/rsyslog_docker.conf @@ -0,0 +1,7 @@ +# Rsyslog configuration file for docker. 
+ +template(name="DynaFile" type="string" + string="/var/log/docker/%$now%/%syslogtag:R,ERE,0,DFLT:[^[]*--end:secpath-replace%.log" +) + +if $programname == "docker" then ?DynaFile \ No newline at end of file diff --git a/Deploy/prepare b/Deploy/prepare new file mode 100755 index 000000000..076910851 --- /dev/null +++ b/Deploy/prepare @@ -0,0 +1,72 @@ +#!/usr/bin/python + +## CONFIGURATIONS +#The endpoint for user to access UI and registry service +hostname = "mydomain.com" +#User can update the protocol if ssl has been setup +ui_url = "http://" + hostname +#Email settings for ui to send password resetting emails +email_server = "smtp.mydomain.com" +email_server_port = "25" +email_username = "sample_admin@mydomain.com" +email_password = "abc" +email_from = "admin " +##The password of harbor admin +harbor_admin_password= "Harbor12345" +##By default the auth mode is db_auth, i.e. the credentials are stored in a database +#please set it to ldap_auth if you want to verify user's credentials against an ldap server. 
+auth_mode = "db_auth" +#The url for ldap endpoint +ldap_url = "ldaps://ldap.mydomain.com" +#The basedn template for verifying the user's password +ldap_basedn = "uid=%s,ou=people,dc=mydomain,dc=com" +##### +import os +from string import Template +base_dir = os.path.dirname(__file__) +config_dir = os.path.join(base_dir, "config") +templates_dir = os.path.join(base_dir, "templates") + +ui_config_dir = os.path.join(config_dir,"ui") +if not os.path.exists(ui_config_dir): + os.makedirs(os.path.join(config_dir, "ui")) + +def render(src, dest, **kw): + t = Template(open(src, 'r').read()) + with open(dest, 'w') as f: + f.write(t.substitute(**kw)) + print "Generated configuration file: %s" % dest + +ui_conf_env = os.path.join(config_dir, "ui", "env") +ui_conf = os.path.join(config_dir, "ui", "app.conf") +registry_conf = os.path.join(config_dir, "registry", "config.yml") + +conf_files = [ ui_conf, ui_conf_env, registry_conf ] +for f in conf_files: + if os.path.exists(f): + print "Clearing the configuration file: %s" % f + os.remove(f) + +render(os.path.join(templates_dir, "ui", "env"), + ui_conf_env, + hostname=hostname, + ui_url=ui_url, + auth_mode=auth_mode, + admin_pwd=harbor_admin_password, + ldap_url=ldap_url, + ldap_basedn=ldap_basedn) + +render(os.path.join(templates_dir, "ui", "app.conf"), + ui_conf, + email_server=email_server, + email_server_port=email_server_port, + email_user_name=email_username, + email_user_password=email_password, + email_from=email_from, + ui_url=ui_url) + +render(os.path.join(templates_dir, "registry", "config.yml"), + registry_conf, + ui_url=ui_url) + +print "The configuration files are ready, please use docker-compose to start the service." 
diff --git a/Deploy/templates/registry/config.yml b/Deploy/templates/registry/config.yml new file mode 100644 index 000000000..83134a9f6 --- /dev/null +++ b/Deploy/templates/registry/config.yml @@ -0,0 +1,33 @@ +version: 0.1 +log: + level: debug + fields: + service: registry +storage: + cache: + layerinfo: inmemory + filesystem: + rootdirectory: /storage + maintenance: + uploadpurging: + enabled: false +http: + addr: :5000 + secret: placeholder + debug: + addr: localhost:5001 +auth: + token: + issuer: registry-token-issuer + realm: $ui_url/service/token + rootcertbundle: /etc/registry/root.crt + service: token-service + +notifications: + endpoints: + - name: harbor + disabled: false + url: $ui_url/service/notifications + timeout: 500 + threshold: 5 + backoff: 1000 diff --git a/Deploy/templates/ui/app.conf b/Deploy/templates/ui/app.conf new file mode 100644 index 000000000..63dbd3451 --- /dev/null +++ b/Deploy/templates/ui/app.conf @@ -0,0 +1,16 @@ +appname = registry +runmode = dev + +[lang] +types = en-US|zh-CN +names = en-US|zh-CN + +[dev] +httpport = 80 + +[mail] +host = $email_server +port = $email_server_port +username = $email_user_name +password = $email_user_password +from = $email_from diff --git a/Deploy/templates/ui/env b/Deploy/templates/ui/env new file mode 100644 index 000000000..55701bbdb --- /dev/null +++ b/Deploy/templates/ui/env @@ -0,0 +1,10 @@ +MYSQL_HOST=mysql +MYSQL_USR=root +REGISTRY_URL=http://registry:5000 +CONFIG_PATH=/etc/ui/app.conf +HARBOR_REG_URL=$hostname +HARBOR_ADMIN_PASSWORD=$admin_pwd +HARBOR_URL=$ui_url +AUTH_MODE=$auth_mode +LDAP_URL=$ldap_url +LDAP_BASE_DN=$ldap_basedn diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..6a1e40176 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,30 @@ +FROM golang:1.5.1 + +RUN apt-get update -qqy && apt-get install -qqy libldap2-dev + +COPY . 
/go/src/github.com/vmware/harbor +WORKDIR /go/src/github.com/vmware/harbor + +ENV GOPATH /go/src/github.com/vmware/harbor/Godeps/_workspace:$GOPATH +RUN go install -v -a + +ENV MYSQL_USR root +ENV MYSQL_PWD root +ENV MYSQL_PORT_3306_TCP_ADDR localhost +ENV MYSQL_PORT_3306_TCP_PORT 3306 +ENV REGISTRY_URL localhost:5000 + +ADD conf /go/bin/conf +ADD views /go/bin/views +ADD static /go/bin/static + +RUN chmod u+x /go/bin/harbor + +RUN sed -i 's/TLS_CACERT/#TLS_CAERT/g' /etc/ldap/ldap.conf +RUN sed -i '$a\TLS_REQCERT allow' /etc/ldap/ldap.conf + +WORKDIR /go/bin/ +ENTRYPOINT ["/go/bin/harbor"] + +EXPOSE 80 + diff --git a/Dockerfile.offline b/Dockerfile.offline new file mode 100644 index 000000000..4f03681ac --- /dev/null +++ b/Dockerfile.offline @@ -0,0 +1,25 @@ +FROM ubuntu:14.04 + +ENV MYSQL_USR root +ENV MYSQL_PWD root +ENV MYSQL_PORT_3306_TCP_ADDR localhost +ENV MYSQL_PORT_3306_TCP_PORT 3306 +ENV REGISTRY_URL localhost:5000 + +RUN apt-get update -qqy && apt-get install -qqy libldap2-dev + +ADD harbor /go/bin/harbor +ADD conf /go/bin/conf +ADD views /go/bin/views +ADD static /go/bin/static + +RUN chmod u+x /go/bin/harbor + +RUN sed -i 's/TLS_CACERT/#TLS_CAERT/g' /etc/ldap/ldap.conf +RUN sed -i '$a\TLS_REQCERT allow' /etc/ldap/ldap.conf + +WORKDIR /go/bin/ +ENTRYPOINT ["/go/bin/harbor"] + +EXPOSE 80 + diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json new file mode 100644 index 000000000..2e22c3372 --- /dev/null +++ b/Godeps/Godeps.json @@ -0,0 +1,71 @@ +{ + "ImportPath": "vmware/harbor", + "GoVersion": "go1.5.1", + "Deps": [ + { + "ImportPath": "github.com/Sirupsen/logrus", + "Comment": "v0.8.7-30-g4643a7e", + "Rev": "4643a7efec9b8fd7548882e7b30095711343100f" + }, + { + "ImportPath": "github.com/Unknwon/goconfig", + "Rev": "18bc852dc01720ee9f0e9e0a01a1bdbc8744095c" + }, + { + "ImportPath": "github.com/astaxie/beego", + "Comment": "v1.6.0-7-g23bb36d", + "Rev": "23bb36d35cf83ca8b95c4432aa846326530d24b9" + }, + { + "ImportPath": "github.com/beego/i18n", + "Rev": 
"e87155e8f0c05bf323d0b13470e1b97af0cb5652" + }, + { + "ImportPath": "github.com/docker/distribution/context", + "Comment": "v2.2.0-55-g226d8c4", + "Rev": "226d8c4f07955f1246308af3fdba9d89db6e4ca5" + }, + { + "ImportPath": "github.com/docker/distribution/registry/auth", + "Comment": "v2.2.0-55-g226d8c4", + "Rev": "226d8c4f07955f1246308af3fdba9d89db6e4ca5" + }, + { + "ImportPath": "github.com/docker/distribution/uuid", + "Comment": "v2.2.0-55-g226d8c4", + "Rev": "226d8c4f07955f1246308af3fdba9d89db6e4ca5" + }, + { + "ImportPath": "github.com/docker/libtrust", + "Rev": "9cbd2a1374f46905c68a4eb3694a130610adc62a" + }, + { + "ImportPath": "github.com/go-sql-driver/mysql", + "Comment": "v1.2-119-g527bcd5", + "Rev": "527bcd55aab2e53314f1a150922560174b493034" + }, + { + "ImportPath": "github.com/gorilla/context", + "Rev": "50c25fb3b2b3b3cc724e9b6ac75fb44b3bccd0da" + }, + { + "ImportPath": "github.com/gorilla/mux", + "Rev": "e444e69cbd2e2e3e0749a2f3c717cec491552bbf" + }, + { + "ImportPath": "github.com/mqu/openldap", + "Comment": "v0.2-11-g91bfc21", + "Rev": "91bfc2150f6c4991335de476c800caf26a990324" + }, + { + "ImportPath": "golang.org/x/crypto/pbkdf2", + "Comment": "release-20141215-1-g14213be", + "Rev": "14213be8fdd634b2e113c4f65cca53bdece9d8d4" + }, + { + "ImportPath": "golang.org/x/net/context", + "Comment": "release-20141215-1-g14213be", + "Rev": "14213be8fdd634b2e113c4f65cca53bdece9d8d4" + } + ] +} diff --git a/Godeps/Readme b/Godeps/Readme new file mode 100644 index 000000000..4cdaa53d5 --- /dev/null +++ b/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. 
diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore new file mode 100644 index 000000000..f037d684e --- /dev/null +++ b/Godeps/_workspace/.gitignore @@ -0,0 +1,2 @@ +/pkg +/bin diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore new file mode 100644 index 000000000..66be63a00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore @@ -0,0 +1 @@ +logrus diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml new file mode 100644 index 000000000..ec6411425 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.3 + - 1.4 + - tip +install: + - go get -t ./... diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 000000000..ce45d541a --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,54 @@ +# 0.9.0 (Unreleased) + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and 
`Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 000000000..f090cb42f --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md new file mode 100644 index 000000000..996c37659 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,357 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not +yet stable (pre 1.0). Logrus itself is completely stable and has been used in +many large deployments. The core API is unlikely to change much but please +version control your Logrus to make sure you aren't fetching latest `master` on +every build.** + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the 
+[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +exit status 1 +``` + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/Sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/Sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stderr instead of stdout, could also be a file. + log.SetOutput(os.Stderr) + + // Only log the warning severity or above. 
+ log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "github.com/Sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stderr + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging though logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. 
We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus using any of the `printf`-family functions should be +seen as a hint you should add a field, however, you can still use the +`printf`-family functions with Logrus. + +#### Hooks + +You can add hooks for logging levels. For example to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/Sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). + +| Hook | Description | +| ----- | ----------- | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | +| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. 
Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | +| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | +| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | + +#### Level logging + +Logrus has six logging levels: 
Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`, then it will only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +#### Entries + +Besides the fields added with `WithField` or `WithFields` some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `AddFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself. For example, if your application has a global +variable `Environment`, which is a string representation of the environment you +could do: + +```go +import ( + log "github.com/Sirupsen/logrus" +) + +init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. 
+ +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true` +* `logrus.JSONFormatter`. Logs fields as JSON. +* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events. + + ```go + logrus.SetFormatter(&logstash.LogstashFormatter{Type: “application_name"}) + ``` + +Third party logging formatters: + +* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` on information about those fields or read the + // source of the official loggers. + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. 
+ ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + + +[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/doc.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/doc.go new file mode 100644 index 000000000..dddd5f877 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/Sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/Sirupsen/logrus +*/ +package logrus diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 000000000..9ae900bc5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,264 @@ +package logrus + +import ( + "bytes" + "fmt" + "io" + "os" + "time" +) + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. 
These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns a reader for the entry, which is a proxy to the formatter. +func (entry *Entry) Reader() (*bytes.Buffer, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + return bytes.NewBuffer(serialized), err +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + reader, err := entry.Reader() + if err != nil { + return "", err + } + + return reader.String(), err +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. 
+func (entry *Entry) WithFields(fields Fields) *Entry { + data := Fields{} + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + + reader, err := entry.Reader() + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } + + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + + _, err = io.Copy(entry.Logger.Out, reader) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) 
+} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) 
+} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + os.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
+ return msg[:len(msg)-1] +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go new file mode 100644 index 000000000..99c3b41d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go @@ -0,0 +1,77 @@ +package logrus + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEntryWithError(t *testing.T) { + + assert := assert.New(t) + + defer func() { + ErrorKey = "error" + }() + + err := fmt.Errorf("kaboom at layer %d", 4711) + + assert.Equal(err, WithError(err).Data["error"]) + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + + assert.Equal(err, entry.WithError(err).Data["error"]) + + ErrorKey = "err" + + assert.Equal(err, entry.WithError(err).Data["err"]) + +} + +func TestEntryPanicln(t *testing.T) { + errBoom := fmt.Errorf("boom time") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicln("kaboom") +} + +func TestEntryPanicf(t *testing.T) { + errBoom := fmt.Errorf("boom again") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom true", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicf("kaboom %v", true) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go new file 
mode 100644 index 000000000..a1623ec00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go @@ -0,0 +1,50 @@ +package main + +import ( + "github.com/Sirupsen/logrus" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.JSONFormatter) + log.Formatter = new(logrus.TextFormatter) // default + log.Level = logrus.DebugLevel +} + +func main() { + defer func() { + err := recover() + if err != nil { + log.WithFields(logrus.Fields{ + "omg": true, + "err": err, + "number": 100, + }).Fatal("The ice breaks!") + } + }() + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "number": 8, + }).Debug("Started observing beach") + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ + "temperature": -4, + }).Debug("Temperature changes") + + log.WithFields(logrus.Fields{ + "animal": "orca", + "size": 9009, + }).Panic("It's over 9000!") +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go new file mode 100644 index 000000000..3187f6d3e --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go @@ -0,0 +1,30 @@ +package main + +import ( + "github.com/Sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" +) + +var log = logrus.New() + +func init() { + log.Formatter = new(logrus.TextFormatter) // default + log.Hooks.Add(airbrake.NewHook(123, "xyz", "development")) +} + +func main() { + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(logrus.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(logrus.Fields{ 
+ "omg": true, + "number": 100, + }).Fatal("The ice breaks!") +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 000000000..9a0120ac1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,193 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.Level +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. 
+// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) 
+} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 000000000..104d689f1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,48 @@ +package logrus + +import "time" + +const DefaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. 
It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields) { + _, ok := data["time"] + if ok { + data["fields.time"] = data["time"] + } + + _, ok = data["msg"] + if ok { + data["fields.msg"] = data["msg"] + } + + _, ok = data["level"] + if ok { + data["fields.level"] = data["level"] + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go new file mode 100644 index 000000000..c6d290c77 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go @@ -0,0 +1,98 @@ +package logrus + +import ( + "fmt" + "testing" + "time" +) + +// smallFields is a small size data set for benchmarking +var smallFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +// largeFields is a large size data set for benchmarking +var largeFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", + "five": "six", + "seven": "eight", + 
"nine": "ten", + "eleven": "twelve", + "thirteen": "fourteen", + "fifteen": "sixteen", + "seventeen": "eighteen", + "nineteen": "twenty", + "a": "b", + "c": "d", + "e": "f", + "g": "h", + "i": "j", + "k": "l", + "m": "n", + "o": "p", + "q": "r", + "s": "t", + "u": "v", + "w": "x", + "y": "z", + "this": "will", + "make": "thirty", + "entries": "yeah", +} + +var errorFields = Fields{ + "foo": fmt.Errorf("bar"), + "baz": fmt.Errorf("qux"), +} + +func BenchmarkErrorTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) +} + +func BenchmarkSmallTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkLargeTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) +} + +func BenchmarkSmallColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) +} + +func BenchmarkLargeColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) +} + +func BenchmarkSmallJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, smallFields) +} + +func BenchmarkLargeJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, largeFields) +} + +func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { + entry := &Entry{ + Time: time.Time{}, + Level: InfoLevel, + Message: "message", + Data: fields, + } + var d []byte + var err error + for i := 0; i < b.N; i++ { + d, err = formatter.Format(entry) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(d))) + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go new file mode 100644 index 000000000..8ea93ddf2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go @@ -0,0 +1,56 @@ +package logstash + +import ( + 
"encoding/json" + "fmt" + + "github.com/Sirupsen/logrus" +) + +// Formatter generates json in logstash format. +// Logstash site: http://logstash.net/ +type LogstashFormatter struct { + Type string // if not empty use for logstash type field. + + // TimestampFormat sets the format used for timestamps. + TimestampFormat string +} + +func (f *LogstashFormatter) Format(entry *logrus.Entry) ([]byte, error) { + entry.Data["@version"] = 1 + + if f.TimestampFormat == "" { + f.TimestampFormat = logrus.DefaultTimestampFormat + } + + entry.Data["@timestamp"] = entry.Time.Format(f.TimestampFormat) + + // set message field + v, ok := entry.Data["message"] + if ok { + entry.Data["fields.message"] = v + } + entry.Data["message"] = entry.Message + + // set level field + v, ok = entry.Data["level"] + if ok { + entry.Data["fields.level"] = v + } + entry.Data["level"] = entry.Level.String() + + // set type field + if f.Type != "" { + v, ok = entry.Data["type"] + if ok { + entry.Data["fields.type"] = v + } + entry.Data["type"] = f.Type + } + + serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go new file mode 100644 index 000000000..d8814a0ea --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go @@ -0,0 +1,52 @@ +package logstash + +import ( + "bytes" + "encoding/json" + "github.com/Sirupsen/logrus" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestLogstashFormatter(t *testing.T) { + assert := assert.New(t) + + lf := LogstashFormatter{Type: "abc"} + + fields := logrus.Fields{ + "message": "def", + "level": "ijk", + "type": "lmn", + "one": 1, + "pi": 3.14, + "bool": true, + } + + entry := 
logrus.WithFields(fields) + entry.Message = "msg" + entry.Level = logrus.InfoLevel + + b, _ := lf.Format(entry) + + var data map[string]interface{} + dec := json.NewDecoder(bytes.NewReader(b)) + dec.UseNumber() + dec.Decode(&data) + + // base fields + assert.Equal(json.Number("1"), data["@version"]) + assert.NotEmpty(data["@timestamp"]) + assert.Equal("abc", data["type"]) + assert.Equal("msg", data["message"]) + assert.Equal("info", data["level"]) + + // substituted fields + assert.Equal("def", data["fields.message"]) + assert.Equal("ijk", data["fields.level"]) + assert.Equal("lmn", data["fields.type"]) + + // formats + assert.Equal(json.Number("1"), data["one"]) + assert.Equal(json.Number("3.14"), data["pi"]) + assert.Equal(true, data["bool"]) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go new file mode 100644 index 000000000..13f34cb6f --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go @@ -0,0 +1,122 @@ +package logrus + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestHook struct { + Fired bool +} + +func (hook *TestHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *TestHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookFires(t *testing.T) { + hook := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + assert.Equal(t, hook.Fired, false) + + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +type ModifyHook struct { +} + +func (hook *ModifyHook) Fire(entry *Entry) error { + entry.Data["wow"] = "whale" + return nil +} + +func (hook *ModifyHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookCanModifyEntry(t *testing.T) { 
+ hook := new(ModifyHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + }) +} + +func TestCanFireMultipleHooks(t *testing.T) { + hook1 := new(ModifyHook) + hook2 := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook1) + log.Hooks.Add(hook2) + + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + assert.Equal(t, hook2.Fired, true) + }) +} + +type ErrorHook struct { + Fired bool +} + +func (hook *ErrorHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *ErrorHook) Levels() []Level { + return []Level{ + ErrorLevel, + } +} + +func TestErrorHookShouldntFireOnInfo(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, false) + }) +} + +func TestErrorHookShouldFireOnError(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Error("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 000000000..3f151cdc3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. 
+type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go new file mode 100644 index 000000000..d20a0f54a --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go @@ -0,0 +1,68 @@ +package logrus_bugsnag + +import ( + "errors" + + "github.com/Sirupsen/logrus" + "github.com/bugsnag/bugsnag-go" +) + +type bugsnagHook struct{} + +// ErrBugsnagUnconfigured is returned if NewBugsnagHook is called before +// bugsnag.Configure. Bugsnag must be configured before the hook. +var ErrBugsnagUnconfigured = errors.New("bugsnag must be configured before installing this logrus hook") + +// ErrBugsnagSendFailed indicates that the hook failed to submit an error to +// bugsnag. The error was successfully generated, but `bugsnag.Notify()` +// failed. +type ErrBugsnagSendFailed struct { + err error +} + +func (e ErrBugsnagSendFailed) Error() string { + return "failed to send error to Bugsnag: " + e.err.Error() +} + +// NewBugsnagHook initializes a logrus hook which sends exceptions to an +// exception-tracking service compatible with the Bugsnag API. Before using +// this hook, you must call bugsnag.Configure(). 
The returned object should be +// registered with a log via `AddHook()` +// +// Entries that trigger an Error, Fatal or Panic should now include an "error" +// field to send to Bugsnag. +func NewBugsnagHook() (*bugsnagHook, error) { + if bugsnag.Config.APIKey == "" { + return nil, ErrBugsnagUnconfigured + } + return &bugsnagHook{}, nil +} + +// Fire forwards an error to Bugsnag. Given a logrus.Entry, it extracts the +// "error" field (or the Message if the error isn't present) and sends it off. +func (hook *bugsnagHook) Fire(entry *logrus.Entry) error { + var notifyErr error + err, ok := entry.Data["error"].(error) + if ok { + notifyErr = err + } else { + notifyErr = errors.New(entry.Message) + } + + bugsnagErr := bugsnag.Notify(notifyErr) + if bugsnagErr != nil { + return ErrBugsnagSendFailed{bugsnagErr} + } + + return nil +} + +// Levels enumerates the log levels on which the error should be forwarded to +// bugsnag: everything at or above the "Error" level. +func (hook *bugsnagHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.ErrorLevel, + logrus.FatalLevel, + logrus.PanicLevel, + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go new file mode 100644 index 000000000..e9ea298d8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go @@ -0,0 +1,64 @@ +package logrus_bugsnag + +import ( + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/Sirupsen/logrus" + "github.com/bugsnag/bugsnag-go" +) + +type notice struct { + Events []struct { + Exceptions []struct { + Message string `json:"message"` + } `json:"exceptions"` + } `json:"events"` +} + +func TestNoticeReceived(t *testing.T) { + msg := make(chan string, 1) + expectedMsg := "foo" + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) 
{ + var notice notice + data, _ := ioutil.ReadAll(r.Body) + if err := json.Unmarshal(data, ¬ice); err != nil { + t.Error(err) + } + _ = r.Body.Close() + + msg <- notice.Events[0].Exceptions[0].Message + })) + defer ts.Close() + + hook := &bugsnagHook{} + + bugsnag.Configure(bugsnag.Configuration{ + Endpoint: ts.URL, + ReleaseStage: "production", + APIKey: "12345678901234567890123456789012", + Synchronous: true, + }) + + log := logrus.New() + log.Hooks.Add(hook) + + log.WithFields(logrus.Fields{ + "error": errors.New(expectedMsg), + }).Error("Bugsnag will not see this string") + + select { + case received := <-msg: + if received != expectedMsg { + t.Errorf("Unexpected message received: %s", received) + } + case <-time.After(time.Second): + t.Error("Timed out; no notice received by Bugsnag API") + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md new file mode 100644 index 000000000..066704b37 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md @@ -0,0 +1,39 @@ +# Syslog Hooks for Logrus :walrus: + +## Usage + +```go +import ( + "log/syslog" + "github.com/Sirupsen/logrus" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` + +If you want to connect to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). Just assign empty string to the first two parameters of `NewSyslogHook`. It should look like the following. 
+ +```go +import ( + "log/syslog" + "github.com/Sirupsen/logrus" + logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" +) + +func main() { + log := logrus.New() + hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "") + + if err == nil { + log.Hooks.Add(hook) + } +} +``` \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go new file mode 100644 index 000000000..b6fa37462 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go @@ -0,0 +1,59 @@ +package logrus_syslog + +import ( + "fmt" + "github.com/Sirupsen/logrus" + "log/syslog" + "os" +) + +// SyslogHook to send logs via syslog. +type SyslogHook struct { + Writer *syslog.Writer + SyslogNetwork string + SyslogRaddr string +} + +// Creates a hook to be added to an instance of logger. This is called with +// `hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_DEBUG, "")` +// `if err == nil { log.Hooks.Add(hook) }` +func NewSyslogHook(network, raddr string, priority syslog.Priority, tag string) (*SyslogHook, error) { + w, err := syslog.Dial(network, raddr, priority, tag) + return &SyslogHook{w, network, raddr}, err +} + +func (hook *SyslogHook) Fire(entry *logrus.Entry) error { + line, err := entry.String() + if err != nil { + fmt.Fprintf(os.Stderr, "Unable to read entry, %v", err) + return err + } + + switch entry.Level { + case logrus.PanicLevel: + return hook.Writer.Crit(line) + case logrus.FatalLevel: + return hook.Writer.Crit(line) + case logrus.ErrorLevel: + return hook.Writer.Err(line) + case logrus.WarnLevel: + return hook.Writer.Warning(line) + case logrus.InfoLevel: + return hook.Writer.Info(line) + case logrus.DebugLevel: + return hook.Writer.Debug(line) + default: + return nil + } +} + +func (hook *SyslogHook) Levels() []logrus.Level { + return []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + 
logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go new file mode 100644 index 000000000..42762dc10 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go @@ -0,0 +1,26 @@ +package logrus_syslog + +import ( + "github.com/Sirupsen/logrus" + "log/syslog" + "testing" +) + +func TestLocalhostAddAndPrint(t *testing.T) { + log := logrus.New() + hook, err := NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + + if err != nil { + t.Errorf("Unable to connect to local syslog.") + } + + log.Hooks.Add(hook) + + for _, level := range hook.Levels() { + if len(log.Hooks[level]) != 1 { + t.Errorf("SyslogHook was not added. The length of log.Hooks[%v]: %v", level, len(log.Hooks[level])) + } + } + + log.Info("Congratulations!") +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 000000000..2ad6dc5cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,41 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. 
+ TimestampFormat string +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + data["time"] = entry.Time.Format(timestampFormat) + data["msg"] = entry.Message + data["level"] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go new file mode 100644 index 000000000..1d7087325 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go @@ -0,0 +1,120 @@ +package logrus + +import ( + "encoding/json" + "errors" + + "testing" +) + +func TestErrorNotLost(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["error"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable 
to unmarshal formatted entry: ", err) + } + + if entry["omg"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestFieldClashWithTime(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("time", "right now!")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.time"] != "right now!" { + t.Fatal("fields.time not set to original time field") + } + + if entry["time"] != "0001-01-01T00:00:00Z" { + t.Fatal("time field not set to current time, was: ", entry["time"]) + } +} + +func TestFieldClashWithMsg(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("msg", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.msg"] != "something" { + t.Fatal("fields.msg not set to original msg field") + } +} + +func TestFieldClashWithLevel(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.level"] != "something" { + t.Fatal("fields.level not set to original level field") + } +} + +func TestJSONEntryEndsWithNewline(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + if b[len(b)-1] != '\n' { + t.Fatal("Expected JSON log entry to end with a newline") + } +} diff --git 
a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 000000000..fd9804c64 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,206 @@ +package logrus + +import ( + "io" + "os" + "sync" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in + Level Level + // Used to sync writing to the log. + mu sync.Mutex +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. 
+func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +// Adds a field to the log entry, note that you it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + return NewEntry(logger).WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + return NewEntry(logger).WithFields(fields) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugf(format, args...) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infof(format, args...) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + NewEntry(logger).Printf(format, args...) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnf(format, args...) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorf(format, args...) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalf(format, args...) + } + os.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicf(format, args...) 
+ } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debug(args...) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Info(args...) + } +} + +func (logger *Logger) Print(args ...interface{}) { + NewEntry(logger).Info(args...) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warn(args...) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Error(args...) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatal(args...) + } + os.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panic(args...) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.Level >= DebugLevel { + NewEntry(logger).Debugln(args...) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.Level >= InfoLevel { + NewEntry(logger).Infoln(args...) + } +} + +func (logger *Logger) Println(args ...interface{}) { + NewEntry(logger).Println(args...) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.Level >= WarnLevel { + NewEntry(logger).Warnln(args...) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.Level >= ErrorLevel { + NewEntry(logger).Errorln(args...) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.Level >= FatalLevel { + NewEntry(logger).Fatalln(args...) 
+ } + os.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.Level >= PanicLevel { + NewEntry(logger).Panicln(args...) + } +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 000000000..0c09fbc26 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,98 @@ +package logrus + +import ( + "fmt" + "log" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch lvl { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. 
+ // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. + DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. +type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go new file mode 100644 index 000000000..efaacea23 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go @@ -0,0 +1,301 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "strconv" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + log(logger) + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assertions(fields) +} + +func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { + var buffer bytes.Buffer + + logger := New() + logger.Out = &buffer + logger.Formatter = 
&TextFormatter{ + DisableColors: true, + } + + log(logger) + + fields := make(map[string]string) + for _, kv := range strings.Split(buffer.String(), " ") { + if !strings.Contains(kv, "=") { + continue + } + kvArr := strings.Split(kv, "=") + key := strings.TrimSpace(kvArr[0]) + val := kvArr[1] + if kvArr[1][0] == '"' { + var err error + val, err = strconv.Unquote(val) + assert.NoError(t, err) + } + fields[key] = val + } + assertions(fields) +} + +func TestPrint(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestInfo(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestWarn(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Warn("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "warning") + }) +} + +func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test test") + }) +} + +func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test 10") + }) +} + +func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func 
TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "testtest") + }) +} + +func TestWithFieldsShouldAllowAssignments(t *testing.T) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + localLog := logger.WithFields(Fields{ + "key1": "value1", + }) + + localLog.WithField("key2", "value2").Info("test") + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assert.Equal(t, "value2", fields["key2"]) + assert.Equal(t, "value1", fields["key1"]) + + buffer = bytes.Buffer{} + fields = Fields{} + localLog.Info("test") + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + _, ok := fields["key2"] + assert.Equal(t, false, ok) + assert.Equal(t, "value1", fields["key1"]) +} + +func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + }) +} + +func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["fields.msg"], "hello") + }) +} + +func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("time", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["fields.time"], "hello") + }) +} + +func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + 
log.WithField("level", 1).Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["level"], "info") + assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only + }) +} + +func TestDefaultFieldsAreNotPrefixed(t *testing.T) { + LogAndAssertText(t, func(log *Logger) { + ll := log.WithField("herp", "derp") + ll.Info("hello") + ll.Info("bye") + }, func(fields map[string]string) { + for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { + if _, ok := fields[fieldName]; ok { + t.Fatalf("should not have prefixed %q: %v", fieldName, fields) + } + } + }) +} + +func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { + + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + llog := logger.WithField("context", "eating raw fish") + + llog.Info("looks delicious") + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded first message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "looks delicious") + assert.Equal(t, fields["context"], "eating raw fish") + + buffer.Reset() + + llog.Warn("omg it is!") + + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded second message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "omg it is!") + assert.Equal(t, fields["context"], "eating raw fish") + assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") + +} + +func TestConvertLevelToString(t *testing.T) { + assert.Equal(t, "debug", DebugLevel.String()) + assert.Equal(t, "info", InfoLevel.String()) + assert.Equal(t, "warning", WarnLevel.String()) + assert.Equal(t, "error", ErrorLevel.String()) + assert.Equal(t, "fatal", FatalLevel.String()) + assert.Equal(t, "panic", PanicLevel.String()) +} + +func TestParseLevel(t 
*testing.T) { + l, err := ParseLevel("panic") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("fatal") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("error") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("warn") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("warning") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("info") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("debug") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("invalid") + assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) +} + +func TestGetSetLevelRace(t *testing.T) { + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + if i%2 == 0 { + SetLevel(InfoLevel) + } else { + GetLevel() + } + }(i) + + } + wg.Wait() +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go new file mode 100644 index 000000000..71f8d67a5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,9 @@ +// +build darwin freebsd openbsd netbsd dragonfly + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 000000000..a2c0b40db --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,12 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 000000000..4bb537602 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,21 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd dragonfly + +package logrus + +import ( + "syscall" + "unsafe" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal() bool { + fd := syscall.Stdout + var termios Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 000000000..2e09f6f7e --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,27 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package logrus + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
+func IsTerminal() bool { + fd := syscall.Stdout + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 000000000..06ef20233 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,161 @@ +package logrus + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "strings" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 + gray = 37 +) + +var ( + baseTimestamp time.Time + isTerminal bool +) + +func init() { + baseTimestamp = time.Now() + isTerminal = IsTerminal() +} + +func miniTS() int { + return int(time.Since(baseTimestamp) / time.Second) +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. 
+ DisableSorting bool +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var keys []string = make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + + b := &bytes.Buffer{} + + prefixFieldClashes(entry.Data) + + isColorTerminal := isTerminal && (runtime.GOOS != "windows") + isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + if entry.Message != "" { + f.appendKeyValue(b, "msg", entry.Message) + } + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) + } +} + +func needsQuoting(text string) bool { + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.') { + return false + } + } + return 
true +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + + b.WriteString(key) + b.WriteByte('=') + + switch value := value.(type) { + case string: + if needsQuoting(value) { + b.WriteString(value) + } else { + fmt.Fprintf(b, "%q", value) + } + case error: + errmsg := value.Error() + if needsQuoting(errmsg) { + b.WriteString(errmsg) + } else { + fmt.Fprintf(b, "%q", value) + } + default: + fmt.Fprint(b, value) + } + + b.WriteByte(' ') +} diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go new file mode 100644 index 000000000..e25a44f67 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go @@ -0,0 +1,61 @@ +package logrus + +import ( + "bytes" + "errors" + "testing" + "time" +) + +func TestQuoting(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + checkQuoting := func(q bool, value interface{}) { + b, _ := tf.Format(WithField("test", value)) + idx := bytes.Index(b, ([]byte)("test=")) + cont := bytes.Contains(b[idx+5:], []byte{'"'}) + if cont != q { + if q { + t.Errorf("quoting expected for: %#v", value) + } else { + t.Errorf("quoting not expected for: %#v", value) + } + } + } + + checkQuoting(false, "abcd") + checkQuoting(false, "v1.0") + checkQuoting(false, "1234567890") + checkQuoting(true, "/foobar") + checkQuoting(true, "x y") + checkQuoting(true, "x,y") + checkQuoting(false, errors.New("invalid")) + checkQuoting(true, errors.New("invalid argument")) +} + +func TestTimestampFormat(t *testing.T) { + checkTimeStr := func(format string) { + customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} + customStr, _ := customFormatter.Format(WithField("test", "test")) + timeStart := bytes.Index(customStr, ([]byte)("time=")) + timeEnd := bytes.Index(customStr, ([]byte)("level=")) + timeStr := customStr[timeStart+5 : timeEnd-1] + if timeStr[0] == '"' 
&& timeStr[len(timeStr)-1] == '"' { + timeStr = timeStr[1 : len(timeStr)-1] + } + if format == "" { + format = time.RFC3339 + } + _, e := time.Parse(format, (string)(timeStr)) + if e != nil { + t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) + } + } + + checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") + checkTimeStr("Mon Jan _2 15:04:05 2006") + checkTimeStr("") +} + +// TODO add tests for sorting etc., this requires a parser for the text +// formatter output. diff --git a/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go b/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 000000000..1e30b1c75 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,31 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + reader, writer := io.Pipe() + + go logger.writerScanner(reader) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (logger *Logger) writerScanner(reader *io.PipeReader) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + logger.Print(scanner.Text()) + } + if err := scanner.Err(); err != nil { + logger.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/Godeps/_workspace/src/github.com/Unknwon/goconfig/.gitignore b/Godeps/_workspace/src/github.com/Unknwon/goconfig/.gitignore new file mode 100644 index 000000000..c81d5b374 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Unknwon/goconfig/.gitignore @@ -0,0 +1,3 @@ +.DS_Store +*.iml +.idea \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/Unknwon/goconfig/LICENSE b/Godeps/_workspace/src/github.com/Unknwon/goconfig/LICENSE new file mode 100644 index 000000000..8405e89a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Unknwon/goconfig/LICENSE @@ -0,0 +1,191 
@@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. 
For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/Unknwon/goconfig/README.md b/Godeps/_workspace/src/github.com/Unknwon/goconfig/README.md new file mode 100644 index 000000000..6bfa59d28 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Unknwon/goconfig/README.md @@ -0,0 +1,67 @@ +goconfig [![Build Status](https://drone.io/github.com/Unknwon/goconfig/status.png)](https://drone.io/github.com/Unknwon/goconfig/latest) [![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/Unknwon/goconfig) [![](http://gocover.io/_badge/github.com/Unknwon/goconfig)](http://gocover.io/github.com/Unknwon/goconfig) +======== + +[中文文档](README_ZH.md) + +**IMPORTANT** + +- This library is under bug fix only mode, which means no more features will be added. 
+- I'm continuing working on better Go code with a different library: [ini](https://github.com/go-ini/ini). + +## About + +Package goconfig is a easy-use, comments-support configuration file parser for the Go Programming Language, which provides a structure similar to what you would find on Microsoft Windows INI files. + +The configuration file consists of sections, led by a `[section]` header and followed by `name:value` or `name=value` entries. Note that leading whitespace is removed from values. The optional values can contain format strings which refer to other values in the same section, or values in a special DEFAULT section. Comments are indicated by ";" or "#"; comments may begin anywhere on a single line. + +## Features + +- It simplified operation processes, easy to use and undersatnd; therefore, there are less chances to have errors. +- It uses exactly the same way to access a configuration file as you use Windows APIs, so you don't need to change your code style. +- It supports read recursion sections. +- It supports auto increment of key. +- It supports **READ** and **WRITE** configuration file with comments each section or key which all the other parsers don't support!!!!!!! +- It supports get value through type bool, float64, int, int64 and string, methods that start with "Must" means ignore errors and get zero-value if error occurs, or you can specify a default value. +- It's able to load multiple files to overwrite key values. + +## Installation + + go get github.com/Unknwon/goconfig + +Or + + gopm get github.com/Unknwon/goconfig + +## API Documentation + +[Go Walker](http://gowalker.org/github.com/Unknwon/goconfig). + +## Example + +Please see [conf.ini](testdata/conf.ini) as an example. + +### Usage + +- Function `LoadConfigFile` load file(s) depends on your situation, and return a variable with type `ConfigFile`. +- `GetValue` gives basic functionality of getting a value of given section and key. 
+- Methods like `Bool`, `Int`, `Int64` return corresponding type of values. +- Methods start with `Must` return corresponding type of values and returns zero-value of given type if something goes wrong. +- `SetValue` sets value to given section and key, and inserts somewhere if it does not exist. +- `DeleteKey` deletes by given section and key. +- Finally, `SaveConfigFile` saves your configuration to local file system. +- Use method `Reload` in case someone else modified your file(s). +- Methods contains `Comment` help you manipulate comments. + +## More Information + +- All characters are CASE SENSITIVE, BE CAREFUL! + +## Credits + +- [goconf](http://code.google.com/p/goconf/) +- [robfig/config](https://github.com/robfig/config) +- [Delete an item from a slice](https://groups.google.com/forum/?fromgroups=#!topic/golang-nuts/lYz8ftASMQ0) + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/Godeps/_workspace/src/github.com/Unknwon/goconfig/README_ZH.md b/Godeps/_workspace/src/github.com/Unknwon/goconfig/README_ZH.md new file mode 100644 index 000000000..d5fcbb95b --- /dev/null +++ b/Godeps/_workspace/src/github.com/Unknwon/goconfig/README_ZH.md @@ -0,0 +1,64 @@ +goconfig [![Build Status](https://drone.io/github.com/Unknwon/goconfig/status.png)](https://drone.io/github.com/Unknwon/goconfig/latest) [![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/Unknwon/goconfig) +======== + +本库已被 [《Go名库讲解》](https://github.com/Unknwon/go-rock-libraries-showcases/tree/master/lectures/01-goconfig) 收录讲解,欢迎前往学习如何使用! 
+ +编码规范:基于 [Go 编码规范](https://github.com/Unknwon/go-code-convention) + +## 关于 + +包 goconfig 是一个易于使用,支持注释的 Go 语言配置文件解析器,该文件的书写格式和 Windows 下的 INI 文件一样。 + +配置文件由形为 `[section]` 的节构成,内部使用 `name:value` 或 `name=value` 这样的键值对;每行开头和尾部的空白符号都将被忽略;如果未指定任何节,则会默认放入名为 `DEFAULT` 的节当中;可以使用 “;” 或 “#” 来作为注释的开头,并可以放置于任意的单独一行中。 + +## 特性 + +- 简化流程,易于理解,更少出错。 +- 提供与 Windows API 一模一样的操作方式。 +- 支持读取递归节。 +- 支持自增键名。 +- 支持对注释的 **读** 和 **写** 操作,其它所有解析器都不支持!!!! +- 可以直接返回 bool, float64, int, int64 和 string 类型的值,如果使用 “Must” 开头的方法,则一定会返回这个类型的一个值而不返回错误,如果错误发生则会返回零值。 +- 支持加载多个文件来重写值。 + +## 安装 + + go get github.com/Unknwon/goconfig + +或 + + gopm get github.com/Unknwon/goconfig + + +## API 文档 + +[Go Walker](http://gowalker.org/github.com/Unknwon/goconfig). + +## 示例 + +请查看 [conf.ini](testdata/conf.ini) 文件作为使用示例。 + +### 用例 + +- 函数 `LoadConfigFile` 加载一个或多个文件,然后返回一个类型为 `ConfigFile` 的变量。 +- `GetValue` 可以简单的获取某个值。 +- 像 `Bool`、`Int`、`Int64` 这样的方法会直接返回指定类型的值。 +- 以 `Must` 开头的方法不会返回错误,但当错误发生时会返回零值。 +- `SetValue` 可以设置某个值。 +- `DeleteKey` 可以删除某个键。 +- 最后,`SaveConfigFile` 可以保持您的配置到本地文件系统。 +- 使用方法 `Reload` 可以重载您的配置文件。 + +## 更多信息 + +- 所有字符都是大小写敏感的! + +## 参考信息 + +- [goconf](http://code.google.com/p/goconf/) +- [robfig/config](https://github.com/robfig/config) +- [Delete an item from a slice](https://groups.google.com/forum/?fromgroups=#!topic/golang-nuts/lYz8ftASMQ0) + +## 授权许可 + +本项目采用 Apache v2 开源授权许可证,完整的授权说明已放置在 [LICENSE](LICENSE) 文件中。 diff --git a/Godeps/_workspace/src/github.com/Unknwon/goconfig/conf.go b/Godeps/_workspace/src/github.com/Unknwon/goconfig/conf.go new file mode 100644 index 000000000..b48779a2f --- /dev/null +++ b/Godeps/_workspace/src/github.com/Unknwon/goconfig/conf.go @@ -0,0 +1,536 @@ +// Copyright 2013 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package goconfig is a fully functional and comments-support configuration file(.ini) parser. +package goconfig + +import ( + "fmt" + "regexp" + "runtime" + "strconv" + "strings" + "sync" +) + +const ( + // Default section name. + DEFAULT_SECTION = "DEFAULT" + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 200 +) + +type ParseError int + +const ( + ERR_SECTION_NOT_FOUND ParseError = iota + 1 + ERR_KEY_NOT_FOUND + ERR_BLANK_SECTION_NAME + ERR_COULD_NOT_PARSE +) + +var LineBreak = "\n" + +// Variable regexp pattern: %(variable)s +var varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +// A ConfigFile represents a INI formar configuration file. +type ConfigFile struct { + lock sync.RWMutex // Go map is not safe. + fileNames []string // Support mutil-files. + data map[string]map[string]string // Section -> key : value + + // Lists can keep sections and keys in order. + sectionList []string // Section name list. + keyList map[string][]string // Section -> Key name list + + sectionComments map[string]string // Sections comments. + keyComments map[string]map[string]string // Keys comments. + BlockMode bool // Indicates whether use lock or not. +} + +// newConfigFile creates an empty configuration representation. 
+func newConfigFile(fileNames []string) *ConfigFile { + c := new(ConfigFile) + c.fileNames = fileNames + c.data = make(map[string]map[string]string) + c.keyList = make(map[string][]string) + c.sectionComments = make(map[string]string) + c.keyComments = make(map[string]map[string]string) + c.BlockMode = true + return c +} + +// SetValue adds a new section-key-value to the configuration. +// It returns true if the key and value were inserted, +// or returns false if the value was overwritten. +// If the section does not exist in advance, it will be created. +func (c *ConfigFile) SetValue(section, key, value string) bool { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + if len(key) == 0 { + return false + } + + if c.BlockMode { + c.lock.Lock() + defer c.lock.Unlock() + } + + // Check if section exists. + if _, ok := c.data[section]; !ok { + // Execute add operation. + c.data[section] = make(map[string]string) + // Append section to list. + c.sectionList = append(c.sectionList, section) + } + + // Check if key exists. + _, ok := c.data[section][key] + c.data[section][key] = value + if !ok { + // If not exists, append to key list. + c.keyList[section] = append(c.keyList[section], key) + } + return !ok +} + +// DeleteKey deletes the key in given section. +// It returns true if the key was deleted, +// or returns false if the section or key didn't exist. +func (c *ConfigFile) DeleteKey(section, key string) bool { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + // Check if section exists. + if _, ok := c.data[section]; !ok { + return false + } + + // Check if key exists. + if _, ok := c.data[section][key]; ok { + delete(c.data[section], key) + // Remove comments of key. + c.SetKeyComments(section, key, "") + // Get index of key. 
+ i := 0 + for _, keyName := range c.keyList[section] { + if keyName == key { + break + } + i++ + } + // Remove from key list. + c.keyList[section] = append(c.keyList[section][:i], c.keyList[section][i+1:]...) + return true + } + return false +} + +// GetValue returns the value of key available in the given section. +// If the value needs to be unfolded +// (see e.g. %(google)s example in the GoConfig_test.go), +// then String does this unfolding automatically, up to +// _DEPTH_VALUES number of iterations. +// It returns an error and empty string value if the section does not exist, +// or key does not exist in DEFAULT and current sections. +func (c *ConfigFile) GetValue(section, key string) (string, error) { + if c.BlockMode { + c.lock.RLock() + defer c.lock.RUnlock() + } + + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + // Check if section exists + if _, ok := c.data[section]; !ok { + // Section does not exist. + return "", getError{ERR_SECTION_NOT_FOUND, section} + } + + // Section exists. + // Check if key exists or empty value. + value, ok := c.data[section][key] + if !ok { + // Check if it is a sub-section. + if i := strings.LastIndex(section, "."); i > -1 { + return c.GetValue(section[:i], key) + } + + // Return empty value. + return "", getError{ERR_KEY_NOT_FOUND, key} + } + + // Key exists. + var i int + for i = 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(value) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search variable in default section. + nvalue, err := c.GetValue(DEFAULT_SECTION, noption) + if err != nil && section != DEFAULT_SECTION { + // Search in the same section. + if _, ok := c.data[section][noption]; ok { + nvalue = c.data[section][noption] + } + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. 
+ value = strings.Replace(value, vr, nvalue, -1) + } + return value, nil +} + +// Bool returns bool type value. +func (c *ConfigFile) Bool(section, key string) (bool, error) { + value, err := c.GetValue(section, key) + if err != nil { + return false, err + } + return strconv.ParseBool(value) +} + +// Float64 returns float64 type value. +func (c *ConfigFile) Float64(section, key string) (float64, error) { + value, err := c.GetValue(section, key) + if err != nil { + return 0.0, err + } + return strconv.ParseFloat(value, 64) +} + +// Int returns int type value. +func (c *ConfigFile) Int(section, key string) (int, error) { + value, err := c.GetValue(section, key) + if err != nil { + return 0, err + } + return strconv.Atoi(value) +} + +// Int64 returns int64 type value. +func (c *ConfigFile) Int64(section, key string) (int64, error) { + value, err := c.GetValue(section, key) + if err != nil { + return 0, err + } + return strconv.ParseInt(value, 10, 64) +} + +// MustValue always returns value without error. +// It returns empty string if error occurs, or the default value if given. +func (c *ConfigFile) MustValue(section, key string, defaultVal ...string) string { + val, err := c.GetValue(section, key) + if len(defaultVal) > 0 && (err != nil || len(val) == 0) { + return defaultVal[0] + } + return val +} + +// MustValue always returns value without error, +// It returns empty string if error occurs, or the default value if given, +// and a bool value indicates whether default value is returned. +func (c *ConfigFile) MustValueSet(section, key string, defaultVal ...string) (string, bool) { + val, err := c.GetValue(section, key) + if len(defaultVal) > 0 && (err != nil || len(val) == 0) { + c.SetValue(section, key, defaultVal[0]) + return defaultVal[0], true + } + return val, false +} + +// MustValueRange always returns value without error, +// it returns default value if error occurs or doesn't fit into range. 
+func (c *ConfigFile) MustValueRange(section, key, defaultVal string, candidates []string) string { + val, err := c.GetValue(section, key) + if err != nil || len(val) == 0 { + return defaultVal + } + + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// MustValueArray always returns value array without error, +// it returns empty array if error occurs, split by delimiter otherwise. +func (c *ConfigFile) MustValueArray(section, key, delim string) []string { + val, err := c.GetValue(section, key) + if err != nil || len(val) == 0 { + return []string{} + } + + vals := strings.Split(val, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (c *ConfigFile) MustBool(section, key string, defaultVal ...bool) bool { + val, err := c.Bool(section, key) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (c *ConfigFile) MustFloat64(section, key string, defaultVal ...float64) float64 { + value, err := c.Float64(section, key) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return value +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (c *ConfigFile) MustInt(section, key string, defaultVal ...int) int { + value, err := c.Int(section, key) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return value +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (c *ConfigFile) MustInt64(section, key string, defaultVal ...int64) int64 { + value, err := c.Int64(section, key) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return value +} + +// GetSectionList returns the list of all sections +// in the same order in the file. 
+func (c *ConfigFile) GetSectionList() []string { + list := make([]string, len(c.sectionList)) + copy(list, c.sectionList) + return list +} + +// GetKeyList returns the list of all keys in give section +// in the same order in the file. +// It returns nil if given section does not exist. +func (c *ConfigFile) GetKeyList(section string) []string { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + // Check if section exists. + if _, ok := c.data[section]; !ok { + return nil + } + + // Non-default section has a blank key as section keeper. + offset := 1 + if section == DEFAULT_SECTION { + offset = 0 + } + + list := make([]string, len(c.keyList[section])-offset) + copy(list, c.keyList[section][offset:]) + return list +} + +// DeleteSection deletes the entire section by given name. +// It returns true if the section was deleted, and false if the section didn't exist. +func (c *ConfigFile) DeleteSection(section string) bool { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + // Check if section exists. + if _, ok := c.data[section]; !ok { + return false + } + + delete(c.data, section) + // Remove comments of section. + c.SetSectionComments(section, "") + // Get index of section. + i := 0 + for _, secName := range c.sectionList { + if secName == section { + break + } + i++ + } + // Remove from section and key list. + c.sectionList = append(c.sectionList[:i], c.sectionList[i+1:]...) + delete(c.keyList, section) + return true +} + +// GetSection returns key-value pairs in given section. +// It section does not exist, returns nil and error. +func (c *ConfigFile) GetSection(section string) (map[string]string, error) { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + // Check if section exists. + if _, ok := c.data[section]; !ok { + // Section does not exist. 
+ return nil, getError{ERR_SECTION_NOT_FOUND, section} + } + + // Remove pre-defined key. + secMap := c.data[section] + delete(c.data[section], " ") + + // Section exists. + return secMap, nil +} + +// SetSectionComments adds new section comments to the configuration. +// If comments are empty(0 length), it will remove its section comments! +// It returns true if the comments were inserted or removed, +// or returns false if the comments were overwritten. +func (c *ConfigFile) SetSectionComments(section, comments string) bool { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + if len(comments) == 0 { + if _, ok := c.sectionComments[section]; ok { + delete(c.sectionComments, section) + } + + // Not exists can be seen as remove. + return true + } + + // Check if comments exists. + _, ok := c.sectionComments[section] + if comments[0] != '#' && comments[0] != ';' { + comments = "; " + comments + } + c.sectionComments[section] = comments + return !ok +} + +// SetKeyComments adds new section-key comments to the configuration. +// If comments are empty(0 length), it will remove its section-key comments! +// It returns true if the comments were inserted or removed, +// or returns false if the comments were overwritten. +// If the section does not exist in advance, it is created. +func (c *ConfigFile) SetKeyComments(section, key, comments string) bool { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + // Check if section exists. + if _, ok := c.keyComments[section]; ok { + if len(comments) == 0 { + if _, ok := c.keyComments[section][key]; ok { + delete(c.keyComments[section], key) + } + + // Not exists can be seen as remove. + return true + } + } else { + if len(comments) == 0 { + // Not exists can be seen as remove. + return true + } else { + // Execute add operation. 
+ c.keyComments[section] = make(map[string]string) + } + } + + // Check if key exists. + _, ok := c.keyComments[section][key] + if comments[0] != '#' && comments[0] != ';' { + comments = "; " + comments + } + c.keyComments[section][key] = comments + return !ok +} + +// GetSectionComments returns the comments in the given section. +// It returns an empty string(0 length) if the comments do not exist. +func (c *ConfigFile) GetSectionComments(section string) (comments string) { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + return c.sectionComments[section] +} + +// GetKeyComments returns the comments of key in the given section. +// It returns an empty string(0 length) if the comments do not exist. +func (c *ConfigFile) GetKeyComments(section, key string) (comments string) { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + if _, ok := c.keyComments[section]; ok { + return c.keyComments[section][key] + } + return "" +} + +// getError occurs when get value in configuration file with invalid parameter. +type getError struct { + Reason ParseError + Name string +} + +// Error implements Error interface. +func (err getError) Error() string { + switch err.Reason { + case ERR_SECTION_NOT_FOUND: + return fmt.Sprintf("section '%s' not found", err.Name) + case ERR_KEY_NOT_FOUND: + return fmt.Sprintf("key '%s' not found", err.Name) + } + return "invalid get error" +} diff --git a/Godeps/_workspace/src/github.com/Unknwon/goconfig/goconfig_test.go b/Godeps/_workspace/src/github.com/Unknwon/goconfig/goconfig_test.go new file mode 100644 index 000000000..4b2740561 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Unknwon/goconfig/goconfig_test.go @@ -0,0 +1,363 @@ +// Copyright 2013 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package goconfig + +import ( + "fmt" + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestLoadConfigFile(t *testing.T) { + Convey("Load a single configuration file that does exist", t, func() { + c, err := LoadConfigFile("testdata/conf.ini") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + + Convey("Test GetSectionList", func() { + So(c.GetSectionList(), ShouldResemble, + []string{"DEFAULT", "Demo", "What's this?", "url", "parent", + "parent.child", "parent.child.child", "auto increment"}) + }) + + Convey("Get value that does exist", func() { + v, err := c.GetValue("Demo", "key2") + So(err, ShouldBeNil) + So(v, ShouldEqual, "test data") + }) + + Convey("Get value that does not exist", func() { + _, err := c.GetValue("Demo", "key4") + So(err, ShouldNotBeNil) + }) + + Convey("Get value that has empty value", func() { + _, err := c.GetValue("What's this?", "empty_value") + So(err, ShouldBeNil) + }) + + Convey("Get value that section does not exist", func() { + _, err := c.GetValue("Demo404", "key4") + So(err, ShouldNotBeNil) + }) + + Convey("Get value use parent-child feature", func() { + v, err := c.GetValue("parent.child", "sex") + So(err, ShouldBeNil) + So(v, ShouldEqual, "male") + }) + + Convey("Get value use recursive feature", func() { + v, err := c.GetValue("", "search") + So(err, ShouldBeNil) + So(v, ShouldEqual, "http://www.google.com") + + v, err = c.GetValue("url", "google_url") + So(err, ShouldBeNil) + So(v, ShouldEqual, "http://www.google.fake") + }) + + Convey("Set value that does exist", func() { + 
So(c.SetValue("Demo", "key2", "hello man!"), ShouldBeFalse) + v, err := c.GetValue("Demo", "key2") + So(err, ShouldBeNil) + So(v, ShouldEqual, "hello man!") + }) + + Convey("Set value that does not exist", func() { + So(c.SetValue("Demo", "key4", "hello girl!"), ShouldBeTrue) + v, err := c.GetValue("Demo", "key4") + So(err, ShouldBeNil) + So(v, ShouldEqual, "hello girl!") + So(c.SetValue("", "gowalker", "https://gowalker.org"), ShouldBeTrue) + }) + + Convey("Test GetKeyList", func() { + So(c.GetKeyList("Demo"), ShouldResemble, + []string{"key1", "key2", "key3", "quote", "key:1", + "key:2=key:1", "中国", "chinese-var", "array_key"}) + }) + + Convey("Delete a key", func() { + So(c.DeleteKey("", "key404"), ShouldBeFalse) + So(c.DeleteKey("Demo", "key404"), ShouldBeFalse) + So(c.DeleteKey("Demo", "中国"), ShouldBeTrue) + _, err := c.GetValue("Demo", "中国") + So(err, ShouldNotBeNil) + So(c.DeleteKey("404", "key"), ShouldBeFalse) + }) + + Convey("Delete all the keys", func() { + for _, key := range c.GetKeyList("Demo") { + So(c.DeleteKey("Demo", key), ShouldBeTrue) + } + So(c.GetKeyList("Demo"), ShouldResemble, []string{}) + So(len(c.GetKeyList("Demo")), ShouldEqual, 0) + }) + + Convey("Delete section that does not exist", func() { + So(c.DeleteSection(""), ShouldBeTrue) + So(c.DeleteSection("404"), ShouldBeFalse) + }) + + Convey("Get section that exists", func() { + _, err = c.GetSection("") + So(err, ShouldBeNil) + }) + + Convey("Get section that does not exist", func() { + _, err = c.GetSection("404") + So(err, ShouldNotBeNil) + }) + + Convey("Set section comments", func() { + So(c.SetSectionComments("", "default section comments"), ShouldBeTrue) + }) + + Convey("Get section comments", func() { + So(c.GetSectionComments(""), ShouldEqual, "") + }) + + Convey("Set key comments", func() { + So(c.SetKeyComments("", "search", "search comments"), ShouldBeTrue) + So(c.SetKeyComments("404", "search", ""), ShouldBeTrue) + }) + + Convey("Get key comments", func() { + 
So(c.GetKeyComments("", "google"), ShouldEqual, "; Google") + }) + + Convey("Delete all the sections", func() { + for _, sec := range c.GetSectionList() { + So(c.DeleteSection(sec), ShouldBeTrue) + } + So(c.GetSectionList(), ShouldResemble, []string{}) + So(len(c.GetSectionList()), ShouldEqual, 0) + }) + }) + + Convey("Load a single configuration file that does not exist", t, func() { + _, err := LoadConfigFile("testdata/conf404.ini") + So(err, ShouldNotBeNil) + }) + + Convey("Load multiple configuration files", t, func() { + c, err := LoadConfigFile("testdata/conf.ini", "testdata/conf2.ini") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + + Convey("Get value that does not exist in 1st file", func() { + v, err := c.GetValue("new section", "key1") + So(err, ShouldBeNil) + So(v, ShouldEqual, "conf.ini does not have this key") + }) + + Convey("Get value that overwrited in 2nd file", func() { + v, err := c.GetValue("Demo", "key2") + So(err, ShouldBeNil) + So(v, ShouldEqual, "rewrite this key of conf.ini") + }) + }) +} + +func TestGetKeyList(t *testing.T) { + Convey("Get key list", t, func() { + c, err := LoadConfigFile("testdata/conf.ini") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + + Convey("Get ket list that does exist", func() { + So(c.GetKeyList("Demo"), ShouldResemble, + []string{"key1", "key2", "key3", "quote", "key:1", + "key:2=key:1", "中国", "chinese-var", "array_key"}) + So(c.GetKeyList(""), ShouldResemble, []string{"google", "search"}) + }) + + Convey("Get key list that not exist", func() { + So(c.GetKeyList("404"), ShouldBeNil) + }) + }) +} + +func TestSaveConfigFile(t *testing.T) { + Convey("Save a ConfigFile to file system", t, func() { + c, err := LoadConfigFile("testdata/conf.ini", "testdata/conf2.ini") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + + c.SetValue("", "", "empty") + + So(SaveConfigFile(c, "testdata/conf_test.ini"), ShouldBeNil) + }) +} + +func TestReload(t *testing.T) { + Convey("Reload a configuration file", t, func() { + c, err := 
LoadConfigFile("testdata/conf.ini", "testdata/conf2.ini") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + + So(c.Reload(), ShouldBeNil) + }) +} + +func TestAppendFiles(t *testing.T) { + Convey("Reload a configuration file", t, func() { + c, err := LoadConfigFile("testdata/conf.ini") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + + So(c.AppendFiles("testdata/conf2.ini"), ShouldBeNil) + }) +} + +func TestTypes(t *testing.T) { + Convey("Return with types", t, func() { + c, err := LoadConfigFile("testdata/conf.ini") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + + Convey("Return bool", func() { + v, err := c.Bool("parent.child", "married") + So(err, ShouldBeNil) + So(v, ShouldBeTrue) + + _, err = c.Bool("parent.child", "died") + So(err, ShouldNotBeNil) + }) + + Convey("Return float64", func() { + v, err := c.Float64("parent", "money") + So(err, ShouldBeNil) + So(v, ShouldEqual, 1.25) + + _, err = c.Float64("parent", "balance") + So(err, ShouldNotBeNil) + }) + + Convey("Return int", func() { + v, err := c.Int("parent", "age") + So(err, ShouldBeNil) + So(v, ShouldEqual, 32) + + _, err = c.Int("parent", "children") + So(err, ShouldNotBeNil) + }) + + Convey("Return int64", func() { + v, err := c.Int64("parent", "age") + So(err, ShouldBeNil) + So(v, ShouldEqual, 32) + + _, err = c.Int64("parent", "children") + So(err, ShouldNotBeNil) + }) + }) +} + +func TestMust(t *testing.T) { + Convey("Must return with type", t, func() { + c, err := LoadConfigFile("testdata/conf.ini") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + + Convey("Return string", func() { + So(c.MustValue("parent.child", "name"), ShouldEqual, "john") + So(c.MustValue("parent.child", "died"), ShouldEqual, "") + So(c.MustValue("parent.child", "died", "no"), ShouldEqual, "no") + }) + + Convey("Return string and bool", func() { + val, ok := c.MustValueSet("parent.child", "died") + So(val, ShouldEqual, "") + So(ok, ShouldBeFalse) + val, ok = c.MustValueSet("parent.child", "died", "no") + So(val, ShouldEqual, 
"no") + So(ok, ShouldBeTrue) + }) + + Convey("Return bool", func() { + So(c.MustBool("parent.child", "married"), ShouldBeTrue) + So(c.MustBool("parent.child", "died"), ShouldBeFalse) + So(c.MustBool("parent.child", "died", true), ShouldBeTrue) + }) + + Convey("Return float64", func() { + So(c.MustFloat64("parent", "money"), ShouldEqual, 1.25) + So(c.MustFloat64("parent", "balance"), ShouldEqual, 0.0) + So(c.MustFloat64("parent", "balance", 1.25), ShouldEqual, 1.25) + }) + + Convey("Return int", func() { + So(c.MustInt("parent", "age"), ShouldEqual, 32) + So(c.MustInt("parent", "children"), ShouldEqual, 0) + So(c.MustInt("parent", "children", 3), ShouldEqual, 3) + }) + + Convey("Return int64", func() { + So(c.MustInt64("parent", "age"), ShouldEqual, 32) + So(c.MustInt64("parent", "children"), ShouldEqual, 0) + So(c.MustInt64("parent", "children", 3), ShouldEqual, 3) + }) + }) +} + +func TestRange(t *testing.T) { + Convey("Must return with range", t, func() { + c, err := LoadConfigFile("testdata/conf.ini") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + + So(c.MustValueRange("What's this?", "name", "joe", []string{"hello"}), ShouldEqual, "joe") + So(c.MustValueRange("What's this?", "name404", "joe", []string{"hello"}), ShouldEqual, "joe") + So(c.MustValueRange("What's this?", "name", "joe", []string{"hello", "try one more value ^-^"}), + ShouldEqual, "try one more value ^-^") + }) +} + +func TestArray(t *testing.T) { + Convey("Must return with string array", t, func() { + c, err := LoadConfigFile("testdata/conf.ini") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + + So(fmt.Sprintf("%s", c.MustValueArray("Demo", "array_key", ",")), ShouldEqual, "[1 2 3 4 5]") + So(fmt.Sprintf("%s", c.MustValueArray("Demo", "array_key404", ",")), ShouldEqual, "[]") + }) +} + +func TestLoadFromData(t *testing.T) { + Convey("Load config file from data", t, func() { + c, err := LoadFromData([]byte("")) + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + }) +} + +func Benchmark_GetValue(b 
*testing.B) { + c, _ := LoadConfigFile("testdata/conf.ini") + c.BlockMode = false + for i := 0; i < b.N; i++ { + c.GetValue("parent", "money") + } +} + +func Benchmark_SetValue(b *testing.B) { + c, _ := LoadConfigFile("testdata/conf.ini") + for i := 0; i < b.N; i++ { + c.SetValue("parent", "money", "10") + } +} diff --git a/Godeps/_workspace/src/github.com/Unknwon/goconfig/read.go b/Godeps/_workspace/src/github.com/Unknwon/goconfig/read.go new file mode 100644 index 000000000..ac7892909 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Unknwon/goconfig/read.go @@ -0,0 +1,259 @@ +// Copyright 2013 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package goconfig + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "strings" + "time" +) + +// Read reads an io.Reader and returns a configuration representation. +// This representation can be queried with GetValue. +func (c *ConfigFile) read(reader io.Reader) (err error) { + buf := bufio.NewReader(reader) + + // Handle BOM-UTF8. + // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding + mask, err := buf.Peek(3) + if err == nil && len(mask) >= 3 && + mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + buf.Read(mask) + } + + count := 1 // Counter for auto increment. + // Current section name. 
+ section := DEFAULT_SECTION + var comments string + // Parse line-by-line + for { + line, err := buf.ReadString('\n') + line = strings.TrimSpace(line) + lineLengh := len(line) //[SWH|+] + if err != nil { + if err != io.EOF { + return err + } + + // Reached end of file, if nothing to read then break, + // otherwise handle the last line. + if lineLengh == 0 { + break + } + } + + // switch written for readability (not performance) + switch { + case lineLengh == 0: // Empty line + continue + case line[0] == '#' || line[0] == ';': // Comment + // Append comments + if len(comments) == 0 { + comments = line + } else { + comments += LineBreak + line + } + continue + case line[0] == '[' && line[lineLengh-1] == ']': // New sction. + // Get section name. + section = strings.TrimSpace(line[1 : lineLengh-1]) + // Set section comments and empty if it has comments. + if len(comments) > 0 { + c.SetSectionComments(section, comments) + comments = "" + } + // Make section exist even though it does not have any key. + c.SetValue(section, " ", " ") + // Reset counter. 
+ count = 1 + continue + case section == "": // No section defined so far + return readError{ERR_BLANK_SECTION_NAME, line} + default: // Other alternatives + var ( + i int + keyQuote string + key string + valQuote string + value string + ) + //[SWH|+]:支持引号包围起来的字串 + if line[0] == '"' { + if lineLengh >= 6 && line[0:3] == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + if keyQuote != "" { + qLen := len(keyQuote) + pos := strings.Index(line[qLen:], keyQuote) + if pos == -1 { + return readError{ERR_COULD_NOT_PARSE, line} + } + pos = pos + qLen + i = strings.IndexAny(line[pos:], "=:") + if i <= 0 { + return readError{ERR_COULD_NOT_PARSE, line} + } + i = i + pos + key = line[qLen:pos] //保留引号内的两端的空格 + } else { + i = strings.IndexAny(line, "=:") + if i <= 0 { + return readError{ERR_COULD_NOT_PARSE, line} + } + key = strings.TrimSpace(line[0:i]) + } + //[SWH|+]; + + // Check if it needs auto increment. + if key == "-" { + key = "#" + fmt.Sprint(count) + count++ + } + + //[SWH|+]:支持引号包围起来的字串 + lineRight := strings.TrimSpace(line[i+1:]) + lineRightLength := len(lineRight) + firstChar := "" + if lineRightLength >= 2 { + firstChar = lineRight[0:1] + } + if firstChar == "`" { + valQuote = "`" + } else if lineRightLength >= 6 && lineRight[0:3] == `"""` { + valQuote = `"""` + } + if valQuote != "" { + qLen := len(valQuote) + pos := strings.LastIndex(lineRight[qLen:], valQuote) + if pos == -1 { + return readError{ERR_COULD_NOT_PARSE, line} + } + pos = pos + qLen + value = lineRight[qLen:pos] + } else { + value = strings.TrimSpace(lineRight[0:]) + } + //[SWH|+]; + + c.SetValue(section, key, value) + // Set key comments and empty if it has comments. + if len(comments) > 0 { + c.SetKeyComments(section, key, comments) + comments = "" + } + } + + // Reached end of file. 
+ if err == io.EOF { + break + } + } + return nil +} + +// LoadFromData accepts raw data directly from memory +// and returns a new configuration representation. +func LoadFromData(data []byte) (c *ConfigFile, err error) { + // Save memory data to temporary file to support further operations. + tmpName := path.Join(os.TempDir(), "goconfig", fmt.Sprintf("%d", time.Now().Nanosecond())) + os.MkdirAll(path.Dir(tmpName), os.ModePerm) + if err = ioutil.WriteFile(tmpName, data, 0655); err != nil { + return nil, err + } + + c = newConfigFile([]string{tmpName}) + err = c.read(bytes.NewBuffer(data)) + return c, err +} + +func (c *ConfigFile) loadFile(fileName string) (err error) { + f, err := os.Open(fileName) + if err != nil { + return err + } + defer f.Close() + + return c.read(f) +} + +// LoadConfigFile reads a file and returns a new configuration representation. +// This representation can be queried with GetValue. +func LoadConfigFile(fileName string, moreFiles ...string) (c *ConfigFile, err error) { + // Append files' name together. + fileNames := make([]string, 1, len(moreFiles)+1) + fileNames[0] = fileName + if len(moreFiles) > 0 { + fileNames = append(fileNames, moreFiles...) + } + + c = newConfigFile(fileNames) + + for _, name := range fileNames { + if err = c.loadFile(name); err != nil { + return nil, err + } + } + + return c, nil +} + +// Reload reloads configuration file in case it has changes. +func (c *ConfigFile) Reload() (err error) { + var cfg *ConfigFile + if len(c.fileNames) == 1 { + cfg, err = LoadConfigFile(c.fileNames[0]) + } else { + cfg, err = LoadConfigFile(c.fileNames[0], c.fileNames[1:]...) + } + + if err == nil { + *c = *cfg + } + return err +} + +// AppendFiles appends more files to ConfigFile and reload automatically. +func (c *ConfigFile) AppendFiles(files ...string) error { + c.fileNames = append(c.fileNames, files...) + return c.Reload() +} + +// readError occurs when read configuration file with wrong format. 
+type readError struct { + Reason ParseError + Content string // Line content +} + +// Error implement Error interface. +func (err readError) Error() string { + switch err.Reason { + case ERR_BLANK_SECTION_NAME: + return "empty section name not allowed" + case ERR_COULD_NOT_PARSE: + return fmt.Sprintf("could not parse line: %s", string(err.Content)) + } + return "invalid read error" +} diff --git a/Godeps/_workspace/src/github.com/Unknwon/goconfig/testdata/conf.ini b/Godeps/_workspace/src/github.com/Unknwon/goconfig/testdata/conf.ini new file mode 100644 index 000000000..65ab1da7c --- /dev/null +++ b/Godeps/_workspace/src/github.com/Unknwon/goconfig/testdata/conf.ini @@ -0,0 +1,46 @@ +; Google +google = www.google.com +search = http://%(google)s + +; Here are Comments +; Second line +[Demo] +# This symbol can also make this line to be comments +key1 = Let's us goconfig!!! +key2 = test data +key3 = this is based on key2:%(key2)s +quote = "special case for quote +"key:1" = This is the value of "key:1" +"key:2=key:1" = this is based on "key:2=key:1" => %(key:1)s +中国 = China +chinese-var = hello %(中国)s! +array_key = 1,2,3,4,5 + +[What's this?] +; Not Enough Comments!! 
+name = try one more value ^-^ +empty_value = + +[url] +google_fake = www.google.fake +google_url = http://%(google_fake)s + +[parent] +name = john +relation = father +sex = male +age = 32 +money = 1.25 + +[parent.child] +age = 3 +married = true + +[parent.child.child] + +; Auto increment by setting key to "-" +[auto increment] +- = hello +- = go +- = config + diff --git a/Godeps/_workspace/src/github.com/Unknwon/goconfig/testdata/conf2.ini b/Godeps/_workspace/src/github.com/Unknwon/goconfig/testdata/conf2.ini new file mode 100644 index 000000000..d5635bb6b --- /dev/null +++ b/Godeps/_workspace/src/github.com/Unknwon/goconfig/testdata/conf2.ini @@ -0,0 +1,37 @@ +; Google +google = www.google.com +search = http://%(google)s + +; Here are Comments +; Second line +[Demo] +# This symbol can also make this line to be comments +key1 = Let's us goconfig!!! +key2 = rewrite this key of conf.ini +key3 = this is based on key2:%(key2)s +"key:1" = This is the value of "key:1" +"""key:2"""="""this is based on "key:1" => `%(key:1)s`""" + +[What's this?] +; Not Enough Comments!! +name = try one more value ^-^ + +[parent] +name = john +relation = father +sex = male +age = 32 + +[parent.child] +age = 3 + +[parent.child.child] + +; Auto increment by setting key to "-" +[auto increment] +- = hello +- = go +- = config + +[new section] +key1 = conf.ini does not have this key \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/Unknwon/goconfig/testdata/conf_test.ini b/Godeps/_workspace/src/github.com/Unknwon/goconfig/testdata/conf_test.ini new file mode 100644 index 000000000..769d65619 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Unknwon/goconfig/testdata/conf_test.ini @@ -0,0 +1,50 @@ +; Google +google = www.google.com +search = http://%(google)s + +; Here are Comments +; Second line +[Demo] +# This symbol can also make this line to be comments +key1 = Let's us goconfig!!! 
+key2 = rewrite this key of conf.ini +key3 = this is based on key2:%(key2)s +quote = "special case for quote +`key:1` = This is the value of "key:1" +`key:2=key:1` = this is based on "key:2=key:1" => %(key:1)s +中国 = China +chinese-var = hello %(中国)s! +array_key = 1,2,3,4,5 +`key:2` = """this is based on "key:1" => `%(key:1)s`""" + +[What's this?] +; Not Enough Comments!! +name = try one more value ^-^ +empty_value = + +[url] +google_fake = www.google.fake +google_url = http://%(google_fake)s + +[parent] +name = john +relation = father +sex = male +age = 32 +money = 1.25 + +[parent.child] +age = 3 +married = true + +[parent.child.child] + +; Auto increment by setting key to "-" +[auto increment] +- = hello +- = go +- = config + +[new section] +key1 = conf.ini does not have this key + diff --git a/Godeps/_workspace/src/github.com/Unknwon/goconfig/write.go b/Godeps/_workspace/src/github.com/Unknwon/goconfig/write.go new file mode 100644 index 000000000..385d51647 --- /dev/null +++ b/Godeps/_workspace/src/github.com/Unknwon/goconfig/write.go @@ -0,0 +1,108 @@ +// Copyright 2013 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package goconfig + +import ( + "bytes" + "os" + "strings" +) + +// Write spaces around "=" to look better. +var PrettyFormat = true + +// SaveConfigFile writes configuration file to local file system +func SaveConfigFile(c *ConfigFile, filename string) (err error) { + // Write configuration file by filename. 
+ var f *os.File + if f, err = os.Create(filename); err != nil { + return err + } + + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + buf := bytes.NewBuffer(nil) + for _, section := range c.sectionList { + // Write section comments. + if len(c.GetSectionComments(section)) > 0 { + if _, err = buf.WriteString(c.GetSectionComments(section) + LineBreak); err != nil { + return err + } + } + + if section != DEFAULT_SECTION { + // Write section name. + if _, err = buf.WriteString("[" + section + "]" + LineBreak); err != nil { + return err + } + } + + for _, key := range c.keyList[section] { + if key != " " { + // Write key comments. + if len(c.GetKeyComments(section, key)) > 0 { + if _, err = buf.WriteString(c.GetKeyComments(section, key) + LineBreak); err != nil { + return err + } + } + + keyName := key + // Check if it's auto increment. + if keyName[0] == '#' { + keyName = "-" + } + //[SWH|+]:支持键名包含等号和冒号 + if strings.Contains(keyName, `=`) || strings.Contains(keyName, `:`) { + if strings.Contains(keyName, "`") { + if strings.Contains(keyName, `"`) { + keyName = `"""` + keyName + `"""` + } else { + keyName = `"` + keyName + `"` + } + } else { + keyName = "`" + keyName + "`" + } + } + value := c.data[section][key] + // In case key value contains "`" or "\"". + if strings.Contains(value, "`") { + if strings.Contains(value, `"`) { + value = `"""` + value + `"""` + } else { + value = `"` + value + `"` + } + } + + // Write key and value. + if _, err = buf.WriteString(keyName + equalSign + value + LineBreak); err != nil { + return err + } + } + } + + // Put a line between sections. 
+ if _, err = buf.WriteString(LineBreak); err != nil { + return err + } + } + + if _, err = buf.WriteTo(f); err != nil { + return err + } + return f.Close() +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/.gitignore b/Godeps/_workspace/src/github.com/astaxie/beego/.gitignore new file mode 100644 index 000000000..9806457b9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/.gitignore @@ -0,0 +1,5 @@ +.idea +.DS_Store +*.swp +*.swo +beego.iml diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/.travis.yml b/Godeps/_workspace/src/github.com/astaxie/beego/.travis.yml new file mode 100644 index 000000000..c59cef619 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/.travis.yml @@ -0,0 +1,30 @@ +language: go + +go: + - 1.5.1 + +services: + - redis-server + - mysql + - postgresql + - memcached +env: + - ORM_DRIVER=sqlite3 ORM_SOURCE=$TRAVIS_BUILD_DIR/orm_test.db + - ORM_DRIVER=mysql ORM_SOURCE="root:@/orm_test?charset=utf8" + - ORM_DRIVER=postgres ORM_SOURCE="user=postgres dbname=orm_test sslmode=disable" +install: + - go get github.com/lib/pq + - go get github.com/go-sql-driver/mysql + - go get github.com/mattn/go-sqlite3 + - go get github.com/bradfitz/gomemcache/memcache + - go get github.com/garyburd/redigo/redis + - go get github.com/beego/x2j + - go get github.com/beego/goyaml2 + - go get github.com/belogik/goes + - go get github.com/couchbase/go-couchbase + - go get github.com/siddontang/ledisdb/config + - go get github.com/siddontang/ledisdb/ledis +before_script: + - sh -c "if [ '$ORM_DRIVER' = 'postgres' ]; then psql -c 'create database orm_test;' -U postgres; fi" + - sh -c "if [ '$ORM_DRIVER' = 'mysql' ]; then mysql -u root -e 'create database orm_test;'; fi" + - sh -c "if [ '$ORM_DRIVER' = 'sqlite' ]; then touch $TRAVIS_BUILD_DIR/orm_test.db; fi" diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/astaxie/beego/CONTRIBUTING.md new file mode 100644 
index 000000000..9d5116165 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/CONTRIBUTING.md @@ -0,0 +1,52 @@ +# Contributing to beego + +beego is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +Here are instructions to get you started. They are probably not perfect, +please let us know if anything feels wrong or incomplete. + +## Contribution guidelines + +### Pull requests + +First of all. beego follow the gitflow. So please send you pull request +to **develop** branch. We will close the pull request to master branch. + +We are always happy to receive pull requests, and do our best to +review them as fast as possible. Not sure if that typo is worth a pull +request? Do it! We will appreciate it. + +If your pull request is not accepted on the first try, don't be +discouraged! Sometimes we can make a mistake, please do more explaining +for us. We will appreciate it. + +We're trying very hard to keep beego simple and fast. We don't want it +to do everything for everybody. This means that we might decide against +incorporating a new feature. But we will give you some advice on how to +do it in other way. + +### Create issues + +Any significant improvement should be documented as [a GitHub +issue](https://github.com/astaxie/beego/issues) before anybody +starts working on it. + +Also when filing an issue, make sure to answer these five questions: + +- What version of beego are you using (bee version)? +- What operating system and processor architecture are you using? +- What did you do? +- What did you expect to see? +- What did you see instead? + +### but check existing issues and docs first! + +Please take a moment to check that an issue doesn't already exist +documenting your bug report or improvement proposal. If it does, it +never hurts to add a quick "+1" or "I have this problem too". This will +help prioritize the most common problems and requests. + +Also if you don't know how to use it. 
please make sure you have read though +the docs in http://beego.me/docs \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/LICENSE b/Godeps/_workspace/src/github.com/astaxie/beego/LICENSE new file mode 100644 index 000000000..5dbd42435 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/LICENSE @@ -0,0 +1,13 @@ +Copyright 2014 astaxie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/README.md b/Godeps/_workspace/src/github.com/astaxie/beego/README.md new file mode 100644 index 000000000..fec6113f9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/README.md @@ -0,0 +1,60 @@ +## Beego + +[![Build Status](https://travis-ci.org/astaxie/beego.svg?branch=master)](https://travis-ci.org/astaxie/beego) +[![GoDoc](http://godoc.org/github.com/astaxie/beego?status.svg)](http://godoc.org/github.com/astaxie/beego) + +beego is used for rapid development of RESTful APIs, web apps and backend services in Go. +It is inspired by Tornado, Sinatra and Flask. beego has some Go-specific features such as interfaces and struct embedding. 
+ +More info [beego.me](http://beego.me) + +##Quick Start +######Download and install + + go get github.com/astaxie/beego + +######Create file `hello.go` +```go +package main + +import "github.com/astaxie/beego" + +func main(){ + beego.Run() +} +``` +######Build and run +```bash + go build hello.go + ./hello +``` +######Congratulations! +You just built your first beego app. +Open your browser and visit `http://localhost:8000`. +Please see [Documentation](http://beego.me/docs) for more. + +## Features + +* RESTful support +* MVC architecture +* Modularity +* Auto API documents +* Annotation router +* Namespace +* Powerful development tools +* Full stack for Web & API + +## Documentation + +* [English](http://beego.me/docs/intro/) +* [中文文档](http://beego.me/docs/intro/) +* [Русский](http://beego.me/docs/intro/) + +## Community + +* [http://beego.me/community](http://beego.me/community) + +## LICENSE + +beego source code is licensed under the Apache Licence, Version 2.0 +(http://www.apache.org/licenses/LICENSE-2.0.html). diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/admin.go b/Godeps/_workspace/src/github.com/astaxie/beego/admin.go new file mode 100644 index 000000000..3effc5824 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/admin.go @@ -0,0 +1,424 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package beego + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "os" + "text/template" + "time" + + "github.com/astaxie/beego/grace" + "github.com/astaxie/beego/toolbox" + "github.com/astaxie/beego/utils" +) + +// BeeAdminApp is the default adminApp used by admin module. +var beeAdminApp *adminApp + +// FilterMonitorFunc is default monitor filter when admin module is enable. +// if this func returns, admin module records qbs for this request by condition of this function logic. +// usage: +// func MyFilterMonitor(method, requestPath string, t time.Duration) bool { +// if method == "POST" { +// return false +// } +// if t.Nanoseconds() < 100 { +// return false +// } +// if strings.HasPrefix(requestPath, "/astaxie") { +// return false +// } +// return true +// } +// beego.FilterMonitorFunc = MyFilterMonitor. +var FilterMonitorFunc func(string, string, time.Duration) bool + +func init() { + beeAdminApp = &adminApp{ + routers: make(map[string]http.HandlerFunc), + } + beeAdminApp.Route("/", adminIndex) + beeAdminApp.Route("/qps", qpsIndex) + beeAdminApp.Route("/prof", profIndex) + beeAdminApp.Route("/healthcheck", healthcheck) + beeAdminApp.Route("/task", taskStatus) + beeAdminApp.Route("/listconf", listConf) + FilterMonitorFunc = func(string, string, time.Duration) bool { return true } +} + +// AdminIndex is the default http.Handler for admin module. +// it matches url pattern "/". +func adminIndex(rw http.ResponseWriter, r *http.Request) { + execTpl(rw, map[interface{}]interface{}{}, indexTpl, defaultScriptsTpl) +} + +// QpsIndex is the http.Handler for writing qbs statistics map result info in http.ResponseWriter. +// it's registered with url pattern "/qbs" in admin module. 
+func qpsIndex(rw http.ResponseWriter, r *http.Request) { + data := make(map[interface{}]interface{}) + data["Content"] = toolbox.StatisticsMap.GetMap() + execTpl(rw, data, qpsTpl, defaultScriptsTpl) +} + +// ListConf is the http.Handler of displaying all beego configuration values as key/value pair. +// it's registered with url pattern "/listconf" in admin module. +func listConf(rw http.ResponseWriter, r *http.Request) { + r.ParseForm() + command := r.Form.Get("command") + if command == "" { + rw.Write([]byte("command not support")) + return + } + + data := make(map[interface{}]interface{}) + switch command { + case "conf": + m := make(map[string]interface{}) + m["AppConfigPath"] = AppConfigPath + m["AppConfigProvider"] = AppConfigProvider + m["BConfig.AppName"] = BConfig.AppName + m["BConfig.RunMode"] = BConfig.RunMode + m["BConfig.RouterCaseSensitive"] = BConfig.RouterCaseSensitive + m["BConfig.ServerName"] = BConfig.ServerName + m["BConfig.RecoverPanic"] = BConfig.RecoverPanic + m["BConfig.CopyRequestBody"] = BConfig.CopyRequestBody + m["BConfig.EnableGzip"] = BConfig.EnableGzip + m["BConfig.MaxMemory"] = BConfig.MaxMemory + m["BConfig.EnableErrorsShow"] = BConfig.EnableErrorsShow + m["BConfig.Listen.Graceful"] = BConfig.Listen.Graceful + m["BConfig.Listen.ServerTimeOut"] = BConfig.Listen.ServerTimeOut + m["BConfig.Listen.ListenTCP4"] = BConfig.Listen.ListenTCP4 + m["BConfig.Listen.EnableHTTP"] = BConfig.Listen.EnableHTTP + m["BConfig.Listen.HTTPAddr"] = BConfig.Listen.HTTPAddr + m["BConfig.Listen.HTTPPort"] = BConfig.Listen.HTTPPort + m["BConfig.Listen.EnableHTTPS"] = BConfig.Listen.EnableHTTPS + m["BConfig.Listen.HTTPSAddr"] = BConfig.Listen.HTTPSAddr + m["BConfig.Listen.HTTPSPort"] = BConfig.Listen.HTTPSPort + m["BConfig.Listen.HTTPSCertFile"] = BConfig.Listen.HTTPSCertFile + m["BConfig.Listen.HTTPSKeyFile"] = BConfig.Listen.HTTPSKeyFile + m["BConfig.Listen.EnableAdmin"] = BConfig.Listen.EnableAdmin + m["BConfig.Listen.AdminAddr"] = BConfig.Listen.AdminAddr 
+ m["BConfig.Listen.AdminPort"] = BConfig.Listen.AdminPort + m["BConfig.Listen.EnableFcgi"] = BConfig.Listen.EnableFcgi + m["BConfig.Listen.EnableStdIo"] = BConfig.Listen.EnableStdIo + m["BConfig.WebConfig.AutoRender"] = BConfig.WebConfig.AutoRender + m["BConfig.WebConfig.EnableDocs"] = BConfig.WebConfig.EnableDocs + m["BConfig.WebConfig.FlashName"] = BConfig.WebConfig.FlashName + m["BConfig.WebConfig.FlashSeparator"] = BConfig.WebConfig.FlashSeparator + m["BConfig.WebConfig.DirectoryIndex"] = BConfig.WebConfig.DirectoryIndex + m["BConfig.WebConfig.StaticDir"] = BConfig.WebConfig.StaticDir + m["BConfig.WebConfig.StaticExtensionsToGzip"] = BConfig.WebConfig.StaticExtensionsToGzip + m["BConfig.WebConfig.TemplateLeft"] = BConfig.WebConfig.TemplateLeft + m["BConfig.WebConfig.TemplateRight"] = BConfig.WebConfig.TemplateRight + m["BConfig.WebConfig.ViewsPath"] = BConfig.WebConfig.ViewsPath + m["BConfig.WebConfig.EnableXSRF"] = BConfig.WebConfig.EnableXSRF + m["BConfig.WebConfig.XSRFKEY"] = BConfig.WebConfig.XSRFKey + m["BConfig.WebConfig.XSRFExpire"] = BConfig.WebConfig.XSRFExpire + m["BConfig.WebConfig.Session.SessionOn"] = BConfig.WebConfig.Session.SessionOn + m["BConfig.WebConfig.Session.SessionProvider"] = BConfig.WebConfig.Session.SessionProvider + m["BConfig.WebConfig.Session.SessionName"] = BConfig.WebConfig.Session.SessionName + m["BConfig.WebConfig.Session.SessionGCMaxLifetime"] = BConfig.WebConfig.Session.SessionGCMaxLifetime + m["BConfig.WebConfig.Session.SessionProviderConfig"] = BConfig.WebConfig.Session.SessionProviderConfig + m["BConfig.WebConfig.Session.SessionCookieLifeTime"] = BConfig.WebConfig.Session.SessionCookieLifeTime + m["BConfig.WebConfig.Session.SessionAutoSetCookie"] = BConfig.WebConfig.Session.SessionAutoSetCookie + m["BConfig.WebConfig.Session.SessionDomain"] = BConfig.WebConfig.Session.SessionDomain + m["BConfig.Log.AccessLogs"] = BConfig.Log.AccessLogs + m["BConfig.Log.FileLineNum"] = BConfig.Log.FileLineNum + m["BConfig.Log.Outputs"] = 
BConfig.Log.Outputs + tmpl := template.Must(template.New("dashboard").Parse(dashboardTpl)) + tmpl = template.Must(tmpl.Parse(configTpl)) + tmpl = template.Must(tmpl.Parse(defaultScriptsTpl)) + + data["Content"] = m + + tmpl.Execute(rw, data) + + case "router": + var ( + content = map[string]interface{}{ + "Fields": []string{ + "Router Pattern", + "Methods", + "Controller", + }, + } + methods = []string{} + methodsData = make(map[string]interface{}) + ) + for method, t := range BeeApp.Handlers.routers { + + resultList := new([][]string) + + printTree(resultList, t) + + methods = append(methods, method) + methodsData[method] = resultList + } + + content["Data"] = methodsData + content["Methods"] = methods + data["Content"] = content + data["Title"] = "Routers" + execTpl(rw, data, routerAndFilterTpl, defaultScriptsTpl) + case "filter": + var ( + content = map[string]interface{}{ + "Fields": []string{ + "Router Pattern", + "Filter Function", + }, + } + filterTypes = []string{} + filterTypeData = make(map[string]interface{}) + ) + + if BeeApp.Handlers.enableFilter { + var filterType string + for k, fr := range map[int]string{ + BeforeStatic: "Before Static", + BeforeRouter: "Before Router", + BeforeExec: "Before Exec", + AfterExec: "After Exec", + FinishRouter: "Finish Router"} { + if bf, ok := BeeApp.Handlers.filters[k]; ok { + filterType = fr + filterTypes = append(filterTypes, filterType) + resultList := new([][]string) + for _, f := range bf { + var result = []string{ + fmt.Sprintf("%s", f.pattern), + fmt.Sprintf("%s", utils.GetFuncName(f.filterFunc)), + } + *resultList = append(*resultList, result) + } + filterTypeData[filterType] = resultList + } + } + } + + content["Data"] = filterTypeData + content["Methods"] = filterTypes + + data["Content"] = content + data["Title"] = "Filters" + execTpl(rw, data, routerAndFilterTpl, defaultScriptsTpl) + default: + rw.Write([]byte("command not support")) + } +} + +func printTree(resultList *[][]string, t *Tree) { + for _, tr 
:= range t.fixrouters { + printTree(resultList, tr) + } + if t.wildcard != nil { + printTree(resultList, t.wildcard) + } + for _, l := range t.leaves { + if v, ok := l.runObject.(*controllerInfo); ok { + if v.routerType == routerTypeBeego { + var result = []string{ + v.pattern, + fmt.Sprintf("%s", v.methods), + fmt.Sprintf("%s", v.controllerType), + } + *resultList = append(*resultList, result) + } else if v.routerType == routerTypeRESTFul { + var result = []string{ + v.pattern, + fmt.Sprintf("%s", v.methods), + "", + } + *resultList = append(*resultList, result) + } else if v.routerType == routerTypeHandler { + var result = []string{ + v.pattern, + "", + "", + } + *resultList = append(*resultList, result) + } + } + } +} + +// ProfIndex is a http.Handler for showing profile command. +// it's in url pattern "/prof" in admin module. +func profIndex(rw http.ResponseWriter, r *http.Request) { + r.ParseForm() + command := r.Form.Get("command") + if command == "" { + return + } + + var ( + format = r.Form.Get("format") + data = make(map[interface{}]interface{}) + result bytes.Buffer + ) + toolbox.ProcessInput(command, &result) + data["Content"] = result.String() + + if format == "json" && command == "gc summary" { + dataJSON, err := json.Marshal(data) + if err != nil { + http.Error(rw, err.Error(), http.StatusInternalServerError) + return + } + + rw.Header().Set("Content-Type", "application/json") + rw.Write(dataJSON) + return + } + + data["Title"] = command + defaultTpl := defaultScriptsTpl + if command == "gc summary" { + defaultTpl = gcAjaxTpl + } + execTpl(rw, data, profillingTpl, defaultTpl) +} + +// Healthcheck is a http.Handler calling health checking and showing the result. +// it's in "/healthcheck" pattern in admin module. 
+func healthcheck(rw http.ResponseWriter, req *http.Request) { + var ( + data = make(map[interface{}]interface{}) + result = []string{} + resultList = new([][]string) + content = map[string]interface{}{ + "Fields": []string{"Name", "Message", "Status"}, + } + ) + + for name, h := range toolbox.AdminCheckList { + if err := h.Check(); err != nil { + result = []string{ + fmt.Sprintf("error"), + fmt.Sprintf("%s", name), + fmt.Sprintf("%s", err.Error()), + } + + } else { + result = []string{ + fmt.Sprintf("success"), + fmt.Sprintf("%s", name), + fmt.Sprintf("OK"), + } + + } + *resultList = append(*resultList, result) + } + content["Data"] = resultList + data["Content"] = content + data["Title"] = "Health Check" + execTpl(rw, data, healthCheckTpl, defaultScriptsTpl) +} + +// TaskStatus is a http.Handler with running task status (task name, status and the last execution). +// it's in "/task" pattern in admin module. +func taskStatus(rw http.ResponseWriter, req *http.Request) { + data := make(map[interface{}]interface{}) + + // Run Task + req.ParseForm() + taskname := req.Form.Get("taskname") + if taskname != "" { + if t, ok := toolbox.AdminTaskList[taskname]; ok { + if err := t.Run(); err != nil { + data["Message"] = []string{"error", fmt.Sprintf("%s", err)} + } + data["Message"] = []string{"success", fmt.Sprintf("%s run success,Now the Status is
%s", taskname, t.GetStatus())} + } else { + data["Message"] = []string{"warning", fmt.Sprintf("there's no task which named: %s", taskname)} + } + } + + // List Tasks + content := make(map[string]interface{}) + resultList := new([][]string) + var result = []string{} + var fields = []string{ + "Task Name", + "Task Spec", + "Task Status", + "Last Time", + "", + } + for tname, tk := range toolbox.AdminTaskList { + result = []string{ + tname, + fmt.Sprintf("%s", tk.GetSpec()), + fmt.Sprintf("%s", tk.GetStatus()), + tk.GetPrev().String(), + } + *resultList = append(*resultList, result) + } + + content["Fields"] = fields + content["Data"] = resultList + data["Content"] = content + data["Title"] = "Tasks" + execTpl(rw, data, tasksTpl, defaultScriptsTpl) +} + +func execTpl(rw http.ResponseWriter, data map[interface{}]interface{}, tpls ...string) { + tmpl := template.Must(template.New("dashboard").Parse(dashboardTpl)) + for _, tpl := range tpls { + tmpl = template.Must(tmpl.Parse(tpl)) + } + tmpl.Execute(rw, data) +} + +// adminApp is an http.HandlerFunc map used as beeAdminApp. +type adminApp struct { + routers map[string]http.HandlerFunc +} + +// Route adds http.HandlerFunc to adminApp with url pattern. +func (admin *adminApp) Route(pattern string, f http.HandlerFunc) { + admin.routers[pattern] = f +} + +// Run adminApp http server. +// Its addr is defined in configuration file as adminhttpaddr and adminhttpport. 
+func (admin *adminApp) Run() { + if len(toolbox.AdminTaskList) > 0 { + toolbox.StartTask() + } + addr := BConfig.Listen.AdminAddr + + if BConfig.Listen.AdminPort != 0 { + addr = fmt.Sprintf("%s:%d", BConfig.Listen.AdminAddr, BConfig.Listen.AdminPort) + } + for p, f := range admin.routers { + http.Handle(p, f) + } + BeeLogger.Info("Admin server Running on %s", addr) + + var err error + if BConfig.Listen.Graceful { + err = grace.ListenAndServe(addr, nil) + } else { + err = http.ListenAndServe(addr, nil) + } + if err != nil { + BeeLogger.Critical("Admin ListenAndServe: ", err, fmt.Sprintf("%d", os.Getpid())) + } +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/adminui.go b/Godeps/_workspace/src/github.com/astaxie/beego/adminui.go new file mode 100644 index 000000000..7bb32b349 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/adminui.go @@ -0,0 +1,355 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package beego + +var indexTpl = ` +{{define "content"}} +

Beego Admin Dashboard

+

+For detail usage please check our document: +

+

+Toolbox +

+

+Live Monitor +

+{{.Content}} +{{end}}` + +var profillingTpl = ` +{{define "content"}} +

{{.Title}}

+
+
{{.Content}}
+
+{{end}}` + +var defaultScriptsTpl = `` + +var gcAjaxTpl = ` +{{define "scripts"}} + +{{end}} +` + +var qpsTpl = `{{define "content"}} +

Requests statistics

+ + + + {{range .Content.Fields}} + + {{end}} + + + + + {{range $i, $elem := .Content.Data}} + + + {{range $elem}} + + {{end}} + + + {{end}} + + +
+ {{.}} +
+ {{.}} +
+{{end}}` + +var configTpl = ` +{{define "content"}} +

Configurations

+
+{{range $index, $elem := .Content}}
+{{$index}}={{$elem}}
+{{end}}
+
+{{end}} +` + +var routerAndFilterTpl = `{{define "content"}} + + +

{{.Title}}

+ +{{range .Content.Methods}} + +
+
{{.}}
+
+ + + + {{range $.Content.Fields}} + + {{end}} + + + + + {{$slice := index $.Content.Data .}} + {{range $i, $elem := $slice}} + + + {{range $elem}} + + {{end}} + + + {{end}} + + +
+ {{.}} +
+ {{.}} +
+
+
+{{end}} + + +{{end}}` + +var tasksTpl = `{{define "content"}} + +

{{.Title}}

+ +{{if .Message }} +{{ $messageType := index .Message 0}} +

+{{index .Message 1}} +

+{{end}} + + + + + +{{range .Content.Fields}} + +{{end}} + + + + +{{range $i, $slice := .Content.Data}} + + {{range $slice}} + + {{end}} + + +{{end}} + +
+{{.}} +
+ {{.}} + + Run +
+ +{{end}}` + +var healthCheckTpl = ` +{{define "content"}} + +

{{.Title}}

+ + + +{{range .Content.Fields}} + +{{end}} + + + +{{range $i, $slice := .Content.Data}} + {{ $header := index $slice 0}} + {{ if eq "success" $header}} + + {{else if eq "error" $header}} + + {{else}} + + {{end}} + {{range $j, $elem := $slice}} + {{if ne $j 0}} + + {{end}} + {{end}} + + +{{end}} + + +
+ {{.}} +
+ {{$elem}} + + {{$header}} +
+{{end}}` + +// The base dashboardTpl +var dashboardTpl = ` + + + + + + + + + + +Welcome to Beego Admin Dashboard + + + + + + + + + + + + + +
+{{template "content" .}} +
+ + + + + + + +{{template "scripts" .}} + + +` diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/app.go b/Godeps/_workspace/src/github.com/astaxie/beego/app.go new file mode 100644 index 000000000..af54ea4b5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/app.go @@ -0,0 +1,362 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package beego + +import ( + "fmt" + "net" + "net/http" + "net/http/fcgi" + "os" + "path" + "time" + + "github.com/astaxie/beego/grace" + "github.com/astaxie/beego/utils" +) + +var ( + // BeeApp is an application instance + BeeApp *App +) + +func init() { + // create beego application + BeeApp = NewApp() +} + +// App defines beego application with a new PatternServeMux. +type App struct { + Handlers *ControllerRegister + Server *http.Server +} + +// NewApp returns a new beego application. +func NewApp() *App { + cr := NewControllerRegister() + app := &App{Handlers: cr, Server: &http.Server{}} + return app +} + +// Run beego application. 
+func (app *App) Run() { + addr := BConfig.Listen.HTTPAddr + + if BConfig.Listen.HTTPPort != 0 { + addr = fmt.Sprintf("%s:%d", BConfig.Listen.HTTPAddr, BConfig.Listen.HTTPPort) + } + + var ( + err error + l net.Listener + endRunning = make(chan bool, 1) + ) + + // run cgi server + if BConfig.Listen.EnableFcgi { + if BConfig.Listen.EnableStdIo { + if err = fcgi.Serve(nil, app.Handlers); err == nil { // standard I/O + BeeLogger.Info("Use FCGI via standard I/O") + } else { + BeeLogger.Critical("Cannot use FCGI via standard I/O", err) + } + return + } + if BConfig.Listen.HTTPPort == 0 { + // remove the Socket file before start + if utils.FileExists(addr) { + os.Remove(addr) + } + l, err = net.Listen("unix", addr) + } else { + l, err = net.Listen("tcp", addr) + } + if err != nil { + BeeLogger.Critical("Listen: ", err) + } + if err = fcgi.Serve(l, app.Handlers); err != nil { + BeeLogger.Critical("fcgi.Serve: ", err) + } + return + } + + app.Server.Handler = app.Handlers + app.Server.ReadTimeout = time.Duration(BConfig.Listen.ServerTimeOut) * time.Second + app.Server.WriteTimeout = time.Duration(BConfig.Listen.ServerTimeOut) * time.Second + + // run graceful mode + if BConfig.Listen.Graceful { + httpsAddr := BConfig.Listen.HTTPSAddr + app.Server.Addr = httpsAddr + if BConfig.Listen.EnableHTTPS { + go func() { + time.Sleep(20 * time.Microsecond) + if BConfig.Listen.HTTPSPort != 0 { + httpsAddr = fmt.Sprintf("%s:%d", BConfig.Listen.HTTPSAddr, BConfig.Listen.HTTPSPort) + app.Server.Addr = httpsAddr + } + server := grace.NewServer(httpsAddr, app.Handlers) + server.Server.ReadTimeout = app.Server.ReadTimeout + server.Server.WriteTimeout = app.Server.WriteTimeout + if err := server.ListenAndServeTLS(BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile); err != nil { + BeeLogger.Critical("ListenAndServeTLS: ", err, fmt.Sprintf("%d", os.Getpid())) + time.Sleep(100 * time.Microsecond) + endRunning <- true + } + }() + } + if BConfig.Listen.EnableHTTP { + go func() { + server 
:= grace.NewServer(addr, app.Handlers) + server.Server.ReadTimeout = app.Server.ReadTimeout + server.Server.WriteTimeout = app.Server.WriteTimeout + if BConfig.Listen.ListenTCP4 { + server.Network = "tcp4" + } + if err := server.ListenAndServe(); err != nil { + BeeLogger.Critical("ListenAndServe: ", err, fmt.Sprintf("%d", os.Getpid())) + time.Sleep(100 * time.Microsecond) + endRunning <- true + } + }() + } + <-endRunning + return + } + + // run normal mode + app.Server.Addr = addr + if BConfig.Listen.EnableHTTPS { + go func() { + time.Sleep(20 * time.Microsecond) + if BConfig.Listen.HTTPSPort != 0 { + app.Server.Addr = fmt.Sprintf("%s:%d", BConfig.Listen.HTTPSAddr, BConfig.Listen.HTTPSPort) + } + BeeLogger.Info("https server Running on %s", app.Server.Addr) + if err := app.Server.ListenAndServeTLS(BConfig.Listen.HTTPSCertFile, BConfig.Listen.HTTPSKeyFile); err != nil { + BeeLogger.Critical("ListenAndServeTLS: ", err) + time.Sleep(100 * time.Microsecond) + endRunning <- true + } + }() + } + if BConfig.Listen.EnableHTTP { + go func() { + app.Server.Addr = addr + BeeLogger.Info("http server Running on %s", app.Server.Addr) + if BConfig.Listen.ListenTCP4 { + ln, err := net.Listen("tcp4", app.Server.Addr) + if err != nil { + BeeLogger.Critical("ListenAndServe: ", err) + time.Sleep(100 * time.Microsecond) + endRunning <- true + return + } + if err = app.Server.Serve(ln); err != nil { + BeeLogger.Critical("ListenAndServe: ", err) + time.Sleep(100 * time.Microsecond) + endRunning <- true + return + } + } else { + if err := app.Server.ListenAndServe(); err != nil { + BeeLogger.Critical("ListenAndServe: ", err) + time.Sleep(100 * time.Microsecond) + endRunning <- true + } + } + }() + } + <-endRunning +} + +// Router adds a patterned controller handler to BeeApp. +// it's an alias method of App.Router. 
+// usage: +// simple router +// beego.Router("/admin", &admin.UserController{}) +// beego.Router("/admin/index", &admin.ArticleController{}) +// +// regex router +// +// beego.Router("/api/:id([0-9]+)", &controllers.RController{}) +// +// custom rules +// beego.Router("/api/list",&RestController{},"*:ListFood") +// beego.Router("/api/create",&RestController{},"post:CreateFood") +// beego.Router("/api/update",&RestController{},"put:UpdateFood") +// beego.Router("/api/delete",&RestController{},"delete:DeleteFood") +func Router(rootpath string, c ControllerInterface, mappingMethods ...string) *App { + BeeApp.Handlers.Add(rootpath, c, mappingMethods...) + return BeeApp +} + +// Include will generate router file in the router/xxx.go from the controller's comments +// usage: +// beego.Include(&BankAccount{}, &OrderController{},&RefundController{},&ReceiptController{}) +// type BankAccount struct{ +// beego.Controller +// } +// +// register the function +// func (b *BankAccount)Mapping(){ +// b.Mapping("ShowAccount" , b.ShowAccount) +// b.Mapping("ModifyAccount", b.ModifyAccount) +//} +// +// //@router /account/:id [get] +// func (b *BankAccount) ShowAccount(){ +// //logic +// } +// +// +// //@router /account/:id [post] +// func (b *BankAccount) ModifyAccount(){ +// //logic +// } +// +// the comments @router url methodlist +// url support all the function Router's pattern +// methodlist [get post head put delete options *] +func Include(cList ...ControllerInterface) *App { + BeeApp.Handlers.Include(cList...) + return BeeApp +} + +// RESTRouter adds a restful controller handler to BeeApp. +// its' controller implements beego.ControllerInterface and +// defines a param "pattern/:objectId" to visit each resource. +func RESTRouter(rootpath string, c ControllerInterface) *App { + Router(rootpath, c) + Router(path.Join(rootpath, ":objectId"), c) + return BeeApp +} + +// AutoRouter adds defined controller handler to BeeApp. +// it's same to App.AutoRouter. 
+// if beego.AddAuto(&MainContorlller{}) and MainController has methods List and Page, +// visit the url /main/list to exec List function or /main/page to exec Page function. +func AutoRouter(c ControllerInterface) *App { + BeeApp.Handlers.AddAuto(c) + return BeeApp +} + +// AutoPrefix adds controller handler to BeeApp with prefix. +// it's same to App.AutoRouterWithPrefix. +// if beego.AutoPrefix("/admin",&MainContorlller{}) and MainController has methods List and Page, +// visit the url /admin/main/list to exec List function or /admin/main/page to exec Page function. +func AutoPrefix(prefix string, c ControllerInterface) *App { + BeeApp.Handlers.AddAutoPrefix(prefix, c) + return BeeApp +} + +// Get used to register router for Get method +// usage: +// beego.Get("/", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func Get(rootpath string, f FilterFunc) *App { + BeeApp.Handlers.Get(rootpath, f) + return BeeApp +} + +// Post used to register router for Post method +// usage: +// beego.Post("/api", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func Post(rootpath string, f FilterFunc) *App { + BeeApp.Handlers.Post(rootpath, f) + return BeeApp +} + +// Delete used to register router for Delete method +// usage: +// beego.Delete("/api", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func Delete(rootpath string, f FilterFunc) *App { + BeeApp.Handlers.Delete(rootpath, f) + return BeeApp +} + +// Put used to register router for Put method +// usage: +// beego.Put("/api", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func Put(rootpath string, f FilterFunc) *App { + BeeApp.Handlers.Put(rootpath, f) + return BeeApp +} + +// Head used to register router for Head method +// usage: +// beego.Head("/api", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func Head(rootpath string, f FilterFunc) *App { + BeeApp.Handlers.Head(rootpath, f) + return BeeApp +} + +// 
Options used to register router for Options method +// usage: +// beego.Options("/api", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func Options(rootpath string, f FilterFunc) *App { + BeeApp.Handlers.Options(rootpath, f) + return BeeApp +} + +// Patch used to register router for Patch method +// usage: +// beego.Patch("/api", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func Patch(rootpath string, f FilterFunc) *App { + BeeApp.Handlers.Patch(rootpath, f) + return BeeApp +} + +// Any used to register router for all methods +// usage: +// beego.Any("/api", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func Any(rootpath string, f FilterFunc) *App { + BeeApp.Handlers.Any(rootpath, f) + return BeeApp +} + +// Handler used to register a Handler router +// usage: +// beego.Handler("/api", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func Handler(rootpath string, h http.Handler, options ...interface{}) *App { + BeeApp.Handlers.Handler(rootpath, h, options...) + return BeeApp +} + +// InsertFilter adds a FilterFunc with pattern condition and action constant. +// The pos means action constant including +// beego.BeforeStatic, beego.BeforeRouter, beego.BeforeExec, beego.AfterExec and beego.FinishRouter. +// The bool params is for setting the returnOnOutput value (false allows multiple filters to execute) +func InsertFilter(pattern string, pos int, filter FilterFunc, params ...bool) *App { + BeeApp.Handlers.InsertFilter(pattern, pos, filter, params...) + return BeeApp +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/beego.go b/Godeps/_workspace/src/github.com/astaxie/beego/beego.go new file mode 100644 index 000000000..04f020714 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/beego.go @@ -0,0 +1,107 @@ +// Copyright 2014 beego Author. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package beego + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" +) + +const ( + // VERSION represent beego web framework version. + VERSION = "1.6.0" + + // DEV is for develop + DEV = "dev" + // PROD is for production + PROD = "prod" +) + +//hook function to run +type hookfunc func() error + +var ( + hooks = make([]hookfunc, 0) //hook function slice to store the hookfunc +) + +// AddAPPStartHook is used to register the hookfunc +// The hookfuncs will run in beego.Run() +// such as sessionInit, middlerware start, buildtemplate, admin start +func AddAPPStartHook(hf hookfunc) { + hooks = append(hooks, hf) +} + +// Run beego application. 
+// beego.Run() default run on HttpPort +// beego.Run("localhost") +// beego.Run(":8089") +// beego.Run("127.0.0.1:8089") +func Run(params ...string) { + initBeforeHTTPRun() + + if len(params) > 0 && params[0] != "" { + strs := strings.Split(params[0], ":") + if len(strs) > 0 && strs[0] != "" { + BConfig.Listen.HTTPAddr = strs[0] + } + if len(strs) > 1 && strs[1] != "" { + BConfig.Listen.HTTPPort, _ = strconv.Atoi(strs[1]) + } + } + + BeeApp.Run() +} + +func initBeforeHTTPRun() { + // if AppConfigPath is setted or conf/app.conf exist + err := ParseConfig() + if err != nil { + panic(err) + } + //init log + for adaptor, config := range BConfig.Log.Outputs { + err = BeeLogger.SetLogger(adaptor, config) + if err != nil { + fmt.Printf("%s with the config `%s` got err:%s\n", adaptor, config, err) + } + } + + SetLogFuncCall(BConfig.Log.FileLineNum) + + //init hooks + AddAPPStartHook(registerMime) + AddAPPStartHook(registerDefaultErrorHandler) + AddAPPStartHook(registerSession) + AddAPPStartHook(registerDocs) + AddAPPStartHook(registerTemplate) + AddAPPStartHook(registerAdmin) + + for _, hk := range hooks { + if err := hk(); err != nil { + panic(err) + } + } +} + +// TestBeegoInit is for test package init +func TestBeegoInit(ap string) { + os.Setenv("BEEGO_RUNMODE", "test") + AppConfigPath = filepath.Join(ap, "conf", "app.conf") + os.Chdir(ap) + initBeforeHTTPRun() +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/cache/README.md b/Godeps/_workspace/src/github.com/astaxie/beego/cache/README.md new file mode 100644 index 000000000..957790e7b --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/cache/README.md @@ -0,0 +1,59 @@ +## cache +cache is a Go cache manager. It can use many cache adapters. The repo is inspired by `database/sql` . + + +## How to install? + + go get github.com/astaxie/beego/cache + + +## What adapters are supported? + +As of now this cache support memory, Memcache and Redis. + + +## How to use it? 
+ +First you must import it + + import ( + "github.com/astaxie/beego/cache" + ) + +Then init a Cache (example with memory adapter) + + bm, err := cache.NewCache("memory", `{"interval":60}`) + +Use it like this: + + bm.Put("astaxie", 1, 10 * time.Second) + bm.Get("astaxie") + bm.IsExist("astaxie") + bm.Delete("astaxie") + + +## Memory adapter + +Configure memory adapter like this: + + {"interval":60} + +interval means the gc time. The cache will check at each time interval, whether item has expired. + + +## Memcache adapter + +Memcache adapter use the [gomemcache](http://github.com/bradfitz/gomemcache) client. + +Configure like this: + + {"conn":"127.0.0.1:11211"} + + +## Redis adapter + +Redis adapter use the [redigo](http://github.com/garyburd/redigo) client. + +Configure like this: + + {"conn":":6039"} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/cache/cache.go b/Godeps/_workspace/src/github.com/astaxie/beego/cache/cache.go new file mode 100644 index 000000000..f7158741d --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/cache/cache.go @@ -0,0 +1,103 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package cache provide a Cache interface and some implemetn engine +// Usage: +// +// import( +// "github.com/astaxie/beego/cache" +// ) +// +// bm, err := cache.NewCache("memory", `{"interval":60}`) +// +// Use it like this: +// +// bm.Put("astaxie", 1, 10 * time.Second) +// bm.Get("astaxie") +// bm.IsExist("astaxie") +// bm.Delete("astaxie") +// +// more docs http://beego.me/docs/module/cache.md +package cache + +import ( + "fmt" + "time" +) + +// Cache interface contains all behaviors for cache adapter. +// usage: +// cache.Register("file",cache.NewFileCache) // this operation is run in init method of file.go. +// c,err := cache.NewCache("file","{....}") +// c.Put("key",value, 3600 * time.Second) +// v := c.Get("key") +// +// c.Incr("counter") // now is 1 +// c.Incr("counter") // now is 2 +// count := c.Get("counter").(int) +type Cache interface { + // get cached value by key. + Get(key string) interface{} + // GetMulti is a batch version of Get. + GetMulti(keys []string) []interface{} + // set cached value with key and expire time. + Put(key string, val interface{}, timeout time.Duration) error + // delete cached value by key. + Delete(key string) error + // increase cached int value by key, as a counter. + Incr(key string) error + // decrease cached int value by key, as a counter. + Decr(key string) error + // check if cached value exists or not. + IsExist(key string) bool + // clear all cache. + ClearAll() error + // start gc routine based on config string settings. + StartAndGC(config string) error +} + +// Instance is a function create a new Cache Instance +type Instance func() Cache + +var adapters = make(map[string]Instance) + +// Register makes a cache adapter available by the adapter name. +// If Register is called twice with the same name or if driver is nil, +// it panics. 
+func Register(name string, adapter Instance) { + if adapter == nil { + panic("cache: Register adapter is nil") + } + if _, ok := adapters[name]; ok { + panic("cache: Register called twice for adapter " + name) + } + adapters[name] = adapter +} + +// NewCache Create a new cache driver by adapter name and config string. +// config need to be correct JSON as string: {"interval":360}. +// it will start gc automatically. +func NewCache(adapterName, config string) (adapter Cache, err error) { + instanceFunc, ok := adapters[adapterName] + if !ok { + err = fmt.Errorf("cache: unknown adapter name %q (forgot to import?)", adapterName) + return + } + adapter = instanceFunc() + err = adapter.StartAndGC(config) + if err != nil { + adapter = nil + } + return +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/cache/conv.go b/Godeps/_workspace/src/github.com/astaxie/beego/cache/conv.go new file mode 100644 index 000000000..dbdff1c77 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/cache/conv.go @@ -0,0 +1,100 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cache + +import ( + "fmt" + "strconv" +) + +// GetString convert interface to string. 
+func GetString(v interface{}) string { + switch result := v.(type) { + case string: + return result + case []byte: + return string(result) + default: + if v != nil { + return fmt.Sprintf("%v", result) + } + } + return "" +} + +// GetInt convert interface to int. +func GetInt(v interface{}) int { + switch result := v.(type) { + case int: + return result + case int32: + return int(result) + case int64: + return int(result) + default: + if d := GetString(v); d != "" { + value, _ := strconv.Atoi(d) + return value + } + } + return 0 +} + +// GetInt64 convert interface to int64. +func GetInt64(v interface{}) int64 { + switch result := v.(type) { + case int: + return int64(result) + case int32: + return int64(result) + case int64: + return result + default: + + if d := GetString(v); d != "" { + value, _ := strconv.ParseInt(d, 10, 64) + return value + } + } + return 0 +} + +// GetFloat64 convert interface to float64. +func GetFloat64(v interface{}) float64 { + switch result := v.(type) { + case float64: + return result + default: + if d := GetString(v); d != "" { + value, _ := strconv.ParseFloat(d, 64) + return value + } + } + return 0 +} + +// GetBool convert interface to bool. +func GetBool(v interface{}) bool { + switch result := v.(type) { + case bool: + return result + default: + if d := GetString(v); d != "" { + value, _ := strconv.ParseBool(d) + return value + } + } + return false +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/cache/file.go b/Godeps/_workspace/src/github.com/astaxie/beego/cache/file.go new file mode 100644 index 000000000..4b0309804 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/cache/file.go @@ -0,0 +1,274 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cache + +import ( + "bytes" + "crypto/md5" + "encoding/gob" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "strconv" + "time" +) + +// FileCacheItem is basic unit of file cache adapter. +// it contains data and expire time. +type FileCacheItem struct { + Data interface{} + Lastaccess time.Time + Expired time.Time +} + +// FileCache Config +var ( + FileCachePath = "cache" // cache directory + FileCacheFileSuffix = ".bin" // cache file suffix + FileCacheDirectoryLevel = 2 // cache file deep level if auto generated cache files. + FileCacheEmbedExpiry time.Duration // cache expire time, default is no expire forever. +) + +// FileCache is cache adapter for file storage. +type FileCache struct { + CachePath string + FileSuffix string + DirectoryLevel int + EmbedExpiry int +} + +// NewFileCache Create new file cache with no config. +// the level and expiry need set in method StartAndGC as config string. +func NewFileCache() Cache { + // return &FileCache{CachePath:FileCachePath, FileSuffix:FileCacheFileSuffix} + return &FileCache{} +} + +// StartAndGC will start and begin gc for file cache. 
+// the config need to be like {CachePath:"/cache","FileSuffix":".bin","DirectoryLevel":2,"EmbedExpiry":0} +func (fc *FileCache) StartAndGC(config string) error { + + var cfg map[string]string + json.Unmarshal([]byte(config), &cfg) + if _, ok := cfg["CachePath"]; !ok { + cfg["CachePath"] = FileCachePath + } + if _, ok := cfg["FileSuffix"]; !ok { + cfg["FileSuffix"] = FileCacheFileSuffix + } + if _, ok := cfg["DirectoryLevel"]; !ok { + cfg["DirectoryLevel"] = strconv.Itoa(FileCacheDirectoryLevel) + } + if _, ok := cfg["EmbedExpiry"]; !ok { + cfg["EmbedExpiry"] = strconv.FormatInt(int64(FileCacheEmbedExpiry.Seconds()), 10) + } + fc.CachePath = cfg["CachePath"] + fc.FileSuffix = cfg["FileSuffix"] + fc.DirectoryLevel, _ = strconv.Atoi(cfg["DirectoryLevel"]) + fc.EmbedExpiry, _ = strconv.Atoi(cfg["EmbedExpiry"]) + + fc.Init() + return nil +} + +// Init will make new dir for file cache if not exist. +func (fc *FileCache) Init() { + if ok, _ := exists(fc.CachePath); !ok { // todo : error handle + _ = os.MkdirAll(fc.CachePath, os.ModePerm) // todo : error handle + } +} + +// get cached file name. it's md5 encoded. +func (fc *FileCache) getCacheFileName(key string) string { + m := md5.New() + io.WriteString(m, key) + keyMd5 := hex.EncodeToString(m.Sum(nil)) + cachePath := fc.CachePath + switch fc.DirectoryLevel { + case 2: + cachePath = filepath.Join(cachePath, keyMd5[0:2], keyMd5[2:4]) + case 1: + cachePath = filepath.Join(cachePath, keyMd5[0:2]) + } + + if ok, _ := exists(cachePath); !ok { // todo : error handle + _ = os.MkdirAll(cachePath, os.ModePerm) // todo : error handle + } + + return filepath.Join(cachePath, fmt.Sprintf("%s%s", keyMd5, fc.FileSuffix)) +} + +// Get value from file cache. +// if non-exist or expired, return empty string. 
+func (fc *FileCache) Get(key string) interface{} { + fileData, err := FileGetContents(fc.getCacheFileName(key)) + if err != nil { + return "" + } + var to FileCacheItem + GobDecode(fileData, &to) + if to.Expired.Before(time.Now()) { + return "" + } + return to.Data +} + +// GetMulti gets values from file cache. +// if non-exist or expired, return empty string. +func (fc *FileCache) GetMulti(keys []string) []interface{} { + var rc []interface{} + for _, key := range keys { + rc = append(rc, fc.Get(key)) + } + return rc +} + +// Put value into file cache. +// timeout means how long to keep this file, unit of ms. +// if timeout equals FileCacheEmbedExpiry(default is 0), cache this item forever. +func (fc *FileCache) Put(key string, val interface{}, timeout time.Duration) error { + gob.Register(val) + + item := FileCacheItem{Data: val} + if timeout == FileCacheEmbedExpiry { + item.Expired = time.Now().Add((86400 * 365 * 10) * time.Second) // ten years + } else { + item.Expired = time.Now().Add(timeout) + } + item.Lastaccess = time.Now() + data, err := GobEncode(item) + if err != nil { + return err + } + return FilePutContents(fc.getCacheFileName(key), data) +} + +// Delete file cache value. +func (fc *FileCache) Delete(key string) error { + filename := fc.getCacheFileName(key) + if ok, _ := exists(filename); ok { + return os.Remove(filename) + } + return nil +} + +// Incr will increase cached int value. +// fc value is saving forever unless Delete. +func (fc *FileCache) Incr(key string) error { + data := fc.Get(key) + var incr int + if reflect.TypeOf(data).Name() != "int" { + incr = 0 + } else { + incr = data.(int) + 1 + } + fc.Put(key, incr, FileCacheEmbedExpiry) + return nil +} + +// Decr will decrease cached int value. 
+func (fc *FileCache) Decr(key string) error { + data := fc.Get(key) + var decr int + if reflect.TypeOf(data).Name() != "int" || data.(int)-1 <= 0 { + decr = 0 + } else { + decr = data.(int) - 1 + } + fc.Put(key, decr, FileCacheEmbedExpiry) + return nil +} + +// IsExist check value is exist. +func (fc *FileCache) IsExist(key string) bool { + ret, _ := exists(fc.getCacheFileName(key)) + return ret +} + +// ClearAll will clean cached files. +// not implemented. +func (fc *FileCache) ClearAll() error { + return nil +} + +// check file exist. +func exists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// FileGetContents Get bytes to file. +// if non-exist, create this file. +func FileGetContents(filename string) (data []byte, e error) { + f, e := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, os.ModePerm) + if e != nil { + return + } + defer f.Close() + stat, e := f.Stat() + if e != nil { + return + } + data = make([]byte, stat.Size()) + result, e := f.Read(data) + if e != nil || int64(result) != stat.Size() { + return nil, e + } + return +} + +// FilePutContents Put bytes to file. +// if non-exist, create this file. +func FilePutContents(filename string, content []byte) error { + fp, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, os.ModePerm) + if err != nil { + return err + } + defer fp.Close() + _, err = fp.Write(content) + return err +} + +// GobEncode Gob encodes file cache item. +func GobEncode(data interface{}) ([]byte, error) { + buf := bytes.NewBuffer(nil) + enc := gob.NewEncoder(buf) + err := enc.Encode(data) + if err != nil { + return nil, err + } + return buf.Bytes(), err +} + +// GobDecode Gob decodes file cache item. 
+func GobDecode(data []byte, to *FileCacheItem) error { + buf := bytes.NewBuffer(data) + dec := gob.NewDecoder(buf) + return dec.Decode(&to) +} + +func init() { + Register("file", NewFileCache) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/cache/memcache/memcache.go b/Godeps/_workspace/src/github.com/astaxie/beego/cache/memcache/memcache.go new file mode 100644 index 000000000..3f0fe4111 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/cache/memcache/memcache.go @@ -0,0 +1,190 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package memcache for cache provider +// +// depend on github.com/bradfitz/gomemcache/memcache +// +// go install github.com/bradfitz/gomemcache/memcache +// +// Usage: +// import( +// _ "github.com/astaxie/beego/cache/memcache" +// "github.com/astaxie/beego/cache" +// ) +// +// bm, err := cache.NewCache("memcache", `{"conn":"127.0.0.1:11211"}`) +// +// more docs http://beego.me/docs/module/cache.md +package memcache + +import ( + "encoding/json" + "errors" + "strings" + + "github.com/bradfitz/gomemcache/memcache" + + "time" + + "github.com/astaxie/beego/cache" +) + +// Cache Memcache adapter. +type Cache struct { + conn *memcache.Client + conninfo []string +} + +// NewMemCache create new memcache adapter. +func NewMemCache() cache.Cache { + return &Cache{} +} + +// Get get value from memcache. 
+func (rc *Cache) Get(key string) interface{} { + if rc.conn == nil { + if err := rc.connectInit(); err != nil { + return err + } + } + if item, err := rc.conn.Get(key); err == nil { + return string(item.Value) + } + return nil +} + +// GetMulti get value from memcache. +func (rc *Cache) GetMulti(keys []string) []interface{} { + size := len(keys) + var rv []interface{} + if rc.conn == nil { + if err := rc.connectInit(); err != nil { + for i := 0; i < size; i++ { + rv = append(rv, err) + } + return rv + } + } + mv, err := rc.conn.GetMulti(keys) + if err == nil { + for _, v := range mv { + rv = append(rv, string(v.Value)) + } + return rv + } + for i := 0; i < size; i++ { + rv = append(rv, err) + } + return rv +} + +// Put put value to memcache. only support string. +func (rc *Cache) Put(key string, val interface{}, timeout time.Duration) error { + if rc.conn == nil { + if err := rc.connectInit(); err != nil { + return err + } + } + v, ok := val.(string) + if !ok { + return errors.New("val must string") + } + item := memcache.Item{Key: key, Value: []byte(v), Expiration: int32(timeout / time.Second)} + return rc.conn.Set(&item) +} + +// Delete delete value in memcache. +func (rc *Cache) Delete(key string) error { + if rc.conn == nil { + if err := rc.connectInit(); err != nil { + return err + } + } + return rc.conn.Delete(key) +} + +// Incr increase counter. +func (rc *Cache) Incr(key string) error { + if rc.conn == nil { + if err := rc.connectInit(); err != nil { + return err + } + } + _, err := rc.conn.Increment(key, 1) + return err +} + +// Decr decrease counter. +func (rc *Cache) Decr(key string) error { + if rc.conn == nil { + if err := rc.connectInit(); err != nil { + return err + } + } + _, err := rc.conn.Decrement(key, 1) + return err +} + +// IsExist check value exists in memcache. 
+func (rc *Cache) IsExist(key string) bool { + if rc.conn == nil { + if err := rc.connectInit(); err != nil { + return false + } + } + _, err := rc.conn.Get(key) + if err != nil { + return false + } + return true +} + +// ClearAll clear all cached in memcache. +func (rc *Cache) ClearAll() error { + if rc.conn == nil { + if err := rc.connectInit(); err != nil { + return err + } + } + return rc.conn.FlushAll() +} + +// StartAndGC start memcache adapter. +// config string is like {"conn":"connection info"}. +// if connecting error, return. +func (rc *Cache) StartAndGC(config string) error { + var cf map[string]string + json.Unmarshal([]byte(config), &cf) + if _, ok := cf["conn"]; !ok { + return errors.New("config has no conn key") + } + rc.conninfo = strings.Split(cf["conn"], ";") + if rc.conn == nil { + if err := rc.connectInit(); err != nil { + return err + } + } + return nil +} + +// connect to memcache and keep the connection. +func (rc *Cache) connectInit() error { + rc.conn = memcache.New(rc.conninfo...) + return nil +} + +func init() { + cache.Register("memcache", NewMemCache) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/cache/memory.go b/Godeps/_workspace/src/github.com/astaxie/beego/cache/memory.go new file mode 100644 index 000000000..fff2ebbb3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/cache/memory.go @@ -0,0 +1,244 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package cache + +import ( + "encoding/json" + "errors" + "sync" + "time" +) + +var ( + // DefaultEvery means the clock time of recycling the expired cache items in memory. + DefaultEvery = 60 // 1 minute +) + +// MemoryItem store memory cache item. +type MemoryItem struct { + val interface{} + createdTime time.Time + lifespan time.Duration +} + +func (mi *MemoryItem) isExpire() bool { + // 0 means forever + if mi.lifespan == 0 { + return false + } + return time.Now().Sub(mi.createdTime) > mi.lifespan +} + +// MemoryCache is Memory cache adapter. +// it contains a RW locker for safe map storage. +type MemoryCache struct { + sync.RWMutex + dur time.Duration + items map[string]*MemoryItem + Every int // run an expiration check Every clock time +} + +// NewMemoryCache returns a new MemoryCache. +func NewMemoryCache() Cache { + cache := MemoryCache{items: make(map[string]*MemoryItem)} + return &cache +} + +// Get cache from memory. +// if non-existed or expired, return nil. +func (bc *MemoryCache) Get(name string) interface{} { + bc.RLock() + defer bc.RUnlock() + if itm, ok := bc.items[name]; ok { + if itm.isExpire() { + return nil + } + return itm.val + } + return nil +} + +// GetMulti gets caches from memory. +// if non-existed or expired, return nil. +func (bc *MemoryCache) GetMulti(names []string) []interface{} { + var rc []interface{} + for _, name := range names { + rc = append(rc, bc.Get(name)) + } + return rc +} + +// Put cache to memory. +// if lifespan is 0, it will be forever till restart. +func (bc *MemoryCache) Put(name string, value interface{}, lifespan time.Duration) error { + bc.Lock() + defer bc.Unlock() + bc.items[name] = &MemoryItem{ + val: value, + createdTime: time.Now(), + lifespan: lifespan, + } + return nil +} + +// Delete cache in memory. 
+func (bc *MemoryCache) Delete(name string) error { + bc.Lock() + defer bc.Unlock() + if _, ok := bc.items[name]; !ok { + return errors.New("key not exist") + } + delete(bc.items, name) + if _, ok := bc.items[name]; ok { + return errors.New("delete key error") + } + return nil +} + +// Incr increase cache counter in memory. +// it supports int,int32,int64,uint,uint32,uint64. +func (bc *MemoryCache) Incr(key string) error { + bc.RLock() + defer bc.RUnlock() + itm, ok := bc.items[key] + if !ok { + return errors.New("key not exist") + } + switch itm.val.(type) { + case int: + itm.val = itm.val.(int) + 1 + case int32: + itm.val = itm.val.(int32) + 1 + case int64: + itm.val = itm.val.(int64) + 1 + case uint: + itm.val = itm.val.(uint) + 1 + case uint32: + itm.val = itm.val.(uint32) + 1 + case uint64: + itm.val = itm.val.(uint64) + 1 + default: + return errors.New("item val is not (u)int (u)int32 (u)int64") + } + return nil +} + +// Decr decrease counter in memory. +func (bc *MemoryCache) Decr(key string) error { + bc.RLock() + defer bc.RUnlock() + itm, ok := bc.items[key] + if !ok { + return errors.New("key not exist") + } + switch itm.val.(type) { + case int: + itm.val = itm.val.(int) - 1 + case int64: + itm.val = itm.val.(int64) - 1 + case int32: + itm.val = itm.val.(int32) - 1 + case uint: + if itm.val.(uint) > 0 { + itm.val = itm.val.(uint) - 1 + } else { + return errors.New("item val is less than 0") + } + case uint32: + if itm.val.(uint32) > 0 { + itm.val = itm.val.(uint32) - 1 + } else { + return errors.New("item val is less than 0") + } + case uint64: + if itm.val.(uint64) > 0 { + itm.val = itm.val.(uint64) - 1 + } else { + return errors.New("item val is less than 0") + } + default: + return errors.New("item val is not int int64 int32") + } + return nil +} + +// IsExist check cache exist in memory. 
+func (bc *MemoryCache) IsExist(name string) bool { + bc.RLock() + defer bc.RUnlock() + if v, ok := bc.items[name]; ok { + return !v.isExpire() + } + return false +} + +// ClearAll will delete all cache in memory. +func (bc *MemoryCache) ClearAll() error { + bc.Lock() + defer bc.Unlock() + bc.items = make(map[string]*MemoryItem) + return nil +} + +// StartAndGC start memory cache. it will check expiration in every clock time. +func (bc *MemoryCache) StartAndGC(config string) error { + var cf map[string]int + json.Unmarshal([]byte(config), &cf) + if _, ok := cf["interval"]; !ok { + cf = make(map[string]int) + cf["interval"] = DefaultEvery + } + dur := time.Duration(cf["interval"]) * time.Second + bc.Every = cf["interval"] + bc.dur = dur + go bc.vaccuum() + return nil +} + +// check expiration. +func (bc *MemoryCache) vaccuum() { + if bc.Every < 1 { + return + } + for { + <-time.After(bc.dur) + if bc.items == nil { + return + } + for name := range bc.items { + bc.itemExpired(name) + } + } +} + +// itemExpired returns true if an item is expired. +func (bc *MemoryCache) itemExpired(name string) bool { + bc.Lock() + defer bc.Unlock() + + itm, ok := bc.items[name] + if !ok { + return true + } + if itm.isExpire() { + delete(bc.items, name) + return true + } + return false +} + +func init() { + Register("memory", NewMemoryCache) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/cache/redis/redis.go b/Godeps/_workspace/src/github.com/astaxie/beego/cache/redis/redis.go new file mode 100644 index 000000000..781e38366 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/cache/redis/redis.go @@ -0,0 +1,240 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package redis for cache provider +// +// depend on github.com/garyburd/redigo/redis +// +// go install github.com/garyburd/redigo/redis +// +// Usage: +// import( +// _ "github.com/astaxie/beego/cache/redis" +// "github.com/astaxie/beego/cache" +// ) +// +// bm, err := cache.NewCache("redis", `{"conn":"127.0.0.1:11211"}`) +// +// more docs http://beego.me/docs/module/cache.md +package redis + +import ( + "encoding/json" + "errors" + "strconv" + "time" + + "github.com/garyburd/redigo/redis" + + "github.com/astaxie/beego/cache" +) + +var ( + // DefaultKey the collection name of redis for cache adapter. + DefaultKey = "beecacheRedis" +) + +// Cache is Redis cache adapter. +type Cache struct { + p *redis.Pool // redis connection pool + conninfo string + dbNum int + key string + password string +} + +// NewRedisCache create new redis cache with default collection name. +func NewRedisCache() cache.Cache { + return &Cache{key: DefaultKey} +} + +// actually do the redis cmds +func (rc *Cache) do(commandName string, args ...interface{}) (reply interface{}, err error) { + c := rc.p.Get() + defer c.Close() + + return c.Do(commandName, args...) +} + +// Get cache from redis. +func (rc *Cache) Get(key string) interface{} { + if v, err := rc.do("GET", key); err == nil { + return v + } + return nil +} + +// GetMulti get cache from redis. 
+func (rc *Cache) GetMulti(keys []string) []interface{} { + size := len(keys) + var rv []interface{} + c := rc.p.Get() + defer c.Close() + var err error + for _, key := range keys { + err = c.Send("GET", key) + if err != nil { + goto ERROR + } + } + if err = c.Flush(); err != nil { + goto ERROR + } + for i := 0; i < size; i++ { + if v, err := c.Receive(); err == nil { + rv = append(rv, v.([]byte)) + } else { + rv = append(rv, err) + } + } + return rv +ERROR: + rv = rv[0:0] + for i := 0; i < size; i++ { + rv = append(rv, nil) + } + + return rv +} + +// Put put cache to redis. +func (rc *Cache) Put(key string, val interface{}, timeout time.Duration) error { + var err error + if _, err = rc.do("SETEX", key, int64(timeout/time.Second), val); err != nil { + return err + } + + if _, err = rc.do("HSET", rc.key, key, true); err != nil { + return err + } + return err +} + +// Delete delete cache in redis. +func (rc *Cache) Delete(key string) error { + var err error + if _, err = rc.do("DEL", key); err != nil { + return err + } + _, err = rc.do("HDEL", rc.key, key) + return err +} + +// IsExist check cache's existence in redis. +func (rc *Cache) IsExist(key string) bool { + v, err := redis.Bool(rc.do("EXISTS", key)) + if err != nil { + return false + } + if v == false { + if _, err = rc.do("HDEL", rc.key, key); err != nil { + return false + } + } + return v +} + +// Incr increase counter in redis. +func (rc *Cache) Incr(key string) error { + _, err := redis.Bool(rc.do("INCRBY", key, 1)) + return err +} + +// Decr decrease counter in redis. +func (rc *Cache) Decr(key string) error { + _, err := redis.Bool(rc.do("INCRBY", key, -1)) + return err +} + +// ClearAll clean all cache in redis. delete this redis collection. 
+func (rc *Cache) ClearAll() error { + cachedKeys, err := redis.Strings(rc.do("HKEYS", rc.key)) + if err != nil { + return err + } + for _, str := range cachedKeys { + if _, err = rc.do("DEL", str); err != nil { + return err + } + } + _, err = rc.do("DEL", rc.key) + return err +} + +// StartAndGC start redis cache adapter. +// config is like {"key":"collection key","conn":"connection info","dbNum":"0"} +// the cache item in redis are stored forever, +// so no gc operation. +func (rc *Cache) StartAndGC(config string) error { + var cf map[string]string + json.Unmarshal([]byte(config), &cf) + + if _, ok := cf["key"]; !ok { + cf["key"] = DefaultKey + } + if _, ok := cf["conn"]; !ok { + return errors.New("config has no conn key") + } + if _, ok := cf["dbNum"]; !ok { + cf["dbNum"] = "0" + } + if _, ok := cf["password"]; !ok { + cf["password"] = "" + } + rc.key = cf["key"] + rc.conninfo = cf["conn"] + rc.dbNum, _ = strconv.Atoi(cf["dbNum"]) + rc.password = cf["password"] + + rc.connectInit() + + c := rc.p.Get() + defer c.Close() + + return c.Err() +} + +// connect to redis. +func (rc *Cache) connectInit() { + dialFunc := func() (c redis.Conn, err error) { + c, err = redis.Dial("tcp", rc.conninfo) + if err != nil { + return nil, err + } + + if rc.password != "" { + if _, err := c.Do("AUTH", rc.password); err != nil { + c.Close() + return nil, err + } + } + + _, selecterr := c.Do("SELECT", rc.dbNum) + if selecterr != nil { + c.Close() + return nil, selecterr + } + return + } + // initialize a new pool + rc.p = &redis.Pool{ + MaxIdle: 3, + IdleTimeout: 180 * time.Second, + Dial: dialFunc, + } +} + +func init() { + cache.Register("redis", NewRedisCache) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/config.go b/Godeps/_workspace/src/github.com/astaxie/beego/config.go new file mode 100644 index 000000000..8cf215309 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/config.go @@ -0,0 +1,392 @@ +// Copyright 2014 beego Author. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package beego + +import ( + "html/template" + "os" + "path/filepath" + "strings" + + "github.com/astaxie/beego/config" + "github.com/astaxie/beego/session" + "github.com/astaxie/beego/utils" +) + +// Config is the main struct for BConfig +type Config struct { + AppName string //Application name + RunMode string //Running Mode: dev | prod + RouterCaseSensitive bool + ServerName string + RecoverPanic bool + CopyRequestBody bool + EnableGzip bool + MaxMemory int64 + EnableErrorsShow bool + Listen Listen + WebConfig WebConfig + Log LogConfig +} + +// Listen holds for http and https related config +type Listen struct { + Graceful bool // Graceful means use graceful module to start the server + ServerTimeOut int64 + ListenTCP4 bool + EnableHTTP bool + HTTPAddr string + HTTPPort int + EnableHTTPS bool + HTTPSAddr string + HTTPSPort int + HTTPSCertFile string + HTTPSKeyFile string + EnableAdmin bool + AdminAddr string + AdminPort int + EnableFcgi bool + EnableStdIo bool // EnableStdIo works with EnableFcgi Use FCGI via standard I/O +} + +// WebConfig holds web related config +type WebConfig struct { + AutoRender bool + EnableDocs bool + FlashName string + FlashSeparator string + DirectoryIndex bool + StaticDir map[string]string + StaticExtensionsToGzip []string + TemplateLeft string + TemplateRight string + ViewsPath string + EnableXSRF bool + XSRFKey string + XSRFExpire int + Session SessionConfig +} + +// 
SessionConfig holds session related config +type SessionConfig struct { + SessionOn bool + SessionProvider string + SessionName string + SessionGCMaxLifetime int64 + SessionProviderConfig string + SessionCookieLifeTime int + SessionAutoSetCookie bool + SessionDomain string +} + +// LogConfig holds Log related config +type LogConfig struct { + AccessLogs bool + FileLineNum bool + Outputs map[string]string // Store Adaptor : config +} + +var ( + // BConfig is the default config for Application + BConfig *Config + // AppConfig is the instance of Config, store the config information from file + AppConfig *beegoAppConfig + // AppConfigPath is the path to the config files + AppConfigPath string + // AppConfigProvider is the provider for the config, default is ini + AppConfigProvider = "ini" + // TemplateCache stores template caching + TemplateCache map[string]*template.Template + // GlobalSessions is the instance for the session manager + GlobalSessions *session.Manager +) + +func init() { + BConfig = &Config{ + AppName: "beego", + RunMode: DEV, + RouterCaseSensitive: true, + ServerName: "beegoServer:" + VERSION, + RecoverPanic: true, + CopyRequestBody: false, + EnableGzip: false, + MaxMemory: 1 << 26, //64MB + EnableErrorsShow: true, + Listen: Listen{ + Graceful: false, + ServerTimeOut: 0, + ListenTCP4: false, + EnableHTTP: true, + HTTPAddr: "", + HTTPPort: 8080, + EnableHTTPS: false, + HTTPSAddr: "", + HTTPSPort: 10443, + HTTPSCertFile: "", + HTTPSKeyFile: "", + EnableAdmin: false, + AdminAddr: "", + AdminPort: 8088, + EnableFcgi: false, + EnableStdIo: false, + }, + WebConfig: WebConfig{ + AutoRender: true, + EnableDocs: false, + FlashName: "BEEGO_FLASH", + FlashSeparator: "BEEGOFLASH", + DirectoryIndex: false, + StaticDir: map[string]string{"/static": "static"}, + StaticExtensionsToGzip: []string{".css", ".js"}, + TemplateLeft: "{{", + TemplateRight: "}}", + ViewsPath: "views", + EnableXSRF: false, + XSRFKey: "beegoxsrf", + XSRFExpire: 0, + Session: SessionConfig{ + 
SessionOn: false, + SessionProvider: "memory", + SessionName: "beegosessionID", + SessionGCMaxLifetime: 3600, + SessionProviderConfig: "", + SessionCookieLifeTime: 0, //set cookie default is the brower life + SessionAutoSetCookie: true, + SessionDomain: "", + }, + }, + Log: LogConfig{ + AccessLogs: false, + FileLineNum: true, + Outputs: map[string]string{"console": ""}, + }, + } + ParseConfig() +} + +// ParseConfig parsed default config file. +// now only support ini, next will support json. +func ParseConfig() (err error) { + if AppConfigPath == "" { + if utils.FileExists(filepath.Join("conf", "app.conf")) { + AppConfigPath = filepath.Join("conf", "app.conf") + } else { + AppConfig = &beegoAppConfig{config.NewFakeConfig()} + return + } + } + AppConfig, err = newAppConfig(AppConfigProvider, AppConfigPath) + if err != nil { + return err + } + // set the runmode first + if envRunMode := os.Getenv("BEEGO_RUNMODE"); envRunMode != "" { + BConfig.RunMode = envRunMode + } else if runmode := AppConfig.String("RunMode"); runmode != "" { + BConfig.RunMode = runmode + } + + BConfig.AppName = AppConfig.DefaultString("AppName", BConfig.AppName) + BConfig.RecoverPanic = AppConfig.DefaultBool("RecoverPanic", BConfig.RecoverPanic) + BConfig.RouterCaseSensitive = AppConfig.DefaultBool("RouterCaseSensitive", BConfig.RouterCaseSensitive) + BConfig.ServerName = AppConfig.DefaultString("ServerName", BConfig.ServerName) + BConfig.EnableGzip = AppConfig.DefaultBool("EnableGzip", BConfig.EnableGzip) + BConfig.EnableErrorsShow = AppConfig.DefaultBool("EnableErrorsShow", BConfig.EnableErrorsShow) + BConfig.CopyRequestBody = AppConfig.DefaultBool("CopyRequestBody", BConfig.CopyRequestBody) + BConfig.MaxMemory = AppConfig.DefaultInt64("MaxMemory", BConfig.MaxMemory) + BConfig.Listen.Graceful = AppConfig.DefaultBool("Graceful", BConfig.Listen.Graceful) + BConfig.Listen.HTTPAddr = AppConfig.String("HTTPAddr") + BConfig.Listen.HTTPPort = AppConfig.DefaultInt("HTTPPort", BConfig.Listen.HTTPPort) 
+ BConfig.Listen.ListenTCP4 = AppConfig.DefaultBool("ListenTCP4", BConfig.Listen.ListenTCP4) + BConfig.Listen.EnableHTTP = AppConfig.DefaultBool("EnableHTTP", BConfig.Listen.EnableHTTP) + BConfig.Listen.EnableHTTPS = AppConfig.DefaultBool("EnableHTTPS", BConfig.Listen.EnableHTTPS) + BConfig.Listen.HTTPSAddr = AppConfig.DefaultString("HTTPSAddr", BConfig.Listen.HTTPSAddr) + BConfig.Listen.HTTPSPort = AppConfig.DefaultInt("HTTPSPort", BConfig.Listen.HTTPSPort) + BConfig.Listen.HTTPSCertFile = AppConfig.DefaultString("HTTPSCertFile", BConfig.Listen.HTTPSCertFile) + BConfig.Listen.HTTPSKeyFile = AppConfig.DefaultString("HTTPSKeyFile", BConfig.Listen.HTTPSKeyFile) + BConfig.Listen.EnableAdmin = AppConfig.DefaultBool("EnableAdmin", BConfig.Listen.EnableAdmin) + BConfig.Listen.AdminAddr = AppConfig.DefaultString("AdminAddr", BConfig.Listen.AdminAddr) + BConfig.Listen.AdminPort = AppConfig.DefaultInt("AdminPort", BConfig.Listen.AdminPort) + BConfig.Listen.EnableFcgi = AppConfig.DefaultBool("EnableFcgi", BConfig.Listen.EnableFcgi) + BConfig.Listen.EnableStdIo = AppConfig.DefaultBool("EnableStdIo", BConfig.Listen.EnableStdIo) + BConfig.Listen.ServerTimeOut = AppConfig.DefaultInt64("ServerTimeOut", BConfig.Listen.ServerTimeOut) + BConfig.WebConfig.AutoRender = AppConfig.DefaultBool("AutoRender", BConfig.WebConfig.AutoRender) + BConfig.WebConfig.ViewsPath = AppConfig.DefaultString("ViewsPath", BConfig.WebConfig.ViewsPath) + BConfig.WebConfig.DirectoryIndex = AppConfig.DefaultBool("DirectoryIndex", BConfig.WebConfig.DirectoryIndex) + BConfig.WebConfig.FlashName = AppConfig.DefaultString("FlashName", BConfig.WebConfig.FlashName) + BConfig.WebConfig.FlashSeparator = AppConfig.DefaultString("FlashSeparator", BConfig.WebConfig.FlashSeparator) + BConfig.WebConfig.EnableDocs = AppConfig.DefaultBool("EnableDocs", BConfig.WebConfig.EnableDocs) + BConfig.WebConfig.XSRFKey = AppConfig.DefaultString("XSRFKEY", BConfig.WebConfig.XSRFKey) + BConfig.WebConfig.EnableXSRF = 
AppConfig.DefaultBool("EnableXSRF", BConfig.WebConfig.EnableXSRF) + BConfig.WebConfig.XSRFExpire = AppConfig.DefaultInt("XSRFExpire", BConfig.WebConfig.XSRFExpire) + BConfig.WebConfig.TemplateLeft = AppConfig.DefaultString("TemplateLeft", BConfig.WebConfig.TemplateLeft) + BConfig.WebConfig.TemplateRight = AppConfig.DefaultString("TemplateRight", BConfig.WebConfig.TemplateRight) + BConfig.WebConfig.Session.SessionOn = AppConfig.DefaultBool("SessionOn", BConfig.WebConfig.Session.SessionOn) + BConfig.WebConfig.Session.SessionProvider = AppConfig.DefaultString("SessionProvider", BConfig.WebConfig.Session.SessionProvider) + BConfig.WebConfig.Session.SessionName = AppConfig.DefaultString("SessionName", BConfig.WebConfig.Session.SessionName) + BConfig.WebConfig.Session.SessionProviderConfig = AppConfig.DefaultString("SessionProviderConfig", BConfig.WebConfig.Session.SessionProviderConfig) + BConfig.WebConfig.Session.SessionGCMaxLifetime = AppConfig.DefaultInt64("SessionGCMaxLifetime", BConfig.WebConfig.Session.SessionGCMaxLifetime) + BConfig.WebConfig.Session.SessionCookieLifeTime = AppConfig.DefaultInt("SessionCookieLifeTime", BConfig.WebConfig.Session.SessionCookieLifeTime) + BConfig.WebConfig.Session.SessionAutoSetCookie = AppConfig.DefaultBool("SessionAutoSetCookie", BConfig.WebConfig.Session.SessionAutoSetCookie) + BConfig.WebConfig.Session.SessionDomain = AppConfig.DefaultString("SessionDomain", BConfig.WebConfig.Session.SessionDomain) + + if sd := AppConfig.String("StaticDir"); sd != "" { + for k := range BConfig.WebConfig.StaticDir { + delete(BConfig.WebConfig.StaticDir, k) + } + sds := strings.Fields(sd) + for _, v := range sds { + if url2fsmap := strings.SplitN(v, ":", 2); len(url2fsmap) == 2 { + BConfig.WebConfig.StaticDir["/"+strings.TrimRight(url2fsmap[0], "/")] = url2fsmap[1] + } else { + BConfig.WebConfig.StaticDir["/"+strings.TrimRight(url2fsmap[0], "/")] = url2fsmap[0] + } + } + } + + if sgz := AppConfig.String("StaticExtensionsToGzip"); sgz != "" { + 
extensions := strings.Split(sgz, ",") + fileExts := []string{} + for _, ext := range extensions { + ext = strings.TrimSpace(ext) + if ext == "" { + continue + } + if !strings.HasPrefix(ext, ".") { + ext = "." + ext + } + fileExts = append(fileExts, ext) + } + if len(fileExts) > 0 { + BConfig.WebConfig.StaticExtensionsToGzip = fileExts + } + } + return nil +} + +type beegoAppConfig struct { + innerConfig config.Configer +} + +func newAppConfig(AppConfigProvider, AppConfigPath string) (*beegoAppConfig, error) { + ac, err := config.NewConfig(AppConfigProvider, AppConfigPath) + if err != nil { + return nil, err + } + return &beegoAppConfig{ac}, nil +} + +func (b *beegoAppConfig) Set(key, val string) error { + if err := b.innerConfig.Set(BConfig.RunMode+"::"+key, val); err != nil { + return err + } + return b.innerConfig.Set(key, val) +} + +func (b *beegoAppConfig) String(key string) string { + if v := b.innerConfig.String(BConfig.RunMode + "::" + key); v != "" { + return v + } + return b.innerConfig.String(key) +} + +func (b *beegoAppConfig) Strings(key string) []string { + if v := b.innerConfig.Strings(BConfig.RunMode + "::" + key); v[0] != "" { + return v + } + return b.innerConfig.Strings(key) +} + +func (b *beegoAppConfig) Int(key string) (int, error) { + if v, err := b.innerConfig.Int(BConfig.RunMode + "::" + key); err == nil { + return v, nil + } + return b.innerConfig.Int(key) +} + +func (b *beegoAppConfig) Int64(key string) (int64, error) { + if v, err := b.innerConfig.Int64(BConfig.RunMode + "::" + key); err == nil { + return v, nil + } + return b.innerConfig.Int64(key) +} + +func (b *beegoAppConfig) Bool(key string) (bool, error) { + if v, err := b.innerConfig.Bool(BConfig.RunMode + "::" + key); err == nil { + return v, nil + } + return b.innerConfig.Bool(key) +} + +func (b *beegoAppConfig) Float(key string) (float64, error) { + if v, err := b.innerConfig.Float(BConfig.RunMode + "::" + key); err == nil { + return v, nil + } + return b.innerConfig.Float(key) 
+} + +func (b *beegoAppConfig) DefaultString(key string, defaultval string) string { + if v := b.String(key); v != "" { + return v + } + return defaultval +} + +func (b *beegoAppConfig) DefaultStrings(key string, defaultval []string) []string { + if v := b.Strings(key); len(v) != 0 { + return v + } + return defaultval +} + +func (b *beegoAppConfig) DefaultInt(key string, defaultval int) int { + if v, err := b.Int(key); err == nil { + return v + } + return defaultval +} + +func (b *beegoAppConfig) DefaultInt64(key string, defaultval int64) int64 { + if v, err := b.Int64(key); err == nil { + return v + } + return defaultval +} + +func (b *beegoAppConfig) DefaultBool(key string, defaultval bool) bool { + if v, err := b.Bool(key); err == nil { + return v + } + return defaultval +} + +func (b *beegoAppConfig) DefaultFloat(key string, defaultval float64) float64 { + if v, err := b.Float(key); err == nil { + return v + } + return defaultval +} + +func (b *beegoAppConfig) DIY(key string) (interface{}, error) { + return b.innerConfig.DIY(key) +} + +func (b *beegoAppConfig) GetSection(section string) (map[string]string, error) { + return b.innerConfig.GetSection(section) +} + +func (b *beegoAppConfig) SaveConfigFile(filename string) error { + return b.innerConfig.SaveConfigFile(filename) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/config/config.go b/Godeps/_workspace/src/github.com/astaxie/beego/config/config.go new file mode 100644 index 000000000..da5d358bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/config/config.go @@ -0,0 +1,108 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package config is used to parse config +// Usage: +// import( +// "github.com/astaxie/beego/config" +// ) +// +// cnf, err := config.NewConfig("ini", "config.conf") +// +// cnf APIS: +// +// cnf.Set(key, val string) error +// cnf.String(key string) string +// cnf.Strings(key string) []string +// cnf.Int(key string) (int, error) +// cnf.Int64(key string) (int64, error) +// cnf.Bool(key string) (bool, error) +// cnf.Float(key string) (float64, error) +// cnf.DefaultString(key string, defaultVal string) string +// cnf.DefaultStrings(key string, defaultVal []string) []string +// cnf.DefaultInt(key string, defaultVal int) int +// cnf.DefaultInt64(key string, defaultVal int64) int64 +// cnf.DefaultBool(key string, defaultVal bool) bool +// cnf.DefaultFloat(key string, defaultVal float64) float64 +// cnf.DIY(key string) (interface{}, error) +// cnf.GetSection(section string) (map[string]string, error) +// cnf.SaveConfigFile(filename string) error +// +// more docs http://beego.me/docs/module/config.md +package config + +import ( + "fmt" +) + +// Configer defines how to get and set value from configuration raw data. +type Configer interface { + Set(key, val string) error //support section::key type in given key when using ini type. + String(key string) string //support section::key type in key string when using ini and json type; Int,Int64,Bool,Float,DIY are same. 
+ Strings(key string) []string //get string slice + Int(key string) (int, error) + Int64(key string) (int64, error) + Bool(key string) (bool, error) + Float(key string) (float64, error) + DefaultString(key string, defaultVal string) string // support section::key type in key string when using ini and json type; Int,Int64,Bool,Float,DIY are same. + DefaultStrings(key string, defaultVal []string) []string //get string slice + DefaultInt(key string, defaultVal int) int + DefaultInt64(key string, defaultVal int64) int64 + DefaultBool(key string, defaultVal bool) bool + DefaultFloat(key string, defaultVal float64) float64 + DIY(key string) (interface{}, error) + GetSection(section string) (map[string]string, error) + SaveConfigFile(filename string) error +} + +// Config is the adapter interface for parsing config file to get raw data to Configer. +type Config interface { + Parse(key string) (Configer, error) + ParseData(data []byte) (Configer, error) +} + +var adapters = make(map[string]Config) + +// Register makes a config adapter available by the adapter name. +// If Register is called twice with the same name or if driver is nil, +// it panics. +func Register(name string, adapter Config) { + if adapter == nil { + panic("config: Register adapter is nil") + } + if _, ok := adapters[name]; ok { + panic("config: Register called twice for adapter " + name) + } + adapters[name] = adapter +} + +// NewConfig adapterName is ini/json/xml/yaml. +// filename is the config file path. +func NewConfig(adapterName, filename string) (Configer, error) { + adapter, ok := adapters[adapterName] + if !ok { + return nil, fmt.Errorf("config: unknown adaptername %q (forgotten import?)", adapterName) + } + return adapter.Parse(filename) +} + +// NewConfigData adapterName is ini/json/xml/yaml. +// data is the config data. 
+func NewConfigData(adapterName string, data []byte) (Configer, error) { + adapter, ok := adapters[adapterName] + if !ok { + return nil, fmt.Errorf("config: unknown adaptername %q (forgotten import?)", adapterName) + } + return adapter.ParseData(data) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/config/fake.go b/Godeps/_workspace/src/github.com/astaxie/beego/config/fake.go new file mode 100644 index 000000000..50aa5d4a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/config/fake.go @@ -0,0 +1,130 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package config + +import ( + "errors" + "strconv" + "strings" +) + +type fakeConfigContainer struct { + data map[string]string +} + +func (c *fakeConfigContainer) getData(key string) string { + return c.data[strings.ToLower(key)] +} + +func (c *fakeConfigContainer) Set(key, val string) error { + c.data[strings.ToLower(key)] = val + return nil +} + +func (c *fakeConfigContainer) String(key string) string { + return c.getData(key) +} + +func (c *fakeConfigContainer) DefaultString(key string, defaultval string) string { + v := c.getData(key) + if v == "" { + return defaultval + } + return v +} + +func (c *fakeConfigContainer) Strings(key string) []string { + return strings.Split(c.getData(key), ";") +} + +func (c *fakeConfigContainer) DefaultStrings(key string, defaultval []string) []string { + v := c.Strings(key) + if len(v) == 0 { + return defaultval + } + return v +} + +func (c *fakeConfigContainer) Int(key string) (int, error) { + return strconv.Atoi(c.getData(key)) +} + +func (c *fakeConfigContainer) DefaultInt(key string, defaultval int) int { + v, err := c.Int(key) + if err != nil { + return defaultval + } + return v +} + +func (c *fakeConfigContainer) Int64(key string) (int64, error) { + return strconv.ParseInt(c.getData(key), 10, 64) +} + +func (c *fakeConfigContainer) DefaultInt64(key string, defaultval int64) int64 { + v, err := c.Int64(key) + if err != nil { + return defaultval + } + return v +} + +func (c *fakeConfigContainer) Bool(key string) (bool, error) { + return strconv.ParseBool(c.getData(key)) +} + +func (c *fakeConfigContainer) DefaultBool(key string, defaultval bool) bool { + v, err := c.Bool(key) + if err != nil { + return defaultval + } + return v +} + +func (c *fakeConfigContainer) Float(key string) (float64, error) { + return strconv.ParseFloat(c.getData(key), 64) +} + +func (c *fakeConfigContainer) DefaultFloat(key string, defaultval float64) float64 { + v, err := c.Float(key) + if err != nil { + return defaultval + } + return v +} + 
+func (c *fakeConfigContainer) DIY(key string) (interface{}, error) { + if v, ok := c.data[strings.ToLower(key)]; ok { + return v, nil + } + return nil, errors.New("key not find") +} + +func (c *fakeConfigContainer) GetSection(section string) (map[string]string, error) { + return nil, errors.New("not implement in the fakeConfigContainer") +} + +func (c *fakeConfigContainer) SaveConfigFile(filename string) error { + return errors.New("not implement in the fakeConfigContainer") +} + +var _ Configer = new(fakeConfigContainer) + +// NewFakeConfig return a fake Congiger +func NewFakeConfig() Configer { + return &fakeConfigContainer{ + data: make(map[string]string), + } +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/config/ini.go b/Godeps/_workspace/src/github.com/astaxie/beego/config/ini.go new file mode 100644 index 000000000..59e84e1e5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/config/ini.go @@ -0,0 +1,437 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package config + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "strconv" + "strings" + "sync" + "time" + "unicode" +) + +var ( + defaultSection = "default" // default section means if some ini items not in a section, make them in default section, + bNumComment = []byte{'#'} // number signal + bSemComment = []byte{';'} // semicolon signal + bEmpty = []byte{} + bEqual = []byte{'='} // equal signal + bDQuote = []byte{'"'} // quote signal + sectionStart = []byte{'['} // section start signal + sectionEnd = []byte{']'} // section end signal + lineBreak = "\n" +) + +// IniConfig implements Config to parse ini file. +type IniConfig struct { +} + +// Parse creates a new Config and parses the file configuration from the named file. +func (ini *IniConfig) Parse(name string) (Configer, error) { + return ini.parseFile(name) +} + +func (ini *IniConfig) parseFile(name string) (*IniConfigContainer, error) { + file, err := os.Open(name) + if err != nil { + return nil, err + } + + cfg := &IniConfigContainer{ + file.Name(), + make(map[string]map[string]string), + make(map[string]string), + make(map[string]string), + sync.RWMutex{}, + } + cfg.Lock() + defer cfg.Unlock() + defer file.Close() + + var comment bytes.Buffer + buf := bufio.NewReader(file) + // check the BOM + head, err := buf.Peek(3) + if err == nil && head[0] == 239 && head[1] == 187 && head[2] == 191 { + for i := 1; i <= 3; i++ { + buf.ReadByte() + } + } + section := defaultSection + for { + line, _, err := buf.ReadLine() + if err == io.EOF { + break + } + if bytes.Equal(line, bEmpty) { + continue + } + line = bytes.TrimSpace(line) + + var bComment []byte + switch { + case bytes.HasPrefix(line, bNumComment): + bComment = bNumComment + case bytes.HasPrefix(line, bSemComment): + bComment = bSemComment + } + if bComment != nil { + line = bytes.TrimLeft(line, string(bComment)) + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + comment.Write(line) + comment.WriteByte('\n') + 
continue + } + + if bytes.HasPrefix(line, sectionStart) && bytes.HasSuffix(line, sectionEnd) { + section = strings.ToLower(string(line[1 : len(line)-1])) // section name case insensitive + if comment.Len() > 0 { + cfg.sectionComment[section] = comment.String() + comment.Reset() + } + if _, ok := cfg.data[section]; !ok { + cfg.data[section] = make(map[string]string) + } + continue + } + + if _, ok := cfg.data[section]; !ok { + cfg.data[section] = make(map[string]string) + } + keyValue := bytes.SplitN(line, bEqual, 2) + + key := string(bytes.TrimSpace(keyValue[0])) // key name case insensitive + key = strings.ToLower(key) + + // handle include "other.conf" + if len(keyValue) == 1 && strings.HasPrefix(key, "include") { + includefiles := strings.Fields(key) + if includefiles[0] == "include" && len(includefiles) == 2 { + otherfile := strings.Trim(includefiles[1], "\"") + if !path.IsAbs(otherfile) { + otherfile = path.Join(path.Dir(name), otherfile) + } + i, err := ini.parseFile(otherfile) + if err != nil { + return nil, err + } + for sec, dt := range i.data { + if _, ok := cfg.data[sec]; !ok { + cfg.data[sec] = make(map[string]string) + } + for k, v := range dt { + cfg.data[sec][k] = v + } + } + for sec, comm := range i.sectionComment { + cfg.sectionComment[sec] = comm + } + for k, comm := range i.keyComment { + cfg.keyComment[k] = comm + } + continue + } + } + + if len(keyValue) != 2 { + return nil, errors.New("read the content error: \"" + string(line) + "\", should key = val") + } + val := bytes.TrimSpace(keyValue[1]) + if bytes.HasPrefix(val, bDQuote) { + val = bytes.Trim(val, `"`) + } + + cfg.data[section][key] = string(val) + if comment.Len() > 0 { + cfg.keyComment[section+"."+key] = comment.String() + comment.Reset() + } + + } + return cfg, nil +} + +// ParseData parse ini the data +func (ini *IniConfig) ParseData(data []byte) (Configer, error) { + // Save memory data to temporary file + tmpName := path.Join(os.TempDir(), "beego", fmt.Sprintf("%d", 
time.Now().Nanosecond())) + os.MkdirAll(path.Dir(tmpName), os.ModePerm) + if err := ioutil.WriteFile(tmpName, data, 0655); err != nil { + return nil, err + } + return ini.Parse(tmpName) +} + +// IniConfigContainer A Config represents the ini configuration. +// When set and get value, support key as section:name type. +type IniConfigContainer struct { + filename string + data map[string]map[string]string // section=> key:val + sectionComment map[string]string // section : comment + keyComment map[string]string // id: []{comment, key...}; id 1 is for main comment. + sync.RWMutex +} + +// Bool returns the boolean value for a given key. +func (c *IniConfigContainer) Bool(key string) (bool, error) { + return strconv.ParseBool(c.getdata(key)) +} + +// DefaultBool returns the boolean value for a given key. +// if err != nil return defaltval +func (c *IniConfigContainer) DefaultBool(key string, defaultval bool) bool { + v, err := c.Bool(key) + if err != nil { + return defaultval + } + return v +} + +// Int returns the integer value for a given key. +func (c *IniConfigContainer) Int(key string) (int, error) { + return strconv.Atoi(c.getdata(key)) +} + +// DefaultInt returns the integer value for a given key. +// if err != nil return defaltval +func (c *IniConfigContainer) DefaultInt(key string, defaultval int) int { + v, err := c.Int(key) + if err != nil { + return defaultval + } + return v +} + +// Int64 returns the int64 value for a given key. +func (c *IniConfigContainer) Int64(key string) (int64, error) { + return strconv.ParseInt(c.getdata(key), 10, 64) +} + +// DefaultInt64 returns the int64 value for a given key. +// if err != nil return defaltval +func (c *IniConfigContainer) DefaultInt64(key string, defaultval int64) int64 { + v, err := c.Int64(key) + if err != nil { + return defaultval + } + return v +} + +// Float returns the float value for a given key. 
+func (c *IniConfigContainer) Float(key string) (float64, error) { + return strconv.ParseFloat(c.getdata(key), 64) +} + +// DefaultFloat returns the float64 value for a given key. +// if err != nil return defaltval +func (c *IniConfigContainer) DefaultFloat(key string, defaultval float64) float64 { + v, err := c.Float(key) + if err != nil { + return defaultval + } + return v +} + +// String returns the string value for a given key. +func (c *IniConfigContainer) String(key string) string { + return c.getdata(key) +} + +// DefaultString returns the string value for a given key. +// if err != nil return defaltval +func (c *IniConfigContainer) DefaultString(key string, defaultval string) string { + v := c.String(key) + if v == "" { + return defaultval + } + return v +} + +// Strings returns the []string value for a given key. +func (c *IniConfigContainer) Strings(key string) []string { + return strings.Split(c.String(key), ";") +} + +// DefaultStrings returns the []string value for a given key. +// if err != nil return defaltval +func (c *IniConfigContainer) DefaultStrings(key string, defaultval []string) []string { + v := c.Strings(key) + if len(v) == 0 { + return defaultval + } + return v +} + +// GetSection returns map for the given section +func (c *IniConfigContainer) GetSection(section string) (map[string]string, error) { + if v, ok := c.data[section]; ok { + return v, nil + } + return nil, errors.New("not exist setction") +} + +// SaveConfigFile save the config into file +func (c *IniConfigContainer) SaveConfigFile(filename string) (err error) { + // Write configuration file by filename. + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + buf := bytes.NewBuffer(nil) + // Save default section at first place + if dt, ok := c.data[defaultSection]; ok { + for key, val := range dt { + if key != " " { + // Write key comments. 
+ if v, ok := c.keyComment[key]; ok { + if _, err = buf.WriteString(string(bNumComment) + v + lineBreak); err != nil { + return err + } + } + + // Write key and value. + if _, err = buf.WriteString(key + string(bEqual) + val + lineBreak); err != nil { + return err + } + } + } + + // Put a line between sections. + if _, err = buf.WriteString(lineBreak); err != nil { + return err + } + } + // Save named sections + for section, dt := range c.data { + if section != defaultSection { + // Write section comments. + if v, ok := c.sectionComment[section]; ok { + if _, err = buf.WriteString(string(bNumComment) + v + lineBreak); err != nil { + return err + } + } + + // Write section name. + if _, err = buf.WriteString(string(sectionStart) + section + string(sectionEnd) + lineBreak); err != nil { + return err + } + + for key, val := range dt { + if key != " " { + // Write key comments. + if v, ok := c.keyComment[key]; ok { + if _, err = buf.WriteString(string(bNumComment) + v + lineBreak); err != nil { + return err + } + } + + // Write key and value. + if _, err = buf.WriteString(key + string(bEqual) + val + lineBreak); err != nil { + return err + } + } + } + + // Put a line between sections. + if _, err = buf.WriteString(lineBreak); err != nil { + return err + } + } + } + + if _, err = buf.WriteTo(f); err != nil { + return err + } + return nil +} + +// Set writes a new value for key. +// if write to one section, the key need be "section::key". +// if the section is not existed, it panics. 
+func (c *IniConfigContainer) Set(key, value string) error { + c.Lock() + defer c.Unlock() + if len(key) == 0 { + return errors.New("key is empty") + } + + var ( + section, k string + sectionKey = strings.Split(key, "::") + ) + + if len(sectionKey) >= 2 { + section = sectionKey[0] + k = sectionKey[1] + } else { + section = defaultSection + k = sectionKey[0] + } + + if _, ok := c.data[section]; !ok { + c.data[section] = make(map[string]string) + } + c.data[section][k] = value + return nil +} + +// DIY returns the raw value by a given key. +func (c *IniConfigContainer) DIY(key string) (v interface{}, err error) { + if v, ok := c.data[strings.ToLower(key)]; ok { + return v, nil + } + return v, errors.New("key not find") +} + +// section.key or key +func (c *IniConfigContainer) getdata(key string) string { + if len(key) == 0 { + return "" + } + c.RLock() + defer c.RUnlock() + + var ( + section, k string + sectionKey = strings.Split(strings.ToLower(key), "::") + ) + if len(sectionKey) >= 2 { + section = sectionKey[0] + k = sectionKey[1] + } else { + section = defaultSection + k = sectionKey[0] + } + if v, ok := c.data[section]; ok { + if vv, ok := v[k]; ok { + return vv + } + } + return "" +} + +func init() { + Register("ini", &IniConfig{}) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/config/json.go b/Godeps/_workspace/src/github.com/astaxie/beego/config/json.go new file mode 100644 index 000000000..65b4ac48a --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/config/json.go @@ -0,0 +1,265 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "encoding/json" + "errors" + "io/ioutil" + "os" + "strings" + "sync" +) + +// JSONConfig is a json config parser and implements Config interface. +type JSONConfig struct { +} + +// Parse returns a ConfigContainer with parsed json config map. +func (js *JSONConfig) Parse(filename string) (Configer, error) { + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + content, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + + return js.ParseData(content) +} + +// ParseData returns a ConfigContainer with json string +func (js *JSONConfig) ParseData(data []byte) (Configer, error) { + x := &JSONConfigContainer{ + data: make(map[string]interface{}), + } + err := json.Unmarshal(data, &x.data) + if err != nil { + var wrappingArray []interface{} + err2 := json.Unmarshal(data, &wrappingArray) + if err2 != nil { + return nil, err + } + x.data["rootArray"] = wrappingArray + } + return x, nil +} + +// JSONConfigContainer A Config represents the json configuration. +// Only when get value, support key as section:name type. +type JSONConfigContainer struct { + data map[string]interface{} + sync.RWMutex +} + +// Bool returns the boolean value for a given key. 
+func (c *JSONConfigContainer) Bool(key string) (bool, error) { + val := c.getData(key) + if val != nil { + if v, ok := val.(bool); ok { + return v, nil + } + return false, errors.New("not bool value") + } + return false, errors.New("not exist key:" + key) +} + +// DefaultBool return the bool value if has no error +// otherwise return the defaultval +func (c *JSONConfigContainer) DefaultBool(key string, defaultval bool) bool { + if v, err := c.Bool(key); err == nil { + return v + } + return defaultval +} + +// Int returns the integer value for a given key. +func (c *JSONConfigContainer) Int(key string) (int, error) { + val := c.getData(key) + if val != nil { + if v, ok := val.(float64); ok { + return int(v), nil + } + return 0, errors.New("not int value") + } + return 0, errors.New("not exist key:" + key) +} + +// DefaultInt returns the integer value for a given key. +// if err != nil return defaltval +func (c *JSONConfigContainer) DefaultInt(key string, defaultval int) int { + if v, err := c.Int(key); err == nil { + return v + } + return defaultval +} + +// Int64 returns the int64 value for a given key. +func (c *JSONConfigContainer) Int64(key string) (int64, error) { + val := c.getData(key) + if val != nil { + if v, ok := val.(float64); ok { + return int64(v), nil + } + return 0, errors.New("not int64 value") + } + return 0, errors.New("not exist key:" + key) +} + +// DefaultInt64 returns the int64 value for a given key. +// if err != nil return defaltval +func (c *JSONConfigContainer) DefaultInt64(key string, defaultval int64) int64 { + if v, err := c.Int64(key); err == nil { + return v + } + return defaultval +} + +// Float returns the float value for a given key. 
+func (c *JSONConfigContainer) Float(key string) (float64, error) { + val := c.getData(key) + if val != nil { + if v, ok := val.(float64); ok { + return v, nil + } + return 0.0, errors.New("not float64 value") + } + return 0.0, errors.New("not exist key:" + key) +} + +// DefaultFloat returns the float64 value for a given key. +// if err != nil return defaltval +func (c *JSONConfigContainer) DefaultFloat(key string, defaultval float64) float64 { + if v, err := c.Float(key); err == nil { + return v + } + return defaultval +} + +// String returns the string value for a given key. +func (c *JSONConfigContainer) String(key string) string { + val := c.getData(key) + if val != nil { + if v, ok := val.(string); ok { + return v + } + } + return "" +} + +// DefaultString returns the string value for a given key. +// if err != nil return defaltval +func (c *JSONConfigContainer) DefaultString(key string, defaultval string) string { + // TODO FIXME should not use "" to replace non existence + if v := c.String(key); v != "" { + return v + } + return defaultval +} + +// Strings returns the []string value for a given key. +func (c *JSONConfigContainer) Strings(key string) []string { + stringVal := c.String(key) + if stringVal == "" { + return []string{} + } + return strings.Split(c.String(key), ";") +} + +// DefaultStrings returns the []string value for a given key. 
+// if err != nil return defaltval +func (c *JSONConfigContainer) DefaultStrings(key string, defaultval []string) []string { + if v := c.Strings(key); len(v) > 0 { + return v + } + return defaultval +} + +// GetSection returns map for the given section +func (c *JSONConfigContainer) GetSection(section string) (map[string]string, error) { + if v, ok := c.data[section]; ok { + return v.(map[string]string), nil + } + return nil, errors.New("nonexist section " + section) +} + +// SaveConfigFile save the config into file +func (c *JSONConfigContainer) SaveConfigFile(filename string) (err error) { + // Write configuration file by filename. + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + b, err := json.MarshalIndent(c.data, "", " ") + if err != nil { + return err + } + _, err = f.Write(b) + return err +} + +// Set writes a new value for key. +func (c *JSONConfigContainer) Set(key, val string) error { + c.Lock() + defer c.Unlock() + c.data[key] = val + return nil +} + +// DIY returns the raw value by a given key. 
+func (c *JSONConfigContainer) DIY(key string) (v interface{}, err error) { + val := c.getData(key) + if val != nil { + return val, nil + } + return nil, errors.New("not exist key") +} + +// section.key or key +func (c *JSONConfigContainer) getData(key string) interface{} { + if len(key) == 0 { + return nil + } + + c.RLock() + defer c.RUnlock() + + sectionKeys := strings.Split(key, "::") + if len(sectionKeys) >= 2 { + curValue, ok := c.data[sectionKeys[0]] + if !ok { + return nil + } + for _, key := range sectionKeys[1:] { + if v, ok := curValue.(map[string]interface{}); ok { + if curValue, ok = v[key]; !ok { + return nil + } + } + } + return curValue + } + if v, ok := c.data[key]; ok { + return v + } + return nil +} + +func init() { + Register("json", &JSONConfig{}) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/config/xml/xml.go b/Godeps/_workspace/src/github.com/astaxie/beego/config/xml/xml.go new file mode 100644 index 000000000..4d48f7d2c --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/config/xml/xml.go @@ -0,0 +1,229 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package xml for config provider +// +// depend on github.com/beego/x2j +// +// go install github.com/beego/x2j +// +// Usage: +// import( +// _ "github.com/astaxie/beego/config/xml" +// "github.com/astaxie/beego/config" +// ) +// +// cnf, err := config.NewConfig("xml", "config.xml") +// +// more docs http://beego.me/docs/module/config.md +package xml + +import ( + "encoding/xml" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + "strings" + "sync" + "time" + + "github.com/astaxie/beego/config" + "github.com/beego/x2j" +) + +// Config is a xml config parser and implements Config interface. +// xml configurations should be included in tag. +// only support key/value pair as value as each item. +type Config struct{} + +// Parse returns a ConfigContainer with parsed xml config map. +func (xc *Config) Parse(filename string) (config.Configer, error) { + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + x := &ConfigContainer{data: make(map[string]interface{})} + content, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + + d, err := x2j.DocToMap(string(content)) + if err != nil { + return nil, err + } + + x.data = d["config"].(map[string]interface{}) + return x, nil +} + +// ParseData xml data +func (xc *Config) ParseData(data []byte) (config.Configer, error) { + // Save memory data to temporary file + tmpName := path.Join(os.TempDir(), "beego", fmt.Sprintf("%d", time.Now().Nanosecond())) + os.MkdirAll(path.Dir(tmpName), os.ModePerm) + if err := ioutil.WriteFile(tmpName, data, 0655); err != nil { + return nil, err + } + return xc.Parse(tmpName) +} + +// ConfigContainer A Config represents the xml configuration. +type ConfigContainer struct { + data map[string]interface{} + sync.Mutex +} + +// Bool returns the boolean value for a given key. 
+func (c *ConfigContainer) Bool(key string) (bool, error) { + return strconv.ParseBool(c.data[key].(string)) +} + +// DefaultBool return the bool value if has no error +// otherwise return the defaultval +func (c *ConfigContainer) DefaultBool(key string, defaultval bool) bool { + v, err := c.Bool(key) + if err != nil { + return defaultval + } + return v +} + +// Int returns the integer value for a given key. +func (c *ConfigContainer) Int(key string) (int, error) { + return strconv.Atoi(c.data[key].(string)) +} + +// DefaultInt returns the integer value for a given key. +// if err != nil return defaltval +func (c *ConfigContainer) DefaultInt(key string, defaultval int) int { + v, err := c.Int(key) + if err != nil { + return defaultval + } + return v +} + +// Int64 returns the int64 value for a given key. +func (c *ConfigContainer) Int64(key string) (int64, error) { + return strconv.ParseInt(c.data[key].(string), 10, 64) +} + +// DefaultInt64 returns the int64 value for a given key. +// if err != nil return defaltval +func (c *ConfigContainer) DefaultInt64(key string, defaultval int64) int64 { + v, err := c.Int64(key) + if err != nil { + return defaultval + } + return v + +} + +// Float returns the float value for a given key. +func (c *ConfigContainer) Float(key string) (float64, error) { + return strconv.ParseFloat(c.data[key].(string), 64) +} + +// DefaultFloat returns the float64 value for a given key. +// if err != nil return defaltval +func (c *ConfigContainer) DefaultFloat(key string, defaultval float64) float64 { + v, err := c.Float(key) + if err != nil { + return defaultval + } + return v +} + +// String returns the string value for a given key. +func (c *ConfigContainer) String(key string) string { + if v, ok := c.data[key].(string); ok { + return v + } + return "" +} + +// DefaultString returns the string value for a given key. 
+// if err != nil return defaltval +func (c *ConfigContainer) DefaultString(key string, defaultval string) string { + v := c.String(key) + if v == "" { + return defaultval + } + return v +} + +// Strings returns the []string value for a given key. +func (c *ConfigContainer) Strings(key string) []string { + return strings.Split(c.String(key), ";") +} + +// DefaultStrings returns the []string value for a given key. +// if err != nil return defaltval +func (c *ConfigContainer) DefaultStrings(key string, defaultval []string) []string { + v := c.Strings(key) + if len(v) == 0 { + return defaultval + } + return v +} + +// GetSection returns map for the given section +func (c *ConfigContainer) GetSection(section string) (map[string]string, error) { + if v, ok := c.data[section]; ok { + return v.(map[string]string), nil + } + return nil, errors.New("not exist setction") +} + +// SaveConfigFile save the config into file +func (c *ConfigContainer) SaveConfigFile(filename string) (err error) { + // Write configuration file by filename. + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + b, err := xml.MarshalIndent(c.data, " ", " ") + if err != nil { + return err + } + _, err = f.Write(b) + return err +} + +// Set writes a new value for key. +func (c *ConfigContainer) Set(key, val string) error { + c.Lock() + defer c.Unlock() + c.data[key] = val + return nil +} + +// DIY returns the raw value by a given key. 
+func (c *ConfigContainer) DIY(key string) (v interface{}, err error) { + if v, ok := c.data[key]; ok { + return v, nil + } + return nil, errors.New("not exist key") +} + +func init() { + config.Register("xml", &Config{}) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/config/yaml/yaml.go b/Godeps/_workspace/src/github.com/astaxie/beego/config/yaml/yaml.go new file mode 100644 index 000000000..f034d3ba3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/config/yaml/yaml.go @@ -0,0 +1,266 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml for config provider +// +// depend on github.com/beego/goyaml2 +// +// go install github.com/beego/goyaml2 +// +// Usage: +// import( +// _ "github.com/astaxie/beego/config/yaml" +// "github.com/astaxie/beego/config" +// ) +// +// cnf, err := config.NewConfig("yaml", "config.yaml") +// +// more docs http://beego.me/docs/module/config.md +package yaml + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "path" + "strings" + "sync" + "time" + + "github.com/astaxie/beego/config" + "github.com/beego/goyaml2" +) + +// Config is a yaml config parser and implements Config interface. +type Config struct{} + +// Parse returns a ConfigContainer with parsed yaml config map. 
+func (yaml *Config) Parse(filename string) (y config.Configer, err error) { + cnf, err := ReadYmlReader(filename) + if err != nil { + return + } + y = &ConfigContainer{ + data: cnf, + } + return +} + +// ParseData parse yaml data +func (yaml *Config) ParseData(data []byte) (config.Configer, error) { + // Save memory data to temporary file + tmpName := path.Join(os.TempDir(), "beego", fmt.Sprintf("%d", time.Now().Nanosecond())) + os.MkdirAll(path.Dir(tmpName), os.ModePerm) + if err := ioutil.WriteFile(tmpName, data, 0655); err != nil { + return nil, err + } + return yaml.Parse(tmpName) +} + +// ReadYmlReader Read yaml file to map. +// if json like, use json package, unless goyaml2 package. +func ReadYmlReader(path string) (cnf map[string]interface{}, err error) { + f, err := os.Open(path) + if err != nil { + return + } + defer f.Close() + + buf, err := ioutil.ReadAll(f) + if err != nil || len(buf) < 3 { + return + } + + if string(buf[0:1]) == "{" { + log.Println("Look like a Json, try json umarshal") + err = json.Unmarshal(buf, &cnf) + if err == nil { + log.Println("It is Json Map") + return + } + } + + data, err := goyaml2.Read(bytes.NewBuffer(buf)) + if err != nil { + log.Println("Goyaml2 ERR>", string(buf), err) + return + } + + if data == nil { + log.Println("Goyaml2 output nil? Pls report bug\n" + string(buf)) + return + } + cnf, ok := data.(map[string]interface{}) + if !ok { + log.Println("Not a Map? >> ", string(buf), data) + cnf = nil + } + return +} + +// ConfigContainer A Config represents the yaml configuration. +type ConfigContainer struct { + data map[string]interface{} + sync.Mutex +} + +// Bool returns the boolean value for a given key. 
+func (c *ConfigContainer) Bool(key string) (bool, error) { + if v, ok := c.data[key].(bool); ok { + return v, nil + } + return false, errors.New("not bool value") +} + +// DefaultBool return the bool value if has no error +// otherwise return the defaultval +func (c *ConfigContainer) DefaultBool(key string, defaultval bool) bool { + v, err := c.Bool(key) + if err != nil { + return defaultval + } + return v +} + +// Int returns the integer value for a given key. +func (c *ConfigContainer) Int(key string) (int, error) { + if v, ok := c.data[key].(int64); ok { + return int(v), nil + } + return 0, errors.New("not int value") +} + +// DefaultInt returns the integer value for a given key. +// if err != nil return defaltval +func (c *ConfigContainer) DefaultInt(key string, defaultval int) int { + v, err := c.Int(key) + if err != nil { + return defaultval + } + return v +} + +// Int64 returns the int64 value for a given key. +func (c *ConfigContainer) Int64(key string) (int64, error) { + if v, ok := c.data[key].(int64); ok { + return v, nil + } + return 0, errors.New("not bool value") +} + +// DefaultInt64 returns the int64 value for a given key. +// if err != nil return defaltval +func (c *ConfigContainer) DefaultInt64(key string, defaultval int64) int64 { + v, err := c.Int64(key) + if err != nil { + return defaultval + } + return v +} + +// Float returns the float value for a given key. +func (c *ConfigContainer) Float(key string) (float64, error) { + if v, ok := c.data[key].(float64); ok { + return v, nil + } + return 0.0, errors.New("not float64 value") +} + +// DefaultFloat returns the float64 value for a given key. +// if err != nil return defaltval +func (c *ConfigContainer) DefaultFloat(key string, defaultval float64) float64 { + v, err := c.Float(key) + if err != nil { + return defaultval + } + return v +} + +// String returns the string value for a given key. 
+func (c *ConfigContainer) String(key string) string { + if v, ok := c.data[key].(string); ok { + return v + } + return "" +} + +// DefaultString returns the string value for a given key. +// if err != nil return defaltval +func (c *ConfigContainer) DefaultString(key string, defaultval string) string { + v := c.String(key) + if v == "" { + return defaultval + } + return v +} + +// Strings returns the []string value for a given key. +func (c *ConfigContainer) Strings(key string) []string { + return strings.Split(c.String(key), ";") +} + +// DefaultStrings returns the []string value for a given key. +// if err != nil return defaltval +func (c *ConfigContainer) DefaultStrings(key string, defaultval []string) []string { + v := c.Strings(key) + if len(v) == 0 { + return defaultval + } + return v +} + +// GetSection returns map for the given section +func (c *ConfigContainer) GetSection(section string) (map[string]string, error) { + v, ok := c.data[section] + if ok { + return v.(map[string]string), nil + } + return nil, errors.New("not exist setction") +} + +// SaveConfigFile save the config into file +func (c *ConfigContainer) SaveConfigFile(filename string) (err error) { + // Write configuration file by filename. + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + err = goyaml2.Write(f, c.data) + return err +} + +// Set writes a new value for key. +func (c *ConfigContainer) Set(key, val string) error { + c.Lock() + defer c.Unlock() + c.data[key] = val + return nil +} + +// DIY returns the raw value by a given key. 
+func (c *ConfigContainer) DIY(key string) (v interface{}, err error) { + if v, ok := c.data[key]; ok { + return v, nil + } + return nil, errors.New("not exist key") +} + +func init() { + config.Register("yaml", &Config{}) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/context/acceptencoder.go b/Godeps/_workspace/src/github.com/astaxie/beego/context/acceptencoder.go new file mode 100644 index 000000000..033d9ca8c --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/context/acceptencoder.go @@ -0,0 +1,197 @@ +// Copyright 2015 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package context + +import ( + "bytes" + "compress/flate" + "compress/gzip" + "compress/zlib" + "io" + "net/http" + "os" + "strconv" + "strings" + "sync" +) + +type resetWriter interface { + io.Writer + Reset(w io.Writer) +} + +type nopResetWriter struct { + io.Writer +} + +func (n nopResetWriter) Reset(w io.Writer) { + //do nothing +} + +type acceptEncoder struct { + name string + levelEncode func(int) resetWriter + bestSpeedPool *sync.Pool + bestCompressionPool *sync.Pool +} + +func (ac acceptEncoder) encode(wr io.Writer, level int) resetWriter { + if ac.bestSpeedPool == nil || ac.bestCompressionPool == nil { + return nopResetWriter{wr} + } + var rwr resetWriter + switch level { + case flate.BestSpeed: + rwr = ac.bestSpeedPool.Get().(resetWriter) + case flate.BestCompression: + rwr = ac.bestCompressionPool.Get().(resetWriter) + default: + rwr = ac.levelEncode(level) + } + rwr.Reset(wr) + return rwr +} + +func (ac acceptEncoder) put(wr resetWriter, level int) { + if ac.bestSpeedPool == nil || ac.bestCompressionPool == nil { + return + } + wr.Reset(nil) + switch level { + case flate.BestSpeed: + ac.bestSpeedPool.Put(wr) + case flate.BestCompression: + ac.bestCompressionPool.Put(wr) + } +} + +var ( + noneCompressEncoder = acceptEncoder{"", nil, nil, nil} + gzipCompressEncoder = acceptEncoder{"gzip", + func(level int) resetWriter { wr, _ := gzip.NewWriterLevel(nil, level); return wr }, + &sync.Pool{ + New: func() interface{} { wr, _ := gzip.NewWriterLevel(nil, flate.BestSpeed); return wr }, + }, + &sync.Pool{ + New: func() interface{} { wr, _ := gzip.NewWriterLevel(nil, flate.BestCompression); return wr }, + }, + } + + //according to the sec :http://tools.ietf.org/html/rfc2616#section-3.5 ,the deflate compress in http is zlib indeed + //deflate + //The "zlib" format defined in RFC 1950 [31] in combination with + //the "deflate" compression mechanism described in RFC 1951 [29]. 
+ deflateCompressEncoder = acceptEncoder{"deflate", + func(level int) resetWriter { wr, _ := zlib.NewWriterLevel(nil, level); return wr }, + &sync.Pool{ + New: func() interface{} { wr, _ := zlib.NewWriterLevel(nil, flate.BestSpeed); return wr }, + }, + &sync.Pool{ + New: func() interface{} { wr, _ := zlib.NewWriterLevel(nil, flate.BestCompression); return wr }, + }, + } +) + +var ( + encoderMap = map[string]acceptEncoder{ // all the other compress methods will ignore + "gzip": gzipCompressEncoder, + "deflate": deflateCompressEncoder, + "*": gzipCompressEncoder, // * means any compress will accept,we prefer gzip + "identity": noneCompressEncoder, // identity means none-compress + } +) + +// WriteFile reads from file and writes to writer by the specific encoding(gzip/deflate) +func WriteFile(encoding string, writer io.Writer, file *os.File) (bool, string, error) { + return writeLevel(encoding, writer, file, flate.BestCompression) +} + +// WriteBody reads writes content to writer by the specific encoding(gzip/deflate) +func WriteBody(encoding string, writer io.Writer, content []byte) (bool, string, error) { + return writeLevel(encoding, writer, bytes.NewReader(content), flate.BestSpeed) +} + +// writeLevel reads from reader,writes to writer by specific encoding and compress level +// the compress level is defined by deflate package +func writeLevel(encoding string, writer io.Writer, reader io.Reader, level int) (bool, string, error) { + var outputWriter resetWriter + var err error + var ce = noneCompressEncoder + + if cf, ok := encoderMap[encoding]; ok { + ce = cf + } + encoding = ce.name + outputWriter = ce.encode(writer, level) + defer ce.put(outputWriter, level) + + _, err = io.Copy(outputWriter, reader) + if err != nil { + return false, "", err + } + + switch outputWriter.(type) { + case io.WriteCloser: + outputWriter.(io.WriteCloser).Close() + } + return encoding != "", encoding, nil +} + +// ParseEncoding will extract the right encoding for response +// the 
Accept-Encoding's sec is here: +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.3 +func ParseEncoding(r *http.Request) string { + if r == nil { + return "" + } + return parseEncoding(r) +} + +type q struct { + name string + value float64 +} + +func parseEncoding(r *http.Request) string { + acceptEncoding := r.Header.Get("Accept-Encoding") + if acceptEncoding == "" { + return "" + } + var lastQ q + for _, v := range strings.Split(acceptEncoding, ",") { + v = strings.TrimSpace(v) + if v == "" { + continue + } + vs := strings.Split(v, ";") + if len(vs) == 1 { + lastQ = q{vs[0], 1} + break + } + if len(vs) == 2 { + f, _ := strconv.ParseFloat(strings.Replace(vs[1], "q=", "", -1), 64) + if f == 0 { + continue + } + if f > lastQ.value { + lastQ = q{vs[0], f} + } + } + } + if cf, ok := encoderMap[lastQ.name]; ok { + return cf.name + } + return "" +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/context/context.go b/Godeps/_workspace/src/github.com/astaxie/beego/context/context.go new file mode 100644 index 000000000..db790ff2f --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/context/context.go @@ -0,0 +1,217 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package context provide the context utils +// Usage: +// +// import "github.com/astaxie/beego/context" +// +// ctx := context.Context{Request:req,ResponseWriter:rw} +// +// more docs http://beego.me/docs/module/context.md +package context + +import ( + "bufio" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "errors" + "fmt" + "net" + "net/http" + "strconv" + "strings" + "time" + + "github.com/astaxie/beego/utils" +) + +// NewContext return the Context with Input and Output +func NewContext() *Context { + return &Context{ + Input: NewInput(), + Output: NewOutput(), + } +} + +// Context Http request context struct including BeegoInput, BeegoOutput, http.Request and http.ResponseWriter. +// BeegoInput and BeegoOutput provides some api to operate request and response more easily. +type Context struct { + Input *BeegoInput + Output *BeegoOutput + Request *http.Request + ResponseWriter *Response + _xsrfToken string +} + +// Reset init Context, BeegoInput and BeegoOutput +func (ctx *Context) Reset(rw http.ResponseWriter, r *http.Request) { + ctx.Request = r + ctx.ResponseWriter = &Response{rw, false, 0} + ctx.Input.Reset(ctx) + ctx.Output.Reset(ctx) +} + +// Redirect does redirection to localurl with http header status code. +// It sends http response header directly. +func (ctx *Context) Redirect(status int, localurl string) { + ctx.Output.Header("Location", localurl) + ctx.ResponseWriter.WriteHeader(status) +} + +// Abort stops this request. +// if beego.ErrorMaps exists, panic body. +func (ctx *Context) Abort(status int, body string) { + panic(body) +} + +// WriteString Write string to response body. +// it sends response body. +func (ctx *Context) WriteString(content string) { + ctx.ResponseWriter.Write([]byte(content)) +} + +// GetCookie Get cookie from request by a given key. +// It's alias of BeegoInput.Cookie. +func (ctx *Context) GetCookie(key string) string { + return ctx.Input.Cookie(key) +} + +// SetCookie Set cookie for response. 
+// It's alias of BeegoOutput.Cookie. +func (ctx *Context) SetCookie(name string, value string, others ...interface{}) { + ctx.Output.Cookie(name, value, others...) +} + +// GetSecureCookie Get secure cookie from request by a given key. +func (ctx *Context) GetSecureCookie(Secret, key string) (string, bool) { + val := ctx.Input.Cookie(key) + if val == "" { + return "", false + } + + parts := strings.SplitN(val, "|", 3) + + if len(parts) != 3 { + return "", false + } + + vs := parts[0] + timestamp := parts[1] + sig := parts[2] + + h := hmac.New(sha1.New, []byte(Secret)) + fmt.Fprintf(h, "%s%s", vs, timestamp) + + if fmt.Sprintf("%02x", h.Sum(nil)) != sig { + return "", false + } + res, _ := base64.URLEncoding.DecodeString(vs) + return string(res), true +} + +// SetSecureCookie Set Secure cookie for response. +func (ctx *Context) SetSecureCookie(Secret, name, value string, others ...interface{}) { + vs := base64.URLEncoding.EncodeToString([]byte(value)) + timestamp := strconv.FormatInt(time.Now().UnixNano(), 10) + h := hmac.New(sha1.New, []byte(Secret)) + fmt.Fprintf(h, "%s%s", vs, timestamp) + sig := fmt.Sprintf("%02x", h.Sum(nil)) + cookie := strings.Join([]string{vs, timestamp, sig}, "|") + ctx.Output.Cookie(name, cookie, others...) +} + +// XSRFToken creates a xsrf token string and returns. +func (ctx *Context) XSRFToken(key string, expire int64) string { + if ctx._xsrfToken == "" { + token, ok := ctx.GetSecureCookie(key, "_xsrf") + if !ok { + token = string(utils.RandomCreateBytes(32)) + ctx.SetSecureCookie(key, "_xsrf", token, expire) + } + ctx._xsrfToken = token + } + return ctx._xsrfToken +} + +// CheckXSRFCookie checks xsrf token in this request is valid or not. +// the token can provided in request header "X-Xsrftoken" and "X-CsrfToken" +// or in form field value named as "_xsrf". 
+func (ctx *Context) CheckXSRFCookie() bool { + token := ctx.Input.Query("_xsrf") + if token == "" { + token = ctx.Request.Header.Get("X-Xsrftoken") + } + if token == "" { + token = ctx.Request.Header.Get("X-Csrftoken") + } + if token == "" { + ctx.Abort(403, "'_xsrf' argument missing from POST") + return false + } + if ctx._xsrfToken != token { + ctx.Abort(403, "XSRF cookie does not match POST argument") + return false + } + return true +} + +//Response is a wrapper for the http.ResponseWriter +//started set to true if response was written to then don't execute other handler +type Response struct { + http.ResponseWriter + Started bool + Status int +} + +// Write writes the data to the connection as part of an HTTP reply, +// and sets `started` to true. +// started means the response has sent out. +func (w *Response) Write(p []byte) (int, error) { + w.Started = true + return w.ResponseWriter.Write(p) +} + +// WriteHeader sends an HTTP response header with status code, +// and sets `started` to true. 
+func (w *Response) WriteHeader(code int) { + w.Status = code + w.Started = true + w.ResponseWriter.WriteHeader(code) +} + +// Hijack hijacker for http +func (w *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hj, ok := w.ResponseWriter.(http.Hijacker) + if !ok { + return nil, nil, errors.New("webserver doesn't support hijacking") + } + return hj.Hijack() +} + +// Flush http.Flusher +func (w *Response) Flush() { + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +// CloseNotify http.CloseNotifier +func (w *Response) CloseNotify() <-chan bool { + if cn, ok := w.ResponseWriter.(http.CloseNotifier); ok { + return cn.CloseNotify() + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/context/input.go b/Godeps/_workspace/src/github.com/astaxie/beego/context/input.go new file mode 100644 index 000000000..c37204bdc --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/context/input.go @@ -0,0 +1,615 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package context + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "net/url" + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/astaxie/beego/session" +) + +// Regexes for checking the accept headers +// TODO make sure these are correct +var ( + acceptsHTMLRegex = regexp.MustCompile(`(text/html|application/xhtml\+xml)(?:,|$)`) + acceptsXMLRegex = regexp.MustCompile(`(application/xml|text/xml)(?:,|$)`) + acceptsJSONRegex = regexp.MustCompile(`(application/json)(?:,|$)`) + maxParam = 50 +) + +// BeegoInput operates the http request header, data, cookie and body. +// it also contains router params and current session. +type BeegoInput struct { + Context *Context + CruSession session.Store + pnames []string + pvalues []string + data map[interface{}]interface{} // store some values in this context when calling context in filter or controller. + RequestBody []byte +} + +// NewInput return BeegoInput generated by Context. +func NewInput() *BeegoInput { + return &BeegoInput{ + pnames: make([]string, 0, maxParam), + pvalues: make([]string, 0, maxParam), + data: make(map[interface{}]interface{}), + } +} + +// Reset init the BeegoInput +func (input *BeegoInput) Reset(ctx *Context) { + input.Context = ctx + input.CruSession = nil + input.pnames = input.pnames[:0] + input.pvalues = input.pvalues[:0] + input.data = nil + input.RequestBody = []byte{} +} + +// Protocol returns request protocol name, such as HTTP/1.1 . +func (input *BeegoInput) Protocol() string { + return input.Context.Request.Proto +} + +// URI returns full request url with query string, fragment. +func (input *BeegoInput) URI() string { + return input.Context.Request.RequestURI +} + +// URL returns request url path (without query string, fragment). +func (input *BeegoInput) URL() string { + return input.Context.Request.URL.Path +} + +// Site returns base site url as scheme://domain type. 
+func (input *BeegoInput) Site() string { + return input.Scheme() + "://" + input.Domain() +} + +// Scheme returns request scheme as "http" or "https". +func (input *BeegoInput) Scheme() string { + if input.Context.Request.URL.Scheme != "" { + return input.Context.Request.URL.Scheme + } + if input.Context.Request.TLS == nil { + return "http" + } + return "https" +} + +// Domain returns host name. +// Alias of Host method. +func (input *BeegoInput) Domain() string { + return input.Host() +} + +// Host returns host name. +// if no host info in request, return localhost. +func (input *BeegoInput) Host() string { + if input.Context.Request.Host != "" { + hostParts := strings.Split(input.Context.Request.Host, ":") + if len(hostParts) > 0 { + return hostParts[0] + } + return input.Context.Request.Host + } + return "localhost" +} + +// Method returns http request method. +func (input *BeegoInput) Method() string { + return input.Context.Request.Method +} + +// Is returns boolean of this request is on given method, such as Is("POST"). +func (input *BeegoInput) Is(method string) bool { + return input.Method() == method +} + +// IsGet Is this a GET method request? +func (input *BeegoInput) IsGet() bool { + return input.Is("GET") +} + +// IsPost Is this a POST method request? +func (input *BeegoInput) IsPost() bool { + return input.Is("POST") +} + +// IsHead Is this a Head method request? +func (input *BeegoInput) IsHead() bool { + return input.Is("HEAD") +} + +// IsOptions Is this a OPTIONS method request? +func (input *BeegoInput) IsOptions() bool { + return input.Is("OPTIONS") +} + +// IsPut Is this a PUT method request? +func (input *BeegoInput) IsPut() bool { + return input.Is("PUT") +} + +// IsDelete Is this a DELETE method request? +func (input *BeegoInput) IsDelete() bool { + return input.Is("DELETE") +} + +// IsPatch Is this a PATCH method request? 
+func (input *BeegoInput) IsPatch() bool { + return input.Is("PATCH") +} + +// IsAjax returns boolean of this request is generated by ajax. +func (input *BeegoInput) IsAjax() bool { + return input.Header("X-Requested-With") == "XMLHttpRequest" +} + +// IsSecure returns boolean of this request is in https. +func (input *BeegoInput) IsSecure() bool { + return input.Scheme() == "https" +} + +// IsWebsocket returns boolean of this request is in webSocket. +func (input *BeegoInput) IsWebsocket() bool { + return input.Header("Upgrade") == "websocket" +} + +// IsUpload returns boolean of whether file uploads in this request or not.. +func (input *BeegoInput) IsUpload() bool { + return strings.Contains(input.Header("Content-Type"), "multipart/form-data") +} + +// AcceptsHTML Checks if request accepts html response +func (input *BeegoInput) AcceptsHTML() bool { + return acceptsHTMLRegex.MatchString(input.Header("Accept")) +} + +// AcceptsXML Checks if request accepts xml response +func (input *BeegoInput) AcceptsXML() bool { + return acceptsXMLRegex.MatchString(input.Header("Accept")) +} + +// AcceptsJSON Checks if request accepts json response +func (input *BeegoInput) AcceptsJSON() bool { + return acceptsJSONRegex.MatchString(input.Header("Accept")) +} + +// IP returns request client ip. +// if in proxy, return first proxy id. +// if error, return 127.0.0.1. +func (input *BeegoInput) IP() string { + ips := input.Proxy() + if len(ips) > 0 && ips[0] != "" { + rip := strings.Split(ips[0], ":") + return rip[0] + } + ip := strings.Split(input.Context.Request.RemoteAddr, ":") + if len(ip) > 0 { + if ip[0] != "[" { + return ip[0] + } + } + return "127.0.0.1" +} + +// Proxy returns proxy client ips slice. +func (input *BeegoInput) Proxy() []string { + if ips := input.Header("X-Forwarded-For"); ips != "" { + return strings.Split(ips, ",") + } + return []string{} +} + +// Referer returns http referer header. 
+func (input *BeegoInput) Referer() string { + return input.Header("Referer") +} + +// Refer returns http referer header. +func (input *BeegoInput) Refer() string { + return input.Referer() +} + +// SubDomains returns sub domain string. +// if aa.bb.domain.com, returns aa.bb . +func (input *BeegoInput) SubDomains() string { + parts := strings.Split(input.Host(), ".") + if len(parts) >= 3 { + return strings.Join(parts[:len(parts)-2], ".") + } + return "" +} + +// Port returns request client port. +// when error or empty, return 80. +func (input *BeegoInput) Port() int { + parts := strings.Split(input.Context.Request.Host, ":") + if len(parts) == 2 { + port, _ := strconv.Atoi(parts[1]) + return port + } + return 80 +} + +// UserAgent returns request client user agent string. +func (input *BeegoInput) UserAgent() string { + return input.Header("User-Agent") +} + +// ParamsLen return the length of the params +func (input *BeegoInput) ParamsLen() int { + return len(input.pnames) +} + +// Param returns router param by a given key. +func (input *BeegoInput) Param(key string) string { + for i, v := range input.pnames { + if v == key && i <= len(input.pvalues) { + return input.pvalues[i] + } + } + return "" +} + +// Params returns the map[key]value. +func (input *BeegoInput) Params() map[string]string { + m := make(map[string]string) + for i, v := range input.pnames { + if i <= len(input.pvalues) { + m[v] = input.pvalues[i] + } + } + return m +} + +// SetParam will set the param with key and value +func (input *BeegoInput) SetParam(key, val string) { + input.pvalues = append(input.pvalues, val) + input.pnames = append(input.pnames, key) +} + +// Query returns input data item string by a given string. 
+func (input *BeegoInput) Query(key string) string { + if val := input.Param(key); val != "" { + return val + } + if input.Context.Request.Form == nil { + input.Context.Request.ParseForm() + } + return input.Context.Request.Form.Get(key) +} + +// Header returns request header item string by a given string. +// if non-existed, return empty string. +func (input *BeegoInput) Header(key string) string { + return input.Context.Request.Header.Get(key) +} + +// Cookie returns request cookie item string by a given key. +// if non-existed, return empty string. +func (input *BeegoInput) Cookie(key string) string { + ck, err := input.Context.Request.Cookie(key) + if err != nil { + return "" + } + return ck.Value +} + +// Session returns current session item value by a given key. +// if non-existed, return empty string. +func (input *BeegoInput) Session(key interface{}) interface{} { + return input.CruSession.Get(key) +} + +// CopyBody returns the raw request body data as bytes. +func (input *BeegoInput) CopyBody(MaxMemory int64) []byte { + safe := &io.LimitedReader{R: input.Context.Request.Body, N: MaxMemory} + requestbody, _ := ioutil.ReadAll(safe) + input.Context.Request.Body.Close() + bf := bytes.NewBuffer(requestbody) + input.Context.Request.Body = ioutil.NopCloser(bf) + input.RequestBody = requestbody + return requestbody +} + +// Data return the implicit data in the input +func (input *BeegoInput) Data() map[interface{}]interface{} { + if input.data == nil { + input.data = make(map[interface{}]interface{}) + } + return input.data +} + +// GetData returns the stored data in this context. +func (input *BeegoInput) GetData(key interface{}) interface{} { + if v, ok := input.data[key]; ok { + return v + } + return nil +} + +// SetData stores data with given key in this context. +// This data are only available in this context. 
+func (input *BeegoInput) SetData(key, val interface{}) { + if input.data == nil { + input.data = make(map[interface{}]interface{}) + } + input.data[key] = val +} + +// ParseFormOrMulitForm parseForm or parseMultiForm based on Content-type +func (input *BeegoInput) ParseFormOrMulitForm(maxMemory int64) error { + // Parse the body depending on the content type. + if strings.Contains(input.Header("Content-Type"), "multipart/form-data") { + if err := input.Context.Request.ParseMultipartForm(maxMemory); err != nil { + return errors.New("Error parsing request body:" + err.Error()) + } + } else if err := input.Context.Request.ParseForm(); err != nil { + return errors.New("Error parsing request body:" + err.Error()) + } + return nil +} + +// Bind data from request.Form[key] to dest +// like /?id=123&isok=true&ft=1.2&ol[0]=1&ol[1]=2&ul[]=str&ul[]=array&user.Name=astaxie +// var id int beegoInput.Bind(&id, "id") id ==123 +// var isok bool beegoInput.Bind(&isok, "isok") isok ==true +// var ft float64 beegoInput.Bind(&ft, "ft") ft ==1.2 +// ol := make([]int, 0, 2) beegoInput.Bind(&ol, "ol") ol ==[1 2] +// ul := make([]string, 0, 2) beegoInput.Bind(&ul, "ul") ul ==[str array] +// user struct{Name} beegoInput.Bind(&user, "user") user == {Name:"astaxie"} +func (input *BeegoInput) Bind(dest interface{}, key string) error { + value := reflect.ValueOf(dest) + if value.Kind() != reflect.Ptr { + return errors.New("beego: non-pointer passed to Bind: " + key) + } + value = value.Elem() + if !value.CanSet() { + return errors.New("beego: non-settable variable passed to Bind: " + key) + } + rv := input.bind(key, value.Type()) + if !rv.IsValid() { + return errors.New("beego: reflect value is empty") + } + value.Set(rv) + return nil +} + +func (input *BeegoInput) bind(key string, typ reflect.Type) reflect.Value { + rv := reflect.Zero(typ) + switch typ.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + val := input.Query(key) + if len(val) == 0 { + 
return rv + } + rv = input.bindInt(val, typ) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val := input.Query(key) + if len(val) == 0 { + return rv + } + rv = input.bindUint(val, typ) + case reflect.Float32, reflect.Float64: + val := input.Query(key) + if len(val) == 0 { + return rv + } + rv = input.bindFloat(val, typ) + case reflect.String: + val := input.Query(key) + if len(val) == 0 { + return rv + } + rv = input.bindString(val, typ) + case reflect.Bool: + val := input.Query(key) + if len(val) == 0 { + return rv + } + rv = input.bindBool(val, typ) + case reflect.Slice: + rv = input.bindSlice(&input.Context.Request.Form, key, typ) + case reflect.Struct: + rv = input.bindStruct(&input.Context.Request.Form, key, typ) + case reflect.Ptr: + rv = input.bindPoint(key, typ) + case reflect.Map: + rv = input.bindMap(&input.Context.Request.Form, key, typ) + } + return rv +} + +func (input *BeegoInput) bindValue(val string, typ reflect.Type) reflect.Value { + rv := reflect.Zero(typ) + switch typ.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + rv = input.bindInt(val, typ) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + rv = input.bindUint(val, typ) + case reflect.Float32, reflect.Float64: + rv = input.bindFloat(val, typ) + case reflect.String: + rv = input.bindString(val, typ) + case reflect.Bool: + rv = input.bindBool(val, typ) + case reflect.Slice: + rv = input.bindSlice(&url.Values{"": {val}}, "", typ) + case reflect.Struct: + rv = input.bindStruct(&url.Values{"": {val}}, "", typ) + case reflect.Ptr: + rv = input.bindPoint(val, typ) + case reflect.Map: + rv = input.bindMap(&url.Values{"": {val}}, "", typ) + } + return rv +} + +func (input *BeegoInput) bindInt(val string, typ reflect.Type) reflect.Value { + intValue, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return reflect.Zero(typ) + } + pValue := reflect.New(typ) + 
pValue.Elem().SetInt(intValue) + return pValue.Elem() +} + +func (input *BeegoInput) bindUint(val string, typ reflect.Type) reflect.Value { + uintValue, err := strconv.ParseUint(val, 10, 64) + if err != nil { + return reflect.Zero(typ) + } + pValue := reflect.New(typ) + pValue.Elem().SetUint(uintValue) + return pValue.Elem() +} + +func (input *BeegoInput) bindFloat(val string, typ reflect.Type) reflect.Value { + floatValue, err := strconv.ParseFloat(val, 64) + if err != nil { + return reflect.Zero(typ) + } + pValue := reflect.New(typ) + pValue.Elem().SetFloat(floatValue) + return pValue.Elem() +} + +func (input *BeegoInput) bindString(val string, typ reflect.Type) reflect.Value { + return reflect.ValueOf(val) +} + +func (input *BeegoInput) bindBool(val string, typ reflect.Type) reflect.Value { + val = strings.TrimSpace(strings.ToLower(val)) + switch val { + case "true", "on", "1": + return reflect.ValueOf(true) + } + return reflect.ValueOf(false) +} + +type sliceValue struct { + index int // Index extracted from brackets. If -1, no index was provided. + value reflect.Value // the bound value for this slice element. +} + +func (input *BeegoInput) bindSlice(params *url.Values, key string, typ reflect.Type) reflect.Value { + maxIndex := -1 + numNoIndex := 0 + sliceValues := []sliceValue{} + for reqKey, vals := range *params { + if !strings.HasPrefix(reqKey, key+"[") { + continue + } + // Extract the index, and the index where a sub-key starts. (e.g. field[0].subkey) + index := -1 + leftBracket, rightBracket := len(key), strings.Index(reqKey[len(key):], "]")+len(key) + if rightBracket > leftBracket+1 { + index, _ = strconv.Atoi(reqKey[leftBracket+1 : rightBracket]) + } + subKeyIndex := rightBracket + 1 + + // Handle the indexed case. + if index > -1 { + if index > maxIndex { + maxIndex = index + } + sliceValues = append(sliceValues, sliceValue{ + index: index, + value: input.bind(reqKey[:subKeyIndex], typ.Elem()), + }) + continue + } + + // It's an un-indexed element. 
(e.g. element[]) + numNoIndex += len(vals) + for _, val := range vals { + // Unindexed values can only be direct-bound. + sliceValues = append(sliceValues, sliceValue{ + index: -1, + value: input.bindValue(val, typ.Elem()), + }) + } + } + resultArray := reflect.MakeSlice(typ, maxIndex+1, maxIndex+1+numNoIndex) + for _, sv := range sliceValues { + if sv.index != -1 { + resultArray.Index(sv.index).Set(sv.value) + } else { + resultArray = reflect.Append(resultArray, sv.value) + } + } + return resultArray +} + +func (input *BeegoInput) bindStruct(params *url.Values, key string, typ reflect.Type) reflect.Value { + result := reflect.New(typ).Elem() + fieldValues := make(map[string]reflect.Value) + for reqKey, val := range *params { + if !strings.HasPrefix(reqKey, key+".") { + continue + } + + fieldName := reqKey[len(key)+1:] + + if _, ok := fieldValues[fieldName]; !ok { + // Time to bind this field. Get it and make sure we can set it. + fieldValue := result.FieldByName(fieldName) + if !fieldValue.IsValid() { + continue + } + if !fieldValue.CanSet() { + continue + } + boundVal := input.bindValue(val[0], fieldValue.Type()) + fieldValue.Set(boundVal) + fieldValues[fieldName] = boundVal + } + } + + return result +} + +func (input *BeegoInput) bindPoint(key string, typ reflect.Type) reflect.Value { + return input.bind(key, typ.Elem()).Addr() +} + +func (input *BeegoInput) bindMap(params *url.Values, key string, typ reflect.Type) reflect.Value { + var ( + result = reflect.MakeMap(typ) + keyType = typ.Key() + valueType = typ.Elem() + ) + for paramName, values := range *params { + if !strings.HasPrefix(paramName, key+"[") || paramName[len(paramName)-1] != ']' { + continue + } + + key := paramName[len(key)+1 : len(paramName)-1] + result.SetMapIndex(input.bindValue(key, keyType), input.bindValue(values[0], valueType)) + } + return result +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/context/output.go 
b/Godeps/_workspace/src/github.com/astaxie/beego/context/output.go new file mode 100644 index 000000000..2d756e271 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/context/output.go @@ -0,0 +1,343 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package context + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "html/template" + "io" + "mime" + "net/http" + "path/filepath" + "strconv" + "strings" + "time" +) + +// BeegoOutput does work for sending response header. +type BeegoOutput struct { + Context *Context + Status int + EnableGzip bool +} + +// NewOutput returns new BeegoOutput. +// it contains nothing now. +func NewOutput() *BeegoOutput { + return &BeegoOutput{} +} + +// Reset init BeegoOutput +func (output *BeegoOutput) Reset(ctx *Context) { + output.Context = ctx + output.Status = 0 +} + +// Header sets response header item string via given key. +func (output *BeegoOutput) Header(key, val string) { + output.Context.ResponseWriter.Header().Set(key, val) +} + +// Body sets response body content. +// if EnableGzip, compress content string. +// it sends out response body directly. 
+func (output *BeegoOutput) Body(content []byte) { + var encoding string + var buf = &bytes.Buffer{} + if output.EnableGzip { + encoding = ParseEncoding(output.Context.Request) + } + if b, n, _ := WriteBody(encoding, buf, content); b { + output.Header("Content-Encoding", n) + } else { + output.Header("Content-Length", strconv.Itoa(len(content))) + } + // Write status code if it has been set manually + // Set it to 0 afterwards to prevent "multiple response.WriteHeader calls" + if output.Status != 0 { + output.Context.ResponseWriter.WriteHeader(output.Status) + output.Status = 0 + } + + io.Copy(output.Context.ResponseWriter, buf) +} + +// Cookie sets cookie value via given key. +// others are ordered as cookie's max age time, path,domain, secure and httponly. +func (output *BeegoOutput) Cookie(name string, value string, others ...interface{}) { + var b bytes.Buffer + fmt.Fprintf(&b, "%s=%s", sanitizeName(name), sanitizeValue(value)) + + //fix cookie not work in IE + if len(others) > 0 { + var maxAge int64 + + switch v := others[0].(type) { + case int: + maxAge = int64(v) + case int32: + maxAge = int64(v) + case int64: + maxAge = v + } + + if maxAge > 0 { + fmt.Fprintf(&b, "; Expires=%s; Max-Age=%d", time.Now().Add(time.Duration(maxAge)*time.Second).UTC().Format(time.RFC1123), maxAge) + } else { + fmt.Fprintf(&b, "; Max-Age=0") + } + } + + // the settings below + // Path, Domain, Secure, HttpOnly + // can use nil skip set + + // default "/" + if len(others) > 1 { + if v, ok := others[1].(string); ok && len(v) > 0 { + fmt.Fprintf(&b, "; Path=%s", sanitizeValue(v)) + } + } else { + fmt.Fprintf(&b, "; Path=%s", "/") + } + + // default empty + if len(others) > 2 { + if v, ok := others[2].(string); ok && len(v) > 0 { + fmt.Fprintf(&b, "; Domain=%s", sanitizeValue(v)) + } + } + + // default empty + if len(others) > 3 { + var secure bool + switch v := others[3].(type) { + case bool: + secure = v + default: + if others[3] != nil { + secure = true + } + } + if secure { + 
fmt.Fprintf(&b, "; Secure") + } + } + + // default false. for session cookie default true + httponly := false + if len(others) > 4 { + if v, ok := others[4].(bool); ok && v { + // HttpOnly = true + httponly = true + } + } + + if httponly { + fmt.Fprintf(&b, "; HttpOnly") + } + + output.Context.ResponseWriter.Header().Add("Set-Cookie", b.String()) +} + +var cookieNameSanitizer = strings.NewReplacer("\n", "-", "\r", "-") + +func sanitizeName(n string) string { + return cookieNameSanitizer.Replace(n) +} + +var cookieValueSanitizer = strings.NewReplacer("\n", " ", "\r", " ", ";", " ") + +func sanitizeValue(v string) string { + return cookieValueSanitizer.Replace(v) +} + +// JSON writes json to response body. +// if coding is true, it converts utf-8 to \u0000 type. +func (output *BeegoOutput) JSON(data interface{}, hasIndent bool, coding bool) error { + output.Header("Content-Type", "application/json; charset=utf-8") + var content []byte + var err error + if hasIndent { + content, err = json.MarshalIndent(data, "", " ") + } else { + content, err = json.Marshal(data) + } + if err != nil { + http.Error(output.Context.ResponseWriter, err.Error(), http.StatusInternalServerError) + return err + } + if coding { + content = []byte(stringsToJSON(string(content))) + } + output.Body(content) + return nil +} + +// JSONP writes jsonp to response body. 
+func (output *BeegoOutput) JSONP(data interface{}, hasIndent bool) error { + output.Header("Content-Type", "application/javascript; charset=utf-8") + var content []byte + var err error + if hasIndent { + content, err = json.MarshalIndent(data, "", " ") + } else { + content, err = json.Marshal(data) + } + if err != nil { + http.Error(output.Context.ResponseWriter, err.Error(), http.StatusInternalServerError) + return err + } + callback := output.Context.Input.Query("callback") + if callback == "" { + return errors.New(`"callback" parameter required`) + } + callbackContent := bytes.NewBufferString(" " + template.JSEscapeString(callback)) + callbackContent.WriteString("(") + callbackContent.Write(content) + callbackContent.WriteString(");\r\n") + output.Body(callbackContent.Bytes()) + return nil +} + +// XML writes xml string to response body. +func (output *BeegoOutput) XML(data interface{}, hasIndent bool) error { + output.Header("Content-Type", "application/xml; charset=utf-8") + var content []byte + var err error + if hasIndent { + content, err = xml.MarshalIndent(data, "", " ") + } else { + content, err = xml.Marshal(data) + } + if err != nil { + http.Error(output.Context.ResponseWriter, err.Error(), http.StatusInternalServerError) + return err + } + output.Body(content) + return nil +} + +// Download forces response for download file. +// it prepares the download response header automatically. 
+func (output *BeegoOutput) Download(file string, filename ...string) { + output.Header("Content-Description", "File Transfer") + output.Header("Content-Type", "application/octet-stream") + if len(filename) > 0 && filename[0] != "" { + output.Header("Content-Disposition", "attachment; filename="+filename[0]) + } else { + output.Header("Content-Disposition", "attachment; filename="+filepath.Base(file)) + } + output.Header("Content-Transfer-Encoding", "binary") + output.Header("Expires", "0") + output.Header("Cache-Control", "must-revalidate") + output.Header("Pragma", "public") + http.ServeFile(output.Context.ResponseWriter, output.Context.Request, file) +} + +// ContentType sets the content type from ext string. +// MIME type is given in mime package. +func (output *BeegoOutput) ContentType(ext string) { + if !strings.HasPrefix(ext, ".") { + ext = "." + ext + } + ctype := mime.TypeByExtension(ext) + if ctype != "" { + output.Header("Content-Type", ctype) + } +} + +// SetStatus sets response status code. +// It writes response header directly. +func (output *BeegoOutput) SetStatus(status int) { + output.Status = status +} + +// IsCachable returns boolean of this request is cached. +// HTTP 304 means cached. +func (output *BeegoOutput) IsCachable(status int) bool { + return output.Status >= 200 && output.Status < 300 || output.Status == 304 +} + +// IsEmpty returns boolean of this request is empty. +// HTTP 201,204 and 304 means empty. +func (output *BeegoOutput) IsEmpty(status int) bool { + return output.Status == 201 || output.Status == 204 || output.Status == 304 +} + +// IsOk returns boolean of this request runs well. +// HTTP 200 means ok. +func (output *BeegoOutput) IsOk(status int) bool { + return output.Status == 200 +} + +// IsSuccessful returns boolean of this request runs successfully. +// HTTP 2xx means ok. 
+func (output *BeegoOutput) IsSuccessful(status int) bool { + return output.Status >= 200 && output.Status < 300 +} + +// IsRedirect returns boolean of this request is redirection header. +// HTTP 301,302,307 means redirection. +func (output *BeegoOutput) IsRedirect(status int) bool { + return output.Status == 301 || output.Status == 302 || output.Status == 303 || output.Status == 307 +} + +// IsForbidden returns boolean of this request is forbidden. +// HTTP 403 means forbidden. +func (output *BeegoOutput) IsForbidden(status int) bool { + return output.Status == 403 +} + +// IsNotFound returns boolean of this request is not found. +// HTTP 404 means forbidden. +func (output *BeegoOutput) IsNotFound(status int) bool { + return output.Status == 404 +} + +// IsClientError returns boolean of this request client sends error data. +// HTTP 4xx means forbidden. +func (output *BeegoOutput) IsClientError(status int) bool { + return output.Status >= 400 && output.Status < 500 +} + +// IsServerError returns boolean of this server handler errors. +// HTTP 5xx means server internal error. +func (output *BeegoOutput) IsServerError(status int) bool { + return output.Status >= 500 && output.Status < 600 +} + +func stringsToJSON(str string) string { + rs := []rune(str) + jsons := "" + for _, r := range rs { + rint := int(r) + if rint < 128 { + jsons += string(r) + } else { + jsons += "\\u" + strconv.FormatInt(int64(rint), 16) // json + } + } + return jsons +} + +// Session sets session item value with given key. +func (output *BeegoOutput) Session(name interface{}, value interface{}) { + output.Context.Input.CruSession.Set(name, value) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/controller.go b/Godeps/_workspace/src/github.com/astaxie/beego/controller.go new file mode 100644 index 000000000..a2943d420 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/controller.go @@ -0,0 +1,621 @@ +// Copyright 2014 beego Author. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package beego + +import ( + "bytes" + "errors" + "html/template" + "io" + "mime/multipart" + "net/http" + "net/url" + "os" + "reflect" + "strconv" + "strings" + + "github.com/astaxie/beego/context" + "github.com/astaxie/beego/session" +) + +//commonly used mime-types +const ( + applicationJSON = "application/json" + applicationXML = "application/xml" + textXML = "text/xml" +) + +var ( + // ErrAbort custom error when user stop request handler manually. + ErrAbort = errors.New("User stop run") + // GlobalControllerRouter store comments with controller. pkgpath+controller:comments + GlobalControllerRouter = make(map[string][]ControllerComments) +) + +// ControllerComments store the comment for the controller method +type ControllerComments struct { + Method string + Router string + AllowHTTPMethods []string + Params []map[string]string +} + +// Controller defines some basic http request handler operations, such as +// http context, template and view, session and xsrf. 
+type Controller struct { + // context data + Ctx *context.Context + Data map[interface{}]interface{} + + // route controller info + controllerName string + actionName string + methodMapping map[string]func() //method:routertree + gotofunc string + AppController interface{} + + // template data + TplName string + Layout string + LayoutSections map[string]string // the key is the section name and the value is the template name + TplExt string + EnableRender bool + + // xsrf data + _xsrfToken string + XSRFExpire int + EnableXSRF bool + + // session + CruSession session.Store +} + +// ControllerInterface is an interface to uniform all controller handler. +type ControllerInterface interface { + Init(ct *context.Context, controllerName, actionName string, app interface{}) + Prepare() + Get() + Post() + Delete() + Put() + Head() + Patch() + Options() + Finish() + Render() error + XSRFToken() string + CheckXSRFCookie() bool + HandlerFunc(fn string) bool + URLMapping() +} + +// Init generates default values of controller operations. +func (c *Controller) Init(ctx *context.Context, controllerName, actionName string, app interface{}) { + c.Layout = "" + c.TplName = "" + c.controllerName = controllerName + c.actionName = actionName + c.Ctx = ctx + c.TplExt = "tpl" + c.AppController = app + c.EnableRender = true + c.EnableXSRF = true + c.Data = ctx.Input.Data() + c.methodMapping = make(map[string]func()) +} + +// Prepare runs after Init before request function execution. +func (c *Controller) Prepare() {} + +// Finish runs after request function execution. +func (c *Controller) Finish() {} + +// Get adds a request function to handle GET request. +func (c *Controller) Get() { + http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) +} + +// Post adds a request function to handle POST request. +func (c *Controller) Post() { + http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) +} + +// Delete adds a request function to handle DELETE request. 
+func (c *Controller) Delete() { + http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) +} + +// Put adds a request function to handle PUT request. +func (c *Controller) Put() { + http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) +} + +// Head adds a request function to handle HEAD request. +func (c *Controller) Head() { + http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) +} + +// Patch adds a request function to handle PATCH request. +func (c *Controller) Patch() { + http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) +} + +// Options adds a request function to handle OPTIONS request. +func (c *Controller) Options() { + http.Error(c.Ctx.ResponseWriter, "Method Not Allowed", 405) +} + +// HandlerFunc call function with the name +func (c *Controller) HandlerFunc(fnname string) bool { + if v, ok := c.methodMapping[fnname]; ok { + v() + return true + } + return false +} + +// URLMapping register the internal Controller router. +func (c *Controller) URLMapping() {} + +// Mapping the method to function +func (c *Controller) Mapping(method string, fn func()) { + c.methodMapping[method] = fn +} + +// Render sends the response with rendered template bytes as text/html type. +func (c *Controller) Render() error { + if !c.EnableRender { + return nil + } + rb, err := c.RenderBytes() + if err != nil { + return err + } + c.Ctx.Output.Header("Content-Type", "text/html; charset=utf-8") + c.Ctx.Output.Body(rb) + return nil +} + +// RenderString returns the rendered template string. Do not send out response. +func (c *Controller) RenderString() (string, error) { + b, e := c.RenderBytes() + return string(b), e +} + +// RenderBytes returns the bytes of rendered template string. Do not send out response. 
+func (c *Controller) RenderBytes() ([]byte, error) { + //if the controller has set layout, then first get the tplname's content set the content to the layout + var buf bytes.Buffer + if c.Layout != "" { + if c.TplName == "" { + c.TplName = strings.ToLower(c.controllerName) + "/" + strings.ToLower(c.actionName) + "." + c.TplExt + } + + if BConfig.RunMode == DEV { + buildFiles := []string{c.TplName} + if c.LayoutSections != nil { + for _, sectionTpl := range c.LayoutSections { + if sectionTpl == "" { + continue + } + buildFiles = append(buildFiles, sectionTpl) + } + } + BuildTemplate(BConfig.WebConfig.ViewsPath, buildFiles...) + } + if _, ok := BeeTemplates[c.TplName]; !ok { + panic("can't find templatefile in the path:" + c.TplName) + } + err := BeeTemplates[c.TplName].ExecuteTemplate(&buf, c.TplName, c.Data) + if err != nil { + Trace("template Execute err:", err) + return nil, err + } + c.Data["LayoutContent"] = template.HTML(buf.String()) + + if c.LayoutSections != nil { + for sectionName, sectionTpl := range c.LayoutSections { + if sectionTpl == "" { + c.Data[sectionName] = "" + continue + } + + buf.Reset() + err = BeeTemplates[sectionTpl].ExecuteTemplate(&buf, sectionTpl, c.Data) + if err != nil { + Trace("template Execute err:", err) + return nil, err + } + c.Data[sectionName] = template.HTML(buf.String()) + } + } + + buf.Reset() + err = BeeTemplates[c.Layout].ExecuteTemplate(&buf, c.Layout, c.Data) + if err != nil { + Trace("template Execute err:", err) + return nil, err + } + return buf.Bytes(), nil + } + + if c.TplName == "" { + c.TplName = strings.ToLower(c.controllerName) + "/" + strings.ToLower(c.actionName) + "." 
+ c.TplExt + } + if BConfig.RunMode == DEV { + BuildTemplate(BConfig.WebConfig.ViewsPath, c.TplName) + } + if _, ok := BeeTemplates[c.TplName]; !ok { + panic("can't find templatefile in the path:" + c.TplName) + } + buf.Reset() + err := BeeTemplates[c.TplName].ExecuteTemplate(&buf, c.TplName, c.Data) + if err != nil { + Trace("template Execute err:", err) + return nil, err + } + return buf.Bytes(), nil +} + +// Redirect sends the redirection response to url with status code. +func (c *Controller) Redirect(url string, code int) { + c.Ctx.Redirect(code, url) +} + +// Abort stops controller handler and show the error data if code is defined in ErrorMap or code string. +func (c *Controller) Abort(code string) { + status, err := strconv.Atoi(code) + if err != nil { + status = 200 + } + c.CustomAbort(status, code) +} + +// CustomAbort stops controller handler and show the error data, it's similar Aborts, but support status code and body. +func (c *Controller) CustomAbort(status int, body string) { + c.Ctx.ResponseWriter.WriteHeader(status) + // first panic from ErrorMaps, is is user defined error functions. + if _, ok := ErrorMaps[body]; ok { + panic(body) + } + // last panic user string + c.Ctx.ResponseWriter.Write([]byte(body)) + panic(ErrAbort) +} + +// StopRun makes panic of USERSTOPRUN error and go to recover function if defined. +func (c *Controller) StopRun() { + panic(ErrAbort) +} + +// URLFor does another controller handler in this request function. +// it goes to this controller method if endpoint is not clear. +func (c *Controller) URLFor(endpoint string, values ...interface{}) string { + if len(endpoint) == 0 { + return "" + } + if endpoint[0] == '.' { + return URLFor(reflect.Indirect(reflect.ValueOf(c.AppController)).Type().Name()+endpoint, values...) + } + return URLFor(endpoint, values...) +} + +// ServeJSON sends a json response with encoding charset. 
+func (c *Controller) ServeJSON(encoding ...bool) { + var ( + hasIndent = true + hasEncoding = false + ) + if BConfig.RunMode == PROD { + hasIndent = false + } + if len(encoding) > 0 && encoding[0] == true { + hasEncoding = true + } + c.Ctx.Output.JSON(c.Data["json"], hasIndent, hasEncoding) +} + +// ServeJSONP sends a jsonp response. +func (c *Controller) ServeJSONP() { + hasIndent := true + if BConfig.RunMode == PROD { + hasIndent = false + } + c.Ctx.Output.JSONP(c.Data["jsonp"], hasIndent) +} + +// ServeXML sends xml response. +func (c *Controller) ServeXML() { + hasIndent := true + if BConfig.RunMode == PROD { + hasIndent = false + } + c.Ctx.Output.XML(c.Data["xml"], hasIndent) +} + +// ServeFormatted serve Xml OR Json, depending on the value of the Accept header +func (c *Controller) ServeFormatted() { + accept := c.Ctx.Input.Header("Accept") + switch accept { + case applicationJSON: + c.ServeJSON() + case applicationXML, textXML: + c.ServeXML() + default: + c.ServeJSON() + } +} + +// Input returns the input data map from POST or PUT request body and query string. +func (c *Controller) Input() url.Values { + if c.Ctx.Request.Form == nil { + c.Ctx.Request.ParseForm() + } + return c.Ctx.Request.Form +} + +// ParseForm maps input data map to obj struct. +func (c *Controller) ParseForm(obj interface{}) error { + return ParseForm(c.Input(), obj) +} + +// GetString returns the input value by key string or the default value while it's present and input is blank +func (c *Controller) GetString(key string, def ...string) string { + if v := c.Ctx.Input.Query(key); v != "" { + return v + } + if len(def) > 0 { + return def[0] + } + return "" +} + +// GetStrings returns the input string slice by key string or the default value while it's present and input is blank +// it's designed for multi-value input field such as checkbox(input[type=checkbox]), multi-selection. 
+func (c *Controller) GetStrings(key string, def ...[]string) []string { + var defv []string + if len(def) > 0 { + defv = def[0] + } + + if f := c.Input(); f == nil { + return defv + } else if vs := f[key]; len(vs) > 0 { + return vs + } + + return defv +} + +// GetInt returns input as an int or the default value while it's present and input is blank +func (c *Controller) GetInt(key string, def ...int) (int, error) { + strv := c.Ctx.Input.Query(key) + if len(strv) == 0 && len(def) > 0 { + return def[0], nil + } + return strconv.Atoi(strv) +} + +// GetInt8 return input as an int8 or the default value while it's present and input is blank +func (c *Controller) GetInt8(key string, def ...int8) (int8, error) { + strv := c.Ctx.Input.Query(key) + if len(strv) == 0 && len(def) > 0 { + return def[0], nil + } + i64, err := strconv.ParseInt(strv, 10, 8) + return int8(i64), err +} + +// GetInt16 returns input as an int16 or the default value while it's present and input is blank +func (c *Controller) GetInt16(key string, def ...int16) (int16, error) { + strv := c.Ctx.Input.Query(key) + if len(strv) == 0 && len(def) > 0 { + return def[0], nil + } + i64, err := strconv.ParseInt(strv, 10, 16) + return int16(i64), err +} + +// GetInt32 returns input as an int32 or the default value while it's present and input is blank +func (c *Controller) GetInt32(key string, def ...int32) (int32, error) { + strv := c.Ctx.Input.Query(key) + if len(strv) == 0 && len(def) > 0 { + return def[0], nil + } + i64, err := strconv.ParseInt(strv, 10, 32) + return int32(i64), err +} + +// GetInt64 returns input value as int64 or the default value while it's present and input is blank. +func (c *Controller) GetInt64(key string, def ...int64) (int64, error) { + strv := c.Ctx.Input.Query(key) + if len(strv) == 0 && len(def) > 0 { + return def[0], nil + } + return strconv.ParseInt(strv, 10, 64) +} + +// GetBool returns input value as bool or the default value while it's present and input is blank. 
+func (c *Controller) GetBool(key string, def ...bool) (bool, error) { + strv := c.Ctx.Input.Query(key) + if len(strv) == 0 && len(def) > 0 { + return def[0], nil + } + return strconv.ParseBool(strv) +} + +// GetFloat returns input value as float64 or the default value while it's present and input is blank. +func (c *Controller) GetFloat(key string, def ...float64) (float64, error) { + strv := c.Ctx.Input.Query(key) + if len(strv) == 0 && len(def) > 0 { + return def[0], nil + } + return strconv.ParseFloat(strv, 64) +} + +// GetFile returns the file data in file upload field named as key. +// it returns the first one of multi-uploaded files. +func (c *Controller) GetFile(key string) (multipart.File, *multipart.FileHeader, error) { + return c.Ctx.Request.FormFile(key) +} + +// GetFiles return multi-upload files +// files, err:=c.Getfiles("myfiles") +// if err != nil { +// http.Error(w, err.Error(), http.StatusNoContent) +// return +// } +// for i, _ := range files { +// //for each fileheader, get a handle to the actual file +// file, err := files[i].Open() +// defer file.Close() +// if err != nil { +// http.Error(w, err.Error(), http.StatusInternalServerError) +// return +// } +// //create destination file making sure the path is writeable. +// dst, err := os.Create("upload/" + files[i].Filename) +// defer dst.Close() +// if err != nil { +// http.Error(w, err.Error(), http.StatusInternalServerError) +// return +// } +// //copy the uploaded file to the destination file +// if _, err := io.Copy(dst, file); err != nil { +// http.Error(w, err.Error(), http.StatusInternalServerError) +// return +// } +// } +func (c *Controller) GetFiles(key string) ([]*multipart.FileHeader, error) { + if files, ok := c.Ctx.Request.MultipartForm.File[key]; ok { + return files, nil + } + return nil, http.ErrMissingFile +} + +// SaveToFile saves uploaded file to new path. +// it only operates the first one of mutil-upload form file field. 
+func (c *Controller) SaveToFile(fromfile, tofile string) error { + file, _, err := c.Ctx.Request.FormFile(fromfile) + if err != nil { + return err + } + defer file.Close() + f, err := os.OpenFile(tofile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + defer f.Close() + io.Copy(f, file) + return nil +} + +// StartSession starts session and load old session data info this controller. +func (c *Controller) StartSession() session.Store { + if c.CruSession == nil { + c.CruSession = c.Ctx.Input.CruSession + } + return c.CruSession +} + +// SetSession puts value into session. +func (c *Controller) SetSession(name interface{}, value interface{}) { + if c.CruSession == nil { + c.StartSession() + } + c.CruSession.Set(name, value) +} + +// GetSession gets value from session. +func (c *Controller) GetSession(name interface{}) interface{} { + if c.CruSession == nil { + c.StartSession() + } + return c.CruSession.Get(name) +} + +// DelSession removes value from session. +func (c *Controller) DelSession(name interface{}) { + if c.CruSession == nil { + c.StartSession() + } + c.CruSession.Delete(name) +} + +// SessionRegenerateID regenerates session id for this session. +// the session data have no changes. +func (c *Controller) SessionRegenerateID() { + if c.CruSession != nil { + c.CruSession.SessionRelease(c.Ctx.ResponseWriter) + } + c.CruSession = GlobalSessions.SessionRegenerateID(c.Ctx.ResponseWriter, c.Ctx.Request) + c.Ctx.Input.CruSession = c.CruSession +} + +// DestroySession cleans session data and session cookie. +func (c *Controller) DestroySession() { + c.Ctx.Input.CruSession.Flush() + c.Ctx.Input.CruSession = nil + GlobalSessions.SessionDestroy(c.Ctx.ResponseWriter, c.Ctx.Request) +} + +// IsAjax returns this request is ajax or not. +func (c *Controller) IsAjax() bool { + return c.Ctx.Input.IsAjax() +} + +// GetSecureCookie returns decoded cookie value from encoded browser cookie values. 
+func (c *Controller) GetSecureCookie(Secret, key string) (string, bool) { + return c.Ctx.GetSecureCookie(Secret, key) +} + +// SetSecureCookie puts value into cookie after encoded the value. +func (c *Controller) SetSecureCookie(Secret, name, value string, others ...interface{}) { + c.Ctx.SetSecureCookie(Secret, name, value, others...) +} + +// XSRFToken creates a CSRF token string and returns. +func (c *Controller) XSRFToken() string { + if c._xsrfToken == "" { + expire := int64(BConfig.WebConfig.XSRFExpire) + if c.XSRFExpire > 0 { + expire = int64(c.XSRFExpire) + } + c._xsrfToken = c.Ctx.XSRFToken(BConfig.WebConfig.XSRFKey, expire) + } + return c._xsrfToken +} + +// CheckXSRFCookie checks xsrf token in this request is valid or not. +// the token can provided in request header "X-Xsrftoken" and "X-CsrfToken" +// or in form field value named as "_xsrf". +func (c *Controller) CheckXSRFCookie() bool { + if !c.EnableXSRF { + return true + } + return c.Ctx.CheckXSRFCookie() +} + +// XSRFFormHTML writes an input field contains xsrf token value. +func (c *Controller) XSRFFormHTML() string { + return `` +} + +// GetControllerAndAction gets the executing controller name and action name. +func (c *Controller) GetControllerAndAction() (string, string) { + return c.controllerName, c.actionName +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/doc.go b/Godeps/_workspace/src/github.com/astaxie/beego/doc.go new file mode 100644 index 000000000..8825bd299 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/doc.go @@ -0,0 +1,17 @@ +/* +Package beego provide a MVC framework +beego: an open-source, high-performance, modular, full-stack web framework + +It is used for rapid development of RESTful APIs, web apps and backend services in Go. +beego is inspired by Tornado, Sinatra and Flask with the added benefit of some Go-specific features such as interfaces and struct embedding. 
+ + package main + import "github.com/astaxie/beego" + + func main() { + beego.Run() + } + +more information: http://beego.me +*/ +package beego diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/docs.go b/Godeps/_workspace/src/github.com/astaxie/beego/docs.go new file mode 100644 index 000000000..725328760 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/docs.go @@ -0,0 +1,39 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package beego + +import ( + "github.com/astaxie/beego/context" +) + +// GlobalDocAPI store the swagger api documents +var GlobalDocAPI = make(map[string]interface{}) + +func serverDocs(ctx *context.Context) { + var obj interface{} + if splat := ctx.Input.Param(":splat"); splat == "" { + obj = GlobalDocAPI["Root"] + } else { + if v, ok := GlobalDocAPI[splat]; ok { + obj = v + } + } + if obj != nil { + ctx.Output.Header("Access-Control-Allow-Origin", "*") + ctx.Output.JSON(obj, false, false) + return + } + ctx.Output.SetStatus(404) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/error.go b/Godeps/_workspace/src/github.com/astaxie/beego/error.go new file mode 100644 index 000000000..94151dd8b --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/error.go @@ -0,0 +1,459 @@ +// Copyright 2014 beego Author. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package beego + +import ( + "fmt" + "html/template" + "net/http" + "reflect" + "runtime" + "strconv" + "strings" + + "github.com/astaxie/beego/context" + "github.com/astaxie/beego/utils" +) + +const ( + errorTypeHandler = iota + errorTypeController +) + +var tpl = ` + + + + + beego application error + + + + + +
+ + + + + + + + + + +
Request Method: {{.RequestMethod}}
Request URL: {{.RequestURL}}
RemoteAddr: {{.RemoteAddr }}
+
+ Stack +
{{.Stack}}
+
+
+ + + +` + +// render default application error page with error and stack string. +func showErr(err interface{}, ctx *context.Context, stack string) { + t, _ := template.New("beegoerrortemp").Parse(tpl) + data := map[string]string{ + "AppError": fmt.Sprintf("%s:%v", BConfig.AppName, err), + "RequestMethod": ctx.Input.Method(), + "RequestURL": ctx.Input.URI(), + "RemoteAddr": ctx.Input.IP(), + "Stack": stack, + "BeegoVersion": VERSION, + "GoVersion": runtime.Version(), + } + ctx.ResponseWriter.WriteHeader(500) + t.Execute(ctx.ResponseWriter, data) +} + +var errtpl = ` + + + + + {{.Title}} + + + +
+
+ +
+ {{.Content}} + Go Home
+ +
Powered by beego {{.BeegoVersion}} +
+
+
+ + +` + +type errorInfo struct { + controllerType reflect.Type + handler http.HandlerFunc + method string + errorType int +} + +// ErrorMaps holds map of http handlers for each error string. +// there is 10 kinds default error(40x and 50x) +var ErrorMaps = make(map[string]*errorInfo, 10) + +// show 401 unauthorized error. +func unauthorized(rw http.ResponseWriter, r *http.Request) { + t, _ := template.New("beegoerrortemp").Parse(errtpl) + data := map[string]interface{}{ + "Title": http.StatusText(401), + "BeegoVersion": VERSION, + } + data["Content"] = template.HTML("
The page you have requested can't be authorized." + + "
Perhaps you are here because:" + + "

") + t.Execute(rw, data) +} + +// show 402 Payment Required +func paymentRequired(rw http.ResponseWriter, r *http.Request) { + t, _ := template.New("beegoerrortemp").Parse(errtpl) + data := map[string]interface{}{ + "Title": http.StatusText(402), + "BeegoVersion": VERSION, + } + data["Content"] = template.HTML("
The page you have requested Payment Required." + + "
Perhaps you are here because:" + + "

") + t.Execute(rw, data) +} + +// show 403 forbidden error. +func forbidden(rw http.ResponseWriter, r *http.Request) { + t, _ := template.New("beegoerrortemp").Parse(errtpl) + data := map[string]interface{}{ + "Title": http.StatusText(403), + "BeegoVersion": VERSION, + } + data["Content"] = template.HTML("
The page you have requested is forbidden." + + "
Perhaps you are here because:" + + "

") + t.Execute(rw, data) +} + +// show 404 notfound error. +func notFound(rw http.ResponseWriter, r *http.Request) { + t, _ := template.New("beegoerrortemp").Parse(errtpl) + data := map[string]interface{}{ + "Title": http.StatusText(404), + "BeegoVersion": VERSION, + } + data["Content"] = template.HTML("
The page you have requested has flown the coop." + + "
Perhaps you are here because:" + + "

") + t.Execute(rw, data) +} + +// show 405 Method Not Allowed +func methodNotAllowed(rw http.ResponseWriter, r *http.Request) { + t, _ := template.New("beegoerrortemp").Parse(errtpl) + data := map[string]interface{}{ + "Title": http.StatusText(405), + "BeegoVersion": VERSION, + } + data["Content"] = template.HTML("
The method you have requested Not Allowed." + + "
Perhaps you are here because:" + + "

") + t.Execute(rw, data) +} + +// show 500 internal server error. +func internalServerError(rw http.ResponseWriter, r *http.Request) { + t, _ := template.New("beegoerrortemp").Parse(errtpl) + data := map[string]interface{}{ + "Title": http.StatusText(500), + "BeegoVersion": VERSION, + } + data["Content"] = template.HTML("
The page you have requested is down right now." + + "

") + t.Execute(rw, data) +} + +// show 501 Not Implemented. +func notImplemented(rw http.ResponseWriter, r *http.Request) { + t, _ := template.New("beegoerrortemp").Parse(errtpl) + data := map[string]interface{}{ + "Title": http.StatusText(504), + "BeegoVersion": VERSION, + } + data["Content"] = template.HTML("
The page you have requested is Not Implemented." + + "

") + t.Execute(rw, data) +} + +// show 502 Bad Gateway. +func badGateway(rw http.ResponseWriter, r *http.Request) { + t, _ := template.New("beegoerrortemp").Parse(errtpl) + data := map[string]interface{}{ + "Title": http.StatusText(502), + "BeegoVersion": VERSION, + } + data["Content"] = template.HTML("
The page you have requested is down right now." + + "

") + t.Execute(rw, data) +} + +// show 503 service unavailable error. +func serviceUnavailable(rw http.ResponseWriter, r *http.Request) { + t, _ := template.New("beegoerrortemp").Parse(errtpl) + data := map[string]interface{}{ + "Title": http.StatusText(503), + "BeegoVersion": VERSION, + } + data["Content"] = template.HTML("
The page you have requested is unavailable." + + "
Perhaps you are here because:" + + "

") + t.Execute(rw, data) +} + +// show 504 Gateway Timeout. +func gatewayTimeout(rw http.ResponseWriter, r *http.Request) { + t, _ := template.New("beegoerrortemp").Parse(errtpl) + data := map[string]interface{}{ + "Title": http.StatusText(504), + "BeegoVersion": VERSION, + } + data["Content"] = template.HTML("
The page you have requested is unavailable." + + "
Perhaps you are here because:" + + "

") + t.Execute(rw, data) +} + +// ErrorHandler registers http.HandlerFunc to each http err code string. +// usage: +// beego.ErrorHandler("404",NotFound) +// beego.ErrorHandler("500",InternalServerError) +func ErrorHandler(code string, h http.HandlerFunc) *App { + ErrorMaps[code] = &errorInfo{ + errorType: errorTypeHandler, + handler: h, + method: code, + } + return BeeApp +} + +// ErrorController registers ControllerInterface to each http err code string. +// usage: +// beego.ErrorController(&controllers.ErrorController{}) +func ErrorController(c ControllerInterface) *App { + reflectVal := reflect.ValueOf(c) + rt := reflectVal.Type() + ct := reflect.Indirect(reflectVal).Type() + for i := 0; i < rt.NumMethod(); i++ { + methodName := rt.Method(i).Name + if !utils.InSlice(methodName, exceptMethod) && strings.HasPrefix(methodName, "Error") { + errName := strings.TrimPrefix(methodName, "Error") + ErrorMaps[errName] = &errorInfo{ + errorType: errorTypeController, + controllerType: ct, + method: methodName, + } + } + } + return BeeApp +} + +// show error string as simple text message. +// if error string is empty, show 503 or 500 error as default. 
+func exception(errCode string, ctx *context.Context) { + atoi := func(code string) int { + v, err := strconv.Atoi(code) + if err == nil { + return v + } + return 503 + } + + for _, ec := range []string{errCode, "503", "500"} { + if h, ok := ErrorMaps[ec]; ok { + executeError(h, ctx, atoi(ec)) + return + } + } + //if 50x error has been removed from errorMap + ctx.ResponseWriter.WriteHeader(atoi(errCode)) + ctx.WriteString(errCode) +} + +func executeError(err *errorInfo, ctx *context.Context, code int) { + if err.errorType == errorTypeHandler { + err.handler(ctx.ResponseWriter, ctx.Request) + return + } + if err.errorType == errorTypeController { + ctx.Output.SetStatus(code) + //Invoke the request handler + vc := reflect.New(err.controllerType) + execController, ok := vc.Interface().(ControllerInterface) + if !ok { + panic("controller is not ControllerInterface") + } + //call the controller init function + execController.Init(ctx, err.controllerType.Name(), err.method, vc.Interface()) + + //call prepare function + execController.Prepare() + + execController.URLMapping() + + method := vc.MethodByName(err.method) + method.Call([]reflect.Value{}) + + //render template + if BConfig.WebConfig.AutoRender { + if err := execController.Render(); err != nil { + panic(err) + } + } + + // finish all runrouter. release resource + execController.Finish() + } +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/filter.go b/Godeps/_workspace/src/github.com/astaxie/beego/filter.go new file mode 100644 index 000000000..863223f79 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/filter.go @@ -0,0 +1,43 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package beego + +import "github.com/astaxie/beego/context" + +// FilterFunc defines a filter function which is invoked before the controller handler is executed. +type FilterFunc func(*context.Context) + +// FilterRouter defines a filter operation which is invoked before the controller handler is executed. +// It can match the URL against a pattern, and execute a filter function +// when a request with a matching URL arrives. +type FilterRouter struct { + filterFunc FilterFunc + tree *Tree + pattern string + returnOnOutput bool +} + +// ValidRouter checks if the current request is matched by this filter. +// If the request is matched, the values of the URL parameters defined +// by the filter pattern are also returned. +func (f *FilterRouter) ValidRouter(url string, ctx *context.Context) bool { + isOk := f.tree.Match(url, ctx) + if isOk != nil { + if b, ok := isOk.(bool); ok { + return b + } + } + return false +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/flash.go b/Godeps/_workspace/src/github.com/astaxie/beego/flash.go new file mode 100644 index 000000000..a6485a17e --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/flash.go @@ -0,0 +1,110 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package beego + +import ( + "fmt" + "net/url" + "strings" +) + +// FlashData is a tools to maintain data when using across request. +type FlashData struct { + Data map[string]string +} + +// NewFlash return a new empty FlashData struct. +func NewFlash() *FlashData { + return &FlashData{ + Data: make(map[string]string), + } +} + +// Set message to flash +func (fd *FlashData) Set(key string, msg string, args ...interface{}) { + if len(args) == 0 { + fd.Data[key] = msg + } else { + fd.Data[key] = fmt.Sprintf(msg, args...) + } +} + +// Success writes success message to flash. +func (fd *FlashData) Success(msg string, args ...interface{}) { + if len(args) == 0 { + fd.Data["success"] = msg + } else { + fd.Data["success"] = fmt.Sprintf(msg, args...) + } +} + +// Notice writes notice message to flash. +func (fd *FlashData) Notice(msg string, args ...interface{}) { + if len(args) == 0 { + fd.Data["notice"] = msg + } else { + fd.Data["notice"] = fmt.Sprintf(msg, args...) + } +} + +// Warning writes warning message to flash. +func (fd *FlashData) Warning(msg string, args ...interface{}) { + if len(args) == 0 { + fd.Data["warning"] = msg + } else { + fd.Data["warning"] = fmt.Sprintf(msg, args...) + } +} + +// Error writes error message to flash. +func (fd *FlashData) Error(msg string, args ...interface{}) { + if len(args) == 0 { + fd.Data["error"] = msg + } else { + fd.Data["error"] = fmt.Sprintf(msg, args...) + } +} + +// Store does the saving operation of flash data. +// the data are encoded and saved in cookie. 
+func (fd *FlashData) Store(c *Controller) { + c.Data["flash"] = fd.Data + var flashValue string + for key, value := range fd.Data { + flashValue += "\x00" + key + "\x23" + BConfig.WebConfig.FlashSeparator + "\x23" + value + "\x00" + } + c.Ctx.SetCookie(BConfig.WebConfig.FlashName, url.QueryEscape(flashValue), 0, "/") +} + +// ReadFromRequest parsed flash data from encoded values in cookie. +func ReadFromRequest(c *Controller) *FlashData { + flash := NewFlash() + if cookie, err := c.Ctx.Request.Cookie(BConfig.WebConfig.FlashName); err == nil { + v, _ := url.QueryUnescape(cookie.Value) + vals := strings.Split(v, "\x00") + for _, v := range vals { + if len(v) > 0 { + kv := strings.Split(v, "\x23"+BConfig.WebConfig.FlashSeparator+"\x23") + if len(kv) == 2 { + flash.Data[kv[0]] = kv[1] + } + } + } + //read one time then delete it + c.Ctx.SetCookie(BConfig.WebConfig.FlashName, "", -1, "/") + } + c.Data["flash"] = flash.Data + return flash +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/grace/conn.go b/Godeps/_workspace/src/github.com/astaxie/beego/grace/conn.go new file mode 100644 index 000000000..6807e1ace --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/grace/conn.go @@ -0,0 +1,28 @@ +package grace + +import ( + "errors" + "net" +) + +type graceConn struct { + net.Conn + server *Server +} + +func (c graceConn) Close() (err error) { + defer func() { + if r := recover(); r != nil { + switch x := r.(type) { + case string: + err = errors.New(x) + case error: + err = x + default: + err = errors.New("Unknown panic") + } + } + }() + c.server.wg.Done() + return c.Conn.Close() +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/grace/grace.go b/Godeps/_workspace/src/github.com/astaxie/beego/grace/grace.go new file mode 100644 index 000000000..af4e90683 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/grace/grace.go @@ -0,0 +1,158 @@ +// Copyright 2014 beego Author. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package grace use to hot reload +// Description: http://grisha.org/blog/2014/06/03/graceful-restart-in-golang/ +// +// Usage: +// +// import( +// "log" +// "net/http" +// "os" +// +// "github.com/astaxie/beego/grace" +// ) +// +// func handler(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("WORLD!")) +// } +// +// func main() { +// mux := http.NewServeMux() +// mux.HandleFunc("/hello", handler) +// +// err := grace.ListenAndServe("localhost:8080", mux) +// if err != nil { +// log.Println(err) +// } +// log.Println("Server on 8080 stopped") +// os.Exit(0) +// } +package grace + +import ( + "flag" + "net/http" + "os" + "strings" + "sync" + "syscall" + "time" +) + +const ( + // PreSignal is the position to add filter before signal + PreSignal = iota + // PostSignal is the position to add filter after signal + PostSignal + // StateInit represent the application inited + StateInit + // StateRunning represent the application is running + StateRunning + // StateShuttingDown represent the application is shutting down + StateShuttingDown + // StateTerminate represent the application is killed + StateTerminate +) + +var ( + regLock *sync.Mutex + runningServers map[string]*Server + runningServersOrder []string + socketPtrOffsetMap map[string]uint + runningServersForked bool + + // DefaultReadTimeOut is the HTTP read timeout + DefaultReadTimeOut time.Duration + // DefaultWriteTimeOut is the HTTP Write 
timeout + DefaultWriteTimeOut time.Duration + // DefaultMaxHeaderBytes is the Max HTTP Herder size, default is 0, no limit + DefaultMaxHeaderBytes int + // DefaultTimeout is the shutdown server's timeout. default is 60s + DefaultTimeout = 60 * time.Second + + isChild bool + socketOrder string + once sync.Once +) + +func onceInit() { + regLock = &sync.Mutex{} + flag.BoolVar(&isChild, "graceful", false, "listen on open fd (after forking)") + flag.StringVar(&socketOrder, "socketorder", "", "previous initialization order - used when more than one listener was started") + runningServers = make(map[string]*Server) + runningServersOrder = []string{} + socketPtrOffsetMap = make(map[string]uint) +} + +// NewServer returns a new graceServer. +func NewServer(addr string, handler http.Handler) (srv *Server) { + once.Do(onceInit) + regLock.Lock() + defer regLock.Unlock() + if !flag.Parsed() { + flag.Parse() + } + if len(socketOrder) > 0 { + for i, addr := range strings.Split(socketOrder, ",") { + socketPtrOffsetMap[addr] = uint(i) + } + } else { + socketPtrOffsetMap[addr] = uint(len(runningServersOrder)) + } + + srv = &Server{ + wg: sync.WaitGroup{}, + sigChan: make(chan os.Signal), + isChild: isChild, + SignalHooks: map[int]map[os.Signal][]func(){ + PreSignal: { + syscall.SIGHUP: {}, + syscall.SIGINT: {}, + syscall.SIGTERM: {}, + }, + PostSignal: { + syscall.SIGHUP: {}, + syscall.SIGINT: {}, + syscall.SIGTERM: {}, + }, + }, + state: StateInit, + Network: "tcp", + } + srv.Server = &http.Server{} + srv.Server.Addr = addr + srv.Server.ReadTimeout = DefaultReadTimeOut + srv.Server.WriteTimeout = DefaultWriteTimeOut + srv.Server.MaxHeaderBytes = DefaultMaxHeaderBytes + srv.Server.Handler = handler + + runningServersOrder = append(runningServersOrder, addr) + runningServers[addr] = srv + + return +} + +// ListenAndServe refer http.ListenAndServe +func ListenAndServe(addr string, handler http.Handler) error { + server := NewServer(addr, handler) + return server.ListenAndServe() +} + 
+// ListenAndServeTLS refer http.ListenAndServeTLS +func ListenAndServeTLS(addr string, certFile string, keyFile string, handler http.Handler) error { + server := NewServer(addr, handler) + return server.ListenAndServeTLS(certFile, keyFile) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/grace/listener.go b/Godeps/_workspace/src/github.com/astaxie/beego/grace/listener.go new file mode 100644 index 000000000..5439d0b20 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/grace/listener.go @@ -0,0 +1,62 @@ +package grace + +import ( + "net" + "os" + "syscall" + "time" +) + +type graceListener struct { + net.Listener + stop chan error + stopped bool + server *Server +} + +func newGraceListener(l net.Listener, srv *Server) (el *graceListener) { + el = &graceListener{ + Listener: l, + stop: make(chan error), + server: srv, + } + go func() { + _ = <-el.stop + el.stopped = true + el.stop <- el.Listener.Close() + }() + return +} + +func (gl *graceListener) Accept() (c net.Conn, err error) { + tc, err := gl.Listener.(*net.TCPListener).AcceptTCP() + if err != nil { + return + } + + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + + c = graceConn{ + Conn: tc, + server: gl.server, + } + + gl.server.wg.Add(1) + return +} + +func (gl *graceListener) Close() error { + if gl.stopped { + return syscall.EINVAL + } + gl.stop <- nil + return <-gl.stop +} + +func (gl *graceListener) File() *os.File { + // returns a dup(2) - FD_CLOEXEC flag *not* set + tl := gl.Listener.(*net.TCPListener) + fl, _ := tl.File() + return fl +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/grace/server.go b/Godeps/_workspace/src/github.com/astaxie/beego/grace/server.go new file mode 100644 index 000000000..f4512ded5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/grace/server.go @@ -0,0 +1,293 @@ +package grace + +import ( + "crypto/tls" + "fmt" + "log" + "net" + "net/http" + "os" + "os/exec" + "os/signal" + "strings" + "sync" + 
"syscall" + "time" +) + +// Server embedded http.Server +type Server struct { + *http.Server + GraceListener net.Listener + SignalHooks map[int]map[os.Signal][]func() + tlsInnerListener *graceListener + wg sync.WaitGroup + sigChan chan os.Signal + isChild bool + state uint8 + Network string +} + +// Serve accepts incoming connections on the Listener l, +// creating a new service goroutine for each. +// The service goroutines read requests and then call srv.Handler to reply to them. +func (srv *Server) Serve() (err error) { + srv.state = StateRunning + err = srv.Server.Serve(srv.GraceListener) + log.Println(syscall.Getpid(), "Waiting for connections to finish...") + srv.wg.Wait() + srv.state = StateTerminate + return +} + +// ListenAndServe listens on the TCP network address srv.Addr and then calls Serve +// to handle requests on incoming connections. If srv.Addr is blank, ":http" is +// used. +func (srv *Server) ListenAndServe() (err error) { + addr := srv.Addr + if addr == "" { + addr = ":http" + } + + go srv.handleSignals() + + l, err := srv.getListener(addr) + if err != nil { + log.Println(err) + return err + } + + srv.GraceListener = newGraceListener(l, srv) + + if srv.isChild { + process, err := os.FindProcess(os.Getppid()) + if err != nil { + log.Println(err) + return err + } + err = process.Kill() + if err != nil { + return err + } + } + + log.Println(os.Getpid(), srv.Addr) + return srv.Serve() +} + +// ListenAndServeTLS listens on the TCP network address srv.Addr and then calls +// Serve to handle requests on incoming TLS connections. +// +// Filenames containing a certificate and matching private key for the server must +// be provided. If the certificate is signed by a certificate authority, the +// certFile should be the concatenation of the server's certificate followed by the +// CA's certificate. +// +// If srv.Addr is blank, ":https" is used. 
+func (srv *Server) ListenAndServeTLS(certFile, keyFile string) (err error) { + addr := srv.Addr + if addr == "" { + addr = ":https" + } + + config := &tls.Config{} + if srv.TLSConfig != nil { + *config = *srv.TLSConfig + } + if config.NextProtos == nil { + config.NextProtos = []string{"http/1.1"} + } + + config.Certificates = make([]tls.Certificate, 1) + config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return + } + + go srv.handleSignals() + + l, err := srv.getListener(addr) + if err != nil { + log.Println(err) + return err + } + + srv.tlsInnerListener = newGraceListener(l, srv) + srv.GraceListener = tls.NewListener(srv.tlsInnerListener, config) + + if srv.isChild { + process, err := os.FindProcess(os.Getppid()) + if err != nil { + log.Println(err) + return err + } + err = process.Kill() + if err != nil { + return err + } + } + log.Println(os.Getpid(), srv.Addr) + return srv.Serve() +} + +// getListener either opens a new socket to listen on, or takes the acceptor socket +// it got passed when restarted. +func (srv *Server) getListener(laddr string) (l net.Listener, err error) { + if srv.isChild { + var ptrOffset uint + if len(socketPtrOffsetMap) > 0 { + ptrOffset = socketPtrOffsetMap[laddr] + log.Println("laddr", laddr, "ptr offset", socketPtrOffsetMap[laddr]) + } + + f := os.NewFile(uintptr(3+ptrOffset), "") + l, err = net.FileListener(f) + if err != nil { + err = fmt.Errorf("net.FileListener error: %v", err) + return + } + } else { + l, err = net.Listen(srv.Network, laddr) + if err != nil { + err = fmt.Errorf("net.Listen error: %v", err) + return + } + } + return +} + +// handleSignals listens for os Signals and calls any hooked in function that the +// user had registered with the signal. 
+func (srv *Server) handleSignals() { + var sig os.Signal + + signal.Notify( + srv.sigChan, + syscall.SIGHUP, + syscall.SIGINT, + syscall.SIGTERM, + ) + + pid := syscall.Getpid() + for { + sig = <-srv.sigChan + srv.signalHooks(PreSignal, sig) + switch sig { + case syscall.SIGHUP: + log.Println(pid, "Received SIGHUP. forking.") + err := srv.fork() + if err != nil { + log.Println("Fork err:", err) + } + case syscall.SIGINT: + log.Println(pid, "Received SIGINT.") + srv.shutdown() + case syscall.SIGTERM: + log.Println(pid, "Received SIGTERM.") + srv.shutdown() + default: + log.Printf("Received %v: nothing i care about...\n", sig) + } + srv.signalHooks(PostSignal, sig) + } +} + +func (srv *Server) signalHooks(ppFlag int, sig os.Signal) { + if _, notSet := srv.SignalHooks[ppFlag][sig]; !notSet { + return + } + for _, f := range srv.SignalHooks[ppFlag][sig] { + f() + } + return +} + +// shutdown closes the listener so that no new connections are accepted. it also +// starts a goroutine that will serverTimeout (stop all running requests) the server +// after DefaultTimeout. +func (srv *Server) shutdown() { + if srv.state != StateRunning { + return + } + + srv.state = StateShuttingDown + if DefaultTimeout >= 0 { + go srv.serverTimeout(DefaultTimeout) + } + err := srv.GraceListener.Close() + if err != nil { + log.Println(syscall.Getpid(), "Listener.Close() error:", err) + } else { + log.Println(syscall.Getpid(), srv.GraceListener.Addr(), "Listener closed.") + } +} + +// serverTimeout forces the server to shutdown in a given timeout - whether it +// finished outstanding requests or not. 
if Read/WriteTimeout are not set or the +// max header size is very big a connection could hang +func (srv *Server) serverTimeout(d time.Duration) { + defer func() { + if r := recover(); r != nil { + log.Println("WaitGroup at 0", r) + } + }() + if srv.state != StateShuttingDown { + return + } + time.Sleep(d) + log.Println("[STOP - Hammer Time] Forcefully shutting down parent") + for { + if srv.state == StateTerminate { + break + } + srv.wg.Done() + } +} + +func (srv *Server) fork() (err error) { + regLock.Lock() + defer regLock.Unlock() + if runningServersForked { + return + } + runningServersForked = true + + var files = make([]*os.File, len(runningServers)) + var orderArgs = make([]string, len(runningServers)) + for _, srvPtr := range runningServers { + switch srvPtr.GraceListener.(type) { + case *graceListener: + files[socketPtrOffsetMap[srvPtr.Server.Addr]] = srvPtr.GraceListener.(*graceListener).File() + default: + files[socketPtrOffsetMap[srvPtr.Server.Addr]] = srvPtr.tlsInnerListener.File() + } + orderArgs[socketPtrOffsetMap[srvPtr.Server.Addr]] = srvPtr.Server.Addr + } + + log.Println(files) + path := os.Args[0] + var args []string + if len(os.Args) > 1 { + for _, arg := range os.Args[1:] { + if arg == "-graceful" { + break + } + args = append(args, arg) + } + } + args = append(args, "-graceful") + if len(runningServers) > 1 { + args = append(args, fmt.Sprintf(`-socketorder=%s`, strings.Join(orderArgs, ","))) + log.Println(args) + } + cmd := exec.Command(path, args...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.ExtraFiles = files + err = cmd.Start() + if err != nil { + log.Fatalf("Restart: Failed to launch, error: %v", err) + } + + return +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/hooks.go b/Godeps/_workspace/src/github.com/astaxie/beego/hooks.go new file mode 100644 index 000000000..78abf8ef9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/hooks.go @@ -0,0 +1,95 @@ +package beego + +import ( + "encoding/json" + "mime" + "net/http" + "path/filepath" + + "github.com/astaxie/beego/session" +) + +// +func registerMime() error { + for k, v := range mimemaps { + mime.AddExtensionType(k, v) + } + return nil +} + +// register default error http handlers, 404,401,403,500 and 503. +func registerDefaultErrorHandler() error { + m := map[string]func(http.ResponseWriter, *http.Request){ + "401": unauthorized, + "402": paymentRequired, + "403": forbidden, + "404": notFound, + "405": methodNotAllowed, + "500": internalServerError, + "501": notImplemented, + "502": badGateway, + "503": serviceUnavailable, + "504": gatewayTimeout, + } + for e, h := range m { + if _, ok := ErrorMaps[e]; !ok { + ErrorHandler(e, h) + } + } + return nil +} + +func registerSession() error { + if BConfig.WebConfig.Session.SessionOn { + var err error + sessionConfig := AppConfig.String("sessionConfig") + if sessionConfig == "" { + conf := map[string]interface{}{ + "cookieName": BConfig.WebConfig.Session.SessionName, + "gclifetime": BConfig.WebConfig.Session.SessionGCMaxLifetime, + "providerConfig": filepath.ToSlash(BConfig.WebConfig.Session.SessionProviderConfig), + "secure": BConfig.Listen.EnableHTTPS, + "enableSetCookie": BConfig.WebConfig.Session.SessionAutoSetCookie, + "domain": BConfig.WebConfig.Session.SessionDomain, + "cookieLifeTime": BConfig.WebConfig.Session.SessionCookieLifeTime, + } + confBytes, err := json.Marshal(conf) + if err != nil { + return err + } + sessionConfig = string(confBytes) + } + if 
GlobalSessions, err = session.NewManager(BConfig.WebConfig.Session.SessionProvider, sessionConfig); err != nil { + return err + } + go GlobalSessions.GC() + } + return nil +} + +func registerTemplate() error { + if BConfig.WebConfig.AutoRender { + if err := BuildTemplate(BConfig.WebConfig.ViewsPath); err != nil { + if BConfig.RunMode == DEV { + Warn(err) + } + return err + } + } + return nil +} + +func registerDocs() error { + if BConfig.WebConfig.EnableDocs { + Get("/docs", serverDocs) + Get("/docs/*", serverDocs) + } + return nil +} + +func registerAdmin() error { + if BConfig.Listen.EnableAdmin { + go beeAdminApp.Run() + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/httplib/README.md b/Godeps/_workspace/src/github.com/astaxie/beego/httplib/README.md new file mode 100644 index 000000000..6a72cf7cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/httplib/README.md @@ -0,0 +1,97 @@ +# httplib +httplib is an libs help you to curl remote url. + +# How to use? + +## GET +you can use Get to crawl data. 
+ + import "github.com/astaxie/beego/httplib" + + str, err := httplib.Get("http://beego.me/").String() + if err != nil { + // error + } + fmt.Println(str) + +## POST +POST data to remote url + + req := httplib.Post("http://beego.me/") + req.Param("username","astaxie") + req.Param("password","123456") + str, err := req.String() + if err != nil { + // error + } + fmt.Println(str) + +## Set timeout + +The default timeout is `60` seconds, function prototype: + + SetTimeout(connectTimeout, readWriteTimeout time.Duration) + +Exmaple: + + // GET + httplib.Get("http://beego.me/").SetTimeout(100 * time.Second, 30 * time.Second) + + // POST + httplib.Post("http://beego.me/").SetTimeout(100 * time.Second, 30 * time.Second) + + +## Debug + +If you want to debug the request info, set the debug on + + httplib.Get("http://beego.me/").Debug(true) + +## Set HTTP Basic Auth + + str, err := Get("http://beego.me/").SetBasicAuth("user", "passwd").String() + if err != nil { + // error + } + fmt.Println(str) + +## Set HTTPS + +If request url is https, You can set the client support TSL: + + httplib.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true}) + +More info about the `tls.Config` please visit http://golang.org/pkg/crypto/tls/#Config + +## Set HTTP Version + +some servers need to specify the protocol version of HTTP + + httplib.Get("http://beego.me/").SetProtocolVersion("HTTP/1.1") + +## Set Cookie + +some http request need setcookie. So set it like this: + + cookie := &http.Cookie{} + cookie.Name = "username" + cookie.Value = "astaxie" + httplib.Get("http://beego.me/").SetCookie(cookie) + +## Upload file + +httplib support mutil file upload, use `req.PostFile()` + + req := httplib.Post("http://beego.me/") + req.Param("username","astaxie") + req.PostFile("uploadfile1", "httplib.pdf") + str, err := req.String() + if err != nil { + // error + } + fmt.Println(str) + + +See godoc for further documentation and examples. 
+ +* [godoc.org/github.com/astaxie/beego/httplib](https://godoc.org/github.com/astaxie/beego/httplib) diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/httplib/httplib.go b/Godeps/_workspace/src/github.com/astaxie/beego/httplib/httplib.go new file mode 100644 index 000000000..fb64a30a8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/httplib/httplib.go @@ -0,0 +1,552 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
// defaultSetting is copied into every new request; SetDefaultSetting
// replaces it wholesale under settingMutex.
var defaultSetting = BeegoHTTPSettings{
	UserAgent:        "beegoServer",
	ConnectTimeout:   60 * time.Second,
	ReadWriteTimeout: 60 * time.Second,
	Gzip:             true,
	DumpBody:         true,
}

var defaultCookieJar http.CookieJar
var settingMutex sync.Mutex

// createDefaultCookie creates a global cookiejar to store cookies.
func createDefaultCookie() {
	settingMutex.Lock()
	defer settingMutex.Unlock()
	defaultCookieJar, _ = cookiejar.New(nil)
}

// SetDefaultSetting Overwrite default settings
func SetDefaultSetting(setting BeegoHTTPSettings) {
	settingMutex.Lock()
	defer settingMutex.Unlock()
	defaultSetting = setting
}

// NewBeegoRequest return *BeegoHttpRequest with specific method
func NewBeegoRequest(rawurl, method string) *BeegoHTTPRequest {
	u, err := url.Parse(rawurl)
	if err != nil {
		// A bad URL is only logged; the request object is still returned
		// and will fail when executed.
		log.Println("Httplib:", err)
	}
	req := &http.Request{
		URL:        u,
		Method:     method,
		Header:     make(http.Header),
		Proto:      "HTTP/1.1",
		ProtoMajor: 1,
		ProtoMinor: 1,
	}
	return &BeegoHTTPRequest{
		url:     rawurl,
		req:     req,
		params:  map[string][]string{},
		files:   map[string]string{},
		setting: defaultSetting,
		resp:    &http.Response{},
	}
}

// Get returns *BeegoHttpRequest with GET method.
func Get(url string) *BeegoHTTPRequest {
	return NewBeegoRequest(url, "GET")
}

// Post returns *BeegoHttpRequest with POST method.
func Post(url string) *BeegoHTTPRequest {
	return NewBeegoRequest(url, "POST")
}

// Put returns *BeegoHttpRequest with PUT method.
func Put(url string) *BeegoHTTPRequest {
	return NewBeegoRequest(url, "PUT")
}

// Delete returns *BeegoHttpRequest DELETE method.
func Delete(url string) *BeegoHTTPRequest {
	return NewBeegoRequest(url, "DELETE")
}

// Head returns *BeegoHttpRequest with HEAD method.
func Head(url string) *BeegoHTTPRequest {
	return NewBeegoRequest(url, "HEAD")
}

// BeegoHTTPSettings is the http.Client setting
type BeegoHTTPSettings struct {
	ShowDebug        bool
	UserAgent        string
	ConnectTimeout   time.Duration
	ReadWriteTimeout time.Duration
	TLSClientConfig  *tls.Config
	Proxy            func(*http.Request) (*url.URL, error)
	Transport        http.RoundTripper
	EnableCookie     bool
	Gzip             bool
	DumpBody         bool
}

// BeegoHTTPRequest provides more useful methods for requesting one url than http.Request.
type BeegoHTTPRequest struct {
	url     string
	req     *http.Request
	params  map[string][]string
	files   map[string]string
	setting BeegoHTTPSettings
	resp    *http.Response
	body    []byte
	dump    []byte
}

// GetRequest return the request object
func (b *BeegoHTTPRequest) GetRequest() *http.Request {
	return b.req
}

// Setting Change request settings
func (b *BeegoHTTPRequest) Setting(setting BeegoHTTPSettings) *BeegoHTTPRequest {
	b.setting = setting
	return b
}
+func (b *BeegoHTTPRequest) SetBasicAuth(username, password string) *BeegoHTTPRequest { + b.req.SetBasicAuth(username, password) + return b +} + +// SetEnableCookie sets enable/disable cookiejar +func (b *BeegoHTTPRequest) SetEnableCookie(enable bool) *BeegoHTTPRequest { + b.setting.EnableCookie = enable + return b +} + +// SetUserAgent sets User-Agent header field +func (b *BeegoHTTPRequest) SetUserAgent(useragent string) *BeegoHTTPRequest { + b.setting.UserAgent = useragent + return b +} + +// Debug sets show debug or not when executing request. +func (b *BeegoHTTPRequest) Debug(isdebug bool) *BeegoHTTPRequest { + b.setting.ShowDebug = isdebug + return b +} + +// DumpBody setting whether need to Dump the Body. +func (b *BeegoHTTPRequest) DumpBody(isdump bool) *BeegoHTTPRequest { + b.setting.DumpBody = isdump + return b +} + +// DumpRequest return the DumpRequest +func (b *BeegoHTTPRequest) DumpRequest() []byte { + return b.dump +} + +// SetTimeout sets connect time out and read-write time out for BeegoRequest. +func (b *BeegoHTTPRequest) SetTimeout(connectTimeout, readWriteTimeout time.Duration) *BeegoHTTPRequest { + b.setting.ConnectTimeout = connectTimeout + b.setting.ReadWriteTimeout = readWriteTimeout + return b +} + +// SetTLSClientConfig sets tls connection configurations if visiting https url. +func (b *BeegoHTTPRequest) SetTLSClientConfig(config *tls.Config) *BeegoHTTPRequest { + b.setting.TLSClientConfig = config + return b +} + +// Header add header item string in request. +func (b *BeegoHTTPRequest) Header(key, value string) *BeegoHTTPRequest { + b.req.Header.Set(key, value) + return b +} + +// SetHost set the request host +func (b *BeegoHTTPRequest) SetHost(host string) *BeegoHTTPRequest { + b.req.Host = host + return b +} + +// SetProtocolVersion Set the protocol version for incoming requests. +// Client requests always use HTTP/1.1. 
+func (b *BeegoHTTPRequest) SetProtocolVersion(vers string) *BeegoHTTPRequest { + if len(vers) == 0 { + vers = "HTTP/1.1" + } + + major, minor, ok := http.ParseHTTPVersion(vers) + if ok { + b.req.Proto = vers + b.req.ProtoMajor = major + b.req.ProtoMinor = minor + } + + return b +} + +// SetCookie add cookie into request. +func (b *BeegoHTTPRequest) SetCookie(cookie *http.Cookie) *BeegoHTTPRequest { + b.req.Header.Add("Cookie", cookie.String()) + return b +} + +// SetTransport set the setting transport +func (b *BeegoHTTPRequest) SetTransport(transport http.RoundTripper) *BeegoHTTPRequest { + b.setting.Transport = transport + return b +} + +// SetProxy set the http proxy +// example: +// +// func(req *http.Request) (*url.URL, error) { +// u, _ := url.ParseRequestURI("http://127.0.0.1:8118") +// return u, nil +// } +func (b *BeegoHTTPRequest) SetProxy(proxy func(*http.Request) (*url.URL, error)) *BeegoHTTPRequest { + b.setting.Proxy = proxy + return b +} + +// Param adds query param in to request. +// params build query string as ?key1=value1&key2=value2... +func (b *BeegoHTTPRequest) Param(key, value string) *BeegoHTTPRequest { + if param, ok := b.params[key]; ok { + b.params[key] = append(param, value) + } else { + b.params[key] = []string{value} + } + return b +} + +// PostFile add a post file to the request +func (b *BeegoHTTPRequest) PostFile(formname, filename string) *BeegoHTTPRequest { + b.files[formname] = filename + return b +} + +// Body adds request raw body. +// it supports string and []byte. +func (b *BeegoHTTPRequest) Body(data interface{}) *BeegoHTTPRequest { + switch t := data.(type) { + case string: + bf := bytes.NewBufferString(t) + b.req.Body = ioutil.NopCloser(bf) + b.req.ContentLength = int64(len(t)) + case []byte: + bf := bytes.NewBuffer(t) + b.req.Body = ioutil.NopCloser(bf) + b.req.ContentLength = int64(len(t)) + } + return b +} + +// JSONBody adds request raw body encoding by JSON. 
+func (b *BeegoHTTPRequest) JSONBody(obj interface{}) (*BeegoHTTPRequest, error) { + if b.req.Body == nil && obj != nil { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return b, err + } + b.req.Body = ioutil.NopCloser(buf) + b.req.ContentLength = int64(buf.Len()) + b.req.Header.Set("Content-Type", "application/json") + } + return b, nil +} + +func (b *BeegoHTTPRequest) buildURL(paramBody string) { + // build GET url with query string + if b.req.Method == "GET" && len(paramBody) > 0 { + if strings.Index(b.url, "?") != -1 { + b.url += "&" + paramBody + } else { + b.url = b.url + "?" + paramBody + } + return + } + + // build POST/PUT/PATCH url and body + if (b.req.Method == "POST" || b.req.Method == "PUT" || b.req.Method == "PATCH") && b.req.Body == nil { + // with files + if len(b.files) > 0 { + pr, pw := io.Pipe() + bodyWriter := multipart.NewWriter(pw) + go func() { + for formname, filename := range b.files { + fileWriter, err := bodyWriter.CreateFormFile(formname, filename) + if err != nil { + log.Println("Httplib:", err) + } + fh, err := os.Open(filename) + if err != nil { + log.Println("Httplib:", err) + } + //iocopy + _, err = io.Copy(fileWriter, fh) + fh.Close() + if err != nil { + log.Println("Httplib:", err) + } + } + for k, v := range b.params { + for _, vv := range v { + bodyWriter.WriteField(k, vv) + } + } + bodyWriter.Close() + pw.Close() + }() + b.Header("Content-Type", bodyWriter.FormDataContentType()) + b.req.Body = ioutil.NopCloser(pr) + return + } + + // with params + if len(paramBody) > 0 { + b.Header("Content-Type", "application/x-www-form-urlencoded") + b.Body(paramBody) + } + } +} + +func (b *BeegoHTTPRequest) getResponse() (*http.Response, error) { + if b.resp.StatusCode != 0 { + return b.resp, nil + } + resp, err := b.DoRequest() + if err != nil { + return nil, err + } + b.resp = resp + return resp, nil +} + +// DoRequest will do the client.Do +func (b *BeegoHTTPRequest) DoRequest() 
(*http.Response, error) { + var paramBody string + if len(b.params) > 0 { + var buf bytes.Buffer + for k, v := range b.params { + for _, vv := range v { + buf.WriteString(url.QueryEscape(k)) + buf.WriteByte('=') + buf.WriteString(url.QueryEscape(vv)) + buf.WriteByte('&') + } + } + paramBody = buf.String() + paramBody = paramBody[0 : len(paramBody)-1] + } + + b.buildURL(paramBody) + url, err := url.Parse(b.url) + if err != nil { + return nil, err + } + + b.req.URL = url + + trans := b.setting.Transport + + if trans == nil { + // create default transport + trans = &http.Transport{ + TLSClientConfig: b.setting.TLSClientConfig, + Proxy: b.setting.Proxy, + Dial: TimeoutDialer(b.setting.ConnectTimeout, b.setting.ReadWriteTimeout), + } + } else { + // if b.transport is *http.Transport then set the settings. + if t, ok := trans.(*http.Transport); ok { + if t.TLSClientConfig == nil { + t.TLSClientConfig = b.setting.TLSClientConfig + } + if t.Proxy == nil { + t.Proxy = b.setting.Proxy + } + if t.Dial == nil { + t.Dial = TimeoutDialer(b.setting.ConnectTimeout, b.setting.ReadWriteTimeout) + } + } + } + + var jar http.CookieJar + if b.setting.EnableCookie { + if defaultCookieJar == nil { + createDefaultCookie() + } + jar = defaultCookieJar + } + + client := &http.Client{ + Transport: trans, + Jar: jar, + } + + if b.setting.UserAgent != "" && b.req.Header.Get("User-Agent") == "" { + b.req.Header.Set("User-Agent", b.setting.UserAgent) + } + + if b.setting.ShowDebug { + dump, err := httputil.DumpRequest(b.req, b.setting.DumpBody) + if err != nil { + log.Println(err.Error()) + } + b.dump = dump + } + return client.Do(b.req) +} + +// String returns the body string in response. +// it calls Response inner. +func (b *BeegoHTTPRequest) String() (string, error) { + data, err := b.Bytes() + if err != nil { + return "", err + } + + return string(data), nil +} + +// Bytes returns the body []byte in response. +// it calls Response inner. 
+func (b *BeegoHTTPRequest) Bytes() ([]byte, error) { + if b.body != nil { + return b.body, nil + } + resp, err := b.getResponse() + if err != nil { + return nil, err + } + if resp.Body == nil { + return nil, nil + } + defer resp.Body.Close() + if b.setting.Gzip && resp.Header.Get("Content-Encoding") == "gzip" { + reader, err := gzip.NewReader(resp.Body) + if err != nil { + return nil, err + } + b.body, err = ioutil.ReadAll(reader) + } else { + b.body, err = ioutil.ReadAll(resp.Body) + } + return b.body, err +} + +// ToFile saves the body data in response to one file. +// it calls Response inner. +func (b *BeegoHTTPRequest) ToFile(filename string) error { + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + resp, err := b.getResponse() + if err != nil { + return err + } + if resp.Body == nil { + return nil + } + defer resp.Body.Close() + _, err = io.Copy(f, resp.Body) + return err +} + +// ToJSON returns the map that marshals from the body bytes as json in response . +// it calls Response inner. +func (b *BeegoHTTPRequest) ToJSON(v interface{}) error { + data, err := b.Bytes() + if err != nil { + return err + } + return json.Unmarshal(data, v) +} + +// ToXML returns the map that marshals from the body bytes as xml in response . +// it calls Response inner. +func (b *BeegoHTTPRequest) ToXML(v interface{}) error { + data, err := b.Bytes() + if err != nil { + return err + } + return xml.Unmarshal(data, v) +} + +// Response executes request client gets response mannually. +func (b *BeegoHTTPRequest) Response() (*http.Response, error) { + return b.getResponse() +} + +// TimeoutDialer returns functions of connection dialer with timeout settings for http.Transport Dial field. 
+func TimeoutDialer(cTimeout time.Duration, rwTimeout time.Duration) func(net, addr string) (c net.Conn, err error) { + return func(netw, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(netw, addr, cTimeout) + if err != nil { + return nil, err + } + err = conn.SetDeadline(time.Now().Add(rwTimeout)) + return conn, err + } +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/log.go b/Godeps/_workspace/src/github.com/astaxie/beego/log.go new file mode 100644 index 000000000..46ec57dd8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/log.go @@ -0,0 +1,116 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package beego + +import ( + "strings" + + "github.com/astaxie/beego/logs" +) + +// Log levels to control the logging output. +const ( + LevelEmergency = iota + LevelAlert + LevelCritical + LevelError + LevelWarning + LevelNotice + LevelInformational + LevelDebug +) + +// BeeLogger references the used application logger. +var BeeLogger = logs.NewLogger(100) + +// SetLevel sets the global log level used by the simple logger. +func SetLevel(l int) { + BeeLogger.SetLevel(l) +} + +// SetLogFuncCall set the CallDepth, default is 3 +func SetLogFuncCall(b bool) { + BeeLogger.EnableFuncCallDepth(b) + BeeLogger.SetLogFuncCallDepth(3) +} + +// SetLogger sets a new logger. 
+func SetLogger(adaptername string, config string) error { + err := BeeLogger.SetLogger(adaptername, config) + if err != nil { + return err + } + return nil +} + +// Emergency logs a message at emergency level. +func Emergency(v ...interface{}) { + BeeLogger.Emergency(generateFmtStr(len(v)), v...) +} + +// Alert logs a message at alert level. +func Alert(v ...interface{}) { + BeeLogger.Alert(generateFmtStr(len(v)), v...) +} + +// Critical logs a message at critical level. +func Critical(v ...interface{}) { + BeeLogger.Critical(generateFmtStr(len(v)), v...) +} + +// Error logs a message at error level. +func Error(v ...interface{}) { + BeeLogger.Error(generateFmtStr(len(v)), v...) +} + +// Warning logs a message at warning level. +func Warning(v ...interface{}) { + BeeLogger.Warning(generateFmtStr(len(v)), v...) +} + +// Warn compatibility alias for Warning() +func Warn(v ...interface{}) { + BeeLogger.Warn(generateFmtStr(len(v)), v...) +} + +// Notice logs a message at notice level. +func Notice(v ...interface{}) { + BeeLogger.Notice(generateFmtStr(len(v)), v...) +} + +// Informational logs a message at info level. +func Informational(v ...interface{}) { + BeeLogger.Informational(generateFmtStr(len(v)), v...) +} + +// Info compatibility alias for Warning() +func Info(v ...interface{}) { + BeeLogger.Info(generateFmtStr(len(v)), v...) +} + +// Debug logs a message at debug level. +func Debug(v ...interface{}) { + BeeLogger.Debug(generateFmtStr(len(v)), v...) +} + +// Trace logs a message at trace level. +// compatibility alias for Warning() +func Trace(v ...interface{}) { + BeeLogger.Trace(generateFmtStr(len(v)), v...) 
+} + +func generateFmtStr(n int) string { + return strings.Repeat("%v ", n) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/logs/README.md b/Godeps/_workspace/src/github.com/astaxie/beego/logs/README.md new file mode 100644 index 000000000..57d7abc39 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/logs/README.md @@ -0,0 +1,63 @@ +## logs +logs is a Go logs manager. It can use many logs adapters. The repo is inspired by `database/sql` . + + +## How to install? + + go get github.com/astaxie/beego/logs + + +## What adapters are supported? + +As of now this logs support console, file,smtp and conn. + + +## How to use it? + +First you must import it + + import ( + "github.com/astaxie/beego/logs" + ) + +Then init a Log (example with console adapter) + + log := NewLogger(10000) + log.SetLogger("console", "") + +> the first params stand for how many channel + +Use it like this: + + log.Trace("trace") + log.Info("info") + log.Warn("warning") + log.Debug("debug") + log.Critical("critical") + + +## File adapter + +Configure file adapter like this: + + log := NewLogger(10000) + log.SetLogger("file", `{"filename":"test.log"}`) + + +## Conn adapter + +Configure like this: + + log := NewLogger(1000) + log.SetLogger("conn", `{"net":"tcp","addr":":7020"}`) + log.Info("info") + + +## Smtp adapter + +Configure like this: + + log := NewLogger(10000) + log.SetLogger("smtp", `{"username":"beegotest@gmail.com","password":"xxxxxxxx","host":"smtp.gmail.com:587","sendTos":["xiemengjun@gmail.com"]}`) + log.Critical("sendmail critical") + time.Sleep(time.Second * 30) diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/logs/conn.go b/Godeps/_workspace/src/github.com/astaxie/beego/logs/conn.go new file mode 100644 index 000000000..3655bf514 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/logs/conn.go @@ -0,0 +1,116 @@ +// Copyright 2014 beego Author. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logs + +import ( + "encoding/json" + "io" + "log" + "net" +) + +// connWriter implements LoggerInterface. +// it writes messages in keep-live tcp connection. +type connWriter struct { + lg *log.Logger + innerWriter io.WriteCloser + ReconnectOnMsg bool `json:"reconnectOnMsg"` + Reconnect bool `json:"reconnect"` + Net string `json:"net"` + Addr string `json:"addr"` + Level int `json:"level"` +} + +// NewConn create new ConnWrite returning as LoggerInterface. +func NewConn() Logger { + conn := new(connWriter) + conn.Level = LevelTrace + return conn +} + +// Init init connection writer with json config. +// json config only need key "level". +func (c *connWriter) Init(jsonconfig string) error { + return json.Unmarshal([]byte(jsonconfig), c) +} + +// WriteMsg write message in connection. +// if connection is down, try to re-connect. +func (c *connWriter) WriteMsg(msg string, level int) error { + if level > c.Level { + return nil + } + if c.neddedConnectOnMsg() { + err := c.connect() + if err != nil { + return err + } + } + + if c.ReconnectOnMsg { + defer c.innerWriter.Close() + } + c.lg.Println(msg) + return nil +} + +// Flush implementing method. empty. +func (c *connWriter) Flush() { + +} + +// Destroy destroy connection writer and close tcp listener. 
+func (c *connWriter) Destroy() { + if c.innerWriter != nil { + c.innerWriter.Close() + } +} + +func (c *connWriter) connect() error { + if c.innerWriter != nil { + c.innerWriter.Close() + c.innerWriter = nil + } + + conn, err := net.Dial(c.Net, c.Addr) + if err != nil { + return err + } + + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + } + + c.innerWriter = conn + c.lg = log.New(conn, "", log.Ldate|log.Ltime) + return nil +} + +func (c *connWriter) neddedConnectOnMsg() bool { + if c.Reconnect { + c.Reconnect = false + return true + } + + if c.innerWriter == nil { + return true + } + + return c.ReconnectOnMsg +} + +func init() { + Register("conn", NewConn) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/logs/console.go b/Godeps/_workspace/src/github.com/astaxie/beego/logs/console.go new file mode 100644 index 000000000..23e8ebcaf --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/logs/console.go @@ -0,0 +1,97 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logs + +import ( + "encoding/json" + "log" + "os" + "runtime" +) + +// brush is a color join function +type brush func(string) string + +// newBrush return a fix color Brush +func newBrush(color string) brush { + pre := "\033[" + reset := "\033[0m" + return func(text string) string { + return pre + color + "m" + text + reset + } +} + +var colors = []brush{ + newBrush("1;37"), // Emergency white + newBrush("1;36"), // Alert cyan + newBrush("1;35"), // Critical magenta + newBrush("1;31"), // Error red + newBrush("1;33"), // Warning yellow + newBrush("1;32"), // Notice green + newBrush("1;34"), // Informational blue + newBrush("1;34"), // Debug blue +} + +// consoleWriter implements LoggerInterface and writes messages to terminal. +type consoleWriter struct { + lg *log.Logger + Level int `json:"level"` +} + +// NewConsole create ConsoleWriter returning as LoggerInterface. +func NewConsole() Logger { + cw := &consoleWriter{ + lg: log.New(os.Stdout, "", log.Ldate|log.Ltime), + Level: LevelDebug, + } + return cw +} + +// Init init console logger. +// jsonconfig like '{"level":LevelTrace}'. +func (c *consoleWriter) Init(jsonconfig string) error { + if len(jsonconfig) == 0 { + return nil + } + return json.Unmarshal([]byte(jsonconfig), c) +} + +// WriteMsg write message in console. +func (c *consoleWriter) WriteMsg(msg string, level int) error { + if level > c.Level { + return nil + } + if goos := runtime.GOOS; goos == "windows" { + c.lg.Println(msg) + return nil + } + c.lg.Println(colors[level](msg)) + + return nil +} + +// Destroy implementing method. empty. +func (c *consoleWriter) Destroy() { + +} + +// Flush implementing method. empty. 
+func (c *consoleWriter) Flush() { + +} + +func init() { + Register("console", NewConsole) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/logs/es/es.go b/Godeps/_workspace/src/github.com/astaxie/beego/logs/es/es.go new file mode 100644 index 000000000..f8dc5f65b --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/logs/es/es.go @@ -0,0 +1,80 @@ +package es + +import ( + "encoding/json" + "errors" + "fmt" + "net" + "net/url" + "time" + + "github.com/astaxie/beego/logs" + "github.com/belogik/goes" +) + +// NewES return a LoggerInterface +func NewES() logs.Logger { + cw := &esLogger{ + Level: logs.LevelDebug, + } + return cw +} + +type esLogger struct { + *goes.Connection + DSN string `json:"dsn"` + Level int `json:"level"` +} + +// {"dsn":"http://localhost:9200/","level":1} +func (el *esLogger) Init(jsonconfig string) error { + err := json.Unmarshal([]byte(jsonconfig), el) + if err != nil { + return err + } + if el.DSN == "" { + return errors.New("empty dsn") + } else if u, err := url.Parse(el.DSN); err != nil { + return err + } else if u.Path == "" { + return errors.New("missing prefix") + } else if host, port, err := net.SplitHostPort(u.Host); err != nil { + return err + } else { + conn := goes.NewConnection(host, port) + el.Connection = conn + } + return nil +} + +// WriteMsg will write the msg and level into es +func (el *esLogger) WriteMsg(msg string, level int) error { + if level > el.Level { + return nil + } + t := time.Now() + vals := make(map[string]interface{}) + vals["@timestamp"] = t.Format(time.RFC3339) + vals["@msg"] = msg + d := goes.Document{ + Index: fmt.Sprintf("%04d.%02d.%02d", t.Year(), t.Month(), t.Day()), + Type: "logs", + Fields: vals, + } + _, err := el.Index(d, nil) + return err +} + +// Destroy is a empty method +func (el *esLogger) Destroy() { + +} + +// Flush is a empty method +func (el *esLogger) Flush() { + +} + +func init() { + logs.Register("es", NewES) +} diff --git 
a/Godeps/_workspace/src/github.com/astaxie/beego/logs/file.go b/Godeps/_workspace/src/github.com/astaxie/beego/logs/file.go new file mode 100644 index 000000000..0eae734a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/logs/file.go @@ -0,0 +1,314 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logs + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "time" +) + +// fileLogWriter implements LoggerInterface. +// It writes messages by lines limit, file size limit, or time frequency. +type fileLogWriter struct { + sync.Mutex // write log order by order and atomic incr maxLinesCurLines and maxSizeCurSize + // The opened file + Filename string `json:"filename"` + fileWriter *os.File + + // Rotate at line + MaxLines int `json:"maxlines"` + maxLinesCurLines int + + // Rotate at size + MaxSize int `json:"maxsize"` + maxSizeCurSize int + + // Rotate daily + Daily bool `json:"daily"` + MaxDays int64 `json:"maxdays"` + dailyOpenDate int + + Rotate bool `json:"rotate"` + + Level int `json:"level"` + + Perm os.FileMode `json:"perm"` +} + +// NewFileWriter create a FileLogWriter returning as LoggerInterface. 
+func newFileWriter() Logger { + w := &fileLogWriter{ + Filename: "", + MaxLines: 1000000, + MaxSize: 1 << 28, //256 MB + Daily: true, + MaxDays: 7, + Rotate: true, + Level: LevelTrace, + Perm: 0660, + } + return w +} + +// Init file logger with json config. +// jsonConfig like: +// { +// "filename":"logs/beego.log", +// "maxLines":10000, +// "maxsize":1<<30, +// "daily":true, +// "maxDays":15, +// "rotate":true, +// "perm":0600 +// } +func (w *fileLogWriter) Init(jsonConfig string) error { + err := json.Unmarshal([]byte(jsonConfig), w) + if err != nil { + return err + } + if len(w.Filename) == 0 { + return errors.New("jsonconfig must have filename") + } + err = w.startLogger() + return err +} + +// start file logger. create log file and set to locker-inside file writer. +func (w *fileLogWriter) startLogger() error { + file, err := w.createLogFile() + if err != nil { + return err + } + if w.fileWriter != nil { + w.fileWriter.Close() + } + w.fileWriter = file + return w.initFd() +} + +func (w *fileLogWriter) needRotate(size int, day int) bool { + return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) || + (w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) || + (w.Daily && day != w.dailyOpenDate) + +} + +// WriteMsg write logger message into file. 
+func (w *fileLogWriter) WriteMsg(msg string, level int) error { + if level > w.Level { + return nil + } + //2016/01/12 21:34:33 + now := time.Now() + y, mo, d := now.Date() + h, mi, s := now.Clock() + //len(2006/01/02 15:03:04)==19 + var buf [20]byte + t := 3 + for y >= 10 { + p := y / 10 + buf[t] = byte('0' + y - p*10) + y = p + t-- + } + buf[0] = byte('0' + y) + buf[4] = '/' + if mo > 9 { + buf[5] = '1' + buf[6] = byte('0' + mo - 9) + } else { + buf[5] = '0' + buf[6] = byte('0' + mo) + } + buf[7] = '/' + t = d / 10 + buf[8] = byte('0' + t) + buf[9] = byte('0' + d - t*10) + buf[10] = ' ' + t = h / 10 + buf[11] = byte('0' + t) + buf[12] = byte('0' + h - t*10) + buf[13] = ':' + t = mi / 10 + buf[14] = byte('0' + t) + buf[15] = byte('0' + mi - t*10) + buf[16] = ':' + t = s / 10 + buf[17] = byte('0' + t) + buf[18] = byte('0' + s - t*10) + buf[19] = ' ' + msg = string(buf[0:]) + msg + "\n" + + if w.Rotate { + if w.needRotate(len(msg), d) { + w.Lock() + if w.needRotate(len(msg), d) { + if err := w.doRotate(); err != nil { + fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err) + } + + } + w.Unlock() + } + } + + w.Lock() + _, err := w.fileWriter.Write([]byte(msg)) + if err == nil { + w.maxLinesCurLines++ + w.maxSizeCurSize += len(msg) + } + w.Unlock() + return err +} + +func (w *fileLogWriter) createLogFile() (*os.File, error) { + // Open the log file + fd, err := os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, w.Perm) + return fd, err +} + +func (w *fileLogWriter) initFd() error { + fd := w.fileWriter + fInfo, err := fd.Stat() + if err != nil { + return fmt.Errorf("get stat err: %s\n", err) + } + w.maxSizeCurSize = int(fInfo.Size()) + w.dailyOpenDate = time.Now().Day() + w.maxLinesCurLines = 0 + if fInfo.Size() > 0 { + count, err := w.lines() + if err != nil { + return err + } + w.maxLinesCurLines = count + } + return nil +} + +func (w *fileLogWriter) lines() (int, error) { + fd, err := os.Open(w.Filename) + if err != nil { + return 0, err + } 
+ defer fd.Close() + + buf := make([]byte, 32768) // 32k + count := 0 + lineSep := []byte{'\n'} + + for { + c, err := fd.Read(buf) + if err != nil && err != io.EOF { + return count, err + } + + count += bytes.Count(buf[:c], lineSep) + + if err == io.EOF { + break + } + } + + return count, nil +} + +// DoRotate means it need to write file in new file. +// new file name like xx.2013-01-01.2.log +func (w *fileLogWriter) doRotate() error { + _, err := os.Lstat(w.Filename) + if err != nil { + return err + } + // file exists + // Find the next available number + num := 1 + fName := "" + suffix := filepath.Ext(w.Filename) + filenameOnly := strings.TrimSuffix(w.Filename, suffix) + if suffix == "" { + suffix = ".log" + } + for ; err == nil && num <= 999; num++ { + fName = filenameOnly + fmt.Sprintf(".%s.%03d%s", time.Now().Format("2006-01-02"), num, suffix) + _, err = os.Lstat(fName) + } + // return error if the last file checked still existed + if err == nil { + return fmt.Errorf("Rotate: Cannot find free log number to rename %s\n", w.Filename) + } + + // close fileWriter before rename + w.fileWriter.Close() + + // Rename the file to its new found name + // even if occurs error,we MUST guarantee to restart new logger + renameErr := os.Rename(w.Filename, fName) + // re-start logger + startLoggerErr := w.startLogger() + go w.deleteOldLog() + + if startLoggerErr != nil { + return fmt.Errorf("Rotate StartLogger: %s\n", startLoggerErr) + } + if renameErr != nil { + return fmt.Errorf("Rotate: %s\n", renameErr) + } + return nil + +} + +func (w *fileLogWriter) deleteOldLog() { + dir := filepath.Dir(w.Filename) + filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) { + defer func() { + if r := recover(); r != nil { + fmt.Fprintf(os.Stderr, "Unable to delete old log '%s', error: %v\n", path, r) + } + }() + + if !info.IsDir() && info.ModTime().Unix() < (time.Now().Unix()-60*60*24*w.MaxDays) { + if strings.HasPrefix(filepath.Base(path), 
filepath.Base(w.Filename)) { + os.Remove(path) + } + } + return + }) +} + +// Destroy close the file description, close file writer. +func (w *fileLogWriter) Destroy() { + w.fileWriter.Close() +} + +// Flush flush file logger. +// there are no buffering messages in file logger in memory. +// flush file means sync file from disk. +func (w *fileLogWriter) Flush() { + w.fileWriter.Sync() +} + +func init() { + Register("file", newFileWriter) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/logs/log.go b/Godeps/_workspace/src/github.com/astaxie/beego/logs/log.go new file mode 100644 index 000000000..ccaaa3ad7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/logs/log.go @@ -0,0 +1,364 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package logs provide a general log interface +// Usage: +// +// import "github.com/astaxie/beego/logs" +// +// log := NewLogger(10000) +// log.SetLogger("console", "") +// +// > the first params stand for how many channel +// +// Use it like this: +// +// log.Trace("trace") +// log.Info("info") +// log.Warn("warning") +// log.Debug("debug") +// log.Critical("critical") +// +// more docs http://beego.me/docs/module/logs.md +package logs + +import ( + "fmt" + "os" + "path" + "runtime" + "strconv" + "sync" +) + +// RFC5424 log message levels. 
// RFC5424 log message levels.
const (
	LevelEmergency = iota
	LevelAlert
	LevelCritical
	LevelError
	LevelWarning
	LevelNotice
	LevelInformational
	LevelDebug
)

// Legacy loglevel constants to ensure backwards compatibility.
//
// Deprecated: will be removed in 1.5.0.
const (
	LevelInfo  = LevelInformational
	LevelTrace = LevelDebug
	LevelWarn  = LevelWarning
)

// loggerType is a factory that produces a fresh adapter instance.
type loggerType func() Logger

// Logger defines the behavior of a log provider.
type Logger interface {
	Init(config string) error
	WriteMsg(msg string, level int) error
	Destroy()
	Flush()
}

// adapters maps a provider name to its factory; populated by each
// adapter's init() via Register.
var adapters = make(map[string]loggerType)

// Register makes a log provider available by the provided name.
// If Register is called twice with the same name or if driver is nil,
// it panics.
func Register(name string, log loggerType) {
	if log == nil {
		panic("logs: Register provide is nil")
	}
	if _, dup := adapters[name]; dup {
		panic("logs: Register called twice for provider " + name)
	}
	adapters[name] = log
}

// BeeLogger is default logger in beego application.
// it can contain several providers and log message into all providers.
type BeeLogger struct {
	lock                sync.Mutex   // guards outputs
	level               int          // messages above this level are dropped
	enableFuncCallDepth bool         // prefix messages with caller file:line
	loggerFuncCallDepth int          // runtime.Caller skip depth used by writeMsg
	asynchronous        bool         // when true, messages are queued on msgChan
	msgChan             chan *logMsg // buffered queue used in asynchronous mode
	outputs             []*nameLogger
}

// nameLogger pairs an adapter with the name it was registered under.
type nameLogger struct {
	Logger
	name string
}

// logMsg is the message envelope queued on msgChan in asynchronous mode.
type logMsg struct {
	level int
	msg   string
}

// logMsgPool recycles logMsg values; initialized by Async().
var logMsgPool *sync.Pool
// NewLogger returns a new BeeLogger.
// channelLen means the number of messages in chan(used where asynchronous is true).
// if the buffering chan is full, logger adapters write to file or other way.
func NewLogger(channelLen int64) *BeeLogger {
	bl := new(BeeLogger)
	bl.level = LevelDebug
	// Depth 2 skips the exported level method (Info, Error, ...) and writeMsg.
	bl.loggerFuncCallDepth = 2
	bl.msgChan = make(chan *logMsg, channelLen)
	return bl
}

// Async set the log to asynchronous and start the goroutine.
// After this call writeMsg queues messages instead of writing inline.
func (bl *BeeLogger) Async() *BeeLogger {
	bl.asynchronous = true
	// The pool recycles logMsg envelopes to avoid an allocation per message.
	logMsgPool = &sync.Pool{
		New: func() interface{} {
			return &logMsg{}
		},
	}
	go bl.startLogger()
	return bl
}

// SetLogger provides a given logger adapter into BeeLogger with config string.
// config need to be correct JSON as string: {"interval":360}.
func (bl *BeeLogger) SetLogger(adapterName string, config string) error {
	bl.lock.Lock()
	defer bl.lock.Unlock()
	if log, ok := adapters[adapterName]; ok {
		lg := log()
		err := lg.Init(config)
		if err != nil {
			fmt.Fprintln(os.Stderr, "logs.BeeLogger.SetLogger: "+err.Error())
			return err
		}
		bl.outputs = append(bl.outputs, &nameLogger{name: adapterName, Logger: lg})
	} else {
		return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName)
	}
	return nil
}

// DelLogger remove a logger adapter in BeeLogger.
func (bl *BeeLogger) DelLogger(adapterName string) error {
	bl.lock.Lock()
	defer bl.lock.Unlock()
	outputs := []*nameLogger{}
	for _, lg := range bl.outputs {
		if lg.name == adapterName {
			lg.Destroy()
		} else {
			outputs = append(outputs, lg)
		}
	}
	// An unchanged length means no adapter matched the requested name.
	if len(outputs) == len(bl.outputs) {
		return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName)
	}
	bl.outputs = outputs
	return nil
}

// writeToLoggers fans the message out to every registered adapter.
// Adapter failures are reported on stderr but do not stop the fan-out.
func (bl *BeeLogger) writeToLoggers(msg string, level int) {
	for _, l := range bl.outputs {
		err := l.WriteMsg(msg, level)
		if err != nil {
			fmt.Fprintf(os.Stderr, "unable to WriteMsg to adapter:%v,error:%v\n", l.name, err)
		}
	}
}

// writeMsg optionally prefixes the caller's file:line, then either queues
// the message (asynchronous mode) or writes it straight to all adapters.
func (bl *BeeLogger) writeMsg(logLevel int, msg string) error {
	if bl.enableFuncCallDepth {
		_, file, line, ok := runtime.Caller(bl.loggerFuncCallDepth)
		if !ok {
			file = "???"
			line = 0
		}
		_, filename := path.Split(file)
		msg = "[" + filename + ":" + strconv.FormatInt(int64(line), 10) + "]" + msg
	}
	if bl.asynchronous {
		// NOTE(review): assumes Async() was called before the first write,
		// otherwise logMsgPool is nil here — confirm callers' ordering.
		lm := logMsgPool.Get().(*logMsg)
		lm.level = logLevel
		lm.msg = msg
		bl.msgChan <- lm
	} else {
		bl.writeToLoggers(msg, logLevel)
	}
	return nil
}

// SetLevel Set log message level.
// If message level (such as LevelDebug) is higher than logger level (such as LevelWarning),
// log providers will not even be sent the message.
func (bl *BeeLogger) SetLevel(l int) {
	bl.level = l
}

// SetLogFuncCallDepth set log funcCallDepth.
func (bl *BeeLogger) SetLogFuncCallDepth(d int) {
	bl.loggerFuncCallDepth = d
}

// GetLogFuncCallDepth return log funcCallDepth for wrapper.
func (bl *BeeLogger) GetLogFuncCallDepth() int {
	return bl.loggerFuncCallDepth
}

// EnableFuncCallDepth enable log funcCallDepth.
func (bl *BeeLogger) EnableFuncCallDepth(b bool) {
	bl.enableFuncCallDepth = b
}

// start logger chan reading.
// when chan is not empty, write logs.
// NOTE(review): this goroutine has no stop signal; once Async() is called
// it runs for the lifetime of the process.
func (bl *BeeLogger) startLogger() {
	for {
		select {
		case bm := <-bl.msgChan:
			bl.writeToLoggers(bm.msg, bm.level)
			logMsgPool.Put(bm)
		}
	}
}

// Emergency Log EMERGENCY level message.
func (bl *BeeLogger) Emergency(format string, v ...interface{}) {
	if LevelEmergency > bl.level {
		return
	}
	msg := fmt.Sprintf("[M] "+format, v...)
	bl.writeMsg(LevelEmergency, msg)
}

// Alert Log ALERT level message.
func (bl *BeeLogger) Alert(format string, v ...interface{}) {
	if LevelAlert > bl.level {
		return
	}
	msg := fmt.Sprintf("[A] "+format, v...)
	bl.writeMsg(LevelAlert, msg)
}

// Critical Log CRITICAL level message.
func (bl *BeeLogger) Critical(format string, v ...interface{}) {
	if LevelCritical > bl.level {
		return
	}
	msg := fmt.Sprintf("[C] "+format, v...)
	bl.writeMsg(LevelCritical, msg)
}
+func (bl *BeeLogger) Error(format string, v ...interface{}) { + if LevelError > bl.level { + return + } + msg := fmt.Sprintf("[E] "+format, v...) + bl.writeMsg(LevelError, msg) +} + +// Warning Log WARNING level message. +func (bl *BeeLogger) Warning(format string, v ...interface{}) { + if LevelWarning > bl.level { + return + } + msg := fmt.Sprintf("[W] "+format, v...) + bl.writeMsg(LevelWarning, msg) +} + +// Notice Log NOTICE level message. +func (bl *BeeLogger) Notice(format string, v ...interface{}) { + if LevelNotice > bl.level { + return + } + msg := fmt.Sprintf("[N] "+format, v...) + bl.writeMsg(LevelNotice, msg) +} + +// Informational Log INFORMATIONAL level message. +func (bl *BeeLogger) Informational(format string, v ...interface{}) { + if LevelInformational > bl.level { + return + } + msg := fmt.Sprintf("[I] "+format, v...) + bl.writeMsg(LevelInformational, msg) +} + +// Debug Log DEBUG level message. +func (bl *BeeLogger) Debug(format string, v ...interface{}) { + if LevelDebug > bl.level { + return + } + msg := fmt.Sprintf("[D] "+format, v...) + bl.writeMsg(LevelDebug, msg) +} + +// Warn Log WARN level message. +// compatibility alias for Warning() +func (bl *BeeLogger) Warn(format string, v ...interface{}) { + if LevelWarning > bl.level { + return + } + msg := fmt.Sprintf("[W] "+format, v...) + bl.writeMsg(LevelWarning, msg) +} + +// Info Log INFO level message. +// compatibility alias for Informational() +func (bl *BeeLogger) Info(format string, v ...interface{}) { + if LevelInformational > bl.level { + return + } + msg := fmt.Sprintf("[I] "+format, v...) + bl.writeMsg(LevelInformational, msg) +} + +// Trace Log TRACE level message. +// compatibility alias for Debug() +func (bl *BeeLogger) Trace(format string, v ...interface{}) { + if LevelDebug > bl.level { + return + } + msg := fmt.Sprintf("[D] "+format, v...) + bl.writeMsg(LevelDebug, msg) +} + +// Flush flush all chan data. 
// Flush flush all chan data.
func (bl *BeeLogger) Flush() {
	for _, l := range bl.outputs {
		l.Flush()
	}
}

// Close close logger, flush all chan data and destroy all adapters in BeeLogger.
func (bl *BeeLogger) Close() {
	// Drain any messages still queued in asynchronous mode before shutdown.
	// logMsgPool is only reached when msgChan held messages, i.e. after
	// Async() initialized the pool.
	for {
		if len(bl.msgChan) > 0 {
			bm := <-bl.msgChan
			bl.writeToLoggers(bm.msg, bm.level)
			logMsgPool.Put(bm)
			continue
		}
		break
	}
	for _, l := range bl.outputs {
		l.Flush()
		l.Destroy()
	}
}

// SMTPWriter implements LoggerInterface and is used to send emails via given SMTP-server.
type SMTPWriter struct {
	Username           string   `json:"username"`
	Password           string   `json:"password"`
	Host               string   `json:"host"` // "host:port" of the SMTP server
	Subject            string   `json:"subject"`
	FromAddress        string   `json:"fromAddress"`
	RecipientAddresses []string `json:"sendTos"`
	Level              int      `json:"level"` // messages above this level are dropped
}

// newSMTPWriter create smtp writer.
func newSMTPWriter() Logger {
	return &SMTPWriter{Level: LevelTrace}
}
+// config like: +// { +// "username":"example@gmail.com", +// "password:"password", +// "host":"smtp.gmail.com:465", +// "subject":"email title", +// "fromAddress":"from@example.com", +// "sendTos":["email1","email2"], +// "level":LevelError +// } +func (s *SMTPWriter) Init(jsonconfig string) error { + err := json.Unmarshal([]byte(jsonconfig), s) + if err != nil { + return err + } + return nil +} + +func (s *SMTPWriter) getSMTPAuth(host string) smtp.Auth { + if len(strings.Trim(s.Username, " ")) == 0 && len(strings.Trim(s.Password, " ")) == 0 { + return nil + } + return smtp.PlainAuth( + "", + s.Username, + s.Password, + host, + ) +} + +func (s *SMTPWriter) sendMail(hostAddressWithPort string, auth smtp.Auth, fromAddress string, recipients []string, msgContent []byte) error { + client, err := smtp.Dial(hostAddressWithPort) + if err != nil { + return err + } + + host, _, _ := net.SplitHostPort(hostAddressWithPort) + tlsConn := &tls.Config{ + InsecureSkipVerify: true, + ServerName: host, + } + if err = client.StartTLS(tlsConn); err != nil { + return err + } + + if auth != nil { + if err = client.Auth(auth); err != nil { + return err + } + } + + if err = client.Mail(fromAddress); err != nil { + return err + } + + for _, rec := range recipients { + if err = client.Rcpt(rec); err != nil { + return err + } + } + + w, err := client.Data() + if err != nil { + return err + } + _, err = w.Write([]byte(msgContent)) + if err != nil { + return err + } + + err = w.Close() + if err != nil { + return err + } + + err = client.Quit() + if err != nil { + return err + } + + return nil +} + +// WriteMsg write message in smtp writer. +// it will send an email with subject and only this message. +func (s *SMTPWriter) WriteMsg(msg string, level int) error { + if level > s.Level { + return nil + } + + hp := strings.Split(s.Host, ":") + + // Set up authentication information. 
+ auth := s.getSMTPAuth(hp[0]) + + // Connect to the server, authenticate, set the sender and recipient, + // and send the email all in one step. + contentType := "Content-Type: text/plain" + "; charset=UTF-8" + mailmsg := []byte("To: " + strings.Join(s.RecipientAddresses, ";") + "\r\nFrom: " + s.FromAddress + "<" + s.FromAddress + + ">\r\nSubject: " + s.Subject + "\r\n" + contentType + "\r\n\r\n" + fmt.Sprintf(".%s", time.Now().Format("2006-01-02 15:04:05")) + msg) + + return s.sendMail(s.Host, auth, s.FromAddress, s.RecipientAddresses, mailmsg) +} + +// Flush implementing method. empty. +func (s *SMTPWriter) Flush() { + return +} + +// Destroy implementing method. empty. +func (s *SMTPWriter) Destroy() { + return +} + +func init() { + Register("smtp", newSMTPWriter) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/migration/ddl.go b/Godeps/_workspace/src/github.com/astaxie/beego/migration/ddl.go new file mode 100644 index 000000000..51243337f --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/migration/ddl.go @@ -0,0 +1,53 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package migration

// Table store the tablename and Column.
type Table struct {
	TableName string
	Columns   []*Column
}

// Create return the create sql.
// NOTE(review): stub — currently always returns the empty string.
func (t *Table) Create() string {
	return ""
}

// Drop return the drop sql.
// NOTE(review): stub — currently always returns the empty string.
func (t *Table) Drop() string {
	return ""
}

// Column define the columns name type and Default.
type Column struct {
	Name    string
	Type    string
	Default interface{}
}

// Create return create sql with the provided tbname and columns.
// NOTE(review): stub — parameters are not yet used.
func Create(tbname string, columns ...Column) string {
	return ""
}

// Drop return the drop sql with the provided tbname and columns.
// NOTE(review): stub — parameters are not yet used.
func Drop(tbname string, columns ...Column) string {
	return ""
}

// TableDDL is still in think.
func TableDDL(tbname string, columns ...Column) string {
	return ""
}
+ +// Package migration is used for migration +// +// The table structure is as follow: +// +// CREATE TABLE `migrations` ( +// `id_migration` int(10) unsigned NOT NULL AUTO_INCREMENT COMMENT 'surrogate key', +// `name` varchar(255) DEFAULT NULL COMMENT 'migration name, unique', +// `created_at` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'date migrated or rolled back', +// `statements` longtext COMMENT 'SQL statements for this migration', +// `rollback_statements` longtext, +// `status` enum('update','rollback') DEFAULT NULL COMMENT 'update indicates it is a normal migration while rollback means this migration is rolled back', +// PRIMARY KEY (`id_migration`) +// ) ENGINE=InnoDB DEFAULT CHARSET=utf8; +package migration + +import ( + "errors" + "sort" + "strings" + "time" + + "github.com/astaxie/beego" + "github.com/astaxie/beego/orm" +) + +// const the data format for the bee generate migration datatype +const ( + DateFormat = "20060102_150405" + DBDateFormat = "2006-01-02 15:04:05" +) + +// Migrationer is an interface for all Migration struct +type Migrationer interface { + Up() + Down() + Reset() + Exec(name, status string) error + GetCreated() int64 +} + +var ( + migrationMap map[string]Migrationer +) + +func init() { + migrationMap = make(map[string]Migrationer) +} + +// Migration the basic type which will implement the basic type +type Migration struct { + sqls []string + Created string +} + +// Up implement in the Inheritance struct for upgrade +func (m *Migration) Up() { + +} + +// Down implement in the Inheritance struct for down +func (m *Migration) Down() { + +} + +// SQL add sql want to execute +func (m *Migration) SQL(sql string) { + m.sqls = append(m.sqls, sql) +} + +// Reset the sqls +func (m *Migration) Reset() { + m.sqls = make([]string, 0) +} + +// Exec execute the sql already add in the sql +func (m *Migration) Exec(name, status string) error { + o := orm.NewOrm() + for _, s := range m.sqls { + beego.Info("exec sql:", s) + r := o.Raw(s) 
+ _, err := r.Exec() + if err != nil { + return err + } + } + return m.addOrUpdateRecord(name, status) +} + +func (m *Migration) addOrUpdateRecord(name, status string) error { + o := orm.NewOrm() + if status == "down" { + status = "rollback" + p, err := o.Raw("update migrations set status = ?, rollback_statements = ?, created_at = ? where name = ?").Prepare() + if err != nil { + return nil + } + _, err = p.Exec(status, strings.Join(m.sqls, "; "), time.Now().Format(DBDateFormat), name) + return err + } + status = "update" + p, err := o.Raw("insert into migrations(name, created_at, statements, status) values(?,?,?,?)").Prepare() + if err != nil { + return err + } + _, err = p.Exec(name, time.Now().Format(DBDateFormat), strings.Join(m.sqls, "; "), status) + return err +} + +// GetCreated get the unixtime from the Created +func (m *Migration) GetCreated() int64 { + t, err := time.Parse(DateFormat, m.Created) + if err != nil { + return 0 + } + return t.Unix() +} + +// Register register the Migration in the map +func Register(name string, m Migrationer) error { + if _, ok := migrationMap[name]; ok { + return errors.New("already exist name:" + name) + } + migrationMap[name] = m + return nil +} + +// Upgrade upgrate the migration from lasttime +func Upgrade(lasttime int64) error { + sm := sortMap(migrationMap) + i := 0 + for _, v := range sm { + if v.created > lasttime { + beego.Info("start upgrade", v.name) + v.m.Reset() + v.m.Up() + err := v.m.Exec(v.name, "up") + if err != nil { + beego.Error("execute error:", err) + time.Sleep(2 * time.Second) + return err + } + beego.Info("end upgrade:", v.name) + i++ + } + } + beego.Info("total success upgrade:", i, " migration") + time.Sleep(2 * time.Second) + return nil +} + +// Rollback rollback the migration by the name +func Rollback(name string) error { + if v, ok := migrationMap[name]; ok { + beego.Info("start rollback") + v.Reset() + v.Down() + err := v.Exec(name, "down") + if err != nil { + beego.Error("execute error:", err) 
// Reset reset all migration
// run all migration's down function
func Reset() error {
	sm := sortMap(migrationMap)
	i := 0
	// Walk newest-first so later migrations are rolled back before earlier ones.
	for j := len(sm) - 1; j >= 0; j-- {
		v := sm[j]
		if isRollBack(v.name) {
			beego.Info("skip the", v.name)
			time.Sleep(1 * time.Second)
			continue
		}
		beego.Info("start reset:", v.name)
		v.m.Reset()
		v.m.Down()
		err := v.m.Exec(v.name, "down")
		if err != nil {
			beego.Error("execute error:", err)
			time.Sleep(2 * time.Second)
			return err
		}
		i++
		beego.Info("end reset:", v.name)
	}
	beego.Info("total success reset:", i, " migration")
	time.Sleep(2 * time.Second)
	return nil
}

// Refresh first Reset, then Upgrade
func Refresh() error {
	err := Reset()
	if err != nil {
		beego.Error("execute error:", err)
		time.Sleep(2 * time.Second)
		return err
	}
	err = Upgrade(0)
	return err
}

// dataSlice is a sortable view of the registered migrations.
type dataSlice []data

// data pairs a migration with its name and creation timestamp.
type data struct {
	created int64
	name    string
	m       Migrationer
}

// Len is part of sort.Interface.
func (d dataSlice) Len() int {
	return len(d)
}

// Swap is part of sort.Interface.
func (d dataSlice) Swap(i, j int) {
	d[i], d[j] = d[j], d[i]
}

// Less is part of sort.Interface. We use the created timestamp as the value to sort by.
func (d dataSlice) Less(i, j int) bool {
	return d[i].created < d[j].created
}

// sortMap flattens the migration map into a slice ordered by creation time.
func sortMap(m map[string]Migrationer) dataSlice {
	s := make(dataSlice, 0, len(m))
	for k, v := range m {
		d := data{}
		d.created = v.GetCreated()
		d.name = k
		d.m = v
		s = append(s, d)
	}
	sort.Sort(s)
	return s
}
order by id_migration desc", name).Values(&maps) + if err != nil { + beego.Info("get name has error", err) + return false + } + if num <= 0 { + return false + } + if maps[0]["status"] == "rollback" { + return true + } + return false +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/mime.go b/Godeps/_workspace/src/github.com/astaxie/beego/mime.go new file mode 100644 index 000000000..e85fcb2ac --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/mime.go @@ -0,0 +1,556 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package beego + +var mimemaps = map[string]string{ + ".3dm": "x-world/x-3dmf", + ".3dmf": "x-world/x-3dmf", + ".7z": "application/x-7z-compressed", + ".a": "application/octet-stream", + ".aab": "application/x-authorware-bin", + ".aam": "application/x-authorware-map", + ".aas": "application/x-authorware-seg", + ".abc": "text/vndabc", + ".ace": "application/x-ace-compressed", + ".acgi": "text/html", + ".afl": "video/animaflex", + ".ai": "application/postscript", + ".aif": "audio/aiff", + ".aifc": "audio/aiff", + ".aiff": "audio/aiff", + ".aim": "application/x-aim", + ".aip": "text/x-audiosoft-intra", + ".alz": "application/x-alz-compressed", + ".ani": "application/x-navi-animation", + ".aos": "application/x-nokia-9000-communicator-add-on-software", + ".aps": "application/mime", + ".apk": "application/vnd.android.package-archive", + ".arc": "application/x-arc-compressed", + ".arj": "application/arj", + ".art": "image/x-jg", + ".asf": "video/x-ms-asf", + ".asm": "text/x-asm", + ".asp": "text/asp", + ".asx": "application/x-mplayer2", + ".au": "audio/basic", + ".avi": "video/x-msvideo", + ".avs": "video/avs-video", + ".bcpio": "application/x-bcpio", + ".bin": "application/mac-binary", + ".bmp": "image/bmp", + ".boo": "application/book", + ".book": "application/book", + ".boz": "application/x-bzip2", + ".bsh": "application/x-bsh", + ".bz2": "application/x-bzip2", + ".bz": "application/x-bzip", + ".c++": "text/plain", + ".c": "text/x-c", + ".cab": "application/vnd.ms-cab-compressed", + ".cat": "application/vndms-pkiseccat", + ".cc": "text/x-c", + ".ccad": "application/clariscad", + ".cco": "application/x-cocoa", + ".cdf": "application/cdf", + ".cer": "application/pkix-cert", + ".cha": "application/x-chat", + ".chat": "application/x-chat", + ".chrt": "application/vnd.kde.kchart", + ".class": "application/java", + ".com": "text/plain", + ".conf": "text/plain", + ".cpio": "application/x-cpio", + ".cpp": "text/x-c", + ".cpt": "application/mac-compactpro", + ".crl": 
"application/pkcs-crl", + ".crt": "application/pkix-cert", + ".crx": "application/x-chrome-extension", + ".csh": "text/x-scriptcsh", + ".css": "text/css", + ".csv": "text/csv", + ".cxx": "text/plain", + ".dar": "application/x-dar", + ".dcr": "application/x-director", + ".deb": "application/x-debian-package", + ".deepv": "application/x-deepv", + ".def": "text/plain", + ".der": "application/x-x509-ca-cert", + ".dif": "video/x-dv", + ".dir": "application/x-director", + ".divx": "video/divx", + ".dl": "video/dl", + ".dmg": "application/x-apple-diskimage", + ".doc": "application/msword", + ".dot": "application/msword", + ".dp": "application/commonground", + ".drw": "application/drafting", + ".dump": "application/octet-stream", + ".dv": "video/x-dv", + ".dvi": "application/x-dvi", + ".dwf": "drawing/x-dwf=(old)", + ".dwg": "application/acad", + ".dxf": "application/dxf", + ".dxr": "application/x-director", + ".el": "text/x-scriptelisp", + ".elc": "application/x-bytecodeelisp=(compiled=elisp)", + ".eml": "message/rfc822", + ".env": "application/x-envoy", + ".eps": "application/postscript", + ".es": "application/x-esrehber", + ".etx": "text/x-setext", + ".evy": "application/envoy", + ".exe": "application/octet-stream", + ".f77": "text/x-fortran", + ".f90": "text/x-fortran", + ".f": "text/x-fortran", + ".fdf": "application/vndfdf", + ".fif": "application/fractals", + ".fli": "video/fli", + ".flo": "image/florian", + ".flv": "video/x-flv", + ".flx": "text/vndfmiflexstor", + ".fmf": "video/x-atomic3d-feature", + ".for": "text/x-fortran", + ".fpx": "image/vndfpx", + ".frl": "application/freeloader", + ".funk": "audio/make", + ".g3": "image/g3fax", + ".g": "text/plain", + ".gif": "image/gif", + ".gl": "video/gl", + ".gsd": "audio/x-gsm", + ".gsm": "audio/x-gsm", + ".gsp": "application/x-gsp", + ".gss": "application/x-gss", + ".gtar": "application/x-gtar", + ".gz": "application/x-compressed", + ".gzip": "application/x-gzip", + ".h": "text/x-h", + ".hdf": "application/x-hdf", + 
".help": "application/x-helpfile", + ".hgl": "application/vndhp-hpgl", + ".hh": "text/x-h", + ".hlb": "text/x-script", + ".hlp": "application/hlp", + ".hpg": "application/vndhp-hpgl", + ".hpgl": "application/vndhp-hpgl", + ".hqx": "application/binhex", + ".hta": "application/hta", + ".htc": "text/x-component", + ".htm": "text/html", + ".html": "text/html", + ".htmls": "text/html", + ".htt": "text/webviewhtml", + ".htx": "text/html", + ".ice": "x-conference/x-cooltalk", + ".ico": "image/x-icon", + ".ics": "text/calendar", + ".icz": "text/calendar", + ".idc": "text/plain", + ".ief": "image/ief", + ".iefs": "image/ief", + ".iges": "application/iges", + ".igs": "application/iges", + ".ima": "application/x-ima", + ".imap": "application/x-httpd-imap", + ".inf": "application/inf", + ".ins": "application/x-internett-signup", + ".ip": "application/x-ip2", + ".isu": "video/x-isvideo", + ".it": "audio/it", + ".iv": "application/x-inventor", + ".ivr": "i-world/i-vrml", + ".ivy": "application/x-livescreen", + ".jam": "audio/x-jam", + ".jav": "text/x-java-source", + ".java": "text/x-java-source", + ".jcm": "application/x-java-commerce", + ".jfif-tbnl": "image/jpeg", + ".jfif": "image/jpeg", + ".jnlp": "application/x-java-jnlp-file", + ".jpe": "image/jpeg", + ".jpeg": "image/jpeg", + ".jpg": "image/jpeg", + ".jps": "image/x-jps", + ".js": "application/javascript", + ".json": "application/json", + ".jut": "image/jutvision", + ".kar": "audio/midi", + ".karbon": "application/vnd.kde.karbon", + ".kfo": "application/vnd.kde.kformula", + ".flw": "application/vnd.kde.kivio", + ".kml": "application/vnd.google-earth.kml+xml", + ".kmz": "application/vnd.google-earth.kmz", + ".kon": "application/vnd.kde.kontour", + ".kpr": "application/vnd.kde.kpresenter", + ".kpt": "application/vnd.kde.kpresenter", + ".ksp": "application/vnd.kde.kspread", + ".kwd": "application/vnd.kde.kword", + ".kwt": "application/vnd.kde.kword", + ".ksh": "text/x-scriptksh", + ".la": "audio/nspaudio", + ".lam": 
"audio/x-liveaudio", + ".latex": "application/x-latex", + ".lha": "application/lha", + ".lhx": "application/octet-stream", + ".list": "text/plain", + ".lma": "audio/nspaudio", + ".log": "text/plain", + ".lsp": "text/x-scriptlisp", + ".lst": "text/plain", + ".lsx": "text/x-la-asf", + ".ltx": "application/x-latex", + ".lzh": "application/octet-stream", + ".lzx": "application/lzx", + ".m1v": "video/mpeg", + ".m2a": "audio/mpeg", + ".m2v": "video/mpeg", + ".m3u": "audio/x-mpegurl", + ".m": "text/x-m", + ".man": "application/x-troff-man", + ".manifest": "text/cache-manifest", + ".map": "application/x-navimap", + ".mar": "text/plain", + ".mbd": "application/mbedlet", + ".mc$": "application/x-magic-cap-package-10", + ".mcd": "application/mcad", + ".mcf": "text/mcf", + ".mcp": "application/netmc", + ".me": "application/x-troff-me", + ".mht": "message/rfc822", + ".mhtml": "message/rfc822", + ".mid": "application/x-midi", + ".midi": "application/x-midi", + ".mif": "application/x-frame", + ".mime": "message/rfc822", + ".mjf": "audio/x-vndaudioexplosionmjuicemediafile", + ".mjpg": "video/x-motion-jpeg", + ".mm": "application/base64", + ".mme": "application/base64", + ".mod": "audio/mod", + ".moov": "video/quicktime", + ".mov": "video/quicktime", + ".movie": "video/x-sgi-movie", + ".mp2": "audio/mpeg", + ".mp3": "audio/mpeg3", + ".mp4": "video/mp4", + ".mpa": "audio/mpeg", + ".mpc": "application/x-project", + ".mpe": "video/mpeg", + ".mpeg": "video/mpeg", + ".mpg": "video/mpeg", + ".mpga": "audio/mpeg", + ".mpp": "application/vndms-project", + ".mpt": "application/x-project", + ".mpv": "application/x-project", + ".mpx": "application/x-project", + ".mrc": "application/marc", + ".ms": "application/x-troff-ms", + ".mv": "video/x-sgi-movie", + ".my": "audio/make", + ".mzz": "application/x-vndaudioexplosionmzz", + ".nap": "image/naplps", + ".naplps": "image/naplps", + ".nc": "application/x-netcdf", + ".ncm": "application/vndnokiaconfiguration-message", + ".nif": "image/x-niff", + 
".niff": "image/x-niff", + ".nix": "application/x-mix-transfer", + ".nsc": "application/x-conference", + ".nvd": "application/x-navidoc", + ".o": "application/octet-stream", + ".oda": "application/oda", + ".odb": "application/vnd.oasis.opendocument.database", + ".odc": "application/vnd.oasis.opendocument.chart", + ".odf": "application/vnd.oasis.opendocument.formula", + ".odg": "application/vnd.oasis.opendocument.graphics", + ".odi": "application/vnd.oasis.opendocument.image", + ".odm": "application/vnd.oasis.opendocument.text-master", + ".odp": "application/vnd.oasis.opendocument.presentation", + ".ods": "application/vnd.oasis.opendocument.spreadsheet", + ".odt": "application/vnd.oasis.opendocument.text", + ".oga": "audio/ogg", + ".ogg": "audio/ogg", + ".ogv": "video/ogg", + ".omc": "application/x-omc", + ".omcd": "application/x-omcdatamaker", + ".omcr": "application/x-omcregerator", + ".otc": "application/vnd.oasis.opendocument.chart-template", + ".otf": "application/vnd.oasis.opendocument.formula-template", + ".otg": "application/vnd.oasis.opendocument.graphics-template", + ".oth": "application/vnd.oasis.opendocument.text-web", + ".oti": "application/vnd.oasis.opendocument.image-template", + ".otm": "application/vnd.oasis.opendocument.text-master", + ".otp": "application/vnd.oasis.opendocument.presentation-template", + ".ots": "application/vnd.oasis.opendocument.spreadsheet-template", + ".ott": "application/vnd.oasis.opendocument.text-template", + ".p10": "application/pkcs10", + ".p12": "application/pkcs-12", + ".p7a": "application/x-pkcs7-signature", + ".p7c": "application/pkcs7-mime", + ".p7m": "application/pkcs7-mime", + ".p7r": "application/x-pkcs7-certreqresp", + ".p7s": "application/pkcs7-signature", + ".p": "text/x-pascal", + ".part": "application/pro_eng", + ".pas": "text/pascal", + ".pbm": "image/x-portable-bitmap", + ".pcl": "application/vndhp-pcl", + ".pct": "image/x-pict", + ".pcx": "image/x-pcx", + ".pdb": "chemical/x-pdb", + ".pdf": 
"application/pdf", + ".pfunk": "audio/make", + ".pgm": "image/x-portable-graymap", + ".pic": "image/pict", + ".pict": "image/pict", + ".pkg": "application/x-newton-compatible-pkg", + ".pko": "application/vndms-pkipko", + ".pl": "text/x-scriptperl", + ".plx": "application/x-pixclscript", + ".pm4": "application/x-pagemaker", + ".pm5": "application/x-pagemaker", + ".pm": "text/x-scriptperl-module", + ".png": "image/png", + ".pnm": "application/x-portable-anymap", + ".pot": "application/mspowerpoint", + ".pov": "model/x-pov", + ".ppa": "application/vndms-powerpoint", + ".ppm": "image/x-portable-pixmap", + ".pps": "application/mspowerpoint", + ".ppt": "application/mspowerpoint", + ".ppz": "application/mspowerpoint", + ".pre": "application/x-freelance", + ".prt": "application/pro_eng", + ".ps": "application/postscript", + ".psd": "application/octet-stream", + ".pvu": "paleovu/x-pv", + ".pwz": "application/vndms-powerpoint", + ".py": "text/x-scriptphyton", + ".pyc": "applicaiton/x-bytecodepython", + ".qcp": "audio/vndqcelp", + ".qd3": "x-world/x-3dmf", + ".qd3d": "x-world/x-3dmf", + ".qif": "image/x-quicktime", + ".qt": "video/quicktime", + ".qtc": "video/x-qtc", + ".qti": "image/x-quicktime", + ".qtif": "image/x-quicktime", + ".ra": "audio/x-pn-realaudio", + ".ram": "audio/x-pn-realaudio", + ".rar": "application/x-rar-compressed", + ".ras": "application/x-cmu-raster", + ".rast": "image/cmu-raster", + ".rexx": "text/x-scriptrexx", + ".rf": "image/vndrn-realflash", + ".rgb": "image/x-rgb", + ".rm": "application/vndrn-realmedia", + ".rmi": "audio/mid", + ".rmm": "audio/x-pn-realaudio", + ".rmp": "audio/x-pn-realaudio", + ".rng": "application/ringing-tones", + ".rnx": "application/vndrn-realplayer", + ".roff": "application/x-troff", + ".rp": "image/vndrn-realpix", + ".rpm": "audio/x-pn-realaudio-plugin", + ".rt": "text/vndrn-realtext", + ".rtf": "text/richtext", + ".rtx": "text/richtext", + ".rv": "video/vndrn-realvideo", + ".s": "text/x-asm", + ".s3m": "audio/s3m", + 
".s7z": "application/x-7z-compressed", + ".saveme": "application/octet-stream", + ".sbk": "application/x-tbook", + ".scm": "text/x-scriptscheme", + ".sdml": "text/plain", + ".sdp": "application/sdp", + ".sdr": "application/sounder", + ".sea": "application/sea", + ".set": "application/set", + ".sgm": "text/x-sgml", + ".sgml": "text/x-sgml", + ".sh": "text/x-scriptsh", + ".shar": "application/x-bsh", + ".shtml": "text/x-server-parsed-html", + ".sid": "audio/x-psid", + ".skd": "application/x-koan", + ".skm": "application/x-koan", + ".skp": "application/x-koan", + ".skt": "application/x-koan", + ".sit": "application/x-stuffit", + ".sitx": "application/x-stuffitx", + ".sl": "application/x-seelogo", + ".smi": "application/smil", + ".smil": "application/smil", + ".snd": "audio/basic", + ".sol": "application/solids", + ".spc": "text/x-speech", + ".spl": "application/futuresplash", + ".spr": "application/x-sprite", + ".sprite": "application/x-sprite", + ".spx": "audio/ogg", + ".src": "application/x-wais-source", + ".ssi": "text/x-server-parsed-html", + ".ssm": "application/streamingmedia", + ".sst": "application/vndms-pkicertstore", + ".step": "application/step", + ".stl": "application/sla", + ".stp": "application/step", + ".sv4cpio": "application/x-sv4cpio", + ".sv4crc": "application/x-sv4crc", + ".svf": "image/vnddwg", + ".svg": "image/svg+xml", + ".svr": "application/x-world", + ".swf": "application/x-shockwave-flash", + ".t": "application/x-troff", + ".talk": "text/x-speech", + ".tar": "application/x-tar", + ".tbk": "application/toolbook", + ".tcl": "text/x-scripttcl", + ".tcsh": "text/x-scripttcsh", + ".tex": "application/x-tex", + ".texi": "application/x-texinfo", + ".texinfo": "application/x-texinfo", + ".text": "text/plain", + ".tgz": "application/gnutar", + ".tif": "image/tiff", + ".tiff": "image/tiff", + ".tr": "application/x-troff", + ".tsi": "audio/tsp-audio", + ".tsp": "application/dsptype", + ".tsv": "text/tab-separated-values", + ".turbot": "image/florian", + 
".txt": "text/plain", + ".uil": "text/x-uil", + ".uni": "text/uri-list", + ".unis": "text/uri-list", + ".unv": "application/i-deas", + ".uri": "text/uri-list", + ".uris": "text/uri-list", + ".ustar": "application/x-ustar", + ".uu": "text/x-uuencode", + ".uue": "text/x-uuencode", + ".vcd": "application/x-cdlink", + ".vcf": "text/x-vcard", + ".vcard": "text/x-vcard", + ".vcs": "text/x-vcalendar", + ".vda": "application/vda", + ".vdo": "video/vdo", + ".vew": "application/groupwise", + ".viv": "video/vivo", + ".vivo": "video/vivo", + ".vmd": "application/vocaltec-media-desc", + ".vmf": "application/vocaltec-media-file", + ".voc": "audio/voc", + ".vos": "video/vosaic", + ".vox": "audio/voxware", + ".vqe": "audio/x-twinvq-plugin", + ".vqf": "audio/x-twinvq", + ".vql": "audio/x-twinvq-plugin", + ".vrml": "application/x-vrml", + ".vrt": "x-world/x-vrt", + ".vsd": "application/x-visio", + ".vst": "application/x-visio", + ".vsw": "application/x-visio", + ".w60": "application/wordperfect60", + ".w61": "application/wordperfect61", + ".w6w": "application/msword", + ".wav": "audio/wav", + ".wb1": "application/x-qpro", + ".wbmp": "image/vnd.wap.wbmp", + ".web": "application/vndxara", + ".wiz": "application/msword", + ".wk1": "application/x-123", + ".wmf": "windows/metafile", + ".wml": "text/vnd.wap.wml", + ".wmlc": "application/vnd.wap.wmlc", + ".wmls": "text/vnd.wap.wmlscript", + ".wmlsc": "application/vnd.wap.wmlscriptc", + ".word": "application/msword", + ".wp5": "application/wordperfect", + ".wp6": "application/wordperfect", + ".wp": "application/wordperfect", + ".wpd": "application/wordperfect", + ".wq1": "application/x-lotus", + ".wri": "application/mswrite", + ".wrl": "application/x-world", + ".wrz": "model/vrml", + ".wsc": "text/scriplet", + ".wsrc": "application/x-wais-source", + ".wtk": "application/x-wintalk", + ".x-png": "image/png", + ".xbm": "image/x-xbitmap", + ".xdr": "video/x-amt-demorun", + ".xgz": "xgl/drawing", + ".xif": "image/vndxiff", + ".xl": 
"application/excel", + ".xla": "application/excel", + ".xlb": "application/excel", + ".xlc": "application/excel", + ".xld": "application/excel", + ".xlk": "application/excel", + ".xll": "application/excel", + ".xlm": "application/excel", + ".xls": "application/excel", + ".xlt": "application/excel", + ".xlv": "application/excel", + ".xlw": "application/excel", + ".xm": "audio/xm", + ".xml": "text/xml", + ".xmz": "xgl/movie", + ".xpix": "application/x-vndls-xpix", + ".xpm": "image/x-xpixmap", + ".xsr": "video/x-amt-showrun", + ".xwd": "image/x-xwd", + ".xyz": "chemical/x-pdb", + ".z": "application/x-compress", + ".zip": "application/zip", + ".zoo": "application/octet-stream", + ".zsh": "text/x-scriptzsh", + ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + ".docm": "application/vnd.ms-word.document.macroEnabled.12", + ".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template", + ".dotm": "application/vnd.ms-word.template.macroEnabled.12", + ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + ".xlsm": "application/vnd.ms-excel.sheet.macroEnabled.12", + ".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template", + ".xltm": "application/vnd.ms-excel.template.macroEnabled.12", + ".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12", + ".xlam": "application/vnd.ms-excel.addin.macroEnabled.12", + ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + ".pptm": "application/vnd.ms-powerpoint.presentation.macroEnabled.12", + ".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow", + ".ppsm": "application/vnd.ms-powerpoint.slideshow.macroEnabled.12", + ".potx": "application/vnd.openxmlformats-officedocument.presentationml.template", + ".potm": "application/vnd.ms-powerpoint.template.macroEnabled.12", + ".ppam": "application/vnd.ms-powerpoint.addin.macroEnabled.12", + ".sldx": 
"application/vnd.openxmlformats-officedocument.presentationml.slide", + ".sldm": "application/vnd.ms-powerpoint.slide.macroEnabled.12", + ".thmx": "application/vnd.ms-officetheme", + ".onetoc": "application/onenote", + ".onetoc2": "application/onenote", + ".onetmp": "application/onenote", + ".onepkg": "application/onenote", + ".key": "application/x-iwork-keynote-sffkey", + ".kth": "application/x-iwork-keynote-sffkth", + ".nmbtemplate": "application/x-iwork-numbers-sfftemplate", + ".numbers": "application/x-iwork-numbers-sffnumbers", + ".pages": "application/x-iwork-pages-sffpages", + ".template": "application/x-iwork-pages-sfftemplate", + ".xpi": "application/x-xpinstall", + ".oex": "application/x-opera-extension", + ".mustache": "text/html", +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/namespace.go b/Godeps/_workspace/src/github.com/astaxie/beego/namespace.go new file mode 100644 index 000000000..0dfdd7afd --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/namespace.go @@ -0,0 +1,390 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package beego + +import ( + "net/http" + "strings" + + beecontext "github.com/astaxie/beego/context" +) + +type namespaceCond func(*beecontext.Context) bool + +// LinkNamespace used as link action +type LinkNamespace func(*Namespace) + +// Namespace is store all the info +type Namespace struct { + prefix string + handlers *ControllerRegister +} + +// NewNamespace get new Namespace +func NewNamespace(prefix string, params ...LinkNamespace) *Namespace { + ns := &Namespace{ + prefix: prefix, + handlers: NewControllerRegister(), + } + for _, p := range params { + p(ns) + } + return ns +} + +// Cond set condtion function +// if cond return true can run this namespace, else can't +// usage: +// ns.Cond(func (ctx *context.Context) bool{ +// if ctx.Input.Domain() == "api.beego.me" { +// return true +// } +// return false +// }) +// Cond as the first filter +func (n *Namespace) Cond(cond namespaceCond) *Namespace { + fn := func(ctx *beecontext.Context) { + if !cond(ctx) { + exception("405", ctx) + } + } + if v, ok := n.handlers.filters[BeforeRouter]; ok { + mr := new(FilterRouter) + mr.tree = NewTree() + mr.pattern = "*" + mr.filterFunc = fn + mr.tree.AddRouter("*", true) + n.handlers.filters[BeforeRouter] = append([]*FilterRouter{mr}, v...) 
+ } else { + n.handlers.InsertFilter("*", BeforeRouter, fn) + } + return n +} + +// Filter add filter in the Namespace +// action has before & after +// FilterFunc +// usage: +// Filter("before", func (ctx *context.Context){ +// _, ok := ctx.Input.Session("uid").(int) +// if !ok && ctx.Request.RequestURI != "/login" { +// ctx.Redirect(302, "/login") +// } +// }) +func (n *Namespace) Filter(action string, filter ...FilterFunc) *Namespace { + var a int + if action == "before" { + a = BeforeRouter + } else if action == "after" { + a = FinishRouter + } + for _, f := range filter { + n.handlers.InsertFilter("*", a, f) + } + return n +} + +// Router same as beego.Rourer +// refer: https://godoc.org/github.com/astaxie/beego#Router +func (n *Namespace) Router(rootpath string, c ControllerInterface, mappingMethods ...string) *Namespace { + n.handlers.Add(rootpath, c, mappingMethods...) + return n +} + +// AutoRouter same as beego.AutoRouter +// refer: https://godoc.org/github.com/astaxie/beego#AutoRouter +func (n *Namespace) AutoRouter(c ControllerInterface) *Namespace { + n.handlers.AddAuto(c) + return n +} + +// AutoPrefix same as beego.AutoPrefix +// refer: https://godoc.org/github.com/astaxie/beego#AutoPrefix +func (n *Namespace) AutoPrefix(prefix string, c ControllerInterface) *Namespace { + n.handlers.AddAutoPrefix(prefix, c) + return n +} + +// Get same as beego.Get +// refer: https://godoc.org/github.com/astaxie/beego#Get +func (n *Namespace) Get(rootpath string, f FilterFunc) *Namespace { + n.handlers.Get(rootpath, f) + return n +} + +// Post same as beego.Post +// refer: https://godoc.org/github.com/astaxie/beego#Post +func (n *Namespace) Post(rootpath string, f FilterFunc) *Namespace { + n.handlers.Post(rootpath, f) + return n +} + +// Delete same as beego.Delete +// refer: https://godoc.org/github.com/astaxie/beego#Delete +func (n *Namespace) Delete(rootpath string, f FilterFunc) *Namespace { + n.handlers.Delete(rootpath, f) + return n +} + +// Put same as 
beego.Put +// refer: https://godoc.org/github.com/astaxie/beego#Put +func (n *Namespace) Put(rootpath string, f FilterFunc) *Namespace { + n.handlers.Put(rootpath, f) + return n +} + +// Head same as beego.Head +// refer: https://godoc.org/github.com/astaxie/beego#Head +func (n *Namespace) Head(rootpath string, f FilterFunc) *Namespace { + n.handlers.Head(rootpath, f) + return n +} + +// Options same as beego.Options +// refer: https://godoc.org/github.com/astaxie/beego#Options +func (n *Namespace) Options(rootpath string, f FilterFunc) *Namespace { + n.handlers.Options(rootpath, f) + return n +} + +// Patch same as beego.Patch +// refer: https://godoc.org/github.com/astaxie/beego#Patch +func (n *Namespace) Patch(rootpath string, f FilterFunc) *Namespace { + n.handlers.Patch(rootpath, f) + return n +} + +// Any same as beego.Any +// refer: https://godoc.org/github.com/astaxie/beego#Any +func (n *Namespace) Any(rootpath string, f FilterFunc) *Namespace { + n.handlers.Any(rootpath, f) + return n +} + +// Handler same as beego.Handler +// refer: https://godoc.org/github.com/astaxie/beego#Handler +func (n *Namespace) Handler(rootpath string, h http.Handler) *Namespace { + n.handlers.Handler(rootpath, h) + return n +} + +// Include add include class +// refer: https://godoc.org/github.com/astaxie/beego#Include +func (n *Namespace) Include(cList ...ControllerInterface) *Namespace { + n.handlers.Include(cList...) + return n +} + +// Namespace add nest Namespace +// usage: +//ns := beego.NewNamespace(“/v1”). +//Namespace( +// beego.NewNamespace("/shop"). +// Get("/:id", func(ctx *context.Context) { +// ctx.Output.Body([]byte("shopinfo")) +// }), +// beego.NewNamespace("/order"). +// Get("/:id", func(ctx *context.Context) { +// ctx.Output.Body([]byte("orderinfo")) +// }), +// beego.NewNamespace("/crm"). 
+// Get("/:id", func(ctx *context.Context) { +// ctx.Output.Body([]byte("crminfo")) +// }), +//) +func (n *Namespace) Namespace(ns ...*Namespace) *Namespace { + for _, ni := range ns { + for k, v := range ni.handlers.routers { + if t, ok := n.handlers.routers[k]; ok { + addPrefix(v, ni.prefix) + n.handlers.routers[k].AddTree(ni.prefix, v) + } else { + t = NewTree() + t.AddTree(ni.prefix, v) + addPrefix(t, ni.prefix) + n.handlers.routers[k] = t + } + } + if ni.handlers.enableFilter { + for pos, filterList := range ni.handlers.filters { + for _, mr := range filterList { + t := NewTree() + t.AddTree(ni.prefix, mr.tree) + mr.tree = t + n.handlers.insertFilterRouter(pos, mr) + } + } + } + } + return n +} + +// AddNamespace register Namespace into beego.Handler +// support multi Namespace +func AddNamespace(nl ...*Namespace) { + for _, n := range nl { + for k, v := range n.handlers.routers { + if t, ok := BeeApp.Handlers.routers[k]; ok { + addPrefix(v, n.prefix) + BeeApp.Handlers.routers[k].AddTree(n.prefix, v) + } else { + t = NewTree() + t.AddTree(n.prefix, v) + addPrefix(t, n.prefix) + BeeApp.Handlers.routers[k] = t + } + } + if n.handlers.enableFilter { + for pos, filterList := range n.handlers.filters { + for _, mr := range filterList { + t := NewTree() + t.AddTree(n.prefix, mr.tree) + mr.tree = t + BeeApp.Handlers.insertFilterRouter(pos, mr) + } + } + } + } +} + +func addPrefix(t *Tree, prefix string) { + for _, v := range t.fixrouters { + addPrefix(v, prefix) + } + if t.wildcard != nil { + addPrefix(t.wildcard, prefix) + } + for _, l := range t.leaves { + if c, ok := l.runObject.(*controllerInfo); ok { + if !strings.HasPrefix(c.pattern, prefix) { + c.pattern = prefix + c.pattern + } + } + } + +} + +// NSCond is Namespace Condition +func NSCond(cond namespaceCond) LinkNamespace { + return func(ns *Namespace) { + ns.Cond(cond) + } +} + +// NSBefore Namespace BeforeRouter filter +func NSBefore(filiterList ...FilterFunc) LinkNamespace { + return func(ns *Namespace) { 
+ ns.Filter("before", filiterList...) + } +} + +// NSAfter add Namespace FinishRouter filter +func NSAfter(filiterList ...FilterFunc) LinkNamespace { + return func(ns *Namespace) { + ns.Filter("after", filiterList...) + } +} + +// NSInclude Namespace Include ControllerInterface +func NSInclude(cList ...ControllerInterface) LinkNamespace { + return func(ns *Namespace) { + ns.Include(cList...) + } +} + +// NSRouter call Namespace Router +func NSRouter(rootpath string, c ControllerInterface, mappingMethods ...string) LinkNamespace { + return func(ns *Namespace) { + ns.Router(rootpath, c, mappingMethods...) + } +} + +// NSGet call Namespace Get +func NSGet(rootpath string, f FilterFunc) LinkNamespace { + return func(ns *Namespace) { + ns.Get(rootpath, f) + } +} + +// NSPost call Namespace Post +func NSPost(rootpath string, f FilterFunc) LinkNamespace { + return func(ns *Namespace) { + ns.Post(rootpath, f) + } +} + +// NSHead call Namespace Head +func NSHead(rootpath string, f FilterFunc) LinkNamespace { + return func(ns *Namespace) { + ns.Head(rootpath, f) + } +} + +// NSPut call Namespace Put +func NSPut(rootpath string, f FilterFunc) LinkNamespace { + return func(ns *Namespace) { + ns.Put(rootpath, f) + } +} + +// NSDelete call Namespace Delete +func NSDelete(rootpath string, f FilterFunc) LinkNamespace { + return func(ns *Namespace) { + ns.Delete(rootpath, f) + } +} + +// NSAny call Namespace Any +func NSAny(rootpath string, f FilterFunc) LinkNamespace { + return func(ns *Namespace) { + ns.Any(rootpath, f) + } +} + +// NSOptions call Namespace Options +func NSOptions(rootpath string, f FilterFunc) LinkNamespace { + return func(ns *Namespace) { + ns.Options(rootpath, f) + } +} + +// NSPatch call Namespace Patch +func NSPatch(rootpath string, f FilterFunc) LinkNamespace { + return func(ns *Namespace) { + ns.Patch(rootpath, f) + } +} + +// NSAutoRouter call Namespace AutoRouter +func NSAutoRouter(c ControllerInterface) LinkNamespace { + return func(ns *Namespace) { + 
ns.AutoRouter(c) + } +} + +// NSAutoPrefix call Namespace AutoPrefix +func NSAutoPrefix(prefix string, c ControllerInterface) LinkNamespace { + return func(ns *Namespace) { + ns.AutoPrefix(prefix, c) + } +} + +// NSNamespace add sub Namespace +func NSNamespace(prefix string, params ...LinkNamespace) LinkNamespace { + return func(ns *Namespace) { + n := NewNamespace(prefix, params...) + ns.Namespace(n) + } +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/README.md b/Godeps/_workspace/src/github.com/astaxie/beego/orm/README.md new file mode 100644 index 000000000..fa7fdca10 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/README.md @@ -0,0 +1,156 @@ +# beego orm + +[![Build Status](https://drone.io/github.com/astaxie/beego/status.png)](https://drone.io/github.com/astaxie/beego/latest) + +A powerful orm framework for go. + +It is heavily influenced by Django ORM, SQLAlchemy. + +**Support Database:** + +* MySQL: [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql) +* PostgreSQL: [github.com/lib/pq](https://github.com/lib/pq) +* Sqlite3: [github.com/mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) + +Passed all test, but need more feedback. 
+ +**Features:** + +* full go type support +* easy for usage, simple CRUD operation +* auto join with relation table +* cross DataBase compatible query +* Raw SQL query / mapper without orm model +* full test keep stable and strong + +more features please read the docs + +**Install:** + + go get github.com/astaxie/beego/orm + +## Changelog + +* 2013-08-19: support table auto create +* 2013-08-13: update test for database types +* 2013-08-13: go type support, such as int8, uint8, byte, rune +* 2013-08-13: date / datetime timezone support very well + +## Quick Start + +#### Simple Usage + +```go +package main + +import ( + "fmt" + "github.com/astaxie/beego/orm" + _ "github.com/go-sql-driver/mysql" // import your used driver +) + +// Model Struct +type User struct { + Id int `orm:"auto"` + Name string `orm:"size(100)"` +} + +func init() { + // register model + orm.RegisterModel(new(User)) + + // set default database + orm.RegisterDataBase("default", "mysql", "root:root@/my_db?charset=utf8", 30) +} + +func main() { + o := orm.NewOrm() + + user := User{Name: "slene"} + + // insert + id, err := o.Insert(&user) + + // update + user.Name = "astaxie" + num, err := o.Update(&user) + + // read one + u := User{Id: user.Id} + err = o.Read(&u) + + // delete + num, err = o.Delete(&u) +} +``` + +#### Next with relation + +```go +type Post struct { + Id int `orm:"auto"` + Title string `orm:"size(100)"` + User *User `orm:"rel(fk)"` +} + +var posts []*Post +qs := o.QueryTable("post") +num, err := qs.Filter("User__Name", "slene").All(&posts) +``` + +#### Use Raw sql + +If you don't like ORM,use Raw SQL to query / mapping without ORM setting + +```go +var maps []Params +num, err := o.Raw("SELECT id FROM user WHERE name = ?", "slene").Values(&maps) +if num > 0 { + fmt.Println(maps[0]["id"]) +} +``` + +#### Transaction + +```go +o.Begin() +... 
+user := User{Name: "slene"} +id, err := o.Insert(&user) +if err == nil { + o.Commit() +} else { + o.Rollback() +} + +``` + +#### Debug Log Queries + +In development env, you can simple use + +```go +func main() { + orm.Debug = true +... +``` + +enable log queries. + +output include all queries, such as exec / prepare / transaction. + +like this: + +```go +[ORM] - 2013-08-09 13:18:16 - [Queries/default] - [ db.Exec / 0.4ms] - [INSERT INTO `user` (`name`) VALUES (?)] - `slene` +... +``` + +note: not recommend use this in product env. + +## Docs + +more details and examples in docs and test + +[documents](http://beego.me/docs/mvc/model/overview.md) + diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/cmd.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/cmd.go new file mode 100644 index 000000000..3638a75cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/cmd.go @@ -0,0 +1,283 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package orm + +import ( + "flag" + "fmt" + "os" + "strings" +) + +type commander interface { + Parse([]string) + Run() error +} + +var ( + commands = make(map[string]commander) +) + +// print help. 
+func printHelp(errs ...string) { + content := `orm command usage: + + syncdb - auto create tables + sqlall - print sql of create tables + help - print this help +` + + if len(errs) > 0 { + fmt.Println(errs[0]) + } + fmt.Println(content) + os.Exit(2) +} + +// RunCommand listen for orm command and then run it if command arguments passed. +func RunCommand() { + if len(os.Args) < 2 || os.Args[1] != "orm" { + return + } + + BootStrap() + + args := argString(os.Args[2:]) + name := args.Get(0) + + if name == "help" { + printHelp() + } + + if cmd, ok := commands[name]; ok { + cmd.Parse(os.Args[3:]) + cmd.Run() + os.Exit(0) + } else { + if name == "" { + printHelp() + } else { + printHelp(fmt.Sprintf("unknown command %s", name)) + } + } +} + +// sync database struct command interface. +type commandSyncDb struct { + al *alias + force bool + verbose bool + noInfo bool + rtOnError bool +} + +// parse orm command line arguments. +func (d *commandSyncDb) Parse(args []string) { + var name string + + flagSet := flag.NewFlagSet("orm command: syncdb", flag.ExitOnError) + flagSet.StringVar(&name, "db", "default", "DataBase alias name") + flagSet.BoolVar(&d.force, "force", false, "drop tables before create") + flagSet.BoolVar(&d.verbose, "v", false, "verbose info") + flagSet.Parse(args) + + d.al = getDbAlias(name) +} + +// run orm line command. 
+func (d *commandSyncDb) Run() error { + var drops []string + if d.force { + drops = getDbDropSQL(d.al) + } + + db := d.al.DB + + if d.force { + for i, mi := range modelCache.allOrdered() { + query := drops[i] + if !d.noInfo { + fmt.Printf("drop table `%s`\n", mi.table) + } + _, err := db.Exec(query) + if d.verbose { + fmt.Printf(" %s\n\n", query) + } + if err != nil { + if d.rtOnError { + return err + } + fmt.Printf(" %s\n", err.Error()) + } + } + } + + sqls, indexes := getDbCreateSQL(d.al) + + tables, err := d.al.DbBaser.GetTables(db) + if err != nil { + if d.rtOnError { + return err + } + fmt.Printf(" %s\n", err.Error()) + } + + for i, mi := range modelCache.allOrdered() { + if tables[mi.table] { + if !d.noInfo { + fmt.Printf("table `%s` already exists, skip\n", mi.table) + } + + var fields []*fieldInfo + columns, err := d.al.DbBaser.GetColumns(db, mi.table) + if err != nil { + if d.rtOnError { + return err + } + fmt.Printf(" %s\n", err.Error()) + } + + for _, fi := range mi.fields.fieldsDB { + if _, ok := columns[fi.column]; ok == false { + fields = append(fields, fi) + } + } + + for _, fi := range fields { + query := getColumnAddQuery(d.al, fi) + + if !d.noInfo { + fmt.Printf("add column `%s` for table `%s`\n", fi.fullName, mi.table) + } + + _, err := db.Exec(query) + if d.verbose { + fmt.Printf(" %s\n", query) + } + if err != nil { + if d.rtOnError { + return err + } + fmt.Printf(" %s\n", err.Error()) + } + } + + for _, idx := range indexes[mi.table] { + if d.al.DbBaser.IndexExists(db, idx.Table, idx.Name) == false { + if !d.noInfo { + fmt.Printf("create index `%s` for table `%s`\n", idx.Name, idx.Table) + } + + query := idx.SQL + _, err := db.Exec(query) + if d.verbose { + fmt.Printf(" %s\n", query) + } + if err != nil { + if d.rtOnError { + return err + } + fmt.Printf(" %s\n", err.Error()) + } + } + } + + continue + } + + if !d.noInfo { + fmt.Printf("create table `%s` \n", mi.table) + } + + queries := []string{sqls[i]} + for _, idx := range 
indexes[mi.table] { + queries = append(queries, idx.SQL) + } + + for _, query := range queries { + _, err := db.Exec(query) + if d.verbose { + query = " " + strings.Join(strings.Split(query, "\n"), "\n ") + fmt.Println(query) + } + if err != nil { + if d.rtOnError { + return err + } + fmt.Printf(" %s\n", err.Error()) + } + } + if d.verbose { + fmt.Println("") + } + } + + return nil +} + +// database creation commander interface implement. +type commandSQLAll struct { + al *alias +} + +// parse orm command line arguments. +func (d *commandSQLAll) Parse(args []string) { + var name string + + flagSet := flag.NewFlagSet("orm command: sqlall", flag.ExitOnError) + flagSet.StringVar(&name, "db", "default", "DataBase alias name") + flagSet.Parse(args) + + d.al = getDbAlias(name) +} + +// run orm line command. +func (d *commandSQLAll) Run() error { + sqls, indexes := getDbCreateSQL(d.al) + var all []string + for i, mi := range modelCache.allOrdered() { + queries := []string{sqls[i]} + for _, idx := range indexes[mi.table] { + queries = append(queries, idx.SQL) + } + sql := strings.Join(queries, "\n") + all = append(all, sql) + } + fmt.Println(strings.Join(all, "\n\n")) + + return nil +} + +func init() { + commands["syncdb"] = new(commandSyncDb) + commands["sqlall"] = new(commandSQLAll) +} + +// RunSyncdb run syncdb command line. +// name means table's alias name. default is "default". +// force means run next sql if the current is error. +// verbose means show all info when running command or not. 
+func RunSyncdb(name string, force bool, verbose bool) error { + BootStrap() + + al := getDbAlias(name) + cmd := new(commandSyncDb) + cmd.al = al + cmd.force = force + cmd.noInfo = !verbose + cmd.verbose = verbose + cmd.rtOnError = true + return cmd.Run() +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/cmd_utils.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/cmd_utils.go new file mode 100644 index 000000000..da0ee8ab4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/cmd_utils.go @@ -0,0 +1,294 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package orm + +import ( + "fmt" + "os" + "strings" +) + +type dbIndex struct { + Table string + Name string + SQL string +} + +// create database drop sql. +func getDbDropSQL(al *alias) (sqls []string) { + if len(modelCache.cache) == 0 { + fmt.Println("no Model found, need register your model") + os.Exit(2) + } + + Q := al.DbBaser.TableQuote() + + for _, mi := range modelCache.allOrdered() { + sqls = append(sqls, fmt.Sprintf(`DROP TABLE IF EXISTS %s%s%s`, Q, mi.table, Q)) + } + return sqls +} + +// get database column type string. 
+func getColumnTyp(al *alias, fi *fieldInfo) (col string) { + T := al.DbBaser.DbTypes() + fieldType := fi.fieldType + fieldSize := fi.size + +checkColumn: + switch fieldType { + case TypeBooleanField: + col = T["bool"] + case TypeCharField: + col = fmt.Sprintf(T["string"], fieldSize) + case TypeTextField: + col = T["string-text"] + case TypeDateField: + col = T["time.Time-date"] + case TypeDateTimeField: + col = T["time.Time"] + case TypeBitField: + col = T["int8"] + case TypeSmallIntegerField: + col = T["int16"] + case TypeIntegerField: + col = T["int32"] + case TypeBigIntegerField: + if al.Driver == DRSqlite { + fieldType = TypeIntegerField + goto checkColumn + } + col = T["int64"] + case TypePositiveBitField: + col = T["uint8"] + case TypePositiveSmallIntegerField: + col = T["uint16"] + case TypePositiveIntegerField: + col = T["uint32"] + case TypePositiveBigIntegerField: + col = T["uint64"] + case TypeFloatField: + col = T["float64"] + case TypeDecimalField: + s := T["float64-decimal"] + if strings.Index(s, "%d") == -1 { + col = s + } else { + col = fmt.Sprintf(s, fi.digits, fi.decimals) + } + case RelForeignKey, RelOneToOne: + fieldType = fi.relModelInfo.fields.pk.fieldType + fieldSize = fi.relModelInfo.fields.pk.size + goto checkColumn + } + + return +} + +// create alter sql string. +func getColumnAddQuery(al *alias, fi *fieldInfo) string { + Q := al.DbBaser.TableQuote() + typ := getColumnTyp(al, fi) + + if fi.null == false { + typ += " " + "NOT NULL" + } + + return fmt.Sprintf("ALTER TABLE %s%s%s ADD COLUMN %s%s%s %s %s", + Q, fi.mi.table, Q, + Q, fi.column, Q, + typ, getColumnDefault(fi), + ) +} + +// create database creation string. 
+func getDbCreateSQL(al *alias) (sqls []string, tableIndexes map[string][]dbIndex) {
+	// Hard exit: generating schema SQL with no registered model is a setup error.
+	if len(modelCache.cache) == 0 {
+		fmt.Println("no Model found, need register your model")
+		os.Exit(2)
+	}
+
+	Q := al.DbBaser.TableQuote()
+	T := al.DbBaser.DbTypes()
+	sep := fmt.Sprintf("%s, %s", Q, Q)
+
+	tableIndexes = make(map[string][]dbIndex)
+
+	// One CREATE TABLE statement per registered model, in registration order.
+	for _, mi := range modelCache.allOrdered() {
+		sql := fmt.Sprintf("-- %s\n", strings.Repeat("-", 50))
+		sql += fmt.Sprintf("-- Table Structure for `%s`\n", mi.fullName)
+		sql += fmt.Sprintf("-- %s\n", strings.Repeat("-", 50))
+
+		sql += fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s%s%s (\n", Q, mi.table, Q)
+
+		columns := make([]string, 0, len(mi.fields.fieldsDB))
+
+		// Each entry is a list of column names forming one index.
+		sqlIndexes := [][]string{}
+
+		for _, fi := range mi.fields.fieldsDB {
+
+			column := fmt.Sprintf("    %s%s%s ", Q, fi.column, Q)
+			col := getColumnTyp(al, fi)
+
+			if fi.auto {
+				// sqlite/postgres use a self-contained auto type; others append it to the column type.
+				switch al.Driver {
+				case DRSqlite, DRPostgres:
+					column += T["auto"]
+				default:
+					column += col + " " + T["auto"]
+				}
+			} else if fi.pk {
+				column += col + " " + T["pk"]
+			} else {
+				column += col
+
+				if fi.null == false {
+					column += " " + "NOT NULL"
+				}
+
+				//if fi.initial.String() != "" {
+				//	column += " DEFAULT " + fi.initial.String()
+				//}
+
+				// Append attribute DEFAULT
+				column += getColumnDefault(fi)
+
+				if fi.unique {
+					column += " " + "UNIQUE"
+				}
+
+				if fi.index {
+					sqlIndexes = append(sqlIndexes, []string{fi.column})
+				}
+			}
+
+			// NOTE(review): %COL% placeholder presumably emitted by getColumnTyp — confirm.
+			if strings.Index(column, "%COL%") != -1 {
+				column = strings.Replace(column, "%COL%", fi.column, -1)
+			}
+
+			columns = append(columns, column)
+		}
+
+		// Table-level UNIQUE constraints: TableUnique() definitions plus collected unique groups.
+		if mi.model != nil {
+			allnames := getTableUnique(mi.addrField)
+			if !mi.manual && len(mi.uniques) > 0 {
+				allnames = append(allnames, mi.uniques)
+			}
+			for _, names := range allnames {
+				cols := make([]string, 0, len(names))
+				for _, name := range names {
+					if fi, ok := mi.fields.GetByAny(name); ok && fi.dbcol {
+						cols = append(cols, fi.column)
+					} else {
+						panic(fmt.Errorf("cannot found column `%s` when parse UNIQUE in `%s.TableUnique`", name, mi.fullName))
+					}
+				}
+				column := fmt.Sprintf("    UNIQUE (%s%s%s)", Q, strings.Join(cols, sep), Q)
+				columns = append(columns, column)
+			}
+		}
+
+		sql += strings.Join(columns, ",\n")
+		sql += "\n)"
+
+		// MySQL only: storage engine from TableEngine(), falling back to the alias default.
+		if al.Driver == DRMySQL {
+			var engine string
+			if mi.model != nil {
+				engine = getTableEngine(mi.addrField)
+			}
+			if engine == "" {
+				engine = al.Engine
+			}
+			sql += " ENGINE=" + engine
+		}
+
+		sql += ";"
+		sqls = append(sqls, sql)
+
+		// Multi-column indexes declared via TableIndex().
+		if mi.model != nil {
+			for _, names := range getTableIndex(mi.addrField) {
+				cols := make([]string, 0, len(names))
+				for _, name := range names {
+					if fi, ok := mi.fields.GetByAny(name); ok && fi.dbcol {
+						cols = append(cols, fi.column)
+					} else {
+						panic(fmt.Errorf("cannot found column `%s` when parse INDEX in `%s.TableIndex`", name, mi.fullName))
+					}
+				}
+				sqlIndexes = append(sqlIndexes, cols)
+			}
+		}
+
+		// Emit CREATE INDEX statements separately, keyed by table for the caller.
+		for _, names := range sqlIndexes {
+			name := mi.table + "_" + strings.Join(names, "_")
+			cols := strings.Join(names, sep)
+			sql := fmt.Sprintf("CREATE INDEX %s%s%s ON %s%s%s (%s%s%s);", Q, name, Q, Q, mi.table, Q, Q, cols, Q)
+
+			index := dbIndex{}
+			index.Table = mi.table
+			index.Name = name
+			index.SQL = sql
+
+			tableIndexes[mi.table] = append(tableIndexes[mi.table], index)
+		}
+
+	}
+
+	return
+}
+
+// Get string value for the attribute "DEFAULT" for the CREATE, ALTER commands
+func getColumnDefault(fi *fieldInfo) string {
+	var (
+		v, t, d string
+	)
+
+	// Skip default attribute if field is in relations
+	if fi.rel || fi.reverse {
+		return v
+	}
+
+	// Default template quotes the value (string-ish columns).
+	t = " DEFAULT '%s' "
+
+	// These defaults will be useful if there no config value orm:"default" and NOT NULL is on
+	switch fi.fieldType {
+	case TypeDateField, TypeDateTimeField, TypeTextField:
+		// No DEFAULT clause at all for date/datetime/text columns.
+		return v
+
+	case TypeBitField, TypeSmallIntegerField, TypeIntegerField,
+		TypeBigIntegerField, TypePositiveBitField, TypePositiveSmallIntegerField,
+		TypePositiveIntegerField, TypePositiveBigIntegerField, TypeFloatField,
+		TypeDecimalField:
+		// Numeric columns: unquoted default, fallback 0.
+		t = " DEFAULT %s "
+		d = "0"
+	case TypeBooleanField:
+		t = " DEFAULT %s "
+		d = "FALSE"
+	}
+
+	// Explicit orm:"default(...)" wins; otherwise NOT NULL columns get the type fallback.
+	if fi.colDefault {
+		if !fi.initial.Exist() {
+			v = fmt.Sprintf(t, "")
+		} else {
+			v = fmt.Sprintf(t, fi.initial.String())
+		}
+	} else {
+		if !fi.null {
+			v = fmt.Sprintf(t, d)
+		}
+	}
+
+	return v
+}
diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/db.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/db.go
new file mode 100644
index 000000000..b62c165bc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/db.go
@@ -0,0 +1,1650 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package orm
+
+import (
+	"database/sql"
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"time"
+)
+
+// Layouts used when (de)serializing date / datetime columns as strings.
+const (
+	formatDate     = "2006-01-02"
+	formatDateTime = "2006-01-02 15:04:05"
+)
+
+var (
+	// ErrMissPK missing pk error
+	ErrMissPK = errors.New("missed pk value")
+)
+
+var (
+	// operators is the set of query-filter suffixes this backend accepts
+	// (e.g. name__icontains); commented entries are not yet supported.
+	operators = map[string]bool{
+		"exact":     true,
+		"iexact":    true,
+		"contains":  true,
+		"icontains": true,
+		// "regex": true,
+		// "iregex": true,
+		"gt":          true,
+		"gte":         true,
+		"lt":          true,
+		"lte":         true,
+		"eq":          true,
+		"nq":          true,
+		"startswith":  true,
+		"endswith":    true,
+		"istartswith": true,
+		"iendswith":   true,
+		"in":          true,
+		"between":     true,
+		// "year": true,
+		// "month": true,
+		// "day": true,
+		// "week_day": true,
+		"isnull": true,
+		// "search": true,
+	}
+)
+
+// dbBase is the shared implementation backing the driver-specific dbBaser
+// instances; ins points at the concrete driver so overridden hooks are used.
+type dbBase struct {
+	ins dbBaser
+}
+
+// check dbBase implements dbBaser interface.
+var _ dbBaser = new(dbBase)
+
+// get struct columns values as interface slice.
+// cols may hold field names or column names (resolved via GetByAny).
+// When skipAuto is set, auto(-increment) fields are omitted; when names is
+// non-nil the resolved column names are appended to it in matching order.
+func (d *dbBase) collectValues(mi *modelInfo, ind reflect.Value, cols []string, skipAuto bool, insert bool, names *[]string, tz *time.Location) (values []interface{}, err error) {
+	var columns []string
+
+	if names != nil {
+		columns = *names
+	}
+
+	for _, column := range cols {
+		var fi *fieldInfo
+		if fi, _ = mi.fields.GetByAny(column); fi != nil {
+			column = fi.column
+		} else {
+			panic(fmt.Errorf("wrong db field/column name `%s` for model `%s`", column, mi.fullName))
+		}
+		// Skip non-DB fields, and auto fields when the caller asked to.
+		if fi.dbcol == false || fi.auto && skipAuto {
+			continue
+		}
+		value, err := d.collectFieldValue(mi, fi, ind, insert, tz)
+		if err != nil {
+			return nil, err
+		}
+
+		if names != nil {
+			columns = append(columns, column)
+		}
+
+		values = append(values, value)
+	}
+
+	if names != nil {
+		*names = columns
+	}
+
+	return
+}
+
+// get one field value in struct column as interface.
+func (d *dbBase) collectFieldValue(mi *modelInfo, fi *fieldInfo, ind reflect.Value, insert bool, tz *time.Location) (interface{}, error) { + var value interface{} + if fi.pk { + _, value, _ = getExistPk(mi, ind) + } else { + field := ind.Field(fi.fieldIndex) + if fi.isFielder { + f := field.Addr().Interface().(Fielder) + value = f.RawValue() + } else { + switch fi.fieldType { + case TypeBooleanField: + if nb, ok := field.Interface().(sql.NullBool); ok { + value = nil + if nb.Valid { + value = nb.Bool + } + } else if field.Kind() == reflect.Ptr { + if field.IsNil() { + value = nil + } else { + value = field.Elem().Bool() + } + } else { + value = field.Bool() + } + case TypeCharField, TypeTextField: + if ns, ok := field.Interface().(sql.NullString); ok { + value = nil + if ns.Valid { + value = ns.String + } + } else if field.Kind() == reflect.Ptr { + if field.IsNil() { + value = nil + } else { + value = field.Elem().String() + } + } else { + value = field.String() + } + case TypeFloatField, TypeDecimalField: + if nf, ok := field.Interface().(sql.NullFloat64); ok { + value = nil + if nf.Valid { + value = nf.Float64 + } + } else if field.Kind() == reflect.Ptr { + if field.IsNil() { + value = nil + } else { + value = field.Elem().Float() + } + } else { + vu := field.Interface() + if _, ok := vu.(float32); ok { + value, _ = StrTo(ToStr(vu)).Float64() + } else { + value = field.Float() + } + } + case TypeDateField, TypeDateTimeField: + value = field.Interface() + if t, ok := value.(time.Time); ok { + d.ins.TimeToDB(&t, tz) + if t.IsZero() { + value = nil + } else { + value = t + } + } + default: + switch { + case fi.fieldType&IsPostiveIntegerField > 0: + if field.Kind() == reflect.Ptr { + if field.IsNil() { + value = nil + } else { + value = field.Elem().Uint() + } + } else { + value = field.Uint() + } + case fi.fieldType&IsIntegerField > 0: + if ni, ok := field.Interface().(sql.NullInt64); ok { + value = nil + if ni.Valid { + value = ni.Int64 + } + } else if 
field.Kind() == reflect.Ptr { + if field.IsNil() { + value = nil + } else { + value = field.Elem().Int() + } + } else { + value = field.Int() + } + case fi.fieldType&IsRelField > 0: + if field.IsNil() { + value = nil + } else { + if _, vu, ok := getExistPk(fi.relModelInfo, reflect.Indirect(field)); ok { + value = vu + } else { + value = nil + } + } + if fi.null == false && value == nil { + return nil, fmt.Errorf("field `%s` cannot be NULL", fi.fullName) + } + } + } + } + switch fi.fieldType { + case TypeDateField, TypeDateTimeField: + if fi.autoNow || fi.autoNowAdd && insert { + if insert { + if t, ok := value.(time.Time); ok && !t.IsZero() { + break + } + } + tnow := time.Now() + d.ins.TimeToDB(&tnow, tz) + value = tnow + if fi.isFielder { + f := field.Addr().Interface().(Fielder) + f.SetRaw(tnow.In(DefaultTimeLoc)) + } else { + field.Set(reflect.ValueOf(tnow.In(DefaultTimeLoc))) + } + } + } + } + return value, nil +} + +// create insert sql preparation statement object. +func (d *dbBase) PrepareInsert(q dbQuerier, mi *modelInfo) (stmtQuerier, string, error) { + Q := d.ins.TableQuote() + + dbcols := make([]string, 0, len(mi.fields.dbcols)) + marks := make([]string, 0, len(mi.fields.dbcols)) + for _, fi := range mi.fields.fieldsDB { + if fi.auto == false { + dbcols = append(dbcols, fi.column) + marks = append(marks, "?") + } + } + qmarks := strings.Join(marks, ", ") + sep := fmt.Sprintf("%s, %s", Q, Q) + columns := strings.Join(dbcols, sep) + + query := fmt.Sprintf("INSERT INTO %s%s%s (%s%s%s) VALUES (%s)", Q, mi.table, Q, Q, columns, Q, qmarks) + + d.ins.ReplaceMarks(&query) + + d.ins.HasReturningID(mi, &query) + + stmt, err := q.Prepare(query) + return stmt, query, err +} + +// insert struct with prepared statement and given struct reflect value. 
+func (d *dbBase) InsertStmt(stmt stmtQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location) (int64, error) {
+	// Collect all non-auto column values from the struct (skipAuto=true, insert=true).
+	values, err := d.collectValues(mi, ind, mi.fields.dbcols, true, true, nil, tz)
+	if err != nil {
+		return 0, err
+	}
+
+	// Drivers with RETURNING (e.g. postgres) yield the new id via QueryRow.
+	if d.ins.HasReturningID(mi, nil) {
+		row := stmt.QueryRow(values...)
+		var id int64
+		err := row.Scan(&id)
+		return id, err
+	}
+	res, err := stmt.Exec(values...)
+	if err == nil {
+		return res.LastInsertId()
+	}
+	return 0, err
+}
+
+// query sql ,read records and persist in dbBaser.
+func (d *dbBase) Read(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location, cols []string) error {
+	var whereCols []string
+	var args []interface{}
+
+	// if specify cols length > 0, then use it for where condition.
+	if len(cols) > 0 {
+		var err error
+		whereCols = make([]string, 0, len(cols))
+		args, err = d.collectValues(mi, ind, cols, false, false, &whereCols, tz)
+		if err != nil {
+			return err
+		}
+	} else {
+		// default use pk value as where condtion.
+		pkColumn, pkValue, ok := getExistPk(mi, ind)
+		if ok == false {
+			return ErrMissPK
+		}
+		whereCols = []string{pkColumn}
+		args = append(args, pkValue)
+	}
+
+	Q := d.ins.TableQuote()
+
+	sep := fmt.Sprintf("%s, %s", Q, Q)
+	sels := strings.Join(mi.fields.dbcols, sep)
+	colsNum := len(mi.fields.dbcols)
+
+	// sep is reused: now joins WHERE columns as `a` = ? AND `b` = ? ...
+	sep = fmt.Sprintf("%s = ? AND %s", Q, Q)
+	wheres := strings.Join(whereCols, sep)
+
+	query := fmt.Sprintf("SELECT %s%s%s FROM %s%s%s WHERE %s%s%s = ?", Q, sels, Q, Q, mi.table, Q, Q, wheres, Q)
+
+	// Scan targets: one *interface{} per selected column.
+	refs := make([]interface{}, colsNum)
+	for i := range refs {
+		var ref interface{}
+		refs[i] = &ref
+	}
+
+	d.ins.ReplaceMarks(&query)
+
+	row := q.QueryRow(query, args...)
+	if err := row.Scan(refs...); err != nil {
+		if err == sql.ErrNoRows {
+			return ErrNoRows
+		}
+		return err
+	}
+	// Build a fresh struct value, fill it from the scanned refs, then copy back.
+	elm := reflect.New(mi.addrField.Elem().Type())
+	mind := reflect.Indirect(elm)
+	d.setColsValues(mi, &mind, mi.fields.dbcols, refs, tz)
+	ind.Set(mind)
+	return nil
+}
+
+// execute insert sql dbQuerier with given struct reflect.Value.
+func (d *dbBase) Insert(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location) (int64, error) {
+	names := make([]string, 0, len(mi.fields.dbcols)-1)
+	values, err := d.collectValues(mi, ind, mi.fields.dbcols, true, true, &names, tz)
+	if err != nil {
+		return 0, err
+	}
+
+	return d.InsertValue(q, mi, false, names, values)
+}
+
+// multi-insert sql with given slice struct reflect.Value.
+// Rows are flushed in batches of `bulk`, and any remainder on the last row.
+func (d *dbBase) InsertMulti(q dbQuerier, mi *modelInfo, sind reflect.Value, bulk int, tz *time.Location) (int64, error) {
+	var (
+		cnt    int64
+		nums   int
+		values []interface{}
+		names  []string
+	)
+
+	// typ := reflect.Indirect(mi.addrField).Type()
+
+	length := sind.Len()
+
+	for i := 1; i <= length; i++ {
+
+		ind := reflect.Indirect(sind.Index(i - 1))
+
+		// Is this needed ?
+		// if !ind.Type().AssignableTo(typ) {
+		// 	return cnt, ErrArgs
+		// }
+
+		if i == 1 {
+			// First row establishes the column list and sizes the value buffer.
+			vus, err := d.collectValues(mi, ind, mi.fields.dbcols, true, true, &names, tz)
+			if err != nil {
+				return cnt, err
+			}
+			values = make([]interface{}, bulk*len(vus))
+			nums += copy(values, vus)
+
+		} else {
+
+			vus, err := d.collectValues(mi, ind, mi.fields.dbcols, true, true, nil, tz)
+			if err != nil {
+				return cnt, err
+			}
+
+			if len(vus) != len(names) {
+				return cnt, ErrArgs
+			}
+
+			nums += copy(values[nums:], vus)
+		}
+
+		// Flush a full batch, or whatever is buffered on the final row.
+		if i > 1 && i%bulk == 0 || length == i {
+			num, err := d.InsertValue(q, mi, true, names, values[:nums])
+			if err != nil {
+				return cnt, err
+			}
+			cnt += num
+			nums = 0
+		}
+	}
+
+	return cnt, nil
+}
+
+// execute insert sql with given struct and given values.
+// insert the given values, not the field values in struct.
+func (d *dbBase) InsertValue(q dbQuerier, mi *modelInfo, isMulti bool, names []string, values []interface{}) (int64, error) {
+	Q := d.ins.TableQuote()
+
+	marks := make([]string, len(names))
+	for i := range marks {
+		marks[i] = "?"
+	}
+
+	sep := fmt.Sprintf("%s, %s", Q, Q)
+	qmarks := strings.Join(marks, ", ")
+	columns := strings.Join(names, sep)
+
+	// For multi-insert, values holds `multi` full rows back to back.
+	multi := len(values) / len(names)
+
+	if isMulti {
+		// Expand to (?, ?), (?, ?), ... — one group per row.
+		qmarks = strings.Repeat(qmarks+"), (", multi-1) + qmarks
+	}
+
+	query := fmt.Sprintf("INSERT INTO %s%s%s (%s%s%s) VALUES (%s)", Q, mi.table, Q, Q, columns, Q, qmarks)
+
+	d.ins.ReplaceMarks(&query)
+
+	if isMulti || !d.ins.HasReturningID(mi, &query) {
+		res, err := q.Exec(query, values...)
+		if err == nil {
+			if isMulti {
+				return res.RowsAffected()
+			}
+			return res.LastInsertId()
+		}
+		return 0, err
+	}
+	// RETURNING path (single insert only): the new id comes back as a row.
+	row := q.QueryRow(query, values...)
+	var id int64
+	err := row.Scan(&id)
+	return id, err
+}
+
+// execute update sql dbQuerier with given struct reflect.Value.
+func (d *dbBase) Update(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location, cols []string) (int64, error) {
+	pkName, pkValue, ok := getExistPk(mi, ind)
+	if ok == false {
+		return 0, ErrMissPK
+	}
+
+	var setNames []string
+
+	// if specify cols length is zero, then commit all columns.
+	if len(cols) == 0 {
+		cols = mi.fields.dbcols
+		setNames = make([]string, 0, len(mi.fields.dbcols)-1)
+	} else {
+		setNames = make([]string, 0, len(cols))
+	}
+
+	setValues, err := d.collectValues(mi, ind, cols, true, false, &setNames, tz)
+	if err != nil {
+		return 0, err
+	}
+
+	// pk value is the final arg, matching the trailing WHERE placeholder.
+	setValues = append(setValues, pkValue)
+
+	Q := d.ins.TableQuote()
+
+	sep := fmt.Sprintf("%s = ?, %s", Q, Q)
+	setColumns := strings.Join(setNames, sep)
+
+	query := fmt.Sprintf("UPDATE %s%s%s SET %s%s%s = ? WHERE %s%s%s = ?", Q, mi.table, Q, Q, setColumns, Q, Q, pkName, Q)
+
+	d.ins.ReplaceMarks(&query)
+
+	res, err := q.Exec(query, setValues...)
+	if err == nil {
+		return res.RowsAffected()
+	}
+	return 0, err
+}
+
+// execute delete sql dbQuerier with given struct reflect.Value.
+// delete index is pk.
+func (d *dbBase) Delete(q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location) (int64, error) {
+	pkName, pkValue, ok := getExistPk(mi, ind)
+	if ok == false {
+		return 0, ErrMissPK
+	}
+
+	Q := d.ins.TableQuote()
+
+	query := fmt.Sprintf("DELETE FROM %s%s%s WHERE %s%s%s = ?", Q, mi.table, Q, Q, pkName, Q)
+
+	d.ins.ReplaceMarks(&query)
+	res, err := q.Exec(query, pkValue)
+	if err == nil {
+		num, err := res.RowsAffected()
+		if err != nil {
+			return 0, err
+		}
+		if num > 0 {
+			// Zero the auto pk in the struct so the value can be re-inserted.
+			if mi.fields.pk.auto {
+				if mi.fields.pk.fieldType&IsPostiveIntegerField > 0 {
+					ind.Field(mi.fields.pk.fieldIndex).SetUint(0)
+				} else {
+					ind.Field(mi.fields.pk.fieldIndex).SetInt(0)
+				}
+			}
+			// Cascade per each reverse relation's on_delete rule.
+			err := d.deleteRels(q, mi, []interface{}{pkValue}, tz)
+			if err != nil {
+				return num, err
+			}
+		}
+		return num, err
+	}
+	return 0, err
+}
+
+// update table-related record by querySet.
+// need querySet not struct reflect.Value to update related records.
+func (d *dbBase) UpdateBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condition, params Params, tz *time.Location) (int64, error) {
+	columns := make([]string, 0, len(params))
+	values := make([]interface{}, 0, len(params))
+	for col, val := range params {
+		if fi, ok := mi.fields.GetByAny(col); ok == false || fi.dbcol == false {
+			panic(fmt.Errorf("wrong field/column name `%s`", col))
+		} else {
+			columns = append(columns, fi.column)
+			values = append(values, val)
+		}
+	}
+
+	if len(columns) == 0 {
+		panic(fmt.Errorf("update params cannot empty"))
+	}
+
+	tables := newDbTables(mi, d.ins)
+	if qs != nil {
+		tables.parseRelated(qs.related, qs.relDepth)
+	}
+
+	where, args := tables.getCondSQL(cond, false, tz)
+
+	// SET values come first, then the WHERE args.
+	values = append(values, args...)
+
+	join := tables.getJoinSQL()
+
+	var query, T string
+
+	Q := d.ins.TableQuote()
+
+	// T prefixes columns with the table alias when the driver can join in UPDATE.
+	if d.ins.SupportUpdateJoin() {
+		T = "T0."
+	}
+
+	cols := make([]string, 0, len(columns))
+
+	for i, v := range columns {
+		col := fmt.Sprintf("%s%s%s%s", T, Q, v, Q)
+		// colValue wraps arithmetic updates like F-expressions: col = col <op> ?.
+		if c, ok := values[i].(colValue); ok {
+			switch c.opt {
+			case ColAdd:
+				cols = append(cols, col+" = "+col+" + ?")
+			case ColMinus:
+				cols = append(cols, col+" = "+col+" - ?")
+			case ColMultiply:
+				cols = append(cols, col+" = "+col+" * ?")
+			case ColExcept:
+				cols = append(cols, col+" = "+col+" / ?")
+			}
+			values[i] = c.value
+		} else {
+			cols = append(cols, col+" = ?")
+		}
+	}
+
+	sets := strings.Join(cols, ", ") + " "
+
+	if d.ins.SupportUpdateJoin() {
+		query = fmt.Sprintf("UPDATE %s%s%s T0 %sSET %s%s", Q, mi.table, Q, join, sets, where)
+	} else {
+		// No UPDATE..JOIN: select matching pks in a subquery and update by pk IN (...).
+		supQuery := fmt.Sprintf("SELECT T0.%s%s%s FROM %s%s%s T0 %s%s", Q, mi.fields.pk.column, Q, Q, mi.table, Q, join, where)
+		query = fmt.Sprintf("UPDATE %s%s%s SET %sWHERE %s%s%s IN ( %s )", Q, mi.table, Q, sets, Q, mi.fields.pk.column, Q, supQuery)
+	}
+
+	d.ins.ReplaceMarks(&query)
+	res, err := q.Exec(query, values...)
+	if err == nil {
+		return res.RowsAffected()
+	}
+	return 0, err
+}
+
+// delete related records.
+// do UpdateBatch or DeleteBatch by condition of tables' relationship.
+func (d *dbBase) deleteRels(q dbQuerier, mi *modelInfo, args []interface{}, tz *time.Location) error {
+	for _, fi := range mi.fields.fieldsReverse {
+		fi = fi.reverseFieldInfo
+		switch fi.onDelete {
+		case odCascade:
+			cond := NewCondition().And(fmt.Sprintf("%s__in", fi.name), args...)
+			_, err := d.DeleteBatch(q, nil, fi.mi, cond, tz)
+			if err != nil {
+				return err
+			}
+		case odSetDefault, odSetNULL:
+			cond := NewCondition().And(fmt.Sprintf("%s__in", fi.name), args...)
+			// NULL unless the rule asks for the field's declared default.
+			params := Params{fi.column: nil}
+			if fi.onDelete == odSetDefault {
+				params[fi.column] = fi.initial.String()
+			}
+			_, err := d.UpdateBatch(q, nil, fi.mi, cond, params, tz)
+			if err != nil {
+				return err
+			}
+		case odDoNothing:
+		}
+	}
+	return nil
+}
+
+// delete table-related records.
+func (d *dbBase) DeleteBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condition, tz *time.Location) (int64, error) {
+	tables := newDbTables(mi, d.ins)
+	tables.skipEnd = true
+
+	if qs != nil {
+		tables.parseRelated(qs.related, qs.relDepth)
+	}
+
+	// Refuse unconditional mass delete.
+	if cond == nil || cond.IsEmpty() {
+		panic(fmt.Errorf("delete operation cannot execute without condition"))
+	}
+
+	Q := d.ins.TableQuote()
+
+	where, args := tables.getCondSQL(cond, false, tz)
+	join := tables.getJoinSQL()
+
+	// Two phases: first collect matching pks, then delete by pk IN (...).
+	cols := fmt.Sprintf("T0.%s%s%s", Q, mi.fields.pk.column, Q)
+	query := fmt.Sprintf("SELECT %s FROM %s%s%s T0 %s%s", cols, Q, mi.table, Q, join, where)
+
+	d.ins.ReplaceMarks(&query)
+
+	var rs *sql.Rows
+	r, err := q.Query(query, args...)
+	if err != nil {
+		return 0, err
+	}
+	rs = r
+	defer rs.Close()
+
+	var ref interface{}
+	args = make([]interface{}, 0)
+	cnt := 0
+	for rs.Next() {
+		if err := rs.Scan(&ref); err != nil {
+			return 0, err
+		}
+		args = append(args, reflect.ValueOf(ref).Interface())
+		cnt++
+	}
+
+	if cnt == 0 {
+		return 0, nil
+	}
+
+	marks := make([]string, len(args))
+	for i := range marks {
+		marks[i] = "?"
+	}
+	sql := fmt.Sprintf("IN (%s)", strings.Join(marks, ", "))
+	query = fmt.Sprintf("DELETE FROM %s%s%s WHERE %s%s%s %s", Q, mi.table, Q, Q, mi.fields.pk.column, Q, sql)
+
+	d.ins.ReplaceMarks(&query)
+	res, err := q.Exec(query, args...)
+	if err == nil {
+		num, err := res.RowsAffected()
+		if err != nil {
+			return 0, err
+		}
+		if num > 0 {
+			// Cascade deletes for the collected pks.
+			err := d.deleteRels(q, mi, args, tz)
+			if err != nil {
+				return num, err
+			}
+		}
+		return num, nil
+	}
+	return 0, err
+}
+
+// read related records.
+func (d *dbBase) ReadBatch(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condition, container interface{}, tz *time.Location, cols []string) (int64, error) { + + val := reflect.ValueOf(container) + ind := reflect.Indirect(val) + + errTyp := true + one := true + isPtr := true + + if val.Kind() == reflect.Ptr { + fn := "" + if ind.Kind() == reflect.Slice { + one = false + typ := ind.Type().Elem() + switch typ.Kind() { + case reflect.Ptr: + fn = getFullName(typ.Elem()) + case reflect.Struct: + isPtr = false + fn = getFullName(typ) + } + } else { + fn = getFullName(ind.Type()) + } + errTyp = fn != mi.fullName + } + + if errTyp { + if one { + panic(fmt.Errorf("wrong object type `%s` for rows scan, need *%s", val.Type(), mi.fullName)) + } else { + panic(fmt.Errorf("wrong object type `%s` for rows scan, need *[]*%s or *[]%s", val.Type(), mi.fullName, mi.fullName)) + } + } + + rlimit := qs.limit + offset := qs.offset + + Q := d.ins.TableQuote() + + var tCols []string + if len(cols) > 0 { + hasRel := len(qs.related) > 0 || qs.relDepth > 0 + tCols = make([]string, 0, len(cols)) + var maps map[string]bool + if hasRel { + maps = make(map[string]bool) + } + for _, col := range cols { + if fi, ok := mi.fields.GetByAny(col); ok { + tCols = append(tCols, fi.column) + if hasRel { + maps[fi.column] = true + } + } else { + panic(fmt.Errorf("wrong field/column name `%s`", col)) + } + } + if hasRel { + for _, fi := range mi.fields.fieldsDB { + if fi.fieldType&IsRelField > 0 { + if maps[fi.column] == false { + tCols = append(tCols, fi.column) + } + } + } + } + } else { + tCols = mi.fields.dbcols + } + + colsNum := len(tCols) + sep := fmt.Sprintf("%s, T0.%s", Q, Q) + sels := fmt.Sprintf("T0.%s%s%s", Q, strings.Join(tCols, sep), Q) + + tables := newDbTables(mi, d.ins) + tables.parseRelated(qs.related, qs.relDepth) + + where, args := tables.getCondSQL(cond, false, tz) + groupBy := tables.getGroupSQL(qs.groups) + orderBy := tables.getOrderSQL(qs.orders) + limit := tables.getLimitSQL(mi, 
offset, rlimit) + join := tables.getJoinSQL() + + for _, tbl := range tables.tables { + if tbl.sel { + colsNum += len(tbl.mi.fields.dbcols) + sep := fmt.Sprintf("%s, %s.%s", Q, tbl.index, Q) + sels += fmt.Sprintf(", %s.%s%s%s", tbl.index, Q, strings.Join(tbl.mi.fields.dbcols, sep), Q) + } + } + + sqlSelect := "SELECT" + if qs.distinct { + sqlSelect += " DISTINCT" + } + query := fmt.Sprintf("%s %s FROM %s%s%s T0 %s%s%s%s%s", sqlSelect, sels, Q, mi.table, Q, join, where, groupBy, orderBy, limit) + + d.ins.ReplaceMarks(&query) + + var rs *sql.Rows + r, err := q.Query(query, args...) + if err != nil { + return 0, err + } + rs = r + + refs := make([]interface{}, colsNum) + for i := range refs { + var ref interface{} + refs[i] = &ref + } + + defer rs.Close() + + slice := ind + + var cnt int64 + for rs.Next() { + if one && cnt == 0 || one == false { + if err := rs.Scan(refs...); err != nil { + return 0, err + } + + elm := reflect.New(mi.addrField.Elem().Type()) + mind := reflect.Indirect(elm) + + cacheV := make(map[string]*reflect.Value) + cacheM := make(map[string]*modelInfo) + trefs := refs + + d.setColsValues(mi, &mind, tCols, refs[:len(tCols)], tz) + trefs = refs[len(tCols):] + + for _, tbl := range tables.tables { + // loop selected tables + if tbl.sel { + last := mind + names := "" + mmi := mi + // loop cascade models + for _, name := range tbl.names { + names += name + if val, ok := cacheV[names]; ok { + last = *val + mmi = cacheM[names] + } else { + fi := mmi.fields.GetByName(name) + lastm := mmi + mmi = fi.relModelInfo + field := last + if last.Kind() != reflect.Invalid { + field = reflect.Indirect(last.Field(fi.fieldIndex)) + if field.IsValid() { + d.setColsValues(mmi, &field, mmi.fields.dbcols, trefs[:len(mmi.fields.dbcols)], tz) + for _, fi := range mmi.fields.fieldsReverse { + if fi.inModel && fi.reverseFieldInfo.mi == lastm { + if fi.reverseFieldInfo != nil { + f := field.Field(fi.fieldIndex) + if f.Kind() == reflect.Ptr { + f.Set(last.Addr()) + } + } + } + 
} + last = field + } + } + cacheV[names] = &field + cacheM[names] = mmi + } + } + trefs = trefs[len(mmi.fields.dbcols):] + } + } + + if one { + ind.Set(mind) + } else { + if cnt == 0 { + // you can use a empty & caped container list + // orm will not replace it + if ind.Len() != 0 { + // if container is not empty + // create a new one + slice = reflect.New(ind.Type()).Elem() + } + } + + if isPtr { + slice = reflect.Append(slice, mind.Addr()) + } else { + slice = reflect.Append(slice, mind) + } + } + } + cnt++ + } + + if one == false { + if cnt > 0 { + ind.Set(slice) + } else { + // when a result is empty and container is nil + // to set a empty container + if ind.IsNil() { + ind.Set(reflect.MakeSlice(ind.Type(), 0, 0)) + } + } + } + + return cnt, nil +} + +// excute count sql and return count result int64. +func (d *dbBase) Count(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condition, tz *time.Location) (cnt int64, err error) { + tables := newDbTables(mi, d.ins) + tables.parseRelated(qs.related, qs.relDepth) + + where, args := tables.getCondSQL(cond, false, tz) + tables.getOrderSQL(qs.orders) + join := tables.getJoinSQL() + + Q := d.ins.TableQuote() + + query := fmt.Sprintf("SELECT COUNT(*) FROM %s%s%s T0 %s%s", Q, mi.table, Q, join, where) + + d.ins.ReplaceMarks(&query) + + row := q.QueryRow(query, args...) + + err = row.Scan(&cnt) + return +} + +// generate sql with replacing operator string placeholders and replaced values. +func (d *dbBase) GenerateOperatorSQL(mi *modelInfo, fi *fieldInfo, operator string, args []interface{}, tz *time.Location) (string, []interface{}) { + sql := "" + params := getFlatParams(fi, args, tz) + + if len(params) == 0 { + panic(fmt.Errorf("operator `%s` need at least one args", operator)) + } + arg := params[0] + + switch operator { + case "in": + marks := make([]string, len(params)) + for i := range marks { + marks[i] = "?" 
+ } + sql = fmt.Sprintf("IN (%s)", strings.Join(marks, ", ")) + case "between": + if len(params) != 2 { + panic(fmt.Errorf("operator `%s` need 2 args not %d", operator, len(params))) + } + sql = "BETWEEN ? AND ?" + default: + if len(params) > 1 { + panic(fmt.Errorf("operator `%s` need 1 args not %d", operator, len(params))) + } + sql = d.ins.OperatorSQL(operator) + switch operator { + case "exact": + if arg == nil { + params[0] = "IS NULL" + } + case "iexact", "contains", "icontains", "startswith", "endswith", "istartswith", "iendswith": + param := strings.Replace(ToStr(arg), `%`, `\%`, -1) + switch operator { + case "iexact": + case "contains", "icontains": + param = fmt.Sprintf("%%%s%%", param) + case "startswith", "istartswith": + param = fmt.Sprintf("%s%%", param) + case "endswith", "iendswith": + param = fmt.Sprintf("%%%s", param) + } + params[0] = param + case "isnull": + if b, ok := arg.(bool); ok { + if b { + sql = "IS NULL" + } else { + sql = "IS NOT NULL" + } + params = nil + } else { + panic(fmt.Errorf("operator `%s` need a bool value not `%T`", operator, arg)) + } + } + } + return sql, params +} + +// gernerate sql string with inner function, such as UPPER(text). +func (d *dbBase) GenerateOperatorLeftCol(*fieldInfo, string, *string) { + // default not use +} + +// set values to struct column. +func (d *dbBase) setColsValues(mi *modelInfo, ind *reflect.Value, cols []string, values []interface{}, tz *time.Location) { + for i, column := range cols { + val := reflect.Indirect(reflect.ValueOf(values[i])).Interface() + + fi := mi.fields.GetByColumn(column) + + field := ind.Field(fi.fieldIndex) + + value, err := d.convertValueFromDB(fi, val, tz) + if err != nil { + panic(fmt.Errorf("Raw value: `%v` %s", val, err.Error())) + } + + _, err = d.setFieldValue(fi, value, field) + + if err != nil { + panic(fmt.Errorf("Raw value: `%v` %s", val, err.Error())) + } + } +} + +// convert value from database result to value following in field type. 
+func (d *dbBase) convertValueFromDB(fi *fieldInfo, val interface{}, tz *time.Location) (interface{}, error) { + if val == nil { + return nil, nil + } + + var value interface{} + var tErr error + + var str *StrTo + switch v := val.(type) { + case []byte: + s := StrTo(string(v)) + str = &s + case string: + s := StrTo(v) + str = &s + } + + fieldType := fi.fieldType + +setValue: + switch { + case fieldType == TypeBooleanField: + if str == nil { + switch v := val.(type) { + case int64: + b := v == 1 + value = b + default: + s := StrTo(ToStr(v)) + str = &s + } + } + if str != nil { + b, err := str.Bool() + if err != nil { + tErr = err + goto end + } + value = b + } + case fieldType == TypeCharField || fieldType == TypeTextField: + if str == nil { + value = ToStr(val) + } else { + value = str.String() + } + case fieldType == TypeDateField || fieldType == TypeDateTimeField: + if str == nil { + switch t := val.(type) { + case time.Time: + d.ins.TimeFromDB(&t, tz) + value = t + default: + s := StrTo(ToStr(t)) + str = &s + } + } + if str != nil { + s := str.String() + var ( + t time.Time + err error + ) + if len(s) >= 19 { + s = s[:19] + t, err = time.ParseInLocation(formatDateTime, s, tz) + } else { + if len(s) > 10 { + s = s[:10] + } + t, err = time.ParseInLocation(formatDate, s, tz) + } + t = t.In(DefaultTimeLoc) + + if err != nil && s != "0000-00-00" && s != "0000-00-00 00:00:00" { + tErr = err + goto end + } + value = t + } + case fieldType&IsIntegerField > 0: + if str == nil { + s := StrTo(ToStr(val)) + str = &s + } + if str != nil { + var err error + switch fieldType { + case TypeBitField: + _, err = str.Int8() + case TypeSmallIntegerField: + _, err = str.Int16() + case TypeIntegerField: + _, err = str.Int32() + case TypeBigIntegerField: + _, err = str.Int64() + case TypePositiveBitField: + _, err = str.Uint8() + case TypePositiveSmallIntegerField: + _, err = str.Uint16() + case TypePositiveIntegerField: + _, err = str.Uint32() + case TypePositiveBigIntegerField: + _, 
err = str.Uint64() + } + if err != nil { + tErr = err + goto end + } + if fieldType&IsPostiveIntegerField > 0 { + v, _ := str.Uint64() + value = v + } else { + v, _ := str.Int64() + value = v + } + } + case fieldType == TypeFloatField || fieldType == TypeDecimalField: + if str == nil { + switch v := val.(type) { + case float64: + value = v + default: + s := StrTo(ToStr(v)) + str = &s + } + } + if str != nil { + v, err := str.Float64() + if err != nil { + tErr = err + goto end + } + value = v + } + case fieldType&IsRelField > 0: + fi = fi.relModelInfo.fields.pk + fieldType = fi.fieldType + goto setValue + } + +end: + if tErr != nil { + err := fmt.Errorf("convert to `%s` failed, field: %s err: %s", fi.addrValue.Type(), fi.fullName, tErr) + return nil, err + } + + return value, nil + +} + +// set one value to struct column field. +func (d *dbBase) setFieldValue(fi *fieldInfo, value interface{}, field reflect.Value) (interface{}, error) { + + fieldType := fi.fieldType + isNative := fi.isFielder == false + +setValue: + switch { + case fieldType == TypeBooleanField: + if isNative { + if nb, ok := field.Interface().(sql.NullBool); ok { + if value == nil { + nb.Valid = false + } else { + nb.Bool = value.(bool) + nb.Valid = true + } + field.Set(reflect.ValueOf(nb)) + } else if field.Kind() == reflect.Ptr { + if value != nil { + v := value.(bool) + field.Set(reflect.ValueOf(&v)) + } + } else { + if value == nil { + value = false + } + field.SetBool(value.(bool)) + } + } + case fieldType == TypeCharField || fieldType == TypeTextField: + if isNative { + if ns, ok := field.Interface().(sql.NullString); ok { + if value == nil { + ns.Valid = false + } else { + ns.String = value.(string) + ns.Valid = true + } + field.Set(reflect.ValueOf(ns)) + } else if field.Kind() == reflect.Ptr { + if value != nil { + v := value.(string) + field.Set(reflect.ValueOf(&v)) + } + } else { + if value == nil { + value = "" + } + field.SetString(value.(string)) + } + } + case fieldType == 
TypeDateField || fieldType == TypeDateTimeField: + if isNative { + if value == nil { + value = time.Time{} + } + field.Set(reflect.ValueOf(value)) + } + case fieldType == TypePositiveBitField && field.Kind() == reflect.Ptr: + if value != nil { + v := uint8(value.(uint64)) + field.Set(reflect.ValueOf(&v)) + } + case fieldType == TypePositiveSmallIntegerField && field.Kind() == reflect.Ptr: + if value != nil { + v := uint16(value.(uint64)) + field.Set(reflect.ValueOf(&v)) + } + case fieldType == TypePositiveIntegerField && field.Kind() == reflect.Ptr: + if value != nil { + if field.Type() == reflect.TypeOf(new(uint)) { + v := uint(value.(uint64)) + field.Set(reflect.ValueOf(&v)) + } else { + v := uint32(value.(uint64)) + field.Set(reflect.ValueOf(&v)) + } + } + case fieldType == TypePositiveBigIntegerField && field.Kind() == reflect.Ptr: + if value != nil { + v := value.(uint64) + field.Set(reflect.ValueOf(&v)) + } + case fieldType == TypeBitField && field.Kind() == reflect.Ptr: + if value != nil { + v := int8(value.(int64)) + field.Set(reflect.ValueOf(&v)) + } + case fieldType == TypeSmallIntegerField && field.Kind() == reflect.Ptr: + if value != nil { + v := int16(value.(int64)) + field.Set(reflect.ValueOf(&v)) + } + case fieldType == TypeIntegerField && field.Kind() == reflect.Ptr: + if value != nil { + if field.Type() == reflect.TypeOf(new(int)) { + v := int(value.(int64)) + field.Set(reflect.ValueOf(&v)) + } else { + v := int32(value.(int64)) + field.Set(reflect.ValueOf(&v)) + } + } + case fieldType == TypeBigIntegerField && field.Kind() == reflect.Ptr: + if value != nil { + v := value.(int64) + field.Set(reflect.ValueOf(&v)) + } + case fieldType&IsIntegerField > 0: + if fieldType&IsPostiveIntegerField > 0 { + if isNative { + if value == nil { + value = uint64(0) + } + field.SetUint(value.(uint64)) + } + } else { + if isNative { + if ni, ok := field.Interface().(sql.NullInt64); ok { + if value == nil { + ni.Valid = false + } else { + ni.Int64 = value.(int64) + 
ni.Valid = true + } + field.Set(reflect.ValueOf(ni)) + } else { + if value == nil { + value = int64(0) + } + field.SetInt(value.(int64)) + } + } + } + case fieldType == TypeFloatField || fieldType == TypeDecimalField: + if isNative { + if nf, ok := field.Interface().(sql.NullFloat64); ok { + if value == nil { + nf.Valid = false + } else { + nf.Float64 = value.(float64) + nf.Valid = true + } + field.Set(reflect.ValueOf(nf)) + } else if field.Kind() == reflect.Ptr { + if value != nil { + if field.Type() == reflect.TypeOf(new(float32)) { + v := float32(value.(float64)) + field.Set(reflect.ValueOf(&v)) + } else { + v := value.(float64) + field.Set(reflect.ValueOf(&v)) + } + } + } else { + + if value == nil { + value = float64(0) + } + field.SetFloat(value.(float64)) + } + } + case fieldType&IsRelField > 0: + if value != nil { + fieldType = fi.relModelInfo.fields.pk.fieldType + mf := reflect.New(fi.relModelInfo.addrField.Elem().Type()) + field.Set(mf) + f := mf.Elem().Field(fi.relModelInfo.fields.pk.fieldIndex) + field = f + goto setValue + } + } + + if isNative == false { + fd := field.Addr().Interface().(Fielder) + err := fd.SetRaw(value) + if err != nil { + err = fmt.Errorf("converted value `%v` set to Fielder `%s` failed, err: %s", value, fi.fullName, err) + return nil, err + } + } + + return value, nil +} + +// query sql, read values , save to *[]ParamList. 
+func (d *dbBase) ReadValues(q dbQuerier, qs *querySet, mi *modelInfo, cond *Condition, exprs []string, container interface{}, tz *time.Location) (int64, error) { + + var ( + maps []Params + lists []ParamsList + list ParamsList + ) + + typ := 0 + switch v := container.(type) { + case *[]Params: + d := *v + if len(d) == 0 { + maps = d + } + typ = 1 + case *[]ParamsList: + d := *v + if len(d) == 0 { + lists = d + } + typ = 2 + case *ParamsList: + d := *v + if len(d) == 0 { + list = d + } + typ = 3 + default: + panic(fmt.Errorf("unsupport read values type `%T`", container)) + } + + tables := newDbTables(mi, d.ins) + + var ( + cols []string + infos []*fieldInfo + ) + + hasExprs := len(exprs) > 0 + + Q := d.ins.TableQuote() + + if hasExprs { + cols = make([]string, 0, len(exprs)) + infos = make([]*fieldInfo, 0, len(exprs)) + for _, ex := range exprs { + index, name, fi, suc := tables.parseExprs(mi, strings.Split(ex, ExprSep)) + if suc == false { + panic(fmt.Errorf("unknown field/column name `%s`", ex)) + } + cols = append(cols, fmt.Sprintf("%s.%s%s%s %s%s%s", index, Q, fi.column, Q, Q, name, Q)) + infos = append(infos, fi) + } + } else { + cols = make([]string, 0, len(mi.fields.dbcols)) + infos = make([]*fieldInfo, 0, len(exprs)) + for _, fi := range mi.fields.fieldsDB { + cols = append(cols, fmt.Sprintf("T0.%s%s%s %s%s%s", Q, fi.column, Q, Q, fi.name, Q)) + infos = append(infos, fi) + } + } + + where, args := tables.getCondSQL(cond, false, tz) + groupBy := tables.getGroupSQL(qs.groups) + orderBy := tables.getOrderSQL(qs.orders) + limit := tables.getLimitSQL(mi, qs.offset, qs.limit) + join := tables.getJoinSQL() + + sels := strings.Join(cols, ", ") + + query := fmt.Sprintf("SELECT %s FROM %s%s%s T0 %s%s%s%s%s", sels, Q, mi.table, Q, join, where, groupBy, orderBy, limit) + + d.ins.ReplaceMarks(&query) + + rs, err := q.Query(query, args...) 
+ if err != nil { + return 0, err + } + refs := make([]interface{}, len(cols)) + for i := range refs { + var ref interface{} + refs[i] = &ref + } + + defer rs.Close() + + var ( + cnt int64 + columns []string + ) + for rs.Next() { + if cnt == 0 { + cols, err := rs.Columns() + if err != nil { + return 0, err + } + columns = cols + } + + if err := rs.Scan(refs...); err != nil { + return 0, err + } + + switch typ { + case 1: + params := make(Params, len(cols)) + for i, ref := range refs { + fi := infos[i] + + val := reflect.Indirect(reflect.ValueOf(ref)).Interface() + + value, err := d.convertValueFromDB(fi, val, tz) + if err != nil { + panic(fmt.Errorf("db value convert failed `%v` %s", val, err.Error())) + } + + params[columns[i]] = value + } + maps = append(maps, params) + case 2: + params := make(ParamsList, 0, len(cols)) + for i, ref := range refs { + fi := infos[i] + + val := reflect.Indirect(reflect.ValueOf(ref)).Interface() + + value, err := d.convertValueFromDB(fi, val, tz) + if err != nil { + panic(fmt.Errorf("db value convert failed `%v` %s", val, err.Error())) + } + + params = append(params, value) + } + lists = append(lists, params) + case 3: + for i, ref := range refs { + fi := infos[i] + + val := reflect.Indirect(reflect.ValueOf(ref)).Interface() + + value, err := d.convertValueFromDB(fi, val, tz) + if err != nil { + panic(fmt.Errorf("db value convert failed `%v` %s", val, err.Error())) + } + + list = append(list, value) + } + } + + cnt++ + } + + switch v := container.(type) { + case *[]Params: + *v = maps + case *[]ParamsList: + *v = lists + case *ParamsList: + *v = list + } + + return cnt, nil +} + +func (d *dbBase) RowsTo(dbQuerier, *querySet, *modelInfo, *Condition, interface{}, string, string, *time.Location) (int64, error) { + return 0, nil +} + +// flag of update joined record. +func (d *dbBase) SupportUpdateJoin() bool { + return true +} + +func (d *dbBase) MaxLimit() uint64 { + return 18446744073709551615 +} + +// return quote. 
func (d *dbBase) TableQuote() string {
	return "`"
}

// replace value placeholder in parameterized sql string.
// The base driver uses `?` as the mark, so nothing needs to change;
// postgres overrides this to rewrite `?` into `$n`.
func (d *dbBase) ReplaceMarks(query *string) {
	// default use `?` as mark, do nothing
}

// flag of RETURNING sql. False by default; postgres overrides it to
// append a RETURNING clause for auto primary keys.
func (d *dbBase) HasReturningID(*modelInfo, *string) bool {
	return false
}

// convert time from db: rewrite t into the configured timezone.
func (d *dbBase) TimeFromDB(t *time.Time, tz *time.Location) {
	*t = t.In(tz)
}

// convert time to db: rewrite t into the configured timezone.
func (d *dbBase) TimeToDB(t *time.Time, tz *time.Location) {
	*t = t.In(tz)
}

// get database types. Nil in the base driver; each concrete driver
// returns its own column-type table.
func (d *dbBase) DbTypes() map[string]string {
	return nil
}

// get all tables: run the driver's ShowTablesQuery and collect the
// non-empty table names into a set.
func (d *dbBase) GetTables(db dbQuerier) (map[string]bool, error) {
	tables := make(map[string]bool)
	query := d.ins.ShowTablesQuery()
	rows, err := db.Query(query)
	if err != nil {
		return tables, err
	}

	defer rows.Close()

	for rows.Next() {
		var table string
		err := rows.Scan(&table)
		if err != nil {
			return tables, err
		}
		if table != "" {
			tables[table] = true
		}
	}

	return tables, nil
}

// get all columns in table: each entry maps a column name to the
// [name, type, nullable] triple reported by the driver's ShowColumnsQuery.
func (d *dbBase) GetColumns(db dbQuerier, table string) (map[string][3]string, error) {
	columns := make(map[string][3]string)
	query := d.ins.ShowColumnsQuery(table)
	rows, err := db.Query(query)
	if err != nil {
		return columns, err
	}

	defer rows.Close()

	for rows.Next() {
		var (
			name string
			typ  string
			null string
		)
		err := rows.Scan(&name, &typ, &null)
		if err != nil {
			return columns, err
		}
		columns[name] = [3]string{name, typ, null}
	}

	return columns, nil
}

// not implemented in the base driver.
func (d *dbBase) OperatorSQL(operator string) string {
	panic(ErrNotImplement)
}

// not implemented in the base driver.
func (d *dbBase) ShowTablesQuery() string {
	panic(ErrNotImplement)
}

// not implemented in the base driver.
func (d *dbBase) ShowColumnsQuery(table string) string {
	panic(ErrNotImplement)
}

// not implement.
func (d *dbBase) IndexExists(dbQuerier, string, string) bool {
	panic(ErrNotImplement)
}

// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package orm

import (
	"database/sql"
	"fmt"
	"reflect"
	"sync"
	"time"
)

// DriverType database driver constant int.
type DriverType int

// Enum the Database driver
const (
	_          DriverType = iota // int enum type
	DRMySQL                      // mysql
	DRSqlite                     // sqlite
	DROracle                     // oracle
	DRPostgres                   // pgsql
	DRTiDB                       // TiDB
)

// driver is the string alias name of a registered database.
type driver string

// Type returns the DriverType constant registered for this alias.
// NOTE(review): if the alias is not in the cache, get returns nil and
// this dereference panics — callers appear to use registered names only.
func (d driver) Type() DriverType {
	a, _ := dataBaseCache.get(string(d))
	return a.Driver
}

// Name returns the alias name of the current driver.
func (d driver) Name() string {
	return string(d)
}

// check driver is implemented Driver interface or not.
var _ Driver = new(driver)

var (
	// dataBaseCache holds every registered alias, keyed by alias name.
	dataBaseCache = &_dbCache{cache: make(map[string]*alias)}
	// drivers maps known driver names to their DriverType.
	drivers = map[string]DriverType{
		"mysql":    DRMySQL,
		"postgres": DRPostgres,
		"sqlite3":  DRSqlite,
		"tidb":     DRTiDB,
	}
	// dbBasers maps each DriverType to its dbBaser implementation.
	dbBasers = map[DriverType]dbBaser{
		DRMySQL:    newdbBaseMysql(),
		DRSqlite:   newdbBaseSqlite(),
		DROracle:   newdbBaseOracle(),
		DRPostgres: newdbBasePostgres(),
		DRTiDB:     newdbBaseTidb(),
	}
)

// database alias cacher: a mutex-guarded name -> *alias map.
type _dbCache struct {
	mux   sync.RWMutex
	cache map[string]*alias
}

// add stores a database alias under name; it reports false when the
// name is already taken (the existing alias is kept).
func (ac *_dbCache) add(name string, al *alias) (added bool) {
	ac.mux.Lock()
	defer ac.mux.Unlock()
	if _, ok := ac.cache[name]; ok == false {
		ac.cache[name] = al
		added = true
	}
	return
}

// get returns the cached alias for name, if registered.
func (ac *_dbCache) get(name string) (al *alias, ok bool) {
	ac.mux.RLock()
	defer ac.mux.RUnlock()
	al, ok = ac.cache[name]
	return
}

// getDefault returns the alias registered under "default", or nil.
func (ac *_dbCache) getDefault() (al *alias) {
	al, _ = ac.get("default")
	return
}

// alias carries everything known about one registered database:
// driver kind and name, DSN, pool limits, the opened *sql.DB, the
// dbBaser implementation, the detected timezone and (mysql) engine.
type alias struct {
	Name         string
	Driver       DriverType
	DriverName   string
	DataSource   string
	MaxIdleConns int
	MaxOpenConns int
	DB           *sql.DB
	DbBaser      dbBaser
	TZ           *time.Location
	Engine       string
}

// detectTZ queries the database for its timezone (and, for mysql, the
// default storage engine) and stores the results on the alias.
// Detection is deliberately best-effort: Scan errors are ignored and
// the timezone falls back to time.Local.
func detectTZ(al *alias) {
	// orm timezone system match database
	// default use Local
	al.TZ = time.Local

	if al.DriverName == "sphinx" {
		return
	}

	switch al.Driver {
	case DRMySQL:
		row := al.DB.QueryRow("SELECT TIMEDIFF(NOW(), UTC_TIMESTAMP)")
		var tz string
		row.Scan(&tz)
		if len(tz) >= 8 {
			if tz[0] != '-' {
				tz = "+" + tz
			}
			t, err := time.Parse("-07:00:00", tz)
			if err == nil {
				al.TZ = t.Location()
			} else {
				DebugLog.Printf("Detect DB timezone: %s %s\n", tz, err.Error())
			}
		}

		// get default engine from current database
		row = al.DB.QueryRow("SELECT ENGINE, TRANSACTIONS FROM information_schema.engines WHERE SUPPORT = 'DEFAULT'")
		var engine string
		var tx bool
		row.Scan(&engine, &tx)

		if engine != "" {
			al.Engine = engine
		} else {
			al.Engine = "INNODB"
		}

	case DRSqlite:
		al.TZ = time.UTC

	case DRPostgres:
		row := al.DB.QueryRow("SELECT current_setting('TIMEZONE')")
		var tz string
		row.Scan(&tz)
		loc, err := time.LoadLocation(tz)
		if err == nil {
			al.TZ = loc
		} else {
			DebugLog.Printf("Detect DB timezone: %s %s\n", tz, err.Error())
		}
	}
}

// addAliasWthDB registers an already-opened *sql.DB under aliasName.
// It fails when the driver name is unknown, the Ping fails, or the
// alias name is already registered.
func addAliasWthDB(aliasName, driverName string, db *sql.DB) (*alias, error) {
	al := new(alias)
	al.Name = aliasName
	al.DriverName = driverName
	al.DB = db

	if dr, ok := drivers[driverName]; ok {
		al.DbBaser = dbBasers[dr]
		al.Driver = dr
	} else {
		return nil, fmt.Errorf("driver name `%s` have not registered", driverName)
	}

	err := db.Ping()
	if err != nil {
		return nil, fmt.Errorf("register db Ping `%s`, %s", aliasName, err.Error())
	}

	if dataBaseCache.add(aliasName, al) == false {
		return nil, fmt.Errorf("DataBase alias name `%s` already registered, cannot reuse", aliasName)
	}

	return al, nil
}

// AddAliasWthDB add a aliasName for the drivename
func AddAliasWthDB(aliasName, driverName string, db *sql.DB) error {
	_, err := addAliasWthDB(aliasName, driverName, db)
	return err
}

// RegisterDataBase Setting the database connect params. Use the database driver self dataSource args.
func RegisterDataBase(aliasName, driverName, dataSource string, params ...int) error {
	var (
		err error
		db  *sql.DB
		al  *alias
	)

	db, err = sql.Open(driverName, dataSource)
	if err != nil {
		err = fmt.Errorf("register db `%s`, %s", aliasName, err.Error())
		goto end
	}

	al, err = addAliasWthDB(aliasName, driverName, db)
	if err != nil {
		goto end
	}

	al.DataSource = dataSource

	detectTZ(al)

	// optional variadic params: params[0] = max idle conns,
	// params[1] = max open conns
	for i, v := range params {
		switch i {
		case 0:
			SetMaxIdleConns(al.Name, v)
		case 1:
			SetMaxOpenConns(al.Name, v)
		}
	}

end:
	// on any failure, close the half-opened connection and log the error
	if err != nil {
		if db != nil {
			db.Close()
		}
		DebugLog.Println(err.Error())
	}

	return err
}

// RegisterDriver Register a database driver use specify driver name, this can be definition the driver is which database type.
func RegisterDriver(driverName string, typ DriverType) error {
	if t, ok := drivers[driverName]; ok == false {
		drivers[driverName] = typ
	} else {
		// re-registering with the same type is a no-op; a different type is an error
		if t != typ {
			return fmt.Errorf("driverName `%s` db driver already registered and is other type\n", driverName)
		}
	}
	return nil
}

// SetDataBaseTZ Change the database default used timezone
func SetDataBaseTZ(aliasName string, tz *time.Location) error {
	if al, ok := dataBaseCache.get(aliasName); ok {
		al.TZ = tz
	} else {
		return fmt.Errorf("DataBase alias name `%s` not registered\n", aliasName)
	}
	return nil
}

// SetMaxIdleConns Change the max idle conns for *sql.DB, use specify database alias name
func SetMaxIdleConns(aliasName string, maxIdleConns int) {
	al := getDbAlias(aliasName)
	al.MaxIdleConns = maxIdleConns
	al.DB.SetMaxIdleConns(maxIdleConns)
}

// SetMaxOpenConns Change the max open conns for *sql.DB, use specify database alias name
func SetMaxOpenConns(aliasName string, maxOpenConns int) {
	al := getDbAlias(aliasName)
	al.MaxOpenConns = maxOpenConns
	// for tip go 1.2
	// sql.DB.SetMaxOpenConns only exists since go 1.2, so it is called
	// via reflection to keep the package building on older toolchains.
	if fun := reflect.ValueOf(al.DB).MethodByName("SetMaxOpenConns"); fun.IsValid() {
		fun.Call([]reflect.Value{reflect.ValueOf(maxOpenConns)})
	}
}

// GetDB Get *sql.DB from registered database by db alias name.
// Use "default" as alias name if you not set.
func GetDB(aliasNames ...string) (*sql.DB, error) {
	var name string
	if len(aliasNames) > 0 {
		name = aliasNames[0]
	} else {
		name = "default"
	}
	al, ok := dataBaseCache.get(name)
	if ok {
		return al.DB, nil
	}
	return nil, fmt.Errorf("DataBase of alias name `%s` not found\n", name)
}

// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package orm

import (
	"fmt"
)

// mysql operators; the BINARY keyword makes LIKE case-sensitive, so the
// i-prefixed variants simply drop it.
var mysqlOperators = map[string]string{
	"exact":    "= ?",
	"iexact":   "LIKE ?",
	"contains": "LIKE BINARY ?",
	"icontains": "LIKE ?",
	// "regex":       "REGEXP BINARY ?",
	// "iregex":      "REGEXP ?",
	"gt":          "> ?",
	"gte":         ">= ?",
	"lt":          "< ?",
	"lte":         "<= ?",
	"eq":          "= ?",
	"ne":          "!= ?",
	"startswith":  "LIKE BINARY ?",
	"endswith":    "LIKE BINARY ?",
	"istartswith": "LIKE ?",
	"iendswith":   "LIKE ?",
}

// mysql column field types.
+var mysqlTypes = map[string]string{ + "auto": "AUTO_INCREMENT NOT NULL PRIMARY KEY", + "pk": "NOT NULL PRIMARY KEY", + "bool": "bool", + "string": "varchar(%d)", + "string-text": "longtext", + "time.Time-date": "date", + "time.Time": "datetime", + "int8": "tinyint", + "int16": "smallint", + "int32": "integer", + "int64": "bigint", + "uint8": "tinyint unsigned", + "uint16": "smallint unsigned", + "uint32": "integer unsigned", + "uint64": "bigint unsigned", + "float64": "double precision", + "float64-decimal": "numeric(%d, %d)", +} + +// mysql dbBaser implementation. +type dbBaseMysql struct { + dbBase +} + +var _ dbBaser = new(dbBaseMysql) + +// get mysql operator. +func (d *dbBaseMysql) OperatorSQL(operator string) string { + return mysqlOperators[operator] +} + +// get mysql table field types. +func (d *dbBaseMysql) DbTypes() map[string]string { + return mysqlTypes +} + +// show table sql for mysql. +func (d *dbBaseMysql) ShowTablesQuery() string { + return "SELECT table_name FROM information_schema.tables WHERE table_type = 'BASE TABLE' AND table_schema = DATABASE()" +} + +// show columns sql of table for mysql. +func (d *dbBaseMysql) ShowColumnsQuery(table string) string { + return fmt.Sprintf("SELECT COLUMN_NAME, COLUMN_TYPE, IS_NULLABLE FROM information_schema.columns "+ + "WHERE table_schema = DATABASE() AND table_name = '%s'", table) +} + +// execute sql to check index exist. +func (d *dbBaseMysql) IndexExists(db dbQuerier, table string, name string) bool { + row := db.QueryRow("SELECT count(*) FROM information_schema.statistics "+ + "WHERE table_schema = DATABASE() AND table_name = ? AND index_name = ?", table, name) + var cnt int + row.Scan(&cnt) + return cnt > 0 +} + +// create new mysql dbBaser. 
+func newdbBaseMysql() dbBaser { + b := new(dbBaseMysql) + b.ins = b + return b +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_oracle.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_oracle.go new file mode 100644 index 000000000..1e385c9a4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_oracle.go @@ -0,0 +1,29 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package orm + +// oracle dbBaser +type dbBaseOracle struct { + dbBase +} + +var _ dbBaser = new(dbBaseOracle) + +// create oracle dbBaser. +func newdbBaseOracle() dbBaser { + b := new(dbBaseOracle) + b.ins = b + return b +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_postgres.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_postgres.go new file mode 100644 index 000000000..7dbef95a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_postgres.go @@ -0,0 +1,165 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package orm

import (
	"fmt"
	"strconv"
)

// postgresql operators; the i-prefixed variants are case-insensitive and
// rely on GenerateOperatorLeftCol wrapping the column in UPPER(...).
var postgresOperators = map[string]string{
	"exact":       "= ?",
	"iexact":      "= UPPER(?)",
	"contains":    "LIKE ?",
	"icontains":   "LIKE UPPER(?)",
	"gt":          "> ?",
	"gte":         ">= ?",
	"lt":          "< ?",
	"lte":         "<= ?",
	"eq":          "= ?",
	"ne":          "!= ?",
	"startswith":  "LIKE ?",
	"endswith":    "LIKE ?",
	"istartswith": "LIKE UPPER(?)",
	"iendswith":   "LIKE UPPER(?)",
}

// postgresql column field types; %COL% is substituted with the column
// name when the CHECK constraint is emitted.
// NOTE(review): the int8 CHECK range (-127..128) looks transposed versus
// int8's actual range (-128..127) — confirm against upstream before changing.
var postgresTypes = map[string]string{
	"auto":            "serial NOT NULL PRIMARY KEY",
	"pk":              "NOT NULL PRIMARY KEY",
	"bool":            "bool",
	"string":          "varchar(%d)",
	"string-text":     "text",
	"time.Time-date":  "date",
	"time.Time":       "timestamp with time zone",
	"int8":            `smallint CHECK("%COL%" >= -127 AND "%COL%" <= 128)`,
	"int16":           "smallint",
	"int32":           "integer",
	"int64":           "bigint",
	"uint8":           `smallint CHECK("%COL%" >= 0 AND "%COL%" <= 255)`,
	"uint16":          `integer CHECK("%COL%" >= 0)`,
	"uint32":          `bigint CHECK("%COL%" >= 0)`,
	"uint64":          `bigint CHECK("%COL%" >= 0)`,
	"float64":         "double precision",
	"float64-decimal": "numeric(%d, %d)",
}

// postgresql dbBaser implementation.
type dbBasePostgres struct {
	dbBase
}

var _ dbBaser = new(dbBasePostgres)

// get postgresql operator sql fragment.
func (d *dbBasePostgres) OperatorSQL(operator string) string {
	return postgresOperators[operator]
}

// generate functioned sql string, such as contains(text).
+func (d *dbBasePostgres) GenerateOperatorLeftCol(fi *fieldInfo, operator string, leftCol *string) { + switch operator { + case "contains", "startswith", "endswith": + *leftCol = fmt.Sprintf("%s::text", *leftCol) + case "iexact", "icontains", "istartswith", "iendswith": + *leftCol = fmt.Sprintf("UPPER(%s::text)", *leftCol) + } +} + +// postgresql unsupports updating joined record. +func (d *dbBasePostgres) SupportUpdateJoin() bool { + return false +} + +func (d *dbBasePostgres) MaxLimit() uint64 { + return 0 +} + +// postgresql quote is ". +func (d *dbBasePostgres) TableQuote() string { + return `"` +} + +// postgresql value placeholder is $n. +// replace default ? to $n. +func (d *dbBasePostgres) ReplaceMarks(query *string) { + q := *query + num := 0 + for _, c := range q { + if c == '?' { + num++ + } + } + if num == 0 { + return + } + data := make([]byte, 0, len(q)+num) + num = 1 + for i := 0; i < len(q); i++ { + c := q[i] + if c == '?' { + data = append(data, '$') + data = append(data, []byte(strconv.Itoa(num))...) + num++ + } else { + data = append(data, c) + } + } + *query = string(data) +} + +// make returning sql support for postgresql. +func (d *dbBasePostgres) HasReturningID(mi *modelInfo, query *string) (has bool) { + if mi.fields.pk.auto { + if query != nil { + *query = fmt.Sprintf(`%s RETURNING "%s"`, *query, mi.fields.pk.column) + } + has = true + } + return +} + +// show table sql for postgresql. +func (d *dbBasePostgres) ShowTablesQuery() string { + return "SELECT table_name FROM information_schema.tables WHERE table_type = 'BASE TABLE' AND table_schema NOT IN ('pg_catalog', 'information_schema')" +} + +// show table columns sql for postgresql. +func (d *dbBasePostgres) ShowColumnsQuery(table string) string { + return fmt.Sprintf("SELECT column_name, data_type, is_nullable FROM information_schema.columns where table_schema NOT IN ('pg_catalog', 'information_schema') and table_name = '%s'", table) +} + +// get column types of postgresql. 
func (d *dbBasePostgres) DbTypes() map[string]string {
	return postgresTypes
}

// check index exist in postgresql via pg_indexes.
// NOTE(review): table/name are interpolated directly into the SQL; they
// come from model registration, not user input — confirm before reusing
// this with external values.
func (d *dbBasePostgres) IndexExists(db dbQuerier, table string, name string) bool {
	query := fmt.Sprintf("SELECT COUNT(*) FROM pg_indexes WHERE tablename = '%s' AND indexname = '%s'", table, name)
	row := db.QueryRow(query)
	var cnt int
	row.Scan(&cnt)
	return cnt > 0
}

// create new postgresql dbBaser with its self-reference (ins) wired.
func newdbBasePostgres() dbBaser {
	b := new(dbBasePostgres)
	b.ins = b
	return b
}

// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package orm

import (
	"database/sql"
	"fmt"
)

// sqlite operators; ESCAPE '\' makes the backslash the escape character
// for LIKE patterns. sqlite's LIKE is case-insensitive for ASCII, so the
// i-prefixed variants are identical.
var sqliteOperators = map[string]string{
	"exact":       "= ?",
	"iexact":      "LIKE ? ESCAPE '\\'",
	"contains":    "LIKE ? ESCAPE '\\'",
	"icontains":   "LIKE ? ESCAPE '\\'",
	"gt":          "> ?",
	"gte":         ">= ?",
	"lt":          "< ?",
	"lte":         "<= ?",
	"eq":          "= ?",
	"ne":          "!= ?",
	"startswith":  "LIKE ? ESCAPE '\\'",
	"endswith":    "LIKE ? ESCAPE '\\'",
	"istartswith": "LIKE ? ESCAPE '\\'",
	"iendswith":   "LIKE ? ESCAPE '\\'",
}

// sqlite column types.
var sqliteTypes = map[string]string{
	"auto":            "integer NOT NULL PRIMARY KEY AUTOINCREMENT",
	"pk":              "NOT NULL PRIMARY KEY",
	"bool":            "bool",
	"string":          "varchar(%d)",
	"string-text":     "text",
	"time.Time-date":  "date",
	"time.Time":       "datetime",
	"int8":            "tinyint",
	"int16":           "smallint",
	"int32":           "integer",
	"int64":           "bigint",
	"uint8":           "tinyint unsigned",
	"uint16":          "smallint unsigned",
	"uint32":          "integer unsigned",
	"uint64":          "bigint unsigned",
	"float64":         "real",
	"float64-decimal": "decimal",
}

// sqlite dbBaser implementation.
type dbBaseSqlite struct {
	dbBase
}

var _ dbBaser = new(dbBaseSqlite)

// get sqlite operator sql fragment.
func (d *dbBaseSqlite) OperatorSQL(operator string) string {
	return sqliteOperators[operator]
}

// generate functioned sql for sqlite.
// only date fields need rewriting: the column is wrapped in DATE(text).
func (d *dbBaseSqlite) GenerateOperatorLeftCol(fi *fieldInfo, operator string, leftCol *string) {
	if fi.fieldType == TypeDateField {
		*leftCol = fmt.Sprintf("DATE(%s)", *leftCol)
	}
}

// unable updating joined record in sqlite.
func (d *dbBaseSqlite) SupportUpdateJoin() bool {
	return false
}

// max int in sqlite (max int64).
func (d *dbBaseSqlite) MaxLimit() uint64 {
	return 9223372036854775807
}

// get column types in sqlite.
func (d *dbBaseSqlite) DbTypes() map[string]string {
	return sqliteTypes
}

// get show tables sql in sqlite (reads the sqlite_master catalog).
func (d *dbBaseSqlite) ShowTablesQuery() string {
	return "SELECT name FROM sqlite_master WHERE type = 'table'"
}

// get columns in sqlite.
+func (d *dbBaseSqlite) GetColumns(db dbQuerier, table string) (map[string][3]string, error) { + query := d.ins.ShowColumnsQuery(table) + rows, err := db.Query(query) + if err != nil { + return nil, err + } + + columns := make(map[string][3]string) + for rows.Next() { + var tmp, name, typ, null sql.NullString + err := rows.Scan(&tmp, &name, &typ, &null, &tmp, &tmp) + if err != nil { + return nil, err + } + columns[name.String] = [3]string{name.String, typ.String, null.String} + } + + return columns, nil +} + +// get show columns sql in sqlite. +func (d *dbBaseSqlite) ShowColumnsQuery(table string) string { + return fmt.Sprintf("pragma table_info('%s')", table) +} + +// check index exist in sqlite. +func (d *dbBaseSqlite) IndexExists(db dbQuerier, table string, name string) bool { + query := fmt.Sprintf("PRAGMA index_list('%s')", table) + rows, err := db.Query(query) + if err != nil { + panic(err) + } + defer rows.Close() + for rows.Next() { + var tmp, index sql.NullString + rows.Scan(&tmp, &index, &tmp) + if name == index.String { + return true + } + } + return false +} + +// create new sqlite dbBaser. +func newdbBaseSqlite() dbBaser { + b := new(dbBaseSqlite) + b.ins = b + return b +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_tables.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_tables.go new file mode 100644 index 000000000..e4c74acee --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_tables.go @@ -0,0 +1,476 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package orm

import (
	"fmt"
	"strings"
	"time"
)

// dbTable describes one joined table in a query: its alias index
// ("T1", "T2", ...), the relation path that reached it, whether it is
// selected, whether the join is INNER, and the previous table in the
// join chain (jtl).
type dbTable struct {
	id    int
	index string
	name  string
	names []string
	sel   bool
	inner bool
	mi    *modelInfo
	fi    *fieldInfo
	jtl   *dbTable
}

// dbTables collects every table participating in one query, keyed by
// the relation path name, plus the root model and the dbBaser in use.
type dbTables struct {
	tablesM map[string]*dbTable
	tables  []*dbTable
	mi      *modelInfo
	base    dbBaser
	skipEnd bool
}

// set records table info for the given relation path.
// An existing entry is updated in place; otherwise a new table with the
// next alias index (T1, T2, ...) is created.
func (t *dbTables) set(names []string, mi *modelInfo, fi *fieldInfo, inner bool) *dbTable {
	name := strings.Join(names, ExprSep)
	if j, ok := t.tablesM[name]; ok {
		j.name = name
		j.mi = mi
		j.fi = fi
		j.inner = inner
	} else {
		i := len(t.tables) + 1
		jt := &dbTable{i, fmt.Sprintf("T%d", i), name, names, false, inner, mi, fi, nil}
		t.tablesM[name] = jt
		t.tables = append(t.tables, jt)
	}
	return t.tablesM[name]
}

// add records table info for the given relation path only if it is not
// already present; the bool result reports whether a new entry was made.
func (t *dbTables) add(names []string, mi *modelInfo, fi *fieldInfo, inner bool) (*dbTable, bool) {
	name := strings.Join(names, ExprSep)
	if _, ok := t.tablesM[name]; ok == false {
		i := len(t.tables) + 1
		jt := &dbTable{i, fmt.Sprintf("T%d", i), name, names, false, inner, mi, fi, nil}
		t.tablesM[name] = jt
		t.tables = append(t.tables, jt)
		return jt, true
	}
	return t.tablesM[name], false
}

// get looks up the table info recorded for a relation path name.
func (t *dbTables) get(name string) (*dbTable, bool) {
	j, ok := t.tablesM[name]
	return j, ok
}

// get related fields info in recursive depth loop.
// loop once, depth decreases one.
// loopDepth walks the relation graph starting at fi, appending each
// reachable relation path (joined with ExprSep) to related, down to the
// given depth. Many-to-many relations terminate the walk.
func (t *dbTables) loopDepth(depth int, prefix string, fi *fieldInfo, related []string) []string {
	if depth < 0 || fi.fieldType == RelManyToMany {
		return related
	}

	if prefix == "" {
		prefix = fi.name
	} else {
		prefix = prefix + ExprSep + fi.name
	}
	related = append(related, prefix)

	depth--
	for _, fi := range fi.relModelInfo.fields.fieldsRel {
		related = t.loopDepth(depth, prefix, fi, related)
	}

	return related
}

// parseRelated registers join tables for the requested related paths.
// When explicit rels are given, auto-discovery depth is disabled
// (relDepth becomes 0); otherwise relations are discovered up to depth.
// Nullable links (or skipEnd mode) downgrade the join to LEFT OUTER.
func (t *dbTables) parseRelated(rels []string, depth int) {

	relsNum := len(rels)
	related := make([]string, relsNum)
	copy(related, rels)

	relDepth := depth

	if relsNum != 0 {
		relDepth = 0
	}

	relDepth--
	for _, fi := range t.mi.fields.fieldsRel {
		related = t.loopDepth(relDepth, "", fi, related)
	}

	for i, s := range related {
		var (
			exs    = strings.Split(s, ExprSep)
			names  = make([]string, 0, len(exs))
			mmi    = t.mi
			cancel = true
			jtl    *dbTable
		)

		inner := true

		for _, ex := range exs {
			if fi, ok := mmi.fields.GetByAny(ex); ok && fi.rel && fi.fieldType != RelManyToMany {
				names = append(names, fi.name)
				mmi = fi.relModelInfo

				if fi.null || t.skipEnd {
					inner = false
				}

				jt := t.set(names, mmi, fi, inner)
				jt.jtl = jtl

				// reverse links cancel selection for the rest of the path
				if fi.reverse {
					cancel = false
				}

				if cancel {
					jt.sel = depth > 0

					// explicitly requested paths are always selected
					if i < relsNum {
						jt.sel = true
					}
				}

				jtl = jt

			} else {
				panic(fmt.Errorf("unknown model/table name `%s`", ex))
			}
		}
	}
}

// generate join string.
// getJoinSQL renders the JOIN clauses for every registered table, in
// registration order. t1/c1 refer to the parent table side of the join
// and t2/c2 to the joined table side.
func (t *dbTables) getJoinSQL() (join string) {
	Q := t.base.TableQuote()

	for _, jt := range t.tables {
		if jt.inner {
			join += "INNER JOIN "
		} else {
			join += "LEFT OUTER JOIN "
		}
		var (
			table  string
			t1, t2 string
			c1, c2 string
		)
		// parent alias: T0 for the root model, otherwise the previous
		// table in the join chain
		t1 = "T0"
		if jt.jtl != nil {
			t1 = jt.jtl.index
		}
		t2 = jt.index
		table = jt.mi.table

		switch {
		case jt.fi.fieldType == RelManyToMany || jt.fi.fieldType == RelReverseMany || jt.fi.reverse && jt.fi.reverseFieldInfo.fieldType == RelManyToMany:
			// m2m / reverse-many: join the through table's FK back to our pk
			c1 = jt.fi.mi.fields.pk.column
			for _, ffi := range jt.mi.fields.fieldsRel {
				if jt.fi.mi == ffi.relModelInfo {
					c2 = ffi.column
					break
				}
			}
		default:
			// forward FK: our column -> related pk; reverse FK swaps sides
			c1 = jt.fi.column
			c2 = jt.fi.relModelInfo.fields.pk.column

			if jt.fi.reverse {
				c1 = jt.mi.fields.pk.column
				c2 = jt.fi.reverseFieldInfo.column
			}
		}

		join += fmt.Sprintf("%s%s%s %s ON %s.%s%s%s = %s.%s%s%s ", Q, table, Q, t2,
			t2, Q, c2, Q, t1, Q, c1, Q)
	}
	return
}

// parse orm model struct field tag expression.
// parseExprs resolves a field expression path (e.g. ["user", "profile",
// "age"]) against the model, registering any join tables it crosses.
// It returns the table alias index ("T0", "T1", ...), the resolved path
// name, the final field info, and whether resolution succeeded.
func (t *dbTables) parseExprs(mi *modelInfo, exprs []string) (index, name string, info *fieldInfo, success bool) {
	var (
		jtl *dbTable
		fi  *fieldInfo
		fiN *fieldInfo
		mmi = mi
	)

	num := len(exprs) - 1
	var names []string

	inner := true

loopFor:
	for i, ex := range exprs {

		var ok, okN bool

		// fiN is the field looked ahead on the previous iteration
		if fiN != nil {
			fi = fiN
			ok = true
			fiN = nil
		}

		if i == 0 {
			fi, ok = mmi.fields.GetByAny(ex)
		}

		_ = okN

		if ok {

			isRel := fi.rel || fi.reverse

			names = append(names, fi.name)

			// descend into the related model (through-model for m2m)
			switch {
			case fi.rel:
				mmi = fi.relModelInfo
				if fi.fieldType == RelManyToMany {
					mmi = fi.relThroughModelInfo
				}
			case fi.reverse:
				mmi = fi.reverseFieldInfo.mi
			}

			// look ahead one segment so join decisions can consider it
			if i < num {
				fiN, okN = mmi.fields.GetByAny(exprs[i+1])
			}

			if isRel && (fi.mi.isThrough == false || num != i) {
				if fi.null || t.skipEnd {
					inner = false
				}

				if t.skipEnd && okN || !t.skipEnd {
					// a trailing pk in skipEnd mode needs no extra join
					if t.skipEnd && okN && fiN.pk {
						goto loopEnd
					}

					jt, _ := t.add(names, mmi, fi, inner)
					jt.jtl = jtl
					jtl = jt
				}

			}

			if num != i {
				continue
			}

		loopEnd:

			// last segment reached: fix up the result index/name/info
			if i == 0 || jtl == nil {
				index = "T0"
			} else {
				index = jtl.index
			}

			info = fi

			if jtl == nil {
				name = fi.name
			} else {
				name = jtl.name + ExprSep + fi.name
			}

			switch {
			case fi.rel:

			case fi.reverse:
				// a reverse one-to-one / FK resolves to the related pk
				switch fi.reverseFieldInfo.fieldType {
				case RelOneToOne, RelForeignKey:
					index = jtl.index
					info = fi.reverseFieldInfo.mi.fields.pk
					name = info.name
				}
			}

			break loopFor

		} else {
			index = ""
			name = ""
			info = nil
			success = false
			return
		}
	}

	success = index != "" && info != nil
	return
}

// generate condition sql.
// getCondSQL renders a Condition tree into a WHERE clause and its bound
// parameters. sub=true means this call is rendering a nested (parenthesized)
// sub-condition, so the leading "WHERE " is omitted. Panics on an
// unresolvable field expression (matching the ORM's registration-time
// fail-fast style).
func (t *dbTables) getCondSQL(cond *Condition, sub bool, tz *time.Location) (where string, params []interface{}) {
	if cond == nil || cond.IsEmpty() {
		return
	}

	Q := t.base.TableQuote()

	mi := t.mi

	for i, p := range cond.params {
		// Connectors only between terms, never before the first one.
		if i > 0 {
			if p.isOr {
				where += "OR "
			} else {
				where += "AND "
			}
		}
		if p.isNot {
			where += "NOT "
		}
		if p.isCond {
			// Nested condition: recurse and wrap in parentheses.
			w, ps := t.getCondSQL(p.cond, true, tz)
			if w != "" {
				w = fmt.Sprintf("( %s) ", w)
			}
			where += w
			params = append(params, ps...)
		} else {
			exprs := p.exprs

			// A trailing known operator ("exact", "in", ...) is split off;
			// the rest of the expression names the column.
			num := len(exprs) - 1
			operator := ""
			if operators[exprs[num]] {
				operator = exprs[num]
				exprs = exprs[:num]
			}

			index, _, fi, suc := t.parseExprs(mi, exprs)
			if suc == false {
				panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(p.exprs, ExprSep)))
			}

			if operator == "" {
				operator = "exact"
			}

			operSQL, args := t.base.GenerateOperatorSQL(mi, fi, operator, p.args, tz)

			leftCol := fmt.Sprintf("%s.%s%s%s", index, Q, fi.column, Q)
			// Driver hook: lets a backend wrap the column (e.g. for case folding).
			t.base.GenerateOperatorLeftCol(fi, operator, &leftCol)

			where += fmt.Sprintf("%s %s ", leftCol, operSQL)
			params = append(params, args...)

		}
	}

	if sub == false && where != "" {
		where = "WHERE " + where
	}

	return
}

// getGroupSQL renders a GROUP BY clause from field expressions; returns ""
// for an empty group list. Panics on an unknown field expression.
func (t *dbTables) getGroupSQL(groups []string) (groupSQL string) {
	if len(groups) == 0 {
		return
	}

	Q := t.base.TableQuote()

	groupSqls := make([]string, 0, len(groups))
	for _, group := range groups {
		exprs := strings.Split(group, ExprSep)

		index, _, fi, suc := t.parseExprs(t.mi, exprs)
		if suc == false {
			panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(exprs, ExprSep)))
		}

		groupSqls = append(groupSqls, fmt.Sprintf("%s.%s%s%s", index, Q, fi.column, Q))
	}

	groupSQL = fmt.Sprintf("GROUP BY %s ", strings.Join(groupSqls, ", "))
	return
}

// generate order sql.
// getOrderSQL renders an ORDER BY clause. A leading '-' on an expression
// means descending order. Panics on an unknown field expression.
// NOTE(review): order[0] indexes without a length check, so an empty order
// string would panic here — presumably callers never pass one; confirm.
func (t *dbTables) getOrderSQL(orders []string) (orderSQL string) {
	if len(orders) == 0 {
		return
	}

	Q := t.base.TableQuote()

	orderSqls := make([]string, 0, len(orders))
	for _, order := range orders {
		asc := "ASC"
		if order[0] == '-' {
			asc = "DESC"
			order = order[1:]
		}
		exprs := strings.Split(order, ExprSep)

		index, _, fi, suc := t.parseExprs(t.mi, exprs)
		if suc == false {
			panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(exprs, ExprSep)))
		}

		orderSqls = append(orderSqls, fmt.Sprintf("%s.%s%s%s %s", index, Q, fi.column, Q, asc))
	}

	orderSQL = fmt.Sprintf("ORDER BY %s ", strings.Join(orderSqls, ", "))
	return
}

// getLimitSQL renders the LIMIT/OFFSET clause.
// limit == 0 falls back to DefaultRowsLimit; limit < 0 means "no limit",
// in which case an offset still needs a driver-specific huge LIMIT
// (MaxLimit) unless the driver supports a bare OFFSET (MaxLimit == 0).
func (t *dbTables) getLimitSQL(mi *modelInfo, offset int64, limit int64) (limits string) {
	if limit == 0 {
		limit = int64(DefaultRowsLimit)
	}
	if limit < 0 {
		// no limit
		if offset > 0 {
			maxLimit := t.base.MaxLimit()
			if maxLimit == 0 {
				limits = fmt.Sprintf("OFFSET %d", offset)
			} else {
				limits = fmt.Sprintf("LIMIT %d OFFSET %d", maxLimit, offset)
			}
		}
	} else if offset <= 0 {
		limits = fmt.Sprintf("LIMIT %d", limit)
	} else {
		limits = fmt.Sprintf("LIMIT %d OFFSET %d", limit, offset)
	}
	return
}

// create new tables collection.
func newDbTables(mi *modelInfo, base dbBaser) *dbTables {
	tables := &dbTables{}
	tables.tablesM = make(map[string]*dbTable)
	tables.mi = mi
	tables.base = base
	return tables
}
diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_tidb.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_tidb.go
new file mode 100644
index 000000000..6020a488f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_tidb.go
@@ -0,0 +1,63 @@
// Copyright 2015 TiDB Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package orm

import (
	"fmt"
)

// dbBaseTidb is the dbBaser implementation for TiDB. TiDB speaks the MySQL
// wire protocol, so this driver reuses the mysql operator/type tables.
type dbBaseTidb struct {
	dbBase
}

// compile-time check that dbBaseTidb satisfies dbBaser.
var _ dbBaser = new(dbBaseTidb)

// OperatorSQL returns the SQL fragment for an ORM operator (MySQL dialect).
func (d *dbBaseTidb) OperatorSQL(operator string) string {
	return mysqlOperators[operator]
}

// DbTypes returns the column type map (MySQL dialect, shared with TiDB).
func (d *dbBaseTidb) DbTypes() map[string]string {
	return mysqlTypes
}

// ShowTablesQuery returns the SQL that lists base tables of the current schema.
func (d *dbBaseTidb) ShowTablesQuery() string {
	return "SELECT table_name FROM information_schema.tables WHERE table_type = 'BASE TABLE' AND table_schema = DATABASE()"
}

// ShowColumnsQuery returns the SQL that lists columns of table.
// NOTE(review): the table name is interpolated with Sprintf rather than
// bound as a parameter — presumably table names only come from registered
// models, never user input; verify before reusing elsewhere.
func (d *dbBaseTidb) ShowColumnsQuery(table string) string {
	return fmt.Sprintf("SELECT COLUMN_NAME, COLUMN_TYPE, IS_NULLABLE FROM information_schema.columns "+
		"WHERE table_schema = DATABASE() AND table_name = '%s'", table)
}

// IndexExists reports whether an index named name exists on table.
// A Scan error leaves cnt at 0, which reads as "index absent".
func (d *dbBaseTidb) IndexExists(db dbQuerier, table string, name string) bool {
	row := db.QueryRow("SELECT count(*) FROM information_schema.statistics "+
		"WHERE table_schema = DATABASE() AND table_name = ? AND index_name = ?", table, name)
	var cnt int
	row.Scan(&cnt)
	return cnt > 0
}

// create new TiDB dbBaser.
// newdbBaseTidb constructs the TiDB dbBaser; b.ins points back at b so the
// embedded dbBase dispatches overridden methods to this driver.
func newdbBaseTidb() dbBaser {
	b := new(dbBaseTidb)
	b.ins = b
	return b
}
diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_utils.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_utils.go
new file mode 100644
index 000000000..ae9b1625d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/db_utils.go
@@ -0,0 +1,163 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package orm

import (
	"fmt"
	"reflect"
	"time"
)

// getDbAlias looks up a registered database alias by name, panicking if it
// was never registered (fail-fast, matching the package's registration style).
func getDbAlias(name string) *alias {
	if al, ok := dataBaseCache.get(name); ok {
		return al
	}
	panic(fmt.Errorf("unknown DataBase alias name %s", name))
}

// getExistPk reads the primary-key field out of a model instance.
// exist reports whether the pk holds a non-zero value (> 0 for integers,
// non-empty for strings), i.e. whether the row plausibly exists already.
func getExistPk(mi *modelInfo, ind reflect.Value) (column string, value interface{}, exist bool) {
	fi := mi.fields.pk

	v := ind.Field(fi.fieldIndex)
	if fi.fieldType&IsPostiveIntegerField > 0 {
		vu := v.Uint()
		exist = vu > 0
		value = vu
	} else if fi.fieldType&IsIntegerField > 0 {
		vu := v.Int()
		exist = vu > 0
		value = vu
	} else {
		vu := v.String()
		exist = vu != ""
		value = vu
	}

	column = fi.column
	return
}

// get fields description as flattened string.
// getFlatParams normalizes query arguments into driver-friendly scalar
// values: pointers are dereferenced, numeric kinds collapsed to int64/uint64/
// float64, slices flattened recursively, time values formatted according to
// the target field type, and model structs replaced by their primary key.
func getFlatParams(fi *fieldInfo, args []interface{}, tz *time.Location) (params []interface{}) {

outFor:
	for _, arg := range args {
		val := reflect.ValueOf(arg)

		// nil passes through untouched (becomes SQL NULL downstream).
		if arg == nil {
			params = append(params, arg)
			continue
		}

		kind := val.Kind()
		if kind == reflect.Ptr {
			val = val.Elem()
			kind = val.Kind()
			arg = val.Interface()
		}

		switch kind {
		case reflect.String:
			v := val.String()
			if fi != nil {
				if fi.fieldType == TypeDateField || fi.fieldType == TypeDateTimeField {
					// Re-parse the string and re-format it in the target
					// timezone; >=19 chars is treated as a full datetime,
					// otherwise just a date. Parse failures leave v as-is.
					var t time.Time
					var err error
					if len(v) >= 19 {
						s := v[:19]
						t, err = time.ParseInLocation(formatDateTime, s, DefaultTimeLoc)
					} else {
						s := v
						if len(v) > 10 {
							s = v[:10]
						}
						t, err = time.ParseInLocation(formatDate, s, tz)
					}
					if err == nil {
						if fi.fieldType == TypeDateField {
							v = t.In(tz).Format(formatDate)
						} else {
							v = t.In(tz).Format(formatDateTime)
						}
					}
				}
			}
			arg = v
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			arg = val.Int()
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			arg = val.Uint()
		case reflect.Float32:
			// Round-trip through string so the float32 prints cleanly as float64.
			arg, _ = StrTo(ToStr(arg)).Float64()
		case reflect.Float64:
			arg = val.Float()
		case reflect.Bool:
			arg = val.Bool()
		case reflect.Slice, reflect.Array:
			// []byte is a blob, not a list of params — keep it whole.
			if _, ok := arg.([]byte); ok {
				continue outFor
			}

			// Flatten the elements recursively, skipping untyped nils.
			var args []interface{}
			for i := 0; i < val.Len(); i++ {
				v := val.Index(i)

				var vu interface{}
				if v.CanInterface() {
					vu = v.Interface()
				}

				if vu == nil {
					continue
				}

				args = append(args, vu)
			}

			if len(args) > 0 {
				p := getFlatParams(fi, args, tz)
				params = append(params, p...)
			}
			continue outFor
		case reflect.Struct:
			if v, ok := arg.(time.Time); ok {
				if fi != nil && fi.fieldType == TypeDateField {
					arg = v.In(tz).Format(formatDate)
				} else {
					arg = v.In(tz).Format(formatDateTime)
				}
			} else {
				// Registered model struct: substitute its primary-key value.
				typ := val.Type()
				name := getFullName(typ)
				var value interface{}
				if mmi, ok := modelCache.getByFN(name); ok {
					if _, vu, exist := getExistPk(mmi, val); exist {
						value = vu
					}
				}
				arg = value

				if arg == nil {
					panic(fmt.Errorf("need a valid args value, unknown table or value `%s`", name))
				}
			}
		}

		params = append(params, arg)
	}
	return
}
diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/models.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/models.go
new file mode 100644
index 000000000..faf551be2
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/models.go
@@ -0,0 +1,120 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package orm

import (
	"sync"
)

// on_delete tag values and struct-tag parsing constants.
const (
	odCascade             = "cascade"
	odSetNULL             = "set_null"
	odSetDefault          = "set_default"
	odDoNothing           = "do_nothing"
	defaultStructTagName  = "orm"
	defaultStructTagDelim = ";"
)

var (
	// modelCache is the package-global registry of all registered models.
	modelCache = &_modelCache{
		cache:     make(map[string]*modelInfo),
		cacheByFN: make(map[string]*modelInfo),
	}
	// supportTag maps each recognized orm tag to its arity:
	// 1 = bare flag ("pk"), 2 = takes a value ("size(120)").
	supportTag = map[string]int{
		"-":            1,
		"null":         1,
		"index":        1,
		"unique":       1,
		"pk":           1,
		"auto":         1,
		"auto_now":     1,
		"auto_now_add": 1,
		"size":         2,
		"column":       2,
		"default":      2,
		"rel":          2,
		"reverse":      2,
		"rel_table":    2,
		"rel_through":  2,
		"digits":       2,
		"decimals":     2,
		"on_delete":    2,
		"type":         2,
	}
)

// _modelCache indexes registered models by table name (cache) and by fully
// qualified type name (cacheByFN), remembering registration order in orders.
// NOTE(review): the accessor methods below do not take the embedded lock;
// presumably registration happens single-threaded at init and only BootStrap
// locks — confirm before calling these concurrently.
type _modelCache struct {
	sync.RWMutex
	orders    []string
	cache     map[string]*modelInfo
	cacheByFN map[string]*modelInfo
	done      bool // set once BootStrap has run; freezes registration
}

// all returns a shallow copy of the table-name index.
func (mc *_modelCache) all() map[string]*modelInfo {
	m := make(map[string]*modelInfo, len(mc.cache))
	for k, v := range mc.cache {
		m[k] = v
	}
	return m
}

// allOrdered returns the models in registration order.
func (mc *_modelCache) allOrdered() []*modelInfo {
	m := make([]*modelInfo, 0, len(mc.orders))
	for _, table := range mc.orders {
		m = append(m, mc.cache[table])
	}
	return m
}

// get looks a model up by table name.
func (mc *_modelCache) get(table string) (mi *modelInfo, ok bool) {
	mi, ok = mc.cache[table]
	return
}

// getByFN looks a model up by fully qualified type name.
func (mc *_modelCache) getByFN(name string) (mi *modelInfo, ok bool) {
	mi, ok = mc.cacheByFN[name]
	return
}

// set registers mi under table and returns the previous entry (nil if new);
// new tables are appended to the registration order.
func (mc *_modelCache) set(table string, mi *modelInfo) *modelInfo {
	mii := mc.cache[table]
	mc.cache[table] = mi
	mc.cacheByFN[mi.fullName] = mi
	if mii == nil {
		mc.orders = append(mc.orders, table)
	}
	return mii
}

// clean resets the cache to its empty state (used by ResetModelCache).
func (mc *_modelCache) clean() {
	mc.orders = make([]string, 0)
	mc.cache = make(map[string]*modelInfo)
	mc.cacheByFN = make(map[string]*modelInfo)
	mc.done = false
}

// ResetModelCache Clean model cache. Then you can re-RegisterModel.
// Common use this api for test case.
func ResetModelCache() {
	modelCache.clean()
}
diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_boot.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_boot.go
new file mode 100644
index 000000000..3690557b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_boot.go
@@ -0,0 +1,328 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package orm

import (
	"errors"
	"fmt"
	"os"
	"reflect"
	"strings"
)

// register models.
// prefix means table name prefix.
// registerModel validates a model value (must be a pointer to struct),
// derives its table name (with optional prefix), auto-detects an integer
// "Id"/"id" field as primary key when none is tagged, and stores the
// resulting modelInfo in the package cache. Duplicate registration or a
// missing pk terminates the process (os.Exit(2)) — registration is expected
// to happen at program init.
func registerModel(prefix string, model interface{}) {
	val := reflect.ValueOf(model)
	ind := reflect.Indirect(val)
	typ := ind.Type()

	if val.Kind() != reflect.Ptr {
		panic(fmt.Errorf(" cannot use non-ptr model struct `%s`", getFullName(typ)))
	}

	table := getTableName(val)

	if prefix != "" {
		table = prefix + table
	}

	name := getFullName(typ)
	if _, ok := modelCache.getByFN(name); ok {
		fmt.Printf(" model `%s` repeat register, must be unique\n", name)
		os.Exit(2)
	}

	if _, ok := modelCache.get(table); ok {
		fmt.Printf(" table name `%s` repeat register, must be unique\n", table)
		os.Exit(2)
	}

	info := newModelInfo(val)
	if info.fields.pk == nil {
		// No explicit pk tag: promote an integer field named "id" to auto pk.
	outFor:
		for _, fi := range info.fields.fieldsDB {
			if strings.ToLower(fi.name) == "id" {
				switch fi.addrValue.Elem().Kind() {
				case reflect.Int, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint32, reflect.Uint64:
					fi.auto = true
					fi.pk = true
					info.fields.pk = fi
					break outFor
				}
			}
		}

		if info.fields.pk == nil {
			fmt.Printf(" `%s` need a primary key field\n", name)
			os.Exit(2)
		}

	}

	info.table = table
	info.pkg = typ.PkgPath()
	info.model = model
	info.manual = true

	modelCache.set(table, info)
}

// bootStrap links all registered models together in four ordered passes:
//  1. resolve relModelInfo for every rel/reverse field and create or resolve
//     the m2m "through" models;
//  2. synthesize missing reverse fields on related models;
//  3. wire the two FK fields of every m2m through model
//     (reverseFieldInfo / reverseFieldInfoTwo);
//  4. match each reverse field with its counterpart on the related model.
//
// Any inconsistency falls through to the `end:` label and exits the process.
// The pass order matters — later passes read links set by earlier ones — so
// the goto-based flow is preserved exactly.
func bootStrap() {
	if modelCache.done {
		return
	}

	var (
		err    error
		models map[string]*modelInfo
	)

	if dataBaseCache.getDefault() == nil {
		err = fmt.Errorf("must have one register DataBase alias named `default`")
		goto end
	}

	// Pass 1: resolve relation targets and through models.
	models = modelCache.all()
	for _, mi := range models {
		for _, fi := range mi.fields.columns {
			if fi.rel || fi.reverse {
				elm := fi.addrValue.Type().Elem()
				switch fi.fieldType {
				case RelReverseMany, RelManyToMany:
					elm = elm.Elem()
				}

				name := getFullName(elm)
				mii, ok := modelCache.getByFN(name)
				if ok == false || mii.pkg != elm.PkgPath() {
					err = fmt.Errorf("can not found rel in field `%s`, `%s` may be miss register", fi.fullName, elm.String())
					goto end
				}

				fi.relModelInfo = mii

				switch fi.fieldType {
				case RelManyToMany:
					if fi.relThrough != "" {
						// Explicit through model: must be a registered
						// "pkg.Model" full name.
						msg := fmt.Sprintf("field `%s` wrong rel_through value `%s`", fi.fullName, fi.relThrough)
						if i := strings.LastIndex(fi.relThrough, "."); i != -1 && len(fi.relThrough) > (i+1) {
							pn := fi.relThrough[:i]
							rmi, ok := modelCache.getByFN(fi.relThrough)
							if ok == false || pn != rmi.pkg {
								err = errors.New(msg + " cannot find table")
								goto end
							}

							fi.relThroughModelInfo = rmi
							fi.relTable = rmi.table

						} else {
							err = errors.New(msg)
							goto end
						}
					} else {
						// Implicit through model: synthesize and register it.
						i := newM2MModelInfo(mi, mii)
						if fi.relTable != "" {
							i.table = fi.relTable
						}

						if v := modelCache.set(i.table, i); v != nil {
							err = fmt.Errorf("the rel table name `%s` already registered, cannot be use, please change one", fi.relTable)
							goto end
						}
						fi.relTable = i.table
						fi.relThroughModelInfo = i
					}

					fi.relThroughModelInfo.isThrough = true
				}
			}
		}
	}

	// Pass 2: add auto reverse fields where the related model lacks one.
	models = modelCache.all()
	for _, mi := range models {
		for _, fi := range mi.fields.fieldsRel {
			switch fi.fieldType {
			case RelForeignKey, RelOneToOne, RelManyToMany:
				inModel := false
				for _, ffi := range fi.relModelInfo.fields.fieldsReverse {
					if ffi.relModelInfo == mi {
						inModel = true
						break
					}
				}

				if inModel == false {
					rmi := fi.relModelInfo
					ffi := new(fieldInfo)
					ffi.name = mi.name
					ffi.column = ffi.name
					ffi.fullName = rmi.fullName + "." + ffi.name
					ffi.reverse = true
					ffi.relModelInfo = mi
					ffi.mi = rmi
					if fi.fieldType == RelOneToOne {
						ffi.fieldType = RelReverseOne
					} else {
						ffi.fieldType = RelReverseMany
					}
					if rmi.fields.Add(ffi) == false {
						// Name collision: retry with numeric suffixes 0..4.
						added := false
						for cnt := 0; cnt < 5; cnt++ {
							ffi.name = fmt.Sprintf("%s%d", mi.name, cnt)
							ffi.column = ffi.name
							ffi.fullName = rmi.fullName + "." + ffi.name
							if added = rmi.fields.Add(ffi); added {
								break
							}
						}
						if added == false {
							panic(fmt.Errorf("cannot generate auto reverse field info `%s` to `%s`", fi.fullName, ffi.fullName))
						}
					}
				}
			}
		}
	}

	// Pass 3: locate the two FK fields inside each m2m through model.
	models = modelCache.all()
	for _, mi := range models {
		for _, fi := range mi.fields.fieldsRel {
			switch fi.fieldType {
			case RelManyToMany:
				for _, ffi := range fi.relThroughModelInfo.fields.fieldsRel {
					switch ffi.fieldType {
					case RelOneToOne, RelForeignKey:
						if ffi.relModelInfo == fi.relModelInfo {
							fi.reverseFieldInfoTwo = ffi
						}
						if ffi.relModelInfo == mi {
							fi.reverseField = ffi.name
							fi.reverseFieldInfo = ffi
						}
					}
				}

				if fi.reverseFieldInfoTwo == nil {
					err = fmt.Errorf("can not find m2m field for m2m model `%s`, ensure your m2m model defined correct",
						fi.relThroughModelInfo.fullName)
					goto end
				}
			}
		}
	}

	// Pass 4: pair each reverse field with its forward counterpart.
	models = modelCache.all()
	for _, mi := range models {
		for _, fi := range mi.fields.fieldsReverse {
			switch fi.fieldType {
			case RelReverseOne:
				found := false
			mForA:
				for _, ffi := range fi.relModelInfo.fields.fieldsByType[RelOneToOne] {
					if ffi.relModelInfo == mi {
						found = true
						fi.reverseField = ffi.name
						fi.reverseFieldInfo = ffi

						ffi.reverseField = fi.name
						ffi.reverseFieldInfo = fi
						break mForA
					}
				}
				if found == false {
					err = fmt.Errorf("reverse field `%s` not found in model `%s`", fi.fullName, fi.relModelInfo.fullName)
					goto end
				}
			case RelReverseMany:
				found := false
			mForB:
				for _, ffi := range fi.relModelInfo.fields.fieldsByType[RelForeignKey] {
					if ffi.relModelInfo == mi {
						found = true
						fi.reverseField = ffi.name
						fi.reverseFieldInfo = ffi

						ffi.reverseField = fi.name
						ffi.reverseFieldInfo = fi

						break mForB
					}
				}
				if found == false {
					// No FK counterpart: try matching an m2m field whose
					// through/table settings agree with ours.
				mForC:
					for _, ffi := range fi.relModelInfo.fields.fieldsByType[RelManyToMany] {
						conditions := fi.relThrough != "" && fi.relThrough == ffi.relThrough ||
							fi.relTable != "" && fi.relTable == ffi.relTable ||
							fi.relThrough == "" && fi.relTable == ""
						if ffi.relModelInfo == mi && conditions {
							found = true

							fi.reverseField = ffi.reverseFieldInfoTwo.name
							fi.reverseFieldInfo = ffi.reverseFieldInfoTwo
							fi.relThroughModelInfo = ffi.relThroughModelInfo
							fi.reverseFieldInfoTwo = ffi.reverseFieldInfo
							fi.reverseFieldInfoM2M = ffi
							ffi.reverseFieldInfoM2M = fi

							break mForC
						}
					}
				}
				if found == false {
					err = fmt.Errorf("reverse field for `%s` not found in model `%s`", fi.fullName, fi.relModelInfo.fullName)
					goto end
				}
			}
		}
	}

end:
	if err != nil {
		fmt.Println(err)
		os.Exit(2)
	}
}

// RegisterModel register models
func RegisterModel(models ...interface{}) {
	RegisterModelWithPrefix("", models...)
}

// RegisterModelWithPrefix register models with a prefix
func RegisterModelWithPrefix(prefix string, models ...interface{}) {
	if modelCache.done {
		panic(fmt.Errorf("RegisterModel must be run before BootStrap"))
	}

	for _, model := range models {
		registerModel(prefix, model)
	}
}

// BootStrap bootstrap models.
// make all model parsed and can not add more models
func BootStrap() {
	if modelCache.done {
		return
	}

	modelCache.Lock()
	defer modelCache.Unlock()
	bootStrap()
	modelCache.done = true
}
diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_fields.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_fields.go
new file mode 100644
index 000000000..a8cf8e4f6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_fields.go
@@ -0,0 +1,629 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package orm

import (
	"fmt"
	"strconv"
	"time"
)

// Define the Type enum.
// Each field type is a distinct bit (1<<0 .. 1<<19) so categories can be
// tested with a bitwise AND against the masks below.
const (
	TypeBooleanField = 1 << iota
	TypeCharField
	TypeTextField
	TypeDateField
	TypeDateTimeField
	TypeBitField
	TypeSmallIntegerField
	TypeIntegerField
	TypeBigIntegerField
	TypePositiveBitField
	TypePositiveSmallIntegerField
	TypePositiveIntegerField
	TypePositiveBigIntegerField
	TypeFloatField
	TypeDecimalField
	RelForeignKey
	RelOneToOne
	RelManyToMany
	RelReverseOne
	RelReverseMany
)

// Define some logic enum.
// For a power of two x, ^-x == x-1 (all bits below x set); the shift pairs
// then carve out a contiguous bit range:
//
//	IsIntegerField        — bits 5..12  (TypeBitField .. TypePositiveBigIntegerField)
//	IsPostiveIntegerField — bits 9..12  (the four unsigned integer types)
//	IsRelField            — bits 15..19 (all relation types)
//	IsFieldType           — bits 0..19  (every defined type)
const (
	IsIntegerField        = ^-TypePositiveBigIntegerField >> 4 << 5
	IsPostiveIntegerField = ^-TypePositiveBigIntegerField >> 8 << 9
	IsRelField            = ^-RelReverseMany >> 14 << 15
	IsFieldType           = ^-RelReverseMany<<1 + 1
)

// BooleanField A true/false field.
+type BooleanField bool + +// Value return the BooleanField +func (e BooleanField) Value() bool { + return bool(e) +} + +// Set will set the BooleanField +func (e *BooleanField) Set(d bool) { + *e = BooleanField(d) +} + +// String format the Bool to string +func (e *BooleanField) String() string { + return strconv.FormatBool(e.Value()) +} + +// FieldType return BooleanField the type +func (e *BooleanField) FieldType() int { + return TypeBooleanField +} + +// SetRaw set the interface to bool +func (e *BooleanField) SetRaw(value interface{}) error { + switch d := value.(type) { + case bool: + e.Set(d) + case string: + v, err := StrTo(d).Bool() + if err != nil { + e.Set(v) + } + return err + default: + return fmt.Errorf(" unknown value `%s`", value) + } + return nil +} + +// RawValue return the current value +func (e *BooleanField) RawValue() interface{} { + return e.Value() +} + +// verify the BooleanField implement the Fielder interface +var _ Fielder = new(BooleanField) + +// CharField A string field +// required values tag: size +// The size is enforced at the database level and in models’s validation. 
+// eg: `orm:"size(120)"` +type CharField string + +// Value return the CharField's Value +func (e CharField) Value() string { + return string(e) +} + +// Set CharField value +func (e *CharField) Set(d string) { + *e = CharField(d) +} + +// String return the CharField +func (e *CharField) String() string { + return e.Value() +} + +// FieldType return the enum type +func (e *CharField) FieldType() int { + return TypeCharField +} + +// SetRaw set the interface to string +func (e *CharField) SetRaw(value interface{}) error { + switch d := value.(type) { + case string: + e.Set(d) + default: + return fmt.Errorf(" unknown value `%s`", value) + } + return nil +} + +// RawValue return the CharField value +func (e *CharField) RawValue() interface{} { + return e.Value() +} + +// verify CharField implement Fielder +var _ Fielder = new(CharField) + +// DateField A date, represented in go by a time.Time instance. +// only date values like 2006-01-02 +// Has a few extra, optional attr tag: +// +// auto_now: +// Automatically set the field to now every time the object is saved. Useful for “last-modified” timestamps. +// Note that the current date is always used; it’s not just a default value that you can override. +// +// auto_now_add: +// Automatically set the field to now when the object is first created. Useful for creation of timestamps. +// Note that the current date is always used; it’s not just a default value that you can override. +// +// eg: `orm:"auto_now"` or `orm:"auto_now_add"` +type DateField time.Time + +// Value return the time.Time +func (e DateField) Value() time.Time { + return time.Time(e) +} + +// Set set the DateField's value +func (e *DateField) Set(d time.Time) { + *e = DateField(d) +} + +// String convert datatime to string +func (e *DateField) String() string { + return e.Value().String() +} + +// FieldType return enum type Date +func (e *DateField) FieldType() int { + return TypeDateField +} + +// SetRaw convert the interface to time.Time. 
Allow string and time.Time +func (e *DateField) SetRaw(value interface{}) error { + switch d := value.(type) { + case time.Time: + e.Set(d) + case string: + v, err := timeParse(d, formatDate) + if err != nil { + e.Set(v) + } + return err + default: + return fmt.Errorf(" unknown value `%s`", value) + } + return nil +} + +// RawValue return Date value +func (e *DateField) RawValue() interface{} { + return e.Value() +} + +// verify DateField implement fielder interface +var _ Fielder = new(DateField) + +// DateTimeField A date, represented in go by a time.Time instance. +// datetime values like 2006-01-02 15:04:05 +// Takes the same extra arguments as DateField. +type DateTimeField time.Time + +// Value return the datatime value +func (e DateTimeField) Value() time.Time { + return time.Time(e) +} + +// Set set the time.Time to datatime +func (e *DateTimeField) Set(d time.Time) { + *e = DateTimeField(d) +} + +// String return the time's String +func (e *DateTimeField) String() string { + return e.Value().String() +} + +// FieldType return the enum TypeDateTimeField +func (e *DateTimeField) FieldType() int { + return TypeDateTimeField +} + +// SetRaw convert the string or time.Time to DateTimeField +func (e *DateTimeField) SetRaw(value interface{}) error { + switch d := value.(type) { + case time.Time: + e.Set(d) + case string: + v, err := timeParse(d, formatDateTime) + if err != nil { + e.Set(v) + } + return err + default: + return fmt.Errorf(" unknown value `%s`", value) + } + return nil +} + +// RawValue return the datatime value +func (e *DateTimeField) RawValue() interface{} { + return e.Value() +} + +// verify datatime implement fielder +var _ Fielder = new(DateTimeField) + +// FloatField A floating-point number represented in go by a float32 value. 
+type FloatField float64 + +// Value return the FloatField value +func (e FloatField) Value() float64 { + return float64(e) +} + +// Set the Float64 +func (e *FloatField) Set(d float64) { + *e = FloatField(d) +} + +// String return the string +func (e *FloatField) String() string { + return ToStr(e.Value(), -1, 32) +} + +// FieldType return the enum type +func (e *FloatField) FieldType() int { + return TypeFloatField +} + +// SetRaw converter interface Float64 float32 or string to FloatField +func (e *FloatField) SetRaw(value interface{}) error { + switch d := value.(type) { + case float32: + e.Set(float64(d)) + case float64: + e.Set(d) + case string: + v, err := StrTo(d).Float64() + if err != nil { + e.Set(v) + } + default: + return fmt.Errorf(" unknown value `%s`", value) + } + return nil +} + +// RawValue return the FloatField value +func (e *FloatField) RawValue() interface{} { + return e.Value() +} + +// verify FloatField implement Fielder +var _ Fielder = new(FloatField) + +// SmallIntegerField -32768 to 32767 +type SmallIntegerField int16 + +// Value return int16 value +func (e SmallIntegerField) Value() int16 { + return int16(e) +} + +// Set the SmallIntegerField value +func (e *SmallIntegerField) Set(d int16) { + *e = SmallIntegerField(d) +} + +// String convert smallint to string +func (e *SmallIntegerField) String() string { + return ToStr(e.Value()) +} + +// FieldType return enum type SmallIntegerField +func (e *SmallIntegerField) FieldType() int { + return TypeSmallIntegerField +} + +// SetRaw convert interface int16/string to int16 +func (e *SmallIntegerField) SetRaw(value interface{}) error { + switch d := value.(type) { + case int16: + e.Set(d) + case string: + v, err := StrTo(d).Int16() + if err != nil { + e.Set(v) + } + default: + return fmt.Errorf(" unknown value `%s`", value) + } + return nil +} + +// RawValue return smallint value +func (e *SmallIntegerField) RawValue() interface{} { + return e.Value() +} + +// verify SmallIntegerField 
implement Fielder +var _ Fielder = new(SmallIntegerField) + +// IntegerField -2147483648 to 2147483647 +type IntegerField int32 + +// Value return the int32 +func (e IntegerField) Value() int32 { + return int32(e) +} + +// Set IntegerField value +func (e *IntegerField) Set(d int32) { + *e = IntegerField(d) +} + +// String convert Int32 to string +func (e *IntegerField) String() string { + return ToStr(e.Value()) +} + +// FieldType return the enum type +func (e *IntegerField) FieldType() int { + return TypeIntegerField +} + +// SetRaw convert interface int32/string to int32 +func (e *IntegerField) SetRaw(value interface{}) error { + switch d := value.(type) { + case int32: + e.Set(d) + case string: + v, err := StrTo(d).Int32() + if err != nil { + e.Set(v) + } + default: + return fmt.Errorf(" unknown value `%s`", value) + } + return nil +} + +// RawValue return IntegerField value +func (e *IntegerField) RawValue() interface{} { + return e.Value() +} + +// verify IntegerField implement Fielder +var _ Fielder = new(IntegerField) + +// BigIntegerField -9223372036854775808 to 9223372036854775807. 
+type BigIntegerField int64 + +// Value return int64 +func (e BigIntegerField) Value() int64 { + return int64(e) +} + +// Set the BigIntegerField value +func (e *BigIntegerField) Set(d int64) { + *e = BigIntegerField(d) +} + +// String convert BigIntegerField to string +func (e *BigIntegerField) String() string { + return ToStr(e.Value()) +} + +// FieldType return enum type +func (e *BigIntegerField) FieldType() int { + return TypeBigIntegerField +} + +// SetRaw convert interface int64/string to int64 +func (e *BigIntegerField) SetRaw(value interface{}) error { + switch d := value.(type) { + case int64: + e.Set(d) + case string: + v, err := StrTo(d).Int64() + if err != nil { + e.Set(v) + } + default: + return fmt.Errorf(" unknown value `%s`", value) + } + return nil +} + +// RawValue return BigIntegerField value +func (e *BigIntegerField) RawValue() interface{} { + return e.Value() +} + +// verify BigIntegerField implement Fielder +var _ Fielder = new(BigIntegerField) + +// PositiveSmallIntegerField 0 to 65535 +type PositiveSmallIntegerField uint16 + +// Value return uint16 +func (e PositiveSmallIntegerField) Value() uint16 { + return uint16(e) +} + +// Set PositiveSmallIntegerField value +func (e *PositiveSmallIntegerField) Set(d uint16) { + *e = PositiveSmallIntegerField(d) +} + +// String convert uint16 to string +func (e *PositiveSmallIntegerField) String() string { + return ToStr(e.Value()) +} + +// FieldType return enum type +func (e *PositiveSmallIntegerField) FieldType() int { + return TypePositiveSmallIntegerField +} + +// SetRaw convert Interface uint16/string to uint16 +func (e *PositiveSmallIntegerField) SetRaw(value interface{}) error { + switch d := value.(type) { + case uint16: + e.Set(d) + case string: + v, err := StrTo(d).Uint16() + if err != nil { + e.Set(v) + } + default: + return fmt.Errorf(" unknown value `%s`", value) + } + return nil +} + +// RawValue returns PositiveSmallIntegerField value +func (e *PositiveSmallIntegerField) RawValue() 
interface{} { + return e.Value() +} + +// verify PositiveSmallIntegerField implement Fielder +var _ Fielder = new(PositiveSmallIntegerField) + +// PositiveIntegerField 0 to 4294967295 +type PositiveIntegerField uint32 + +// Value return PositiveIntegerField value. Uint32 +func (e PositiveIntegerField) Value() uint32 { + return uint32(e) +} + +// Set the PositiveIntegerField value +func (e *PositiveIntegerField) Set(d uint32) { + *e = PositiveIntegerField(d) +} + +// String convert PositiveIntegerField to string +func (e *PositiveIntegerField) String() string { + return ToStr(e.Value()) +} + +// FieldType return enum type +func (e *PositiveIntegerField) FieldType() int { + return TypePositiveIntegerField +} + +// SetRaw convert interface uint32/string to Uint32 +func (e *PositiveIntegerField) SetRaw(value interface{}) error { + switch d := value.(type) { + case uint32: + e.Set(d) + case string: + v, err := StrTo(d).Uint32() + if err != nil { + e.Set(v) + } + default: + return fmt.Errorf(" unknown value `%s`", value) + } + return nil +} + +// RawValue return the PositiveIntegerField Value +func (e *PositiveIntegerField) RawValue() interface{} { + return e.Value() +} + +// verify PositiveIntegerField implement Fielder +var _ Fielder = new(PositiveIntegerField) + +// PositiveBigIntegerField 0 to 18446744073709551615 +type PositiveBigIntegerField uint64 + +// Value return uint64 +func (e PositiveBigIntegerField) Value() uint64 { + return uint64(e) +} + +// Set PositiveBigIntegerField value +func (e *PositiveBigIntegerField) Set(d uint64) { + *e = PositiveBigIntegerField(d) +} + +// String convert PositiveBigIntegerField to string +func (e *PositiveBigIntegerField) String() string { + return ToStr(e.Value()) +} + +// FieldType return enum type +func (e *PositiveBigIntegerField) FieldType() int { + return TypePositiveIntegerField +} + +// SetRaw convert interface uint64/string to Uint64 +func (e *PositiveBigIntegerField) SetRaw(value interface{}) error { + switch d := 
value.(type) { + case uint64: + e.Set(d) + case string: + v, err := StrTo(d).Uint64() + if err != nil { + e.Set(v) + } + default: + return fmt.Errorf(" unknown value `%s`", value) + } + return nil +} + +// RawValue return PositiveBigIntegerField value +func (e *PositiveBigIntegerField) RawValue() interface{} { + return e.Value() +} + +// verify PositiveBigIntegerField implement Fielder +var _ Fielder = new(PositiveBigIntegerField) + +// TextField A large text field. +type TextField string + +// Value return TextField value +func (e TextField) Value() string { + return string(e) +} + +// Set the TextField value +func (e *TextField) Set(d string) { + *e = TextField(d) +} + +// String convert TextField to string +func (e *TextField) String() string { + return e.Value() +} + +// FieldType return enum type +func (e *TextField) FieldType() int { + return TypeTextField +} + +// SetRaw convert interface string to string +func (e *TextField) SetRaw(value interface{}) error { + switch d := value.(type) { + case string: + e.Set(d) + default: + return fmt.Errorf(" unknown value `%s`", value) + } + return nil +} + +// RawValue return TextField value +func (e *TextField) RawValue() interface{} { + return e.Value() +} + +// verify TextField implement Fielder +var _ Fielder = new(TextField) diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_info_f.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_info_f.go new file mode 100644 index 000000000..14e1f2c62 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_info_f.go @@ -0,0 +1,452 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package orm + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +var errSkipField = errors.New("skip field") + +// field info collection +type fields struct { + pk *fieldInfo + columns map[string]*fieldInfo + fields map[string]*fieldInfo + fieldsLow map[string]*fieldInfo + fieldsByType map[int][]*fieldInfo + fieldsRel []*fieldInfo + fieldsReverse []*fieldInfo + fieldsDB []*fieldInfo + rels []*fieldInfo + orders []string + dbcols []string +} + +// add field info +func (f *fields) Add(fi *fieldInfo) (added bool) { + if f.fields[fi.name] == nil && f.columns[fi.column] == nil { + f.columns[fi.column] = fi + f.fields[fi.name] = fi + f.fieldsLow[strings.ToLower(fi.name)] = fi + } else { + return + } + if _, ok := f.fieldsByType[fi.fieldType]; ok == false { + f.fieldsByType[fi.fieldType] = make([]*fieldInfo, 0) + } + f.fieldsByType[fi.fieldType] = append(f.fieldsByType[fi.fieldType], fi) + f.orders = append(f.orders, fi.column) + if fi.dbcol { + f.dbcols = append(f.dbcols, fi.column) + f.fieldsDB = append(f.fieldsDB, fi) + } + if fi.rel { + f.fieldsRel = append(f.fieldsRel, fi) + } + if fi.reverse { + f.fieldsReverse = append(f.fieldsReverse, fi) + } + return true +} + +// get field info by name +func (f *fields) GetByName(name string) *fieldInfo { + return f.fields[name] +} + +// get field info by column name +func (f *fields) GetByColumn(column string) *fieldInfo { + return f.columns[column] +} + +// get field info by string, name is prior +func (f *fields) GetByAny(name string) (*fieldInfo, bool) { + if fi, ok := f.fields[name]; ok { + 
return fi, ok + } + if fi, ok := f.fieldsLow[strings.ToLower(name)]; ok { + return fi, ok + } + if fi, ok := f.columns[name]; ok { + return fi, ok + } + return nil, false +} + +// create new field info collection +func newFields() *fields { + f := new(fields) + f.fields = make(map[string]*fieldInfo) + f.fieldsLow = make(map[string]*fieldInfo) + f.columns = make(map[string]*fieldInfo) + f.fieldsByType = make(map[int][]*fieldInfo) + return f +} + +// single field info +type fieldInfo struct { + mi *modelInfo + fieldIndex int + fieldType int + dbcol bool + inModel bool + name string + fullName string + column string + addrValue reflect.Value + sf reflect.StructField + auto bool + pk bool + null bool + index bool + unique bool + colDefault bool + initial StrTo + size int + autoNow bool + autoNowAdd bool + rel bool + reverse bool + reverseField string + reverseFieldInfo *fieldInfo + reverseFieldInfoTwo *fieldInfo + reverseFieldInfoM2M *fieldInfo + relTable string + relThrough string + relThroughModelInfo *modelInfo + relModelInfo *modelInfo + digits int + decimals int + isFielder bool + onDelete string +} + +// new field info +func newFieldInfo(mi *modelInfo, field reflect.Value, sf reflect.StructField) (fi *fieldInfo, err error) { + var ( + tag string + tagValue string + initial StrTo + fieldType int + attrs map[string]bool + tags map[string]string + addrField reflect.Value + ) + + fi = new(fieldInfo) + + addrField = field + if field.CanAddr() && field.Kind() != reflect.Ptr { + addrField = field.Addr() + if _, ok := addrField.Interface().(Fielder); !ok { + if field.Kind() == reflect.Slice { + addrField = field + } + } + } + + parseStructTag(sf.Tag.Get(defaultStructTagName), &attrs, &tags) + + if _, ok := attrs["-"]; ok { + return nil, errSkipField + } + + digits := tags["digits"] + decimals := tags["decimals"] + size := tags["size"] + onDelete := tags["on_delete"] + + initial.Clear() + if v, ok := tags["default"]; ok { + initial.Set(v) + } + +checkType: + switch f := 
addrField.Interface().(type) { + case Fielder: + fi.isFielder = true + if field.Kind() == reflect.Ptr { + err = fmt.Errorf("the model Fielder can not be use ptr") + goto end + } + fieldType = f.FieldType() + if fieldType&IsRelField > 0 { + err = fmt.Errorf("unsupport rel type custom field") + goto end + } + default: + tag = "rel" + tagValue = tags[tag] + if tagValue != "" { + switch tagValue { + case "fk": + fieldType = RelForeignKey + break checkType + case "one": + fieldType = RelOneToOne + break checkType + case "m2m": + fieldType = RelManyToMany + if tv := tags["rel_table"]; tv != "" { + fi.relTable = tv + } else if tv := tags["rel_through"]; tv != "" { + fi.relThrough = tv + } + break checkType + default: + err = fmt.Errorf("error") + goto wrongTag + } + } + tag = "reverse" + tagValue = tags[tag] + if tagValue != "" { + switch tagValue { + case "one": + fieldType = RelReverseOne + break checkType + case "many": + fieldType = RelReverseMany + if tv := tags["rel_table"]; tv != "" { + fi.relTable = tv + } else if tv := tags["rel_through"]; tv != "" { + fi.relThrough = tv + } + break checkType + default: + err = fmt.Errorf("error") + goto wrongTag + } + } + + fieldType, err = getFieldType(addrField) + if err != nil { + goto end + } + if fieldType == TypeCharField && tags["type"] == "text" { + fieldType = TypeTextField + } + if fieldType == TypeFloatField && (digits != "" || decimals != "") { + fieldType = TypeDecimalField + } + if fieldType == TypeDateTimeField && tags["type"] == "date" { + fieldType = TypeDateField + } + } + + switch fieldType { + case RelForeignKey, RelOneToOne, RelReverseOne: + if field.Kind() != reflect.Ptr { + err = fmt.Errorf("rel/reverse:one field must be *%s", field.Type().Name()) + goto end + } + case RelManyToMany, RelReverseMany: + if field.Kind() != reflect.Slice { + err = fmt.Errorf("rel/reverse:many field must be slice") + goto end + } else { + if field.Type().Elem().Kind() != reflect.Ptr { + err = fmt.Errorf("rel/reverse:many slice 
must be []*%s", field.Type().Elem().Name()) + goto end + } + } + } + + if fieldType&IsFieldType == 0 { + err = fmt.Errorf("wrong field type") + goto end + } + + fi.fieldType = fieldType + fi.name = sf.Name + fi.column = getColumnName(fieldType, addrField, sf, tags["column"]) + fi.addrValue = addrField + fi.sf = sf + fi.fullName = mi.fullName + "." + sf.Name + + fi.null = attrs["null"] + fi.index = attrs["index"] + fi.auto = attrs["auto"] + fi.pk = attrs["pk"] + fi.unique = attrs["unique"] + + // Mark object property if there is attribute "default" in the orm configuration + if _, ok := tags["default"]; ok { + fi.colDefault = true + } + + switch fieldType { + case RelManyToMany, RelReverseMany, RelReverseOne: + fi.null = false + fi.index = false + fi.auto = false + fi.pk = false + fi.unique = false + default: + fi.dbcol = true + } + + switch fieldType { + case RelForeignKey, RelOneToOne, RelManyToMany: + fi.rel = true + if fieldType == RelOneToOne { + fi.unique = true + } + case RelReverseMany, RelReverseOne: + fi.reverse = true + } + + if fi.rel && fi.dbcol { + switch onDelete { + case odCascade, odDoNothing: + case odSetDefault: + if initial.Exist() == false { + err = errors.New("on_delete: set_default need set field a default value") + goto end + } + case odSetNULL: + if fi.null == false { + err = errors.New("on_delete: set_null need set field null") + goto end + } + default: + if onDelete == "" { + onDelete = odCascade + } else { + err = fmt.Errorf("on_delete value expected choice in `cascade,set_null,set_default,do_nothing`, unknown `%s`", onDelete) + goto end + } + } + + fi.onDelete = onDelete + } + + switch fieldType { + case TypeBooleanField: + case TypeCharField: + if size != "" { + v, e := StrTo(size).Int32() + if e != nil { + err = fmt.Errorf("wrong size value `%s`", size) + } else { + fi.size = int(v) + } + } else { + fi.size = 255 + } + case TypeTextField: + fi.index = false + fi.unique = false + case TypeDateField, TypeDateTimeField: + if 
attrs["auto_now"] { + fi.autoNow = true + } else if attrs["auto_now_add"] { + fi.autoNowAdd = true + } + case TypeFloatField: + case TypeDecimalField: + d1 := digits + d2 := decimals + v1, er1 := StrTo(d1).Int8() + v2, er2 := StrTo(d2).Int8() + if er1 != nil || er2 != nil { + err = fmt.Errorf("wrong digits/decimals value %s/%s", d2, d1) + goto end + } + fi.digits = int(v1) + fi.decimals = int(v2) + default: + switch { + case fieldType&IsIntegerField > 0: + case fieldType&IsRelField > 0: + } + } + + if fieldType&IsIntegerField == 0 { + if fi.auto { + err = fmt.Errorf("non-integer type cannot set auto") + goto end + } + } + + if fi.auto || fi.pk { + if fi.auto { + + switch addrField.Elem().Kind() { + case reflect.Int, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint32, reflect.Uint64: + default: + err = fmt.Errorf("auto primary key only support int, int32, int64, uint, uint32, uint64 but found `%s`", addrField.Elem().Kind()) + goto end + } + + fi.pk = true + } + fi.null = false + fi.index = false + fi.unique = false + } + + if fi.unique { + fi.index = false + } + + if fi.auto || fi.pk || fi.unique || fieldType == TypeDateField || fieldType == TypeDateTimeField { + // can not set default + initial.Clear() + } + + if initial.Exist() { + v := initial + switch fieldType { + case TypeBooleanField: + _, err = v.Bool() + case TypeFloatField, TypeDecimalField: + _, err = v.Float64() + case TypeBitField: + _, err = v.Int8() + case TypeSmallIntegerField: + _, err = v.Int16() + case TypeIntegerField: + _, err = v.Int32() + case TypeBigIntegerField: + _, err = v.Int64() + case TypePositiveBitField: + _, err = v.Uint8() + case TypePositiveSmallIntegerField: + _, err = v.Uint16() + case TypePositiveIntegerField: + _, err = v.Uint32() + case TypePositiveBigIntegerField: + _, err = v.Uint64() + } + if err != nil { + tag, tagValue = "default", tags["default"] + goto wrongTag + } + } + + fi.initial = initial +end: + if err != nil { + return nil, err + } + return +wrongTag: + 
return nil, fmt.Errorf("wrong tag format: `%s:\"%s\"`, %s", tag, tagValue, err) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_info_m.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_info_m.go new file mode 100644 index 000000000..2654cdb5b --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_info_m.go @@ -0,0 +1,146 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package orm + +import ( + "fmt" + "os" + "reflect" +) + +// single model info +type modelInfo struct { + pkg string + name string + fullName string + table string + model interface{} + fields *fields + manual bool + addrField reflect.Value + uniques []string + isThrough bool +} + +// new model info +func newModelInfo(val reflect.Value) (info *modelInfo) { + var ( + err error + fi *fieldInfo + sf reflect.StructField + ) + + info = &modelInfo{} + info.fields = newFields() + + ind := reflect.Indirect(val) + typ := ind.Type() + + info.addrField = val + + info.name = typ.Name() + info.fullName = getFullName(typ) + + for i := 0; i < ind.NumField(); i++ { + field := ind.Field(i) + sf = ind.Type().Field(i) + if sf.PkgPath != "" { + continue + } + fi, err = newFieldInfo(info, field, sf) + + if err != nil { + if err == errSkipField { + err = nil + continue + } + break + } + + added := info.fields.Add(fi) + if added == false { + err = fmt.Errorf("duplicate column name: %s", fi.column) + break + } + + if fi.pk { + if info.fields.pk != nil { + err = fmt.Errorf("one model must have one pk field only") + break + } else { + info.fields.pk = fi + } + } + + fi.fieldIndex = i + fi.mi = info + fi.inModel = true + } + + if err != nil { + fmt.Println(fmt.Errorf("field: %s.%s, %s", ind.Type(), sf.Name, err)) + os.Exit(2) + } + + return +} + +// combine related model info to new model info. +// prepare for relation models query. +func newM2MModelInfo(m1, m2 *modelInfo) (info *modelInfo) { + info = new(modelInfo) + info.fields = newFields() + info.table = m1.table + "_" + m2.table + "s" + info.name = camelString(info.table) + info.fullName = m1.pkg + "." + info.name + + fa := new(fieldInfo) + f1 := new(fieldInfo) + f2 := new(fieldInfo) + fa.fieldType = TypeBigIntegerField + fa.auto = true + fa.pk = true + fa.dbcol = true + fa.name = "Id" + fa.column = "id" + fa.fullName = info.fullName + "." 
+ fa.name + + f1.dbcol = true + f2.dbcol = true + f1.fieldType = RelForeignKey + f2.fieldType = RelForeignKey + f1.name = camelString(m1.table) + f2.name = camelString(m2.table) + f1.fullName = info.fullName + "." + f1.name + f2.fullName = info.fullName + "." + f2.name + f1.column = m1.table + "_id" + f2.column = m2.table + "_id" + f1.rel = true + f2.rel = true + f1.relTable = m1.table + f2.relTable = m2.table + f1.relModelInfo = m1 + f2.relModelInfo = m2 + f1.mi = info + f2.mi = info + + info.fields.Add(fa) + info.fields.Add(f1) + info.fields.Add(f2) + info.fields.pk = fa + + info.uniques = []string{f1.column, f2.column} + return +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_utils.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_utils.go new file mode 100644 index 000000000..ec11d5169 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/models_utils.go @@ -0,0 +1,207 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package orm + +import ( + "database/sql" + "fmt" + "reflect" + "strings" + "time" +) + +// get reflect.Type name with package path. +func getFullName(typ reflect.Type) string { + return typ.PkgPath() + "." + typ.Name() +} + +// get table name. method, or field name. auto snaked. 
+func getTableName(val reflect.Value) string { + ind := reflect.Indirect(val) + fun := val.MethodByName("TableName") + if fun.IsValid() { + vals := fun.Call([]reflect.Value{}) + if len(vals) > 0 { + val := vals[0] + if val.Kind() == reflect.String { + return val.String() + } + } + } + return snakeString(ind.Type().Name()) +} + +// get table engine, mysiam or innodb. +func getTableEngine(val reflect.Value) string { + fun := val.MethodByName("TableEngine") + if fun.IsValid() { + vals := fun.Call([]reflect.Value{}) + if len(vals) > 0 { + val := vals[0] + if val.Kind() == reflect.String { + return val.String() + } + } + } + return "" +} + +// get table index from method. +func getTableIndex(val reflect.Value) [][]string { + fun := val.MethodByName("TableIndex") + if fun.IsValid() { + vals := fun.Call([]reflect.Value{}) + if len(vals) > 0 { + val := vals[0] + if val.CanInterface() { + if d, ok := val.Interface().([][]string); ok { + return d + } + } + } + } + return nil +} + +// get table unique from method +func getTableUnique(val reflect.Value) [][]string { + fun := val.MethodByName("TableUnique") + if fun.IsValid() { + vals := fun.Call([]reflect.Value{}) + if len(vals) > 0 { + val := vals[0] + if val.CanInterface() { + if d, ok := val.Interface().([][]string); ok { + return d + } + } + } + } + return nil +} + +// get snaked column name +func getColumnName(ft int, addrField reflect.Value, sf reflect.StructField, col string) string { + column := col + if col == "" { + column = snakeString(sf.Name) + } + switch ft { + case RelForeignKey, RelOneToOne: + if len(col) == 0 { + column = column + "_id" + } + case RelManyToMany, RelReverseMany, RelReverseOne: + column = sf.Name + } + return column +} + +// return field type as type constant from reflect.Value +func getFieldType(val reflect.Value) (ft int, err error) { + switch val.Type() { + case reflect.TypeOf(new(int8)): + ft = TypeBitField + case reflect.TypeOf(new(int16)): + ft = TypeSmallIntegerField + case 
reflect.TypeOf(new(int32)), + reflect.TypeOf(new(int)): + ft = TypeIntegerField + case reflect.TypeOf(new(int64)): + ft = TypeBigIntegerField + case reflect.TypeOf(new(uint8)): + ft = TypePositiveBitField + case reflect.TypeOf(new(uint16)): + ft = TypePositiveSmallIntegerField + case reflect.TypeOf(new(uint32)), + reflect.TypeOf(new(uint)): + ft = TypePositiveIntegerField + case reflect.TypeOf(new(uint64)): + ft = TypePositiveBigIntegerField + case reflect.TypeOf(new(float32)), + reflect.TypeOf(new(float64)): + ft = TypeFloatField + case reflect.TypeOf(new(bool)): + ft = TypeBooleanField + case reflect.TypeOf(new(string)): + ft = TypeCharField + default: + elm := reflect.Indirect(val) + switch elm.Kind() { + case reflect.Int8: + ft = TypeBitField + case reflect.Int16: + ft = TypeSmallIntegerField + case reflect.Int32, reflect.Int: + ft = TypeIntegerField + case reflect.Int64: + ft = TypeBigIntegerField + case reflect.Uint8: + ft = TypePositiveBitField + case reflect.Uint16: + ft = TypePositiveSmallIntegerField + case reflect.Uint32, reflect.Uint: + ft = TypePositiveIntegerField + case reflect.Uint64: + ft = TypePositiveBigIntegerField + case reflect.Float32, reflect.Float64: + ft = TypeFloatField + case reflect.Bool: + ft = TypeBooleanField + case reflect.String: + ft = TypeCharField + default: + if elm.Interface() == nil { + panic(fmt.Errorf("%s is nil pointer, may be miss setting tag", val)) + } + switch elm.Interface().(type) { + case sql.NullInt64: + ft = TypeBigIntegerField + case sql.NullFloat64: + ft = TypeFloatField + case sql.NullBool: + ft = TypeBooleanField + case sql.NullString: + ft = TypeCharField + case time.Time: + ft = TypeDateTimeField + } + } + } + if ft&IsFieldType == 0 { + err = fmt.Errorf("unsupport field type %s, may be miss setting tag", val) + } + return +} + +// parse struct tag string +func parseStructTag(data string, attrs *map[string]bool, tags *map[string]string) { + attr := make(map[string]bool) + tag := make(map[string]string) + for 
_, v := range strings.Split(data, defaultStructTagDelim) { + v = strings.TrimSpace(v) + if supportTag[v] == 1 { + attr[v] = true + } else if i := strings.Index(v, "("); i > 0 && strings.Index(v, ")") == len(v)-1 { + name := v[:i] + if supportTag[name] == 2 { + v = v[i+1 : len(v)-1] + tag[name] = v + } + } + } + *attrs = attr + *tags = tag +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm.go new file mode 100644 index 000000000..d00d6d038 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm.go @@ -0,0 +1,534 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package orm provide ORM for MySQL/PostgreSQL/sqlite +// Simple Usage +// +// package main +// +// import ( +// "fmt" +// "github.com/astaxie/beego/orm" +// _ "github.com/go-sql-driver/mysql" // import your used driver +// ) +// +// // Model Struct +// type User struct { +// Id int `orm:"auto"` +// Name string `orm:"size(100)"` +// } +// +// func init() { +// orm.RegisterDataBase("default", "mysql", "root:root@/my_db?charset=utf8", 30) +// } +// +// func main() { +// o := orm.NewOrm() +// user := User{Name: "slene"} +// // insert +// id, err := o.Insert(&user) +// // update +// user.Name = "astaxie" +// num, err := o.Update(&user) +// // read one +// u := User{Id: user.Id} +// err = o.Read(&u) +// // delete +// num, err = o.Delete(&u) +// } +// +// more docs: http://beego.me/docs/mvc/model/overview.md +package orm + +import ( + "database/sql" + "errors" + "fmt" + "os" + "reflect" + "time" +) + +// DebugQueries define the debug +const ( + DebugQueries = iota +) + +// Define common vars +var ( + Debug = false + DebugLog = NewLog(os.Stderr) + DefaultRowsLimit = 1000 + DefaultRelsDepth = 2 + DefaultTimeLoc = time.Local + ErrTxHasBegan = errors.New(" transaction already begin") + ErrTxDone = errors.New(" transaction not begin") + ErrMultiRows = errors.New(" return multi rows") + ErrNoRows = errors.New(" no row found") + ErrStmtClosed = errors.New(" stmt already closed") + ErrArgs = errors.New(" args error may be empty") + ErrNotImplement = errors.New("have not implement") +) + +// Params stores the Params +type Params map[string]interface{} + +// ParamsList stores paramslist +type ParamsList []interface{} + +type orm struct { + alias *alias + db dbQuerier + isTx bool +} + +var _ Ormer = new(orm) + +// get model info and model reflect value +func (o *orm) getMiInd(md interface{}, needPtr bool) (mi *modelInfo, ind reflect.Value) { + val := reflect.ValueOf(md) + ind = reflect.Indirect(val) + typ := ind.Type() + if needPtr && val.Kind() != reflect.Ptr { + 
panic(fmt.Errorf(" cannot use non-ptr model struct `%s`", getFullName(typ))) + } + name := getFullName(typ) + if mi, ok := modelCache.getByFN(name); ok { + return mi, ind + } + panic(fmt.Errorf(" table: `%s` not found, maybe not RegisterModel", name)) +} + +// get field info from model info by given field name +func (o *orm) getFieldInfo(mi *modelInfo, name string) *fieldInfo { + fi, ok := mi.fields.GetByAny(name) + if !ok { + panic(fmt.Errorf(" cannot find field `%s` for model `%s`", name, mi.fullName)) + } + return fi +} + +// read data to model +func (o *orm) Read(md interface{}, cols ...string) error { + mi, ind := o.getMiInd(md, true) + err := o.alias.DbBaser.Read(o.db, mi, ind, o.alias.TZ, cols) + if err != nil { + return err + } + return nil +} + +// Try to read a row from the database, or insert one if it doesn't exist +func (o *orm) ReadOrCreate(md interface{}, col1 string, cols ...string) (bool, int64, error) { + cols = append([]string{col1}, cols...) + mi, ind := o.getMiInd(md, true) + err := o.alias.DbBaser.Read(o.db, mi, ind, o.alias.TZ, cols) + if err == ErrNoRows { + // Create + id, err := o.Insert(md) + return (err == nil), id, err + } + + return false, ind.Field(mi.fields.pk.fieldIndex).Int(), err +} + +// insert model data to database +func (o *orm) Insert(md interface{}) (int64, error) { + mi, ind := o.getMiInd(md, true) + id, err := o.alias.DbBaser.Insert(o.db, mi, ind, o.alias.TZ) + if err != nil { + return id, err + } + + o.setPk(mi, ind, id) + + return id, nil +} + +// set auto pk field +func (o *orm) setPk(mi *modelInfo, ind reflect.Value, id int64) { + if mi.fields.pk.auto { + if mi.fields.pk.fieldType&IsPostiveIntegerField > 0 { + ind.Field(mi.fields.pk.fieldIndex).SetUint(uint64(id)) + } else { + ind.Field(mi.fields.pk.fieldIndex).SetInt(id) + } + } +} + +// insert some models to database +func (o *orm) InsertMulti(bulk int, mds interface{}) (int64, error) { + var cnt int64 + + sind := reflect.Indirect(reflect.ValueOf(mds)) + + switch 
sind.Kind() { + case reflect.Array, reflect.Slice: + if sind.Len() == 0 { + return cnt, ErrArgs + } + default: + return cnt, ErrArgs + } + + if bulk <= 1 { + for i := 0; i < sind.Len(); i++ { + ind := sind.Index(i) + mi, _ := o.getMiInd(ind.Interface(), false) + id, err := o.alias.DbBaser.Insert(o.db, mi, ind, o.alias.TZ) + if err != nil { + return cnt, err + } + + o.setPk(mi, ind, id) + + cnt++ + } + } else { + mi, _ := o.getMiInd(sind.Index(0).Interface(), false) + return o.alias.DbBaser.InsertMulti(o.db, mi, sind, bulk, o.alias.TZ) + } + return cnt, nil +} + +// update model to database. +// cols set the columns those want to update. +func (o *orm) Update(md interface{}, cols ...string) (int64, error) { + mi, ind := o.getMiInd(md, true) + num, err := o.alias.DbBaser.Update(o.db, mi, ind, o.alias.TZ, cols) + if err != nil { + return num, err + } + return num, nil +} + +// delete model in database +func (o *orm) Delete(md interface{}) (int64, error) { + mi, ind := o.getMiInd(md, true) + num, err := o.alias.DbBaser.Delete(o.db, mi, ind, o.alias.TZ) + if err != nil { + return num, err + } + if num > 0 { + o.setPk(mi, ind, 0) + } + return num, nil +} + +// create a models to models queryer +func (o *orm) QueryM2M(md interface{}, name string) QueryM2Mer { + mi, ind := o.getMiInd(md, true) + fi := o.getFieldInfo(mi, name) + + switch { + case fi.fieldType == RelManyToMany: + case fi.fieldType == RelReverseMany && fi.reverseFieldInfo.mi.isThrough: + default: + panic(fmt.Errorf(" model `%s` . name `%s` is not a m2m field", fi.name, mi.fullName)) + } + + return newQueryM2M(md, o, mi, fi, ind) +} + +// load related models to md model. +// args are limit, offset int and order string. +// +// example: +// orm.LoadRelated(post,"Tags") +// for _,tag := range post.Tags{...} +// +// make sure the relation is defined in model struct tags. 
+func (o *orm) LoadRelated(md interface{}, name string, args ...interface{}) (int64, error) { + _, fi, ind, qseter := o.queryRelated(md, name) + + qs := qseter.(*querySet) + + var relDepth int + var limit, offset int64 + var order string + for i, arg := range args { + switch i { + case 0: + if v, ok := arg.(bool); ok { + if v { + relDepth = DefaultRelsDepth + } + } else if v, ok := arg.(int); ok { + relDepth = v + } + case 1: + limit = ToInt64(arg) + case 2: + offset = ToInt64(arg) + case 3: + order, _ = arg.(string) + } + } + + switch fi.fieldType { + case RelOneToOne, RelForeignKey, RelReverseOne: + limit = 1 + offset = 0 + } + + qs.limit = limit + qs.offset = offset + qs.relDepth = relDepth + + if len(order) > 0 { + qs.orders = []string{order} + } + + find := ind.Field(fi.fieldIndex) + + var nums int64 + var err error + switch fi.fieldType { + case RelOneToOne, RelForeignKey, RelReverseOne: + val := reflect.New(find.Type().Elem()) + container := val.Interface() + err = qs.One(container) + if err == nil { + find.Set(val) + nums = 1 + } + default: + nums, err = qs.All(find.Addr().Interface()) + } + + return nums, err +} + +// return a QuerySeter for related models to md model. +// it can do all, update, delete in QuerySeter. +// example: +// qs := orm.QueryRelated(post,"Tag") +// qs.All(&[]*Tag{}) +// +func (o *orm) QueryRelated(md interface{}, name string) QuerySeter { + // is this api needed ? 
+ _, _, _, qs := o.queryRelated(md, name) + return qs +} + +// get QuerySeter for related models to md model +func (o *orm) queryRelated(md interface{}, name string) (*modelInfo, *fieldInfo, reflect.Value, QuerySeter) { + mi, ind := o.getMiInd(md, true) + fi := o.getFieldInfo(mi, name) + + _, _, exist := getExistPk(mi, ind) + if exist == false { + panic(ErrMissPK) + } + + var qs *querySet + + switch fi.fieldType { + case RelOneToOne, RelForeignKey, RelManyToMany: + if !fi.inModel { + break + } + qs = o.getRelQs(md, mi, fi) + case RelReverseOne, RelReverseMany: + if !fi.inModel { + break + } + qs = o.getReverseQs(md, mi, fi) + } + + if qs == nil { + panic(fmt.Errorf(" name `%s` for model `%s` is not an available rel/reverse field", md, name)) + } + + return mi, fi, ind, qs +} + +// get reverse relation QuerySeter +func (o *orm) getReverseQs(md interface{}, mi *modelInfo, fi *fieldInfo) *querySet { + switch fi.fieldType { + case RelReverseOne, RelReverseMany: + default: + panic(fmt.Errorf(" name `%s` for model `%s` is not an available reverse field", fi.name, mi.fullName)) + } + + var q *querySet + + if fi.fieldType == RelReverseMany && fi.reverseFieldInfo.mi.isThrough { + q = newQuerySet(o, fi.relModelInfo).(*querySet) + q.cond = NewCondition().And(fi.reverseFieldInfoM2M.column+ExprSep+fi.reverseFieldInfo.column, md) + } else { + q = newQuerySet(o, fi.reverseFieldInfo.mi).(*querySet) + q.cond = NewCondition().And(fi.reverseFieldInfo.column, md) + } + + return q +} + +// get relation QuerySeter +func (o *orm) getRelQs(md interface{}, mi *modelInfo, fi *fieldInfo) *querySet { + switch fi.fieldType { + case RelOneToOne, RelForeignKey, RelManyToMany: + default: + panic(fmt.Errorf(" name `%s` for model `%s` is not an available rel field", fi.name, mi.fullName)) + } + + q := newQuerySet(o, fi.relModelInfo).(*querySet) + q.cond = NewCondition() + + if fi.fieldType == RelManyToMany { + q.cond = q.cond.And(fi.reverseFieldInfoM2M.column+ExprSep+fi.reverseFieldInfo.column, md) 
+ } else { + q.cond = q.cond.And(fi.reverseFieldInfo.column, md) + } + + return q +} + +// return a QuerySeter for table operations. +// table name can be string or struct. +// e.g. QueryTable("user"), QueryTable(&user{}) or QueryTable((*User)(nil)), +func (o *orm) QueryTable(ptrStructOrTableName interface{}) (qs QuerySeter) { + name := "" + if table, ok := ptrStructOrTableName.(string); ok { + name = snakeString(table) + if mi, ok := modelCache.get(name); ok { + qs = newQuerySet(o, mi) + } + } else { + name = getFullName(indirectType(reflect.TypeOf(ptrStructOrTableName))) + if mi, ok := modelCache.getByFN(name); ok { + qs = newQuerySet(o, mi) + } + } + if qs == nil { + panic(fmt.Errorf(" table name: `%s` not exists", name)) + } + return +} + +// switch to another registered database driver by given name. +func (o *orm) Using(name string) error { + if o.isTx { + panic(fmt.Errorf(" transaction has been start, cannot change db")) + } + if al, ok := dataBaseCache.get(name); ok { + o.alias = al + if Debug { + o.db = newDbQueryLog(al, al.DB) + } else { + o.db = al.DB + } + } else { + return fmt.Errorf(" unknown db alias name `%s`", name) + } + return nil +} + +// begin transaction +func (o *orm) Begin() error { + if o.isTx { + return ErrTxHasBegan + } + var tx *sql.Tx + tx, err := o.db.(txer).Begin() + if err != nil { + return err + } + o.isTx = true + if Debug { + o.db.(*dbQueryLog).SetDB(tx) + } else { + o.db = tx + } + return nil +} + +// commit transaction +func (o *orm) Commit() error { + if o.isTx == false { + return ErrTxDone + } + err := o.db.(txEnder).Commit() + if err == nil { + o.isTx = false + o.Using(o.alias.Name) + } else if err == sql.ErrTxDone { + return ErrTxDone + } + return err +} + +// rollback transaction +func (o *orm) Rollback() error { + if o.isTx == false { + return ErrTxDone + } + err := o.db.(txEnder).Rollback() + if err == nil { + o.isTx = false + o.Using(o.alias.Name) + } else if err == sql.ErrTxDone { + return ErrTxDone + } + return err +} 
+ +// return a raw query seter for raw sql string. +func (o *orm) Raw(query string, args ...interface{}) RawSeter { + return newRawSet(o, query, args) +} + +// return current using database Driver +func (o *orm) Driver() Driver { + return driver(o.alias.Name) +} + +// NewOrm create new orm +func NewOrm() Ormer { + BootStrap() // execute only once + + o := new(orm) + err := o.Using("default") + if err != nil { + panic(err) + } + return o +} + +// NewOrmWithDB create a new ormer object with specify *sql.DB for query +func NewOrmWithDB(driverName, aliasName string, db *sql.DB) (Ormer, error) { + var al *alias + + if dr, ok := drivers[driverName]; ok { + al = new(alias) + al.DbBaser = dbBasers[dr] + al.Driver = dr + } else { + return nil, fmt.Errorf("driver name `%s` have not registered", driverName) + } + + al.Name = aliasName + al.DriverName = driverName + + o := new(orm) + o.alias = al + + if Debug { + o.db = newDbQueryLog(o.alias, db) + } else { + o.db = db + } + + return o, nil +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_conds.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_conds.go new file mode 100644 index 000000000..e56d6fbbd --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_conds.go @@ -0,0 +1,116 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package orm + +import ( + "fmt" + "strings" +) + +// ExprSep define the expression separation +const ( + ExprSep = "__" +) + +type condValue struct { + exprs []string + args []interface{} + cond *Condition + isOr bool + isNot bool + isCond bool +} + +// Condition struct. +// work for WHERE conditions. +type Condition struct { + params []condValue +} + +// NewCondition return new condition struct +func NewCondition() *Condition { + c := &Condition{} + return c +} + +// And add expression to condition +func (c Condition) And(expr string, args ...interface{}) *Condition { + if expr == "" || len(args) == 0 { + panic(fmt.Errorf(" args cannot empty")) + } + c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), args: args}) + return &c +} + +// AndNot add NOT expression to condition +func (c Condition) AndNot(expr string, args ...interface{}) *Condition { + if expr == "" || len(args) == 0 { + panic(fmt.Errorf(" args cannot empty")) + } + c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), args: args, isNot: true}) + return &c +} + +// AndCond combine a condition to current condition +func (c *Condition) AndCond(cond *Condition) *Condition { + c = c.clone() + if c == cond { + panic(fmt.Errorf(" cannot use self as sub cond")) + } + if cond != nil { + c.params = append(c.params, condValue{cond: cond, isCond: true}) + } + return c +} + +// Or add OR expression to condition +func (c Condition) Or(expr string, args ...interface{}) *Condition { + if expr == "" || len(args) == 0 { + panic(fmt.Errorf(" args cannot empty")) + } + c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), args: args, isOr: true}) + return &c +} + +// OrNot add OR NOT expression to condition +func (c Condition) OrNot(expr string, args ...interface{}) *Condition { + if expr == "" || len(args) == 0 { + panic(fmt.Errorf(" args cannot empty")) + } + c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), args: args, isNot: 
true, isOr: true}) + return &c +} + +// OrCond combine a OR condition to current condition +func (c *Condition) OrCond(cond *Condition) *Condition { + c = c.clone() + if c == cond { + panic(fmt.Errorf(" cannot use self as sub cond")) + } + if cond != nil { + c.params = append(c.params, condValue{cond: cond, isCond: true, isOr: true}) + } + return c +} + +// IsEmpty check the condition arguments are empty or not. +func (c *Condition) IsEmpty() bool { + return len(c.params) == 0 +} + +// clone clone a condition +func (c Condition) clone() *Condition { + return &c +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_log.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_log.go new file mode 100644 index 000000000..712eb219f --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_log.go @@ -0,0 +1,176 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package orm + +import ( + "database/sql" + "fmt" + "io" + "log" + "strings" + "time" +) + +// Log implement the log.Logger +type Log struct { + *log.Logger +} + +// NewLog set io.Writer to create a Logger. 
+func NewLog(out io.Writer) *Log { + d := new(Log) + d.Logger = log.New(out, "[ORM]", 1e9) + return d +} + +func debugLogQueies(alias *alias, operaton, query string, t time.Time, err error, args ...interface{}) { + sub := time.Now().Sub(t) / 1e5 + elsp := float64(int(sub)) / 10.0 + flag := " OK" + if err != nil { + flag = "FAIL" + } + con := fmt.Sprintf(" - %s - [Queries/%s] - [%s / %11s / %7.1fms] - [%s]", t.Format(formatDateTime), alias.Name, flag, operaton, elsp, query) + cons := make([]string, 0, len(args)) + for _, arg := range args { + cons = append(cons, fmt.Sprintf("%v", arg)) + } + if len(cons) > 0 { + con += fmt.Sprintf(" - `%s`", strings.Join(cons, "`, `")) + } + if err != nil { + con += " - " + err.Error() + } + DebugLog.Println(con) +} + +// statement query logger struct. +// if dev mode, use stmtQueryLog, or use stmtQuerier. +type stmtQueryLog struct { + alias *alias + query string + stmt stmtQuerier +} + +var _ stmtQuerier = new(stmtQueryLog) + +func (d *stmtQueryLog) Close() error { + a := time.Now() + err := d.stmt.Close() + debugLogQueies(d.alias, "st.Close", d.query, a, err) + return err +} + +func (d *stmtQueryLog) Exec(args ...interface{}) (sql.Result, error) { + a := time.Now() + res, err := d.stmt.Exec(args...) + debugLogQueies(d.alias, "st.Exec", d.query, a, err, args...) + return res, err +} + +func (d *stmtQueryLog) Query(args ...interface{}) (*sql.Rows, error) { + a := time.Now() + res, err := d.stmt.Query(args...) + debugLogQueies(d.alias, "st.Query", d.query, a, err, args...) + return res, err +} + +func (d *stmtQueryLog) QueryRow(args ...interface{}) *sql.Row { + a := time.Now() + res := d.stmt.QueryRow(args...) + debugLogQueies(d.alias, "st.QueryRow", d.query, a, nil, args...) + return res +} + +func newStmtQueryLog(alias *alias, stmt stmtQuerier, query string) stmtQuerier { + d := new(stmtQueryLog) + d.stmt = stmt + d.alias = alias + d.query = query + return d +} + +// database query logger struct. 
+// if dev mode, use dbQueryLog, or use dbQuerier. +type dbQueryLog struct { + alias *alias + db dbQuerier + tx txer + txe txEnder +} + +var _ dbQuerier = new(dbQueryLog) +var _ txer = new(dbQueryLog) +var _ txEnder = new(dbQueryLog) + +func (d *dbQueryLog) Prepare(query string) (*sql.Stmt, error) { + a := time.Now() + stmt, err := d.db.Prepare(query) + debugLogQueies(d.alias, "db.Prepare", query, a, err) + return stmt, err +} + +func (d *dbQueryLog) Exec(query string, args ...interface{}) (sql.Result, error) { + a := time.Now() + res, err := d.db.Exec(query, args...) + debugLogQueies(d.alias, "db.Exec", query, a, err, args...) + return res, err +} + +func (d *dbQueryLog) Query(query string, args ...interface{}) (*sql.Rows, error) { + a := time.Now() + res, err := d.db.Query(query, args...) + debugLogQueies(d.alias, "db.Query", query, a, err, args...) + return res, err +} + +func (d *dbQueryLog) QueryRow(query string, args ...interface{}) *sql.Row { + a := time.Now() + res := d.db.QueryRow(query, args...) + debugLogQueies(d.alias, "db.QueryRow", query, a, nil, args...) 
+ return res +} + +func (d *dbQueryLog) Begin() (*sql.Tx, error) { + a := time.Now() + tx, err := d.db.(txer).Begin() + debugLogQueies(d.alias, "db.Begin", "START TRANSACTION", a, err) + return tx, err +} + +func (d *dbQueryLog) Commit() error { + a := time.Now() + err := d.db.(txEnder).Commit() + debugLogQueies(d.alias, "tx.Commit", "COMMIT", a, err) + return err +} + +func (d *dbQueryLog) Rollback() error { + a := time.Now() + err := d.db.(txEnder).Rollback() + debugLogQueies(d.alias, "tx.Rollback", "ROLLBACK", a, err) + return err +} + +func (d *dbQueryLog) SetDB(db dbQuerier) { + d.db = db +} + +func newDbQueryLog(alias *alias, db dbQuerier) dbQuerier { + d := new(dbQueryLog) + d.alias = alias + d.db = db + return d +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_object.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_object.go new file mode 100644 index 000000000..df5a6600e --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_object.go @@ -0,0 +1,87 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package orm + +import ( + "fmt" + "reflect" +) + +// an insert queryer struct +type insertSet struct { + mi *modelInfo + orm *orm + stmt stmtQuerier + closed bool +} + +var _ Inserter = new(insertSet) + +// insert model ignore it's registered or not. 
+func (o *insertSet) Insert(md interface{}) (int64, error) { + if o.closed { + return 0, ErrStmtClosed + } + val := reflect.ValueOf(md) + ind := reflect.Indirect(val) + typ := ind.Type() + name := getFullName(typ) + if val.Kind() != reflect.Ptr { + panic(fmt.Errorf(" cannot use non-ptr model struct `%s`", name)) + } + if name != o.mi.fullName { + panic(fmt.Errorf(" need model `%s` but found `%s`", o.mi.fullName, name)) + } + id, err := o.orm.alias.DbBaser.InsertStmt(o.stmt, o.mi, ind, o.orm.alias.TZ) + if err != nil { + return id, err + } + if id > 0 { + if o.mi.fields.pk.auto { + if o.mi.fields.pk.fieldType&IsPostiveIntegerField > 0 { + ind.Field(o.mi.fields.pk.fieldIndex).SetUint(uint64(id)) + } else { + ind.Field(o.mi.fields.pk.fieldIndex).SetInt(id) + } + } + } + return id, nil +} + +// close insert queryer statement +func (o *insertSet) Close() error { + if o.closed { + return ErrStmtClosed + } + o.closed = true + return o.stmt.Close() +} + +// create new insert queryer. +func newInsertSet(orm *orm, mi *modelInfo) (Inserter, error) { + bi := new(insertSet) + bi.orm = orm + bi.mi = mi + st, query, err := orm.alias.DbBaser.PrepareInsert(orm.db, mi) + if err != nil { + return nil, err + } + if Debug { + bi.stmt = newStmtQueryLog(orm.alias, st, query) + } else { + bi.stmt = st + } + return bi, nil +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_querym2m.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_querym2m.go new file mode 100644 index 000000000..b220bda6e --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_querym2m.go @@ -0,0 +1,144 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package orm + +import "reflect" + +// model to model struct +type queryM2M struct { + md interface{} + mi *modelInfo + fi *fieldInfo + qs *querySet + ind reflect.Value +} + +// add models to origin models when creating queryM2M. +// example: +// m2m := orm.QueryM2M(post,"Tag") +// m2m.Add(&Tag1{},&Tag2{}) +// for _,tag := range post.Tags{} +// +// make sure the relation is defined in post model struct tag. +func (o *queryM2M) Add(mds ...interface{}) (int64, error) { + fi := o.fi + mi := fi.relThroughModelInfo + mfi := fi.reverseFieldInfo + rfi := fi.reverseFieldInfoTwo + + orm := o.qs.orm + dbase := orm.alias.DbBaser + + var models []interface{} + var otherValues []interface{} + var otherNames []string + + for _, colname := range mi.fields.dbcols { + if colname != mfi.column && colname != rfi.column && colname != fi.mi.fields.pk.column && + mi.fields.columns[colname] != mi.fields.pk { + otherNames = append(otherNames, colname) + } + } + for i, md := range mds { + if reflect.Indirect(reflect.ValueOf(md)).Kind() != reflect.Struct && i > 0 { + otherValues = append(otherValues, md) + mds = append(mds[:i], mds[i+1:]...) 
+ } + } + for _, md := range mds { + val := reflect.ValueOf(md) + if val.Kind() == reflect.Slice || val.Kind() == reflect.Array { + for i := 0; i < val.Len(); i++ { + v := val.Index(i) + if v.CanInterface() { + models = append(models, v.Interface()) + } + } + } else { + models = append(models, md) + } + } + + _, v1, exist := getExistPk(o.mi, o.ind) + if exist == false { + panic(ErrMissPK) + } + + names := []string{mfi.column, rfi.column} + + values := make([]interface{}, 0, len(models)*2) + for _, md := range models { + + ind := reflect.Indirect(reflect.ValueOf(md)) + var v2 interface{} + if ind.Kind() != reflect.Struct { + v2 = ind.Interface() + } else { + _, v2, exist = getExistPk(fi.relModelInfo, ind) + if exist == false { + panic(ErrMissPK) + } + } + values = append(values, v1, v2) + + } + names = append(names, otherNames...) + values = append(values, otherValues...) + return dbase.InsertValue(orm.db, mi, true, names, values) +} + +// remove models following the origin model relationship +func (o *queryM2M) Remove(mds ...interface{}) (int64, error) { + fi := o.fi + qs := o.qs.Filter(fi.reverseFieldInfo.name, o.md) + + nums, err := qs.Filter(fi.reverseFieldInfoTwo.name+ExprSep+"in", mds).Delete() + if err != nil { + return nums, err + } + return nums, nil +} + +// check model is existed in relationship of origin model +func (o *queryM2M) Exist(md interface{}) bool { + fi := o.fi + return o.qs.Filter(fi.reverseFieldInfo.name, o.md). + Filter(fi.reverseFieldInfoTwo.name, md).Exist() +} + +// clean all models in related of origin model +func (o *queryM2M) Clear() (int64, error) { + fi := o.fi + return o.qs.Filter(fi.reverseFieldInfo.name, o.md).Delete() +} + +// count all related models of origin model +func (o *queryM2M) Count() (int64, error) { + fi := o.fi + return o.qs.Filter(fi.reverseFieldInfo.name, o.md).Count() +} + +var _ QueryM2Mer = new(queryM2M) + +// create new M2M queryer. 
+func newQueryM2M(md interface{}, o *orm, mi *modelInfo, fi *fieldInfo, ind reflect.Value) QueryM2Mer { + qm2m := new(queryM2M) + qm2m.md = md + qm2m.mi = mi + qm2m.fi = fi + qm2m.ind = ind + qm2m.qs = newQuerySet(o, fi.relThroughModelInfo).(*querySet) + return qm2m +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_queryset.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_queryset.go new file mode 100644 index 000000000..802a1fe08 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_queryset.go @@ -0,0 +1,261 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package orm + +import ( + "fmt" +) + +type colValue struct { + value int64 + opt operator +} + +type operator int + +// define Col operations +const ( + ColAdd operator = iota + ColMinus + ColMultiply + ColExcept +) + +// ColValue do the field raw changes. e.g Nums = Nums + 10. 
usage: +// Params{ +// "Nums": ColValue(Col_Add, 10), +// } +func ColValue(opt operator, value interface{}) interface{} { + switch opt { + case ColAdd, ColMinus, ColMultiply, ColExcept: + default: + panic(fmt.Errorf("orm.ColValue wrong operator")) + } + v, err := StrTo(ToStr(value)).Int64() + if err != nil { + panic(fmt.Errorf("orm.ColValue doesn't support non string/numeric type, %s", err)) + } + var val colValue + val.value = v + val.opt = opt + return val +} + +// real query struct +type querySet struct { + mi *modelInfo + cond *Condition + related []string + relDepth int + limit int64 + offset int64 + groups []string + orders []string + distinct bool + orm *orm +} + +var _ QuerySeter = new(querySet) + +// add condition expression to QuerySeter. +func (o querySet) Filter(expr string, args ...interface{}) QuerySeter { + if o.cond == nil { + o.cond = NewCondition() + } + o.cond = o.cond.And(expr, args...) + return &o +} + +// add NOT condition to querySeter. +func (o querySet) Exclude(expr string, args ...interface{}) QuerySeter { + if o.cond == nil { + o.cond = NewCondition() + } + o.cond = o.cond.AndNot(expr, args...) + return &o +} + +// set offset number +func (o *querySet) setOffset(num interface{}) { + o.offset = ToInt64(num) +} + +// add LIMIT value. +// args[0] means offset, e.g. LIMIT num,offset. +func (o querySet) Limit(limit interface{}, args ...interface{}) QuerySeter { + o.limit = ToInt64(limit) + if len(args) > 0 { + o.setOffset(args[0]) + } + return &o +} + +// add OFFSET value +func (o querySet) Offset(offset interface{}) QuerySeter { + o.setOffset(offset) + return &o +} + +// add GROUP expression +func (o querySet) GroupBy(exprs ...string) QuerySeter { + o.groups = exprs + return &o +} + +// add ORDER expression. +// "column" means ASC, "-column" means DESC. 
+func (o querySet) OrderBy(exprs ...string) QuerySeter { + o.orders = exprs + return &o +} + +// add DISTINCT to SELECT +func (o querySet) Distinct() QuerySeter { + o.distinct = true + return &o +} + +// set relation model to query together. +// it will query relation models and assign to parent model. +func (o querySet) RelatedSel(params ...interface{}) QuerySeter { + if len(params) == 0 { + o.relDepth = DefaultRelsDepth + } else { + for _, p := range params { + switch val := p.(type) { + case string: + o.related = append(o.related, val) + case int: + o.relDepth = val + default: + panic(fmt.Errorf(" wrong param kind: %v", val)) + } + } + } + return &o +} + +// set condition to QuerySeter. +func (o querySet) SetCond(cond *Condition) QuerySeter { + o.cond = cond + return &o +} + +// return QuerySeter execution result number +func (o *querySet) Count() (int64, error) { + return o.orm.alias.DbBaser.Count(o.orm.db, o, o.mi, o.cond, o.orm.alias.TZ) +} + +// check result empty or not after QuerySeter executed +func (o *querySet) Exist() bool { + cnt, _ := o.orm.alias.DbBaser.Count(o.orm.db, o, o.mi, o.cond, o.orm.alias.TZ) + return cnt > 0 +} + +// execute update with parameters +func (o *querySet) Update(values Params) (int64, error) { + return o.orm.alias.DbBaser.UpdateBatch(o.orm.db, o, o.mi, o.cond, values, o.orm.alias.TZ) +} + +// execute delete +func (o *querySet) Delete() (int64, error) { + return o.orm.alias.DbBaser.DeleteBatch(o.orm.db, o, o.mi, o.cond, o.orm.alias.TZ) +} + +// return a insert queryer. +// it can be used in times. +// example: +// i,err := sq.PrepareInsert() +// i.Add(&user1{},&user2{}) +func (o *querySet) PrepareInsert() (Inserter, error) { + return newInsertSet(o.orm, o.mi) +} + +// query all data and map to containers. +// cols means the columns when querying. 
+func (o *querySet) All(container interface{}, cols ...string) (int64, error) { + return o.orm.alias.DbBaser.ReadBatch(o.orm.db, o, o.mi, o.cond, container, o.orm.alias.TZ, cols) +} + +// query one row data and map to containers. +// cols means the columns when querying. +func (o *querySet) One(container interface{}, cols ...string) error { + num, err := o.orm.alias.DbBaser.ReadBatch(o.orm.db, o, o.mi, o.cond, container, o.orm.alias.TZ, cols) + if err != nil { + return err + } + if num > 1 { + return ErrMultiRows + } + if num == 0 { + return ErrNoRows + } + return nil +} + +// query all data and map to []map[string]interface. +// expres means condition expression. +// it converts data to []map[column]value. +func (o *querySet) Values(results *[]Params, exprs ...string) (int64, error) { + return o.orm.alias.DbBaser.ReadValues(o.orm.db, o, o.mi, o.cond, exprs, results, o.orm.alias.TZ) +} + +// query all data and map to [][]interface +// it converts data to [][column_index]value +func (o *querySet) ValuesList(results *[]ParamsList, exprs ...string) (int64, error) { + return o.orm.alias.DbBaser.ReadValues(o.orm.db, o, o.mi, o.cond, exprs, results, o.orm.alias.TZ) +} + +// query all data and map to []interface. +// it's designed for one row record set, auto change to []value, not [][column]value. +func (o *querySet) ValuesFlat(result *ParamsList, expr string) (int64, error) { + return o.orm.alias.DbBaser.ReadValues(o.orm.db, o, o.mi, o.cond, []string{expr}, result, o.orm.alias.TZ) +} + +// query all rows into map[string]interface with specify key and value column name. +// keyCol = "name", valueCol = "value" +// table data +// name | value +// total | 100 +// found | 200 +// to map[string]interface{}{ +// "total": 100, +// "found": 200, +// } +func (o *querySet) RowsToMap(result *Params, keyCol, valueCol string) (int64, error) { + panic(ErrNotImplement) +} + +// query all rows into struct with specify key and value column name. 
+// keyCol = "name", valueCol = "value" +// table data +// name | value +// total | 100 +// found | 200 +// to struct { +// Total int +// Found int +// } +func (o *querySet) RowsToStruct(ptrStruct interface{}, keyCol, valueCol string) (int64, error) { + panic(ErrNotImplement) +} + +// create new QuerySeter. +func newQuerySet(orm *orm, mi *modelInfo) QuerySeter { + o := new(querySet) + o.mi = mi + o.orm = orm + return o +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_raw.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_raw.go new file mode 100644 index 000000000..cbb18064f --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/orm_raw.go @@ -0,0 +1,827 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package orm + +import ( + "database/sql" + "fmt" + "reflect" + "time" +) + +// raw sql string prepared statement +type rawPrepare struct { + rs *rawSet + stmt stmtQuerier + closed bool +} + +func (o *rawPrepare) Exec(args ...interface{}) (sql.Result, error) { + if o.closed { + return nil, ErrStmtClosed + } + return o.stmt.Exec(args...) 
+} + +func (o *rawPrepare) Close() error { + o.closed = true + return o.stmt.Close() +} + +func newRawPreparer(rs *rawSet) (RawPreparer, error) { + o := new(rawPrepare) + o.rs = rs + + query := rs.query + rs.orm.alias.DbBaser.ReplaceMarks(&query) + + st, err := rs.orm.db.Prepare(query) + if err != nil { + return nil, err + } + if Debug { + o.stmt = newStmtQueryLog(rs.orm.alias, st, query) + } else { + o.stmt = st + } + return o, nil +} + +// raw query seter +type rawSet struct { + query string + args []interface{} + orm *orm +} + +var _ RawSeter = new(rawSet) + +// set args for every query +func (o rawSet) SetArgs(args ...interface{}) RawSeter { + o.args = args + return &o +} + +// execute raw sql and return sql.Result +func (o *rawSet) Exec() (sql.Result, error) { + query := o.query + o.orm.alias.DbBaser.ReplaceMarks(&query) + + args := getFlatParams(nil, o.args, o.orm.alias.TZ) + return o.orm.db.Exec(query, args...) +} + +// set field value to row container +func (o *rawSet) setFieldValue(ind reflect.Value, value interface{}) { + switch ind.Kind() { + case reflect.Bool: + if value == nil { + ind.SetBool(false) + } else if v, ok := value.(bool); ok { + ind.SetBool(v) + } else { + v, _ := StrTo(ToStr(value)).Bool() + ind.SetBool(v) + } + + case reflect.String: + if value == nil { + ind.SetString("") + } else { + ind.SetString(ToStr(value)) + } + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if value == nil { + ind.SetInt(0) + } else { + val := reflect.ValueOf(value) + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ind.SetInt(val.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ind.SetInt(int64(val.Uint())) + default: + v, _ := StrTo(ToStr(value)).Int64() + ind.SetInt(v) + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + if value == nil { + ind.SetUint(0) + } else { + val := 
// NOTE(review): this chunk opens mid-way through (*rawSet).setFieldValue —
// the assignment target ("val :=") of the first expression lives in the
// previous chunk. Code tokens below are unchanged; only comments were added.
reflect.ValueOf(value)
			switch val.Kind() {
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				ind.SetUint(uint64(val.Int()))
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				ind.SetUint(val.Uint())
			default:
				// fall back to string round-trip conversion; parse errors
				// are deliberately ignored and leave the zero value.
				v, _ := StrTo(ToStr(value)).Uint64()
				ind.SetUint(v)
			}
		}
	case reflect.Float64, reflect.Float32:
		if value == nil {
			ind.SetFloat(0)
		} else {
			val := reflect.ValueOf(value)
			switch val.Kind() {
			case reflect.Float64:
				ind.SetFloat(val.Float())
			default:
				// non-float driver values go through the string converter.
				v, _ := StrTo(ToStr(value)).Float64()
				ind.SetFloat(v)
			}
		}

	case reflect.Struct:
		// Only time.Time struct fields are handled; other structs are zeroed
		// when the column is NULL and otherwise left untouched.
		if value == nil {
			ind.Set(reflect.Zero(ind.Type()))

		} else if _, ok := ind.Interface().(time.Time); ok {
			var str string
			switch d := value.(type) {
			case time.Time:
				o.orm.alias.DbBaser.TimeFromDB(&d, o.orm.alias.TZ)
				ind.Set(reflect.ValueOf(d))
			case []byte:
				str = string(d)
			case string:
				str = d
			}
			if str != "" {
				if len(str) >= 19 {
					// "2006-01-02 15:04:05" — datetime column.
					str = str[:19]
					t, err := time.ParseInLocation(formatDateTime, str, o.orm.alias.TZ)
					if err == nil {
						t = t.In(DefaultTimeLoc)
						ind.Set(reflect.ValueOf(t))
					}
				} else if len(str) >= 10 {
					// "2006-01-02" — date-only column.
					str = str[:10]
					t, err := time.ParseInLocation(formatDate, str, DefaultTimeLoc)
					if err == nil {
						ind.Set(reflect.ValueOf(t))
					}
				}
				// parse failures silently leave the field unset.
			}
		}
	}
}

// set field value in loop for slice container.
// refs holds the values scanned from the current row; sInds/eTyps describe the
// caller-supplied containers. On init the per-container accumulators are
// (re)created; results are written back into *nIndsPtr's elements.
func (o *rawSet) loopSetRefs(refs []interface{}, sInds []reflect.Value, nIndsPtr *[]reflect.Value, eTyps []reflect.Type, init bool) {
	nInds := *nIndsPtr

	cur := 0
	for i := 0; i < len(sInds); i++ {
		sInd := sInds[i]
		eTyp := eTyps[i]

		typ := eTyp
		isPtr := false
		// unwrap up to two levels of pointer (e.g. []*int, []**int).
		if typ.Kind() == reflect.Ptr {
			isPtr = true
			typ = typ.Elem()
		}
		if typ.Kind() == reflect.Ptr {
			isPtr = true
			typ = typ.Elem()
		}

		var nInd reflect.Value
		if init {
			nInd = reflect.New(sInd.Type()).Elem()
		} else {
			nInd = nInds[i]
		}

		val := reflect.New(typ)
		ind := val.Elem()

		tpName := ind.Type().String()

		if ind.Kind() == reflect.Struct {
			// NOTE(review): only time.Time structs consume a scanned ref here;
			// other struct kinds neither set a value nor advance cur — presumably
			// they are rejected earlier by the callers' structMode handling.
			if tpName == "time.Time" {
				value := reflect.ValueOf(refs[cur]).Elem().Interface()
				if isPtr && value == nil {
					// NULL into a pointer element: keep a nil pointer value.
					val = reflect.New(val.Type()).Elem()
				} else {
					o.setFieldValue(ind, value)
				}
				cur++
			}

		} else {
			value := reflect.ValueOf(refs[cur]).Elem().Interface()
			if isPtr && value == nil {
				val = reflect.New(val.Type()).Elem()
			} else {
				o.setFieldValue(ind, value)
			}
			cur++
		}

		if nInd.Kind() == reflect.Slice {
			// QueryRows path: append this row's value to the slice accumulator.
			if isPtr {
				nInd = reflect.Append(nInd, val)
			} else {
				nInd = reflect.Append(nInd, ind)
			}
		} else {
			// QueryRow path: scalar container, overwrite in place.
			if isPtr {
				nInd.Set(val)
			} else {
				nInd.Set(ind)
			}
		}

		nInds[i] = nInd
	}
}

// query data and map to container.
// Either one struct pointer (structMode) or any number of scalar pointers may
// be passed. Returns ErrNoRows when the query yields no row.
func (o *rawSet) QueryRow(containers ...interface{}) error {
	var (
		refs  = make([]interface{}, 0, len(containers))
		sInds []reflect.Value
		eTyps []reflect.Type
		sMi   *modelInfo
	)
	structMode := false
	for _, container := range containers {
		val := reflect.ValueOf(container)
		ind := reflect.Indirect(val)

		if val.Kind() != reflect.Ptr {
			panic(fmt.Errorf(" all args must be use ptr"))
		}

		etyp := ind.Type()
		typ := etyp
		if typ.Kind() == reflect.Ptr {
			typ = typ.Elem()
		}

		sInds = append(sInds, ind)
		eTyps = append(eTyps, etyp)

		if typ.Kind() == reflect.Struct && typ.String() != "time.Time" {
			if len(containers) > 1 {
				panic(fmt.Errorf(" now support one struct only. see #384"))
			}

			structMode = true
			fn := getFullName(typ)
			// registered models map columns via modelInfo; unregistered
			// structs fall back to tag/snake-case matching below.
			if mi, ok := modelCache.getByFN(fn); ok {
				sMi = mi
			}
		} else {
			var ref interface{}
			refs = append(refs, &ref)
		}
	}

	query := o.query
	o.orm.alias.DbBaser.ReplaceMarks(&query)

	args := getFlatParams(nil, o.args, o.orm.alias.TZ)
	rows, err := o.orm.db.Query(query, args...)
	if err != nil {
		if err == sql.ErrNoRows {
			return ErrNoRows
		}
		return err
	}

	defer rows.Close()

	if rows.Next() {
		if structMode {
			columns, err := rows.Columns()
			if err != nil {
				return err
			}

			columnsMp := make(map[string]interface{}, len(columns))

			refs = make([]interface{}, 0, len(columns))
			for _, col := range columns {
				var ref interface{}
				columnsMp[col] = &ref
				refs = append(refs, &ref)
			}

			if err := rows.Scan(refs...); err != nil {
				return err
			}

			ind := sInds[0]

			if ind.Kind() == reflect.Ptr {
				// allocate the destination struct for a nil pointer container.
				if ind.IsNil() || !ind.IsValid() {
					ind.Set(reflect.New(eTyps[0].Elem()))
				}
				ind = ind.Elem()
			}

			if sMi != nil {
				for _, col := range columns {
					if fi := sMi.fields.GetByColumn(col); fi != nil {
						value := reflect.ValueOf(columnsMp[col]).Elem().Interface()
						o.setFieldValue(ind.FieldByIndex([]int{fi.fieldIndex}), value)
					}
				}
			} else {
				for i := 0; i < ind.NumField(); i++ {
					f := ind.Field(i)
					fe := ind.Type().Field(i)

					var attrs map[string]bool
					var tags map[string]string
					parseStructTag(fe.Tag.Get("orm"), &attrs, &tags)
					var col string
					// column name comes from the orm tag, else snake_case of
					// the field name.
					if col = tags["column"]; len(col) == 0 {
						col = snakeString(fe.Name)
					}
					if v, ok := columnsMp[col]; ok {
						value := reflect.ValueOf(v).Elem().Interface()
						o.setFieldValue(f, value)
					}
				}
			}

		} else {
			if err := rows.Scan(refs...); err != nil {
				return err
			}

			nInds := make([]reflect.Value, len(sInds))
			o.loopSetRefs(refs, sInds, &nInds, eTyps, true)
			for i, sInd := range sInds {
				nInd := nInds[i]
				sInd.Set(nInd)
			}
		}

	} else {
		return ErrNoRows
	}

	return nil
}

// query data rows and map to container.
// Containers must be pointers to slices; in structMode exactly one struct
// slice is supported. Returns the number of rows read.
func (o *rawSet) QueryRows(containers ...interface{}) (int64, error) {
	var (
		refs  = make([]interface{}, 0, len(containers))
		sInds []reflect.Value
		eTyps []reflect.Type
		sMi   *modelInfo
	)
	structMode := false
	for _, container := range containers {
		val := reflect.ValueOf(container)
		sInd := reflect.Indirect(val)
		if val.Kind() != reflect.Ptr || sInd.Kind() != reflect.Slice {
			panic(fmt.Errorf(" all args must be use ptr slice"))
		}

		etyp := sInd.Type().Elem()
		typ := etyp
		if typ.Kind() == reflect.Ptr {
			typ = typ.Elem()
		}

		sInds = append(sInds, sInd)
		eTyps = append(eTyps, etyp)

		if typ.Kind() == reflect.Struct && typ.String() != "time.Time" {
			if len(containers) > 1 {
				panic(fmt.Errorf(" now support one struct only. see #384"))
			}

			structMode = true
			fn := getFullName(typ)
			if mi, ok := modelCache.getByFN(fn); ok {
				sMi = mi
			}
		} else {
			var ref interface{}
			refs = append(refs, &ref)
		}
	}

	query := o.query
	o.orm.alias.DbBaser.ReplaceMarks(&query)

	args := getFlatParams(nil, o.args, o.orm.alias.TZ)
	rows, err := o.orm.db.Query(query, args...)
	if err != nil {
		return 0, err
	}

	defer rows.Close()

	var cnt int64
	nInds := make([]reflect.Value, len(sInds))
	sInd := sInds[0]

	for rows.Next() {

		if structMode {
			columns, err := rows.Columns()
			if err != nil {
				return 0, err
			}

			columnsMp := make(map[string]interface{}, len(columns))

			refs = make([]interface{}, 0, len(columns))
			for _, col := range columns {
				var ref interface{}
				columnsMp[col] = &ref
				refs = append(refs, &ref)
			}

			if err := rows.Scan(refs...); err != nil {
				return 0, err
			}

			// reset a non-nil destination slice before the first row.
			if cnt == 0 && !sInd.IsNil() {
				sInd.Set(reflect.New(sInd.Type()).Elem())
			}

			var ind reflect.Value
			if eTyps[0].Kind() == reflect.Ptr {
				ind = reflect.New(eTyps[0].Elem())
			} else {
				ind = reflect.New(eTyps[0])
			}

			if ind.Kind() == reflect.Ptr {
				ind = ind.Elem()
			}

			if sMi != nil {
				for _, col := range columns {
					if fi := sMi.fields.GetByColumn(col); fi != nil {
						value := reflect.ValueOf(columnsMp[col]).Elem().Interface()
						o.setFieldValue(ind.FieldByIndex([]int{fi.fieldIndex}), value)
					}
				}
			} else {
				for i := 0; i < ind.NumField(); i++ {
					f := ind.Field(i)
					fe := ind.Type().Field(i)

					var attrs map[string]bool
					var tags map[string]string
					parseStructTag(fe.Tag.Get("orm"), &attrs, &tags)
					var col string
					if col = tags["column"]; len(col) == 0 {
						col = snakeString(fe.Name)
					}
					if v, ok := columnsMp[col]; ok {
						value := reflect.ValueOf(v).Elem().Interface()
						o.setFieldValue(f, value)
					}
				}
			}

			if eTyps[0].Kind() == reflect.Ptr {
				ind = ind.Addr()
			}

			sInd = reflect.Append(sInd, ind)

		} else {
			if err := rows.Scan(refs...); err != nil {
				return 0, err
			}

			// cnt == 0 triggers (re)initialization of the accumulators.
			o.loopSetRefs(refs, sInds, &nInds, eTyps, cnt == 0)
		}

		cnt++
	}

	// containers are only written when at least one row was read.
	if cnt > 0 {

		if structMode {
			sInds[0].Set(sInd)
		} else {
			for i, sInd := range sInds {
				nInd := nInds[i]
				sInd.Set(nInd)
			}
		}
	}

	return cnt, nil
}

// readValues is the shared implementation behind Values, ValuesList and
// ValuesFlat. typ selects the output shape: 1 = []Params (maps), 2 =
// []ParamsList (rows of values), 3 = ParamsList (flat). needCols, when
// non-empty, restricts output to the named columns. NULLs become nil.
func (o *rawSet) readValues(container interface{}, needCols []string) (int64, error) {
	var (
		maps  []Params
		lists []ParamsList
		list  ParamsList
	)

	typ := 0
	switch container.(type) {
	case *[]Params:
		typ = 1
	case *[]ParamsList:
		typ = 2
	case *ParamsList:
		typ = 3
	default:
		panic(fmt.Errorf(" unsupport read values type `%T`", container))
	}

	query := o.query
	o.orm.alias.DbBaser.ReplaceMarks(&query)

	args := getFlatParams(nil, o.args, o.orm.alias.TZ)

	var rs *sql.Rows
	rs, err := o.orm.db.Query(query, args...)
	if err != nil {
		return 0, err
	}

	defer rs.Close()

	var (
		refs   []interface{}
		cnt    int64
		cols   []string
		indexs []int
	)

	for rs.Next() {
		// column metadata and scan buffers are built once, on the first row.
		if cnt == 0 {
			columns, err := rs.Columns()
			if err != nil {
				return 0, err
			}
			if len(needCols) > 0 {
				indexs = make([]int, 0, len(needCols))
			} else {
				indexs = make([]int, 0, len(columns))
			}

			cols = columns
			refs = make([]interface{}, len(cols))
			for i := range refs {
				var ref sql.NullString
				refs[i] = &ref

				if len(needCols) > 0 {
					for _, c := range needCols {
						if c == cols[i] {
							indexs = append(indexs, i)
						}
					}
				} else {
					indexs = append(indexs, i)
				}
			}
		}

		if err := rs.Scan(refs...); err != nil {
			return 0, err
		}

		switch typ {
		case 1:
			params := make(Params, len(cols))
			for _, i := range indexs {
				ref := refs[i]
				value := reflect.Indirect(reflect.ValueOf(ref)).Interface().(sql.NullString)
				if value.Valid {
					params[cols[i]] = value.String
				} else {
					params[cols[i]] = nil
				}
			}
			maps = append(maps, params)
		case 2:
			params := make(ParamsList, 0, len(cols))
			for _, i := range indexs {
				ref := refs[i]
				value := reflect.Indirect(reflect.ValueOf(ref)).Interface().(sql.NullString)
				if value.Valid {
					params = append(params, value.String)
				} else {
					params = append(params, nil)
				}
			}
			lists = append(lists, params)
		case 3:
			for _, i := range indexs {
				ref := refs[i]
				value := reflect.Indirect(reflect.ValueOf(ref)).Interface().(sql.NullString)
				if value.Valid {
					list = append(list, value.String)
				} else {
					list = append(list, nil)
				}
			}
		}

		cnt++
	}

	switch v := container.(type) {
	case *[]Params:
		*v = maps
	case *[]ParamsList:
		*v = lists
	case *ParamsList:
		*v = list
	}

	return cnt, nil
}

// queryRowsTo maps a two-column result set into either a Params map
// (container *Params) or a struct whose field names are the CamelCase form of
// the key column values. keyCol/valueCol name the key and value columns.
func (o *rawSet) queryRowsTo(container interface{}, keyCol, valueCol string) (int64, error) {
	var (
		maps Params
		ind  *reflect.Value
	)

	typ := 0
	switch container.(type) {
	case *Params:
		typ = 1
	default:
		typ = 2
		vl := reflect.ValueOf(container)
		id := reflect.Indirect(vl)
		if vl.Kind() != reflect.Ptr || id.Kind() != reflect.Struct {
			panic(fmt.Errorf(" RowsTo unsupport type `%T` need ptr struct", container))
		}

		ind = &id
	}

	query := o.query
	o.orm.alias.DbBaser.ReplaceMarks(&query)

	args := getFlatParams(nil, o.args, o.orm.alias.TZ)

	rs, err := o.orm.db.Query(query, args...)
	if err != nil {
		return 0, err
	}

	defer rs.Close()

	var (
		refs []interface{}
		cnt  int64
		cols []string
	)

	var (
		keyIndex   = -1
		valueIndex = -1
	)

	for rs.Next() {
		if cnt == 0 {
			columns, err := rs.Columns()
			if err != nil {
				return 0, err
			}
			cols = columns
			refs = make([]interface{}, len(cols))
			for i := range refs {
				if keyCol == cols[i] {
					keyIndex = i
				}
				// map mode scans everything as NullString; struct mode keeps
				// raw driver values for non-key columns so setFieldValue can
				// convert them per field type.
				if typ == 1 || keyIndex == i {
					var ref sql.NullString
					refs[i] = &ref
				} else {
					var ref interface{}
					refs[i] = &ref
				}
				if valueCol == cols[i] {
					valueIndex = i
				}
			}
			if keyIndex == -1 || valueIndex == -1 {
				panic(fmt.Errorf(" RowsTo unknown key, value column name `%s: %s`", keyCol, valueCol))
			}
		}

		if err := rs.Scan(refs...); err != nil {
			return 0, err
		}

		if cnt == 0 {
			switch typ {
			case 1:
				maps = make(Params)
			}
		}

		key := reflect.Indirect(reflect.ValueOf(refs[keyIndex])).Interface().(sql.NullString).String

		switch typ {
		case 1:
			value := reflect.Indirect(reflect.ValueOf(refs[valueIndex])).Interface().(sql.NullString)
			if value.Valid {
				maps[key] = value.String
			} else {
				maps[key] = nil
			}

		default:
			// rows whose key has no matching struct field are skipped.
			if id := ind.FieldByName(camelString(key)); id.IsValid() {
				o.setFieldValue(id, reflect.ValueOf(refs[valueIndex]).Elem().Interface())
			}
		}

		cnt++
	}

	if typ == 1 {
		v, _ := container.(*Params)
		*v = maps
	}

	return cnt, nil
}

// query data to []map[string]interface
func (o *rawSet) Values(container *[]Params, cols ...string) (int64, error) {
	return o.readValues(container, cols)
}

// query data to [][]interface
func (o *rawSet) ValuesList(container *[]ParamsList, cols ...string) (int64, error) {
	return o.readValues(container, cols)
}

// query data to []interface
func (o *rawSet) ValuesFlat(container *ParamsList, cols ...string) (int64, error) {
	return o.readValues(container, cols)
}

// query all rows into map[string]interface with specify key and value column name.
// keyCol = "name", valueCol = "value"
// table data
// name  | value
// total | 100
// found | 200
// to map[string]interface{}{
// 	"total": 100,
// 	"found": 200,
// }
func (o *rawSet) RowsToMap(result *Params, keyCol, valueCol string) (int64, error) {
	return o.queryRowsTo(result, keyCol, valueCol)
}

// query all rows into struct with specify key and value column name.
// keyCol = "name", valueCol = "value"
// table data
// name  | value
// total | 100
// found | 200
// to struct {
// 	Total int
// 	Found int
// }
func (o *rawSet) RowsToStruct(ptrStruct interface{}, keyCol, valueCol string) (int64, error) {
	return o.queryRowsTo(ptrStruct, keyCol, valueCol)
}

// return prepared raw statement for used multiple times.
func (o *rawSet) Prepare() (RawPreparer, error) {
	return newRawPreparer(o)
}

// newRawSet constructs the RawSeter bound to orm with the given query/args.
func newRawSet(orm *orm, query string, args []interface{}) RawSeter {
	o := new(rawSet)
	o.query = query
	o.args = args
	o.orm = orm
	return o
}
diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/qb.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/qb.go
new file mode 100644
index 000000000..9f778916d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/qb.go
@@ -0,0 +1,61 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
// (continuation of the Apache License, Version 2.0 header)
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// QueryBuilder is the Query builder interface. All builder methods append SQL
// tokens and return the receiver so calls can be chained; String() joins the
// accumulated tokens into the final statement. No escaping or validation is
// performed — callers supply already-safe SQL fragments.
type QueryBuilder interface {
	Select(fields ...string) QueryBuilder
	From(tables ...string) QueryBuilder
	InnerJoin(table string) QueryBuilder
	LeftJoin(table string) QueryBuilder
	RightJoin(table string) QueryBuilder
	On(cond string) QueryBuilder
	Where(cond string) QueryBuilder
	And(cond string) QueryBuilder
	Or(cond string) QueryBuilder
	In(vals ...string) QueryBuilder
	OrderBy(fields ...string) QueryBuilder
	Asc() QueryBuilder
	Desc() QueryBuilder
	Limit(limit int) QueryBuilder
	Offset(offset int) QueryBuilder
	GroupBy(fields ...string) QueryBuilder
	Having(cond string) QueryBuilder
	Update(tables ...string) QueryBuilder
	Set(kv ...string) QueryBuilder
	Delete(tables ...string) QueryBuilder
	InsertInto(table string, fields ...string) QueryBuilder
	Values(vals ...string) QueryBuilder
	Subquery(sub string, alias string) string
	String() string
}

// NewQueryBuilder returns the QueryBuilder implementation for the given
// driver name. Supported drivers: "mysql" and "tidb". "postgres" and
// "sqlite" are recognized but not implemented yet; any other name yields an
// unknown-driver error. On error, qb is nil.
func NewQueryBuilder(driver string) (qb QueryBuilder, err error) {
	// idiomatic switch instead of an if/else-if chain on the same string.
	switch driver {
	case "mysql":
		qb = new(MySQLQueryBuilder)
	case "tidb":
		qb = new(TiDBQueryBuilder)
	case "postgres":
		err = errors.New("postgres query builder is not supported yet")
	case "sqlite":
		err = errors.New("sqlite query builder is not supported yet")
	default:
		err = errors.New("unknown driver for query builder")
	}
	return
}

// CommaSpace is the separator used when joining field/table/value lists.
const CommaSpace = ", "

// MySQLQueryBuilder is the SQL builder for MySQL. The zero value is ready to
// use; Tokens holds the SQL fragments in the order they were appended.
type MySQLQueryBuilder struct {
	Tokens []string
}

// Select appends SELECT followed by the comma-joined fields.
func (qb *MySQLQueryBuilder) Select(fields ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "SELECT", strings.Join(fields, CommaSpace))
	return qb
}

// From appends FROM followed by the comma-joined tables.
func (qb *MySQLQueryBuilder) From(tables ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "FROM", strings.Join(tables, CommaSpace))
	return qb
}

// InnerJoin appends INNER JOIN with the table.
func (qb *MySQLQueryBuilder) InnerJoin(table string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "INNER JOIN", table)
	return qb
}

// LeftJoin appends LEFT JOIN with the table.
func (qb *MySQLQueryBuilder) LeftJoin(table string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "LEFT JOIN", table)
	return qb
}

// RightJoin appends RIGHT JOIN with the table.
func (qb *MySQLQueryBuilder) RightJoin(table string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "RIGHT JOIN", table)
	return qb
}

// On appends the ON join condition.
func (qb *MySQLQueryBuilder) On(cond string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "ON", cond)
	return qb
}

// Where appends the WHERE condition.
func (qb *MySQLQueryBuilder) Where(cond string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "WHERE", cond)
	return qb
}

// And appends an AND condition.
func (qb *MySQLQueryBuilder) And(cond string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "AND", cond)
	return qb
}

// Or appends an OR condition.
func (qb *MySQLQueryBuilder) Or(cond string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "OR", cond)
	return qb
}

// In appends IN ( vals ) with the comma-joined values.
func (qb *MySQLQueryBuilder) In(vals ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "IN", "(", strings.Join(vals, CommaSpace), ")")
	return qb
}

// OrderBy appends ORDER BY with the comma-joined fields.
func (qb *MySQLQueryBuilder) OrderBy(fields ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "ORDER BY", strings.Join(fields, CommaSpace))
	return qb
}

// Asc appends ASC.
func (qb *MySQLQueryBuilder) Asc() QueryBuilder {
	qb.Tokens = append(qb.Tokens, "ASC")
	return qb
}

// Desc appends DESC.
func (qb *MySQLQueryBuilder) Desc() QueryBuilder {
	qb.Tokens = append(qb.Tokens, "DESC")
	return qb
}

// Limit appends LIMIT with the row count.
func (qb *MySQLQueryBuilder) Limit(limit int) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "LIMIT", strconv.Itoa(limit))
	return qb
}

// Offset appends OFFSET with the row offset.
func (qb *MySQLQueryBuilder) Offset(offset int) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "OFFSET", strconv.Itoa(offset))
	return qb
}

// GroupBy appends GROUP BY with the comma-joined fields.
func (qb *MySQLQueryBuilder) GroupBy(fields ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "GROUP BY", strings.Join(fields, CommaSpace))
	return qb
}

// Having appends the HAVING condition.
func (qb *MySQLQueryBuilder) Having(cond string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "HAVING", cond)
	return qb
}

// Update appends UPDATE with the comma-joined tables.
func (qb *MySQLQueryBuilder) Update(tables ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "UPDATE", strings.Join(tables, CommaSpace))
	return qb
}

// Set appends SET with the comma-joined assignments.
func (qb *MySQLQueryBuilder) Set(kv ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "SET", strings.Join(kv, CommaSpace))
	return qb
}

// Delete appends DELETE, optionally followed by the comma-joined tables.
func (qb *MySQLQueryBuilder) Delete(tables ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "DELETE")
	if len(tables) != 0 {
		qb.Tokens = append(qb.Tokens, strings.Join(tables, CommaSpace))
	}
	return qb
}

// InsertInto appends INSERT INTO with the table and optional column list.
func (qb *MySQLQueryBuilder) InsertInto(table string, fields ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "INSERT INTO", table)
	if len(fields) != 0 {
		fieldsStr := strings.Join(fields, CommaSpace)
		qb.Tokens = append(qb.Tokens, "(", fieldsStr, ")")
	}
	return qb
}

// Values appends VALUES ( vals ) with the comma-joined values.
func (qb *MySQLQueryBuilder) Values(vals ...string) QueryBuilder {
	valsStr := strings.Join(vals, CommaSpace)
	qb.Tokens = append(qb.Tokens, "VALUES", "(", valsStr, ")")
	return qb
}

// Subquery wraps sub as "(sub) AS alias".
func (qb *MySQLQueryBuilder) Subquery(sub string, alias string) string {
	return fmt.Sprintf("(%s) AS %s", sub, alias)
}

// String joins all accumulated tokens with a single space.
func (qb *MySQLQueryBuilder) String() string {
	return strings.Join(qb.Tokens, " ")
}

// TiDBQueryBuilder is the SQL builder for TiDB.
// NOTE: it is behaviorally identical to MySQLQueryBuilder; it is kept as a
// separate named type to preserve the vendored package's public API.
type TiDBQueryBuilder struct {
	Tokens []string
}

// Select appends SELECT followed by the comma-joined fields.
func (qb *TiDBQueryBuilder) Select(fields ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "SELECT", strings.Join(fields, CommaSpace))
	return qb
}

// From appends FROM followed by the comma-joined tables.
func (qb *TiDBQueryBuilder) From(tables ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "FROM", strings.Join(tables, CommaSpace))
	return qb
}

// InnerJoin appends INNER JOIN with the table.
func (qb *TiDBQueryBuilder) InnerJoin(table string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "INNER JOIN", table)
	return qb
}

// LeftJoin appends LEFT JOIN with the table.
func (qb *TiDBQueryBuilder) LeftJoin(table string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "LEFT JOIN", table)
	return qb
}

// RightJoin appends RIGHT JOIN with the table.
func (qb *TiDBQueryBuilder) RightJoin(table string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "RIGHT JOIN", table)
	return qb
}

// On appends the ON join condition.
func (qb *TiDBQueryBuilder) On(cond string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "ON", cond)
	return qb
}

// Where appends the WHERE condition.
func (qb *TiDBQueryBuilder) Where(cond string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "WHERE", cond)
	return qb
}

// And appends an AND condition.
func (qb *TiDBQueryBuilder) And(cond string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "AND", cond)
	return qb
}

// Or appends an OR condition.
func (qb *TiDBQueryBuilder) Or(cond string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "OR", cond)
	return qb
}

// In appends IN ( vals ) with the comma-joined values.
func (qb *TiDBQueryBuilder) In(vals ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "IN", "(", strings.Join(vals, CommaSpace), ")")
	return qb
}

// OrderBy appends ORDER BY with the comma-joined fields.
func (qb *TiDBQueryBuilder) OrderBy(fields ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "ORDER BY", strings.Join(fields, CommaSpace))
	return qb
}

// Asc appends ASC.
func (qb *TiDBQueryBuilder) Asc() QueryBuilder {
	qb.Tokens = append(qb.Tokens, "ASC")
	return qb
}

// Desc appends DESC.
func (qb *TiDBQueryBuilder) Desc() QueryBuilder {
	qb.Tokens = append(qb.Tokens, "DESC")
	return qb
}

// Limit appends LIMIT with the row count.
func (qb *TiDBQueryBuilder) Limit(limit int) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "LIMIT", strconv.Itoa(limit))
	return qb
}

// Offset appends OFFSET with the row offset.
func (qb *TiDBQueryBuilder) Offset(offset int) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "OFFSET", strconv.Itoa(offset))
	return qb
}

// GroupBy appends GROUP BY with the comma-joined fields.
func (qb *TiDBQueryBuilder) GroupBy(fields ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "GROUP BY", strings.Join(fields, CommaSpace))
	return qb
}

// Having appends the HAVING condition.
func (qb *TiDBQueryBuilder) Having(cond string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "HAVING", cond)
	return qb
}

// Update appends UPDATE with the comma-joined tables.
func (qb *TiDBQueryBuilder) Update(tables ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "UPDATE", strings.Join(tables, CommaSpace))
	return qb
}

// Set appends SET with the comma-joined assignments.
func (qb *TiDBQueryBuilder) Set(kv ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "SET", strings.Join(kv, CommaSpace))
	return qb
}

// Delete appends DELETE, optionally followed by the comma-joined tables.
func (qb *TiDBQueryBuilder) Delete(tables ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "DELETE")
	if len(tables) != 0 {
		qb.Tokens = append(qb.Tokens, strings.Join(tables, CommaSpace))
	}
	return qb
}

// InsertInto appends INSERT INTO with the table and optional column list.
func (qb *TiDBQueryBuilder) InsertInto(table string, fields ...string) QueryBuilder {
	qb.Tokens = append(qb.Tokens, "INSERT INTO", table)
	if len(fields) != 0 {
		fieldsStr := strings.Join(fields, CommaSpace)
		qb.Tokens = append(qb.Tokens, "(", fieldsStr, ")")
	}
	return qb
}

// Values appends VALUES ( vals ) with the comma-joined values.
func (qb *TiDBQueryBuilder) Values(vals ...string) QueryBuilder {
	valsStr := strings.Join(vals, CommaSpace)
	qb.Tokens = append(qb.Tokens, "VALUES", "(", valsStr, ")")
	return qb
}

// Subquery wraps sub as "(sub) AS alias".
func (qb *TiDBQueryBuilder) Subquery(sub string, alias string) string {
	return fmt.Sprintf("(%s) AS %s", sub, alias)
}

// String joins all accumulated tokens with a single space.
func (qb *TiDBQueryBuilder) String() string {
	return strings.Join(qb.Tokens, " ")
}

// ---- patch residue preserved from the vendored diff (next file: types.go) ----
// diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/types.go
//          b/Godeps/_workspace/src/github.com/astaxie/beego/orm/types.go
// new file mode 100644, index 000000000..5fac5fed8, @@ -0,0 +1,413 @@
// types.go carries the standard Apache-2.0 header.
package orm

import (
	"database/sql"
	"reflect"
	"time"
)

// Driver define database driver
type Driver interface {
	Name() string
	Type() DriverType
}

// Fielder define field info
type Fielder interface {
	String() string
	FieldType() int
	SetRaw(interface{}) error
	RawValue() interface{}
}

// Ormer define the orm interface
type Ormer interface {
	// read data to model
	// for example:
	//	this will find User by Id field
	// 	u = &User{Id: user.Id}
	// 	err = Ormer.Read(u)
	//	this will find User by UserName field
	// 	u = &User{UserName: "astaxie", Password: "pass"}
	//	err = Ormer.Read(u, "UserName")
	Read(md interface{}, cols ...string) error
	// Try to read a row from the database, or insert one if it doesn't exist
	ReadOrCreate(md interface{}, col1 string, cols ...string) (bool, int64, error)
	// insert model data to database
	// for example:
	//  user := new(User)
	//  id, err = Ormer.Insert(user)
	//  user must be a pointer and Insert will set user's pk field
	Insert(interface{}) (int64, error)
	// insert some models to database
	InsertMulti(bulk int, mds interface{}) (int64, error)
	// update model to database.
	// cols set the columns those want to update.
	// find model by Id(pk) field and update columns specified by fields, if cols is null then update all columns
	// for example:
	// user := User{Id: 2}
	//	user.Langs = append(user.Langs, "zh-CN", "en-US")
	//	user.Extra.Name = "beego"
	//	user.Extra.Data = "orm"
	//	num, err = Ormer.Update(&user, "Langs", "Extra")
	Update(md interface{}, cols ...string) (int64, error)
	// delete model in database
	Delete(md interface{}) (int64, error)
	// load related models to md model.
	// args are limit, offset int and order string.
	//
	// example:
	// 	Ormer.LoadRelated(post,"Tags")
	// 	for _,tag := range post.Tags{...}
	// args[0] bool true useDefaultRelsDepth; false depth 0
	// args[0] int  loadRelationDepth
	// args[1] int  limit default limit 1000
	// args[2] int  offset default offset 0
	// args[3] string order for example : "-Id"
	// make sure the relation is defined in model struct tags.
	LoadRelated(md interface{}, name string, args ...interface{}) (int64, error)
	// create a model-to-model queryer
	// for example:
	// 	post := Post{Id: 4}
	// 	m2m := Ormer.QueryM2M(&post, "Tags")
	QueryM2M(md interface{}, name string) QueryM2Mer
	// return a QuerySeter for table operations.
	// table name can be string or struct.
	// e.g. QueryTable("user"), QueryTable(&user{}) or QueryTable((*User)(nil)),
	QueryTable(ptrStructOrTableName interface{}) QuerySeter
	// switch to another registered database driver by given name.
	Using(name string) error
	// begin transaction
	// for example:
	// 	o := NewOrm()
	// 	err := o.Begin()
	// 	...
	// 	err = o.Rollback()
	Begin() error
	// commit transaction
	Commit() error
	// rollback transaction
	Rollback() error
	// return a raw query seter for raw sql string.
	// for example:
	//	 ormer.Raw("UPDATE `user` SET `user_name` = ? WHERE `user_name` = ?", "slene", "testing").Exec()
	//	// update user testing's name to slene
	Raw(query string, args ...interface{}) RawSeter
	Driver() Driver
}

// Inserter insert prepared statement
type Inserter interface {
	Insert(interface{}) (int64, error)
	Close() error
}

// QuerySeter query seter
type QuerySeter interface {
	// add condition expression to QuerySeter.
	// for example:
	//	filter by UserName == 'slene'
	//	qs.Filter("UserName", "slene")
	//	sql : left outer join profile on t0.id1==t1.id2 where t1.age == 28
	//	Filter("profile__Age", 28)
	// 	// time compare
	//	qs.Filter("created", time.Now())
	Filter(string, ...interface{}) QuerySeter
	// add NOT condition to querySeter.
	// have the same usage as Filter
	Exclude(string, ...interface{}) QuerySeter
	// set condition to QuerySeter.
	// sql's where condition
	//	cond := orm.NewCondition()
	//	cond1 := cond.And("profile__isnull", false).AndNot("status__in", 1).Or("profile__age__gt", 2000)
	//	//sql-> WHERE T0.`profile_id` IS NOT NULL AND NOT T0.`Status` IN (?) OR T1.`age` > 2000
	//	num, err := qs.SetCond(cond1).Count()
	SetCond(*Condition) QuerySeter
	// add LIMIT value.
	// args[0] means offset, e.g. LIMIT num,offset.
	// if Limit <= 0 then Limit will be set to default limit, e.g. 1000
	// if QuerySeter doesn't call Limit, the sql's Limit will be set to default limit, e.g. 1000
	// for example:
	//	qs.Limit(10, 2)
	//	// sql-> limit 10 offset 2
	Limit(limit interface{}, args ...interface{}) QuerySeter
	// add OFFSET value
	// same as Limit function's args[0]
	Offset(offset interface{}) QuerySeter
	// add ORDER expression.
	// "column" means ASC, "-column" means DESC.
	// for example:
	//	qs.OrderBy("-status")
	OrderBy(exprs ...string) QuerySeter
	// set relation model to query together.
	// it will query relation models and assign to parent model.
	// for example:
	//	// will load all related fields use left join .
	// 	qs.RelatedSel().One(&user)
	//	// will load related field only profile
	//	qs.RelatedSel("profile").One(&user)
	//	user.Profile.Age = 32
	RelatedSel(params ...interface{}) QuerySeter
	// return QuerySeter execution result number
	// for example:
	//	num, err = qs.Filter("profile__age__gt", 28).Count()
	Count() (int64, error)
	// check result empty or not after QuerySeter executed
	// the same as QuerySeter.Count > 0
	Exist() bool
	// execute update with parameters
	// for example:
	//	num, err = qs.Filter("user_name", "slene").Update(Params{
	//		"Nums": ColValue(Col_Minus, 50),
	//	}) // user slene's Nums will minus 50
	//	num, err = qs.Filter("UserName", "slene").Update(Params{
	//		"user_name": "slene2"
	//	}) // user slene's name will change to slene2
	Update(values Params) (int64, error)
	// delete from table
	// for example:
	//	num, err = qs.Filter("user_name__in", "testing1", "testing2").Delete()
	// 	// delete two users whose name is testing1 or testing2
	Delete() (int64, error)
	// return an insert queryer.
	// it can be reused multiple times.
	// example:
	// 	i, err := sq.PrepareInsert()
	// 	num, err = i.Insert(&user1) // user table will add one record user1 at once
	//	num, err = i.Insert(&user2) // user table will add one record user2 at once
	//	err = i.Close() // don't forget to call Close
	PrepareInsert() (Inserter, error)
	// query all data and map to containers.
	// cols means the columns when querying.
	// for example:
	//	var users []*User
	//	qs.All(&users) // users[0],users[1],users[2] ...
	All(container interface{}, cols ...string) (int64, error)
	// query one row data and map to containers.
	// cols means the columns when querying.
	// for example:
	//	var user User
	//	qs.One(&user) //user.UserName == "slene"
	One(container interface{}, cols ...string) error
	// query all data and map to []map[string]interface.
	// exprs means condition expression.
	// it converts data to []map[column]value.
	// for example:
	//	var maps []Params
	//	qs.Values(&maps) //maps[0]["UserName"]=="slene"
	Values(results *[]Params, exprs ...string) (int64, error)
	// query all data and map to [][]interface
	// it converts data to [][column_index]value
	// for example:
	//	var list []ParamsList
	//	qs.ValuesList(&list) // list[0][1] == "slene"
	ValuesList(results *[]ParamsList, exprs ...string) (int64, error)
	// query all data and map to []interface.
	// it's designed for one column record set, auto change to []value, not [][column]value.
	// for example:
	//	var list ParamsList
	//	qs.ValuesFlat(&list, "UserName") // list[0] == "slene"
	ValuesFlat(result *ParamsList, expr string) (int64, error)
	// query all rows into map[string]interface with specify key and value column name.
	// keyCol = "name", valueCol = "value"
	// table data
	// name  | value
	// total | 100
	// found | 200
	// to map[string]interface{}{
	// 	"total": 100,
	// 	"found": 200,
	// }
	RowsToMap(result *Params, keyCol, valueCol string) (int64, error)
	// query all rows into struct with specify key and value column name.
	// keyCol = "name", valueCol = "value"
	// table data
	// name  | value
	// total | 100
	// found | 200
	// to struct {
	// 	Total int
	// 	Found int
	// }
	RowsToStruct(ptrStruct interface{}, keyCol, valueCol string) (int64, error)
}

// QueryM2Mer model to model query struct
// all operations are on the m2m table only, will not affect the origin model table
type QueryM2Mer interface {
	// add models to origin models when creating queryM2M.
	// example:
	// 	m2m := orm.QueryM2M(post,"Tag")
	// 	m2m.Add(&Tag1{},&Tag2{})
	//  	for _,tag := range post.Tags{}{ ... }
	// param could also be any of the following:
	// 	[]*Tag{{Id:3,Name: "TestTag1"}, {Id:4,Name: "TestTag2"}}
	//	&Tag{Id:5,Name: "TestTag3"}
	//	[]interface{}{&Tag{Id:6,Name: "TestTag4"}}
	// insert one or more rows to m2m table
	// make sure the relation is defined in post model struct tag.
	Add(...interface{}) (int64, error)
	// remove models following the origin model relationship
	// only delete rows from m2m table
	// for example:
	// tag3 := &Tag{Id:5,Name: "TestTag3"}
	// num, err = m2m.Remove(tag3)
	Remove(...interface{}) (int64, error)
	// check whether the model exists in the relationship of the origin model
	Exist(interface{}) bool
	// clean all models related to the origin model
	Clear() (int64, error)
	// count all related models of the origin model
	Count() (int64, error)
}

// RawPreparer raw query statement
type RawPreparer interface {
	Exec(...interface{}) (sql.Result, error)
	Close() error
}

// RawSeter raw query seter
// create From Ormer.Raw
// for example:
//	sql := fmt.Sprintf("SELECT %sid%s,%sname%s FROM %suser%s WHERE id = ?",Q,Q,Q,Q,Q,Q)
//	rs := Ormer.Raw(sql, 1)
type RawSeter interface {
	// execute sql and get result
	Exec() (sql.Result, error)
	// query data and map to container
	// for example:
	//	var name string
	//	var id int
	//	rs.QueryRow(&id,&name) // id==2 name=="slene"
	QueryRow(containers ...interface{}) error

	// query data rows and map to container
	//	var ids []int
	//	var names []int
	//	query = fmt.Sprintf("SELECT 'id','name' FROM %suser%s", Q, Q)
	//	num, err = dORM.Raw(query).QueryRows(&ids,&names) // ids=>{1,2},names=>{"nobody","slene"}
	QueryRows(containers ...interface{}) (int64, error)
	SetArgs(...interface{}) RawSeter
	// query data to []map[string]interface
	// see QuerySeter's Values
	Values(container *[]Params, cols ...string) (int64, error)
	// query data to [][]interface
	// see QuerySeter's ValuesList
	ValuesList(container *[]ParamsList, cols ...string) (int64, error)
	// query data to []interface
	// see QuerySeter's ValuesFlat
	ValuesFlat(container *ParamsList, cols ...string) (int64, error)
	// query all rows into map[string]interface with specify key and value column name.
	// keyCol = "name", valueCol = "value"
	// table data
	// name  | value
	// total | 100
	// found | 200
	// to map[string]interface{}{
	// 	"total": 100,
	// 	"found": 200,
	// }
	RowsToMap(result *Params, keyCol, valueCol string) (int64, error)
	// query all rows into struct with specify key and value column name.
	// keyCol = "name", valueCol = "value"
	// table data
	// name  | value
	// total | 100
	// found | 200
	// to struct {
	// 	Total int
	// 	Found int
	// }
	RowsToStruct(ptrStruct interface{}, keyCol, valueCol string) (int64, error)

	// return prepared raw statement for used multiple times.
	// for example:
	// 	pre, err := dORM.Raw("INSERT INTO tag (name) VALUES (?)").Prepare()
	// 	r, err := pre.Exec("name1") // INSERT INTO tag (name) VALUES (`name1`)
	Prepare() (RawPreparer, error)
}

// stmtQuerier statement querier
type stmtQuerier interface {
	Close() error
	Exec(args ...interface{}) (sql.Result, error)
	Query(args ...interface{}) (*sql.Rows, error)
	QueryRow(args ...interface{}) *sql.Row
}

// db querier
type dbQuerier interface {
	Prepare(query string) (*sql.Stmt, error)
	Exec(query string, args ...interface{}) (sql.Result, error)
	Query(query string, args ...interface{}) (*sql.Rows, error)
	QueryRow(query string, args ...interface{}) *sql.Row
}

// type DB interface {
// 	Begin() (*sql.Tx, error)
// 	Prepare(query string) (stmtQuerier, error)
// 	Exec(query string, args ...interface{}) (sql.Result, error)
// 	Query(query string, args ...interface{}) (*sql.Rows, error)
// 	QueryRow(query string, args ...interface{}) *sql.Row
// }

// transaction beginner
type txer interface {
	Begin() (*sql.Tx, error)
}

// transaction ending
type txEnder interface {
	Commit() error
	Rollback() error
}

// base database struct
// NOTE(review): this interface continues beyond the end of this chunk; only
// the first two methods are visible here.
type dbBaser interface {
	Read(dbQuerier, *modelInfo, reflect.Value, *time.Location, []string) error
	Insert(dbQuerier, *modelInfo, reflect.Value, *time.Location) (int64, error)
InsertMulti(dbQuerier, *modelInfo, reflect.Value, int, *time.Location) (int64, error) + InsertValue(dbQuerier, *modelInfo, bool, []string, []interface{}) (int64, error) + InsertStmt(stmtQuerier, *modelInfo, reflect.Value, *time.Location) (int64, error) + Update(dbQuerier, *modelInfo, reflect.Value, *time.Location, []string) (int64, error) + Delete(dbQuerier, *modelInfo, reflect.Value, *time.Location) (int64, error) + ReadBatch(dbQuerier, *querySet, *modelInfo, *Condition, interface{}, *time.Location, []string) (int64, error) + SupportUpdateJoin() bool + UpdateBatch(dbQuerier, *querySet, *modelInfo, *Condition, Params, *time.Location) (int64, error) + DeleteBatch(dbQuerier, *querySet, *modelInfo, *Condition, *time.Location) (int64, error) + Count(dbQuerier, *querySet, *modelInfo, *Condition, *time.Location) (int64, error) + OperatorSQL(string) string + GenerateOperatorSQL(*modelInfo, *fieldInfo, string, []interface{}, *time.Location) (string, []interface{}) + GenerateOperatorLeftCol(*fieldInfo, string, *string) + PrepareInsert(dbQuerier, *modelInfo) (stmtQuerier, string, error) + ReadValues(dbQuerier, *querySet, *modelInfo, *Condition, []string, interface{}, *time.Location) (int64, error) + RowsTo(dbQuerier, *querySet, *modelInfo, *Condition, interface{}, string, string, *time.Location) (int64, error) + MaxLimit() uint64 + TableQuote() string + ReplaceMarks(*string) + HasReturningID(*modelInfo, *string) bool + TimeFromDB(*time.Time, *time.Location) + TimeToDB(*time.Time, *time.Location) + DbTypes() map[string]string + GetTables(dbQuerier) (map[string]bool, error) + GetColumns(dbQuerier, string) (map[string][3]string, error) + ShowTablesQuery() string + ShowColumnsQuery(string) string + IndexExists(dbQuerier, string, string) bool + collectFieldValue(*modelInfo, *fieldInfo, reflect.Value, bool, *time.Location) (interface{}, error) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/orm/utils.go b/Godeps/_workspace/src/github.com/astaxie/beego/orm/utils.go 
new file mode 100644 index 000000000..99437c7b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/orm/utils.go @@ -0,0 +1,266 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package orm + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +// StrTo is the target string +type StrTo string + +// Set string +func (f *StrTo) Set(v string) { + if v != "" { + *f = StrTo(v) + } else { + f.Clear() + } +} + +// Clear string +func (f *StrTo) Clear() { + *f = StrTo(0x1E) +} + +// Exist check string exist +func (f StrTo) Exist() bool { + return string(f) != string(0x1E) +} + +// Bool string to bool +func (f StrTo) Bool() (bool, error) { + return strconv.ParseBool(f.String()) +} + +// Float32 string to float32 +func (f StrTo) Float32() (float32, error) { + v, err := strconv.ParseFloat(f.String(), 32) + return float32(v), err +} + +// Float64 string to float64 +func (f StrTo) Float64() (float64, error) { + return strconv.ParseFloat(f.String(), 64) +} + +// Int string to int +func (f StrTo) Int() (int, error) { + v, err := strconv.ParseInt(f.String(), 10, 32) + return int(v), err +} + +// Int8 string to int8 +func (f StrTo) Int8() (int8, error) { + v, err := strconv.ParseInt(f.String(), 10, 8) + return int8(v), err +} + +// Int16 string to int16 +func (f StrTo) Int16() (int16, error) { + v, err := strconv.ParseInt(f.String(), 10, 16) + return int16(v), err +} + +// Int32 string 
to int32 +func (f StrTo) Int32() (int32, error) { + v, err := strconv.ParseInt(f.String(), 10, 32) + return int32(v), err +} + +// Int64 string to int64 +func (f StrTo) Int64() (int64, error) { + v, err := strconv.ParseInt(f.String(), 10, 64) + return int64(v), err +} + +// Uint string to uint +func (f StrTo) Uint() (uint, error) { + v, err := strconv.ParseUint(f.String(), 10, 32) + return uint(v), err +} + +// Uint8 string to uint8 +func (f StrTo) Uint8() (uint8, error) { + v, err := strconv.ParseUint(f.String(), 10, 8) + return uint8(v), err +} + +// Uint16 string to uint16 +func (f StrTo) Uint16() (uint16, error) { + v, err := strconv.ParseUint(f.String(), 10, 16) + return uint16(v), err +} + +// Uint32 string to uint32 +func (f StrTo) Uint32() (uint32, error) { + v, err := strconv.ParseUint(f.String(), 10, 32) + return uint32(v), err +} + +// Uint64 string to uint64 +func (f StrTo) Uint64() (uint64, error) { + v, err := strconv.ParseUint(f.String(), 10, 64) + return uint64(v), err +} + +// String string to string +func (f StrTo) String() string { + if f.Exist() { + return string(f) + } + return "" +} + +// ToStr interface to string +func ToStr(value interface{}, args ...int) (s string) { + switch v := value.(type) { + case bool: + s = strconv.FormatBool(v) + case float32: + s = strconv.FormatFloat(float64(v), 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 32)) + case float64: + s = strconv.FormatFloat(v, 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 64)) + case int: + s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) + case int8: + s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) + case int16: + s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) + case int32: + s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) + case int64: + s = strconv.FormatInt(v, argInt(args).Get(0, 10)) + case uint: + s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) + case uint8: + s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) + 
case uint16: + s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) + case uint32: + s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) + case uint64: + s = strconv.FormatUint(v, argInt(args).Get(0, 10)) + case string: + s = v + case []byte: + s = string(v) + default: + s = fmt.Sprintf("%v", v) + } + return s +} + +// ToInt64 interface to int64 +func ToInt64(value interface{}) (d int64) { + val := reflect.ValueOf(value) + switch value.(type) { + case int, int8, int16, int32, int64: + d = val.Int() + case uint, uint8, uint16, uint32, uint64: + d = int64(val.Uint()) + default: + panic(fmt.Errorf("ToInt64 need numeric not `%T`", value)) + } + return +} + +// snake string, XxYy to xx_yy +func snakeString(s string) string { + data := make([]byte, 0, len(s)*2) + j := false + num := len(s) + for i := 0; i < num; i++ { + d := s[i] + if i > 0 && d >= 'A' && d <= 'Z' && j { + data = append(data, '_') + } + if d != '_' { + j = true + } + data = append(data, d) + } + return strings.ToLower(string(data[:])) +} + +// camel string, xx_yy to XxYy +func camelString(s string) string { + data := make([]byte, 0, len(s)) + j := false + k := false + num := len(s) - 1 + for i := 0; i <= num; i++ { + d := s[i] + if k == false && d >= 'A' && d <= 'Z' { + k = true + } + if d >= 'a' && d <= 'z' && (j || k == false) { + d = d - 32 + j = false + k = true + } + if k && d == '_' && num > i && s[i+1] >= 'a' && s[i+1] <= 'z' { + j = true + continue + } + data = append(data, d) + } + return string(data[:]) +} + +type argString []string + +// get string by index from string slice +func (a argString) Get(i int, args ...string) (r string) { + if i >= 0 && i < len(a) { + r = a[i] + } else if len(args) > 0 { + r = args[0] + } + return +} + +type argInt []int + +// get int by index from int slice +func (a argInt) Get(i int, args ...int) (r int) { + if i >= 0 && i < len(a) { + r = a[i] + } + if len(args) > 0 { + r = args[0] + } + return +} + +// parse time to string with location +func 
timeParse(dateString, format string) (time.Time, error) { + tp, err := time.ParseInLocation(format, dateString, DefaultTimeLoc) + return tp, err +} + +// get pointer indirect type +func indirectType(v reflect.Type) reflect.Type { + switch v.Kind() { + case reflect.Ptr: + return indirectType(v.Elem()) + default: + return v + } +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/parser.go b/Godeps/_workspace/src/github.com/astaxie/beego/parser.go new file mode 100644 index 000000000..b14d74b9a --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/parser.go @@ -0,0 +1,231 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package beego + +import ( + "encoding/json" + "errors" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io/ioutil" + "os" + "path" + "sort" + "strings" + + "github.com/astaxie/beego/utils" +) + +var globalRouterTemplate = `package routers + +import ( + "github.com/astaxie/beego" +) + +func init() { +{{.globalinfo}} +} +` + +var ( + lastupdateFilename = "lastupdate.tmp" + commentFilename string + pkgLastupdate map[string]int64 + genInfoList map[string][]ControllerComments +) + +const coomentPrefix = "commentsRouter_" + +func init() { + pkgLastupdate = make(map[string]int64) +} + +func parserPkg(pkgRealpath, pkgpath string) error { + rep := strings.NewReplacer("/", "_", ".", "_") + commentFilename = coomentPrefix + rep.Replace(pkgpath) + ".go" + if !compareFile(pkgRealpath) { + Info(pkgRealpath + " has not changed, not reloading") + return nil + } + genInfoList = make(map[string][]ControllerComments) + fileSet := token.NewFileSet() + astPkgs, err := parser.ParseDir(fileSet, pkgRealpath, func(info os.FileInfo) bool { + name := info.Name() + return !info.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") + }, parser.ParseComments) + + if err != nil { + return err + } + for _, pkg := range astPkgs { + for _, fl := range pkg.Files { + for _, d := range fl.Decls { + switch specDecl := d.(type) { + case *ast.FuncDecl: + if specDecl.Recv != nil { + exp, ok := specDecl.Recv.List[0].Type.(*ast.StarExpr) // Check that the type is correct first beforing throwing to parser + if ok { + parserComments(specDecl.Doc, specDecl.Name.String(), fmt.Sprint(exp.X), pkgpath) + } + } + } + } + } + } + genRouterCode() + savetoFile(pkgRealpath) + return nil +} + +func parserComments(comments *ast.CommentGroup, funcName, controllerName, pkgpath string) error { + if comments != nil && comments.List != nil { + for _, c := range comments.List { + t := strings.TrimSpace(strings.TrimLeft(c.Text, "//")) + if strings.HasPrefix(t, "@router") { + elements := 
strings.TrimLeft(t, "@router ") + e1 := strings.SplitN(elements, " ", 2) + if len(e1) < 1 { + return errors.New("you should has router infomation") + } + key := pkgpath + ":" + controllerName + cc := ControllerComments{} + cc.Method = funcName + cc.Router = e1[0] + if len(e1) == 2 && e1[1] != "" { + e1 = strings.SplitN(e1[1], " ", 2) + if len(e1) >= 1 { + cc.AllowHTTPMethods = strings.Split(strings.Trim(e1[0], "[]"), ",") + } else { + cc.AllowHTTPMethods = append(cc.AllowHTTPMethods, "get") + } + } else { + cc.AllowHTTPMethods = append(cc.AllowHTTPMethods, "get") + } + if len(e1) == 2 && e1[1] != "" { + keyval := strings.Split(strings.Trim(e1[1], "[]"), " ") + for _, kv := range keyval { + kk := strings.Split(kv, ":") + cc.Params = append(cc.Params, map[string]string{strings.Join(kk[:len(kk)-1], ":"): kk[len(kk)-1]}) + } + } + genInfoList[key] = append(genInfoList[key], cc) + } + } + } + return nil +} + +func genRouterCode() { + os.Mkdir("routers", 0755) + Info("generate router from comments") + var ( + globalinfo string + sortKey []string + ) + for k := range genInfoList { + sortKey = append(sortKey, k) + } + sort.Strings(sortKey) + for _, k := range sortKey { + cList := genInfoList[k] + for _, c := range cList { + allmethod := "nil" + if len(c.AllowHTTPMethods) > 0 { + allmethod = "[]string{" + for _, m := range c.AllowHTTPMethods { + allmethod += `"` + m + `",` + } + allmethod = strings.TrimRight(allmethod, ",") + "}" + } + params := "nil" + if len(c.Params) > 0 { + params = "[]map[string]string{" + for _, p := range c.Params { + for k, v := range p { + params = params + `map[string]string{` + k + `:"` + v + `"},` + } + } + params = strings.TrimRight(params, ",") + "}" + } + globalinfo = globalinfo + ` + beego.GlobalControllerRouter["` + k + `"] = append(beego.GlobalControllerRouter["` + k + `"], + beego.ControllerComments{ + "` + strings.TrimSpace(c.Method) + `", + ` + "`" + c.Router + "`" + `, + ` + allmethod + `, + ` + params + `}) +` + } + } + if globalinfo 
!= "" { + f, err := os.Create(path.Join("routers", commentFilename)) + if err != nil { + panic(err) + } + defer f.Close() + f.WriteString(strings.Replace(globalRouterTemplate, "{{.globalinfo}}", globalinfo, -1)) + } +} + +func compareFile(pkgRealpath string) bool { + if !utils.FileExists(path.Join("routers", commentFilename)) { + return true + } + if utils.FileExists(lastupdateFilename) { + content, err := ioutil.ReadFile(lastupdateFilename) + if err != nil { + return true + } + json.Unmarshal(content, &pkgLastupdate) + lastupdate, err := getpathTime(pkgRealpath) + if err != nil { + return true + } + if v, ok := pkgLastupdate[pkgRealpath]; ok { + if lastupdate <= v { + return false + } + } + } + return true +} + +func savetoFile(pkgRealpath string) { + lastupdate, err := getpathTime(pkgRealpath) + if err != nil { + return + } + pkgLastupdate[pkgRealpath] = lastupdate + d, err := json.Marshal(pkgLastupdate) + if err != nil { + return + } + ioutil.WriteFile(lastupdateFilename, d, os.ModePerm) +} + +func getpathTime(pkgRealpath string) (lastupdate int64, err error) { + fl, err := ioutil.ReadDir(pkgRealpath) + if err != nil { + return lastupdate, err + } + for _, f := range fl { + if lastupdate < f.ModTime().UnixNano() { + lastupdate = f.ModTime().UnixNano() + } + } + return lastupdate, nil +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/plugins/apiauth/apiauth.go b/Godeps/_workspace/src/github.com/astaxie/beego/plugins/apiauth/apiauth.go new file mode 100644 index 000000000..8af08088d --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/plugins/apiauth/apiauth.go @@ -0,0 +1,180 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package apiauth provides handlers to enable apiauth support. +// +// Simple Usage: +// import( +// "github.com/astaxie/beego" +// "github.com/astaxie/beego/plugins/apiauth" +// ) +// +// func main(){ +// // apiauth every request +// beego.InsertFilter("*", beego.BeforeRouter,apiauth.APIBaiscAuth("appid","appkey")) +// beego.Run() +// } +// +// Advanced Usage: +// +// func getAppSecret(appid string) string { +// // get appsecret by appid +// // maybe store in configure, maybe in database +// } +// +// beego.InsertFilter("*", beego.BeforeRouter,apiauth.APISecretAuth(getAppSecret, 360)) +// +// Information: +// +// In the request user should include these params in the query +// +// 1. appid +// +// appid is assigned to the application +// +// 2. signature +// +// get the signature use apiauth.Signature() +// +// when you send to server remember use url.QueryEscape() +// +// 3. 
timestamp: +// +// send the request time, the format is yyyy-mm-dd HH:ii:ss +// +package apiauth + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/url" + "sort" + "time" + + "github.com/astaxie/beego" + "github.com/astaxie/beego/context" +) + +// AppIDToAppSecret is used to get appsecret throw appid +type AppIDToAppSecret func(string) string + +// APIBaiscAuth use the basic appid/appkey as the AppIdToAppSecret +func APIBaiscAuth(appid, appkey string) beego.FilterFunc { + ft := func(aid string) string { + if aid == appid { + return appkey + } + return "" + } + return APISecretAuth(ft, 300) +} + +// APISecretAuth use AppIdToAppSecret verify and +func APISecretAuth(f AppIDToAppSecret, timeout int) beego.FilterFunc { + return func(ctx *context.Context) { + if ctx.Input.Query("appid") == "" { + ctx.ResponseWriter.WriteHeader(403) + ctx.WriteString("miss query param: appid") + return + } + appsecret := f(ctx.Input.Query("appid")) + if appsecret == "" { + ctx.ResponseWriter.WriteHeader(403) + ctx.WriteString("not exist this appid") + return + } + if ctx.Input.Query("signature") == "" { + ctx.ResponseWriter.WriteHeader(403) + ctx.WriteString("miss query param: signature") + return + } + if ctx.Input.Query("timestamp") == "" { + ctx.ResponseWriter.WriteHeader(403) + ctx.WriteString("miss query param: timestamp") + return + } + u, err := time.Parse("2006-01-02 15:04:05", ctx.Input.Query("timestamp")) + if err != nil { + ctx.ResponseWriter.WriteHeader(403) + ctx.WriteString("timestamp format is error, should 2006-01-02 15:04:05") + return + } + t := time.Now() + if t.Sub(u).Seconds() > float64(timeout) { + ctx.ResponseWriter.WriteHeader(403) + ctx.WriteString("timeout! 
the request time is long ago, please try again") + return + } + if ctx.Input.Query("signature") != + Signature(appsecret, ctx.Input.Method(), ctx.Request.Form, ctx.Input.URI()) { + ctx.ResponseWriter.WriteHeader(403) + ctx.WriteString("auth failed") + } + } +} + +// Signature used to generate signature with the appsecret/method/params/RequestURI +func Signature(appsecret, method string, params url.Values, RequestURI string) (result string) { + var query string + pa := make(map[string]string) + for k, v := range params { + pa[k] = v[0] + } + vs := mapSorter(pa) + vs.Sort() + for i := 0; i < vs.Len(); i++ { + if vs.Keys[i] == "signature" { + continue + } + if vs.Keys[i] != "" && vs.Vals[i] != "" { + query = fmt.Sprintf("%v%v%v", query, vs.Keys[i], vs.Vals[i]) + } + } + stringToSign := fmt.Sprintf("%v\n%v\n%v\n", method, query, RequestURI) + + sha256 := sha256.New + hash := hmac.New(sha256, []byte(appsecret)) + hash.Write([]byte(stringToSign)) + return base64.StdEncoding.EncodeToString(hash.Sum(nil)) +} + +type valSorter struct { + Keys []string + Vals []string +} + +func mapSorter(m map[string]string) *valSorter { + vs := &valSorter{ + Keys: make([]string, 0, len(m)), + Vals: make([]string, 0, len(m)), + } + for k, v := range m { + vs.Keys = append(vs.Keys, k) + vs.Vals = append(vs.Vals, v) + } + return vs +} + +func (vs *valSorter) Sort() { + sort.Sort(vs) +} + +func (vs *valSorter) Len() int { return len(vs.Keys) } +func (vs *valSorter) Less(i, j int) bool { return vs.Keys[i] < vs.Keys[j] } +func (vs *valSorter) Swap(i, j int) { + vs.Vals[i], vs.Vals[j] = vs.Vals[j], vs.Vals[i] + vs.Keys[i], vs.Keys[j] = vs.Keys[j], vs.Keys[i] +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/plugins/auth/basic.go b/Godeps/_workspace/src/github.com/astaxie/beego/plugins/auth/basic.go new file mode 100644 index 000000000..c478044ab --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/plugins/auth/basic.go @@ -0,0 +1,107 @@ +// Copyright 2014 beego Author. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package auth provides handlers to enable basic auth support. +// Simple Usage: +// import( +// "github.com/astaxie/beego" +// "github.com/astaxie/beego/plugins/auth" +// ) +// +// func main(){ +// // authenticate every request +// beego.InsertFilter("*", beego.BeforeRouter,auth.Basic("username","secretpassword")) +// beego.Run() +// } +// +// +// Advanced Usage: +// +// func SecretAuth(username, password string) bool { +// return username == "astaxie" && password == "helloBeego" +// } +// authPlugin := auth.NewBasicAuthenticator(SecretAuth, "Authorization Required") +// beego.InsertFilter("*", beego.BeforeRouter,authPlugin) +package auth + +import ( + "encoding/base64" + "net/http" + "strings" + + "github.com/astaxie/beego" + "github.com/astaxie/beego/context" +) + +var defaultRealm = "Authorization Required" + +// Basic is the http basic auth +func Basic(username string, password string) beego.FilterFunc { + secrets := func(user, pass string) bool { + return user == username && pass == password + } + return NewBasicAuthenticator(secrets, defaultRealm) +} + +// NewBasicAuthenticator return the BasicAuth +func NewBasicAuthenticator(secrets SecretProvider, Realm string) beego.FilterFunc { + return func(ctx *context.Context) { + a := &BasicAuth{Secrets: secrets, Realm: Realm} + if username := a.CheckAuth(ctx.Request); username == "" { + a.RequireAuth(ctx.ResponseWriter, ctx.Request) + } 
+ } +} + +// SecretProvider is the SecretProvider function +type SecretProvider func(user, pass string) bool + +// BasicAuth store the SecretProvider and Realm +type BasicAuth struct { + Secrets SecretProvider + Realm string +} + +// CheckAuth Checks the username/password combination from the request. Returns +// either an empty string (authentication failed) or the name of the +// authenticated user. +// Supports MD5 and SHA1 password entries +func (a *BasicAuth) CheckAuth(r *http.Request) string { + s := strings.SplitN(r.Header.Get("Authorization"), " ", 2) + if len(s) != 2 || s[0] != "Basic" { + return "" + } + + b, err := base64.StdEncoding.DecodeString(s[1]) + if err != nil { + return "" + } + pair := strings.SplitN(string(b), ":", 2) + if len(pair) != 2 { + return "" + } + + if a.Secrets(pair[0], pair[1]) { + return pair[0] + } + return "" +} + +// RequireAuth http.Handler for BasicAuth which initiates the authentication process +// (or requires reauthentication). +func (a *BasicAuth) RequireAuth(w http.ResponseWriter, r *http.Request) { + w.Header().Set("WWW-Authenticate", `Basic realm="`+a.Realm+`"`) + w.WriteHeader(401) + w.Write([]byte("401 Unauthorized\n")) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/plugins/cors/cors.go b/Godeps/_workspace/src/github.com/astaxie/beego/plugins/cors/cors.go new file mode 100644 index 000000000..1e973a405 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/plugins/cors/cors.go @@ -0,0 +1,226 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cors provides handlers to enable CORS support. +// Usage +// import ( +// "github.com/astaxie/beego" +// "github.com/astaxie/beego/plugins/cors" +// ) +// +// func main() { +// // CORS for https://foo.* origins, allowing: +// // - PUT and PATCH methods +// // - Origin header +// // - Credentials share +// beego.InsertFilter("*", beego.BeforeRouter, cors.Allow(&cors.Options{ +// AllowOrigins: []string{"https://*.foo.com"}, +// AllowMethods: []string{"PUT", "PATCH"}, +// AllowHeaders: []string{"Origin"}, +// ExposeHeaders: []string{"Content-Length"}, +// AllowCredentials: true, +// })) +// beego.Run() +// } +package cors + +import ( + "regexp" + "strconv" + "strings" + "time" + + "github.com/astaxie/beego" + "github.com/astaxie/beego/context" +) + +const ( + headerAllowOrigin = "Access-Control-Allow-Origin" + headerAllowCredentials = "Access-Control-Allow-Credentials" + headerAllowHeaders = "Access-Control-Allow-Headers" + headerAllowMethods = "Access-Control-Allow-Methods" + headerExposeHeaders = "Access-Control-Expose-Headers" + headerMaxAge = "Access-Control-Max-Age" + + headerOrigin = "Origin" + headerRequestMethod = "Access-Control-Request-Method" + headerRequestHeaders = "Access-Control-Request-Headers" +) + +var ( + defaultAllowHeaders = []string{"Origin", "Accept", "Content-Type", "Authorization"} + // Regex patterns are generated from AllowOrigins. These are used and generated internally. + allowOriginPatterns = []string{} +) + +// Options represents Access Control options. 
+type Options struct { + // If set, all origins are allowed. + AllowAllOrigins bool + // A list of allowed origins. Wild cards and FQDNs are supported. + AllowOrigins []string + // If set, allows to share auth credentials such as cookies. + AllowCredentials bool + // A list of allowed HTTP methods. + AllowMethods []string + // A list of allowed HTTP headers. + AllowHeaders []string + // A list of exposed HTTP headers. + ExposeHeaders []string + // Max age of the CORS headers. + MaxAge time.Duration +} + +// Header converts options into CORS headers. +func (o *Options) Header(origin string) (headers map[string]string) { + headers = make(map[string]string) + // if origin is not allowed, don't extend the headers + // with CORS headers. + if !o.AllowAllOrigins && !o.IsOriginAllowed(origin) { + return + } + + // add allow origin + if o.AllowAllOrigins { + headers[headerAllowOrigin] = "*" + } else { + headers[headerAllowOrigin] = origin + } + + // add allow credentials + headers[headerAllowCredentials] = strconv.FormatBool(o.AllowCredentials) + + // add allow methods + if len(o.AllowMethods) > 0 { + headers[headerAllowMethods] = strings.Join(o.AllowMethods, ",") + } + + // add allow headers + if len(o.AllowHeaders) > 0 { + headers[headerAllowHeaders] = strings.Join(o.AllowHeaders, ",") + } + + // add exposed header + if len(o.ExposeHeaders) > 0 { + headers[headerExposeHeaders] = strings.Join(o.ExposeHeaders, ",") + } + // add a max age header + if o.MaxAge > time.Duration(0) { + headers[headerMaxAge] = strconv.FormatInt(int64(o.MaxAge/time.Second), 10) + } + return +} + +// PreflightHeader converts options into CORS headers for a preflight response. 
+func (o *Options) PreflightHeader(origin, rMethod, rHeaders string) (headers map[string]string) { + headers = make(map[string]string) + if !o.AllowAllOrigins && !o.IsOriginAllowed(origin) { + return + } + // verify if requested method is allowed + for _, method := range o.AllowMethods { + if method == rMethod { + headers[headerAllowMethods] = strings.Join(o.AllowMethods, ",") + break + } + } + + // verify if requested headers are allowed + var allowed []string + for _, rHeader := range strings.Split(rHeaders, ",") { + rHeader = strings.TrimSpace(rHeader) + lookupLoop: + for _, allowedHeader := range o.AllowHeaders { + if strings.ToLower(rHeader) == strings.ToLower(allowedHeader) { + allowed = append(allowed, rHeader) + break lookupLoop + } + } + } + + headers[headerAllowCredentials] = strconv.FormatBool(o.AllowCredentials) + // add allow origin + if o.AllowAllOrigins { + headers[headerAllowOrigin] = "*" + } else { + headers[headerAllowOrigin] = origin + } + + // add allowed headers + if len(allowed) > 0 { + headers[headerAllowHeaders] = strings.Join(allowed, ",") + } + + // add exposed headers + if len(o.ExposeHeaders) > 0 { + headers[headerExposeHeaders] = strings.Join(o.ExposeHeaders, ",") + } + // add a max age header + if o.MaxAge > time.Duration(0) { + headers[headerMaxAge] = strconv.FormatInt(int64(o.MaxAge/time.Second), 10) + } + return +} + +// IsOriginAllowed looks up if the origin matches one of the patterns +// generated from Options.AllowOrigins patterns. +func (o *Options) IsOriginAllowed(origin string) (allowed bool) { + for _, pattern := range allowOriginPatterns { + allowed, _ = regexp.MatchString(pattern, origin) + if allowed { + return + } + } + return +} + +// Allow enables CORS for requests those match the provided options. +func Allow(opts *Options) beego.FilterFunc { + // Allow default headers if nothing is specified. 
+ if len(opts.AllowHeaders) == 0 { + opts.AllowHeaders = defaultAllowHeaders + } + + for _, origin := range opts.AllowOrigins { + pattern := regexp.QuoteMeta(origin) + pattern = strings.Replace(pattern, "\\*", ".*", -1) + pattern = strings.Replace(pattern, "\\?", ".", -1) + allowOriginPatterns = append(allowOriginPatterns, "^"+pattern+"$") + } + + return func(ctx *context.Context) { + var ( + origin = ctx.Input.Header(headerOrigin) + requestedMethod = ctx.Input.Header(headerRequestMethod) + requestedHeaders = ctx.Input.Header(headerRequestHeaders) + // additional headers to be added + // to the response. + headers map[string]string + ) + + if ctx.Input.Method() == "OPTIONS" && + (requestedMethod != "" || requestedHeaders != "") { + headers = opts.PreflightHeader(origin, requestedMethod, requestedHeaders) + for key, value := range headers { + ctx.Output.Header(key, value) + } + return + } + headers = opts.Header(origin) + + for key, value := range headers { + ctx.Output.Header(key, value) + } + } +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/router.go b/Godeps/_workspace/src/github.com/astaxie/beego/router.go new file mode 100644 index 000000000..726936dee --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/router.go @@ -0,0 +1,879 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package beego + +import ( + "fmt" + "net/http" + "os" + "path" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + beecontext "github.com/astaxie/beego/context" + "github.com/astaxie/beego/toolbox" + "github.com/astaxie/beego/utils" +) + +// default filter execution points +const ( + BeforeStatic = iota + BeforeRouter + BeforeExec + AfterExec + FinishRouter +) + +const ( + routerTypeBeego = iota + routerTypeRESTFul + routerTypeHandler +) + +var ( + // HTTPMETHOD list the supported http methods. + HTTPMETHOD = map[string]string{ + "GET": "GET", + "POST": "POST", + "PUT": "PUT", + "DELETE": "DELETE", + "PATCH": "PATCH", + "OPTIONS": "OPTIONS", + "HEAD": "HEAD", + "TRACE": "TRACE", + "CONNECT": "CONNECT", + } + // these beego.Controller's methods shouldn't reflect to AutoRouter + exceptMethod = []string{"Init", "Prepare", "Finish", "Render", "RenderString", + "RenderBytes", "Redirect", "Abort", "StopRun", "UrlFor", "ServeJson", "ServeJsonp", + "ServeXml", "Input", "ParseForm", "GetString", "GetStrings", "GetInt", "GetBool", + "GetFloat", "GetFile", "SaveToFile", "StartSession", "SetSession", "GetSession", + "DelSession", "SessionRegenerateID", "DestroySession", "IsAjax", "GetSecureCookie", + "SetSecureCookie", "XsrfToken", "CheckXsrfCookie", "XsrfFormHtml", + "GetControllerAndAction"} + + urlPlaceholder = "{{placeholder}}" + // DefaultAccessLogFilter will skip the accesslog if return true + DefaultAccessLogFilter FilterHandler = &logFilter{} +) + +// FilterHandler is an interface for +type FilterHandler interface { + Filter(*beecontext.Context) bool +} + +// default log filter static file will not show +type logFilter struct { +} + +func (l *logFilter) Filter(ctx *beecontext.Context) bool { + requestPath := path.Clean(ctx.Request.URL.Path) + if requestPath == "/favicon.ico" || requestPath == "/robots.txt" { + return true + } + for prefix := range BConfig.WebConfig.StaticDir { + if strings.HasPrefix(requestPath, prefix) { + return 
true + } + } + return false +} + +// ExceptMethodAppend to append a slice's value into "exceptMethod", for controller's methods shouldn't reflect to AutoRouter +func ExceptMethodAppend(action string) { + exceptMethod = append(exceptMethod, action) +} + +type controllerInfo struct { + pattern string + controllerType reflect.Type + methods map[string]string + handler http.Handler + runFunction FilterFunc + routerType int +} + +// ControllerRegister containers registered router rules, controller handlers and filters. +type ControllerRegister struct { + routers map[string]*Tree + enableFilter bool + filters map[int][]*FilterRouter + pool sync.Pool +} + +// NewControllerRegister returns a new ControllerRegister. +func NewControllerRegister() *ControllerRegister { + cr := &ControllerRegister{ + routers: make(map[string]*Tree), + filters: make(map[int][]*FilterRouter), + } + cr.pool.New = func() interface{} { + return beecontext.NewContext() + } + return cr +} + +// Add controller handler and pattern rules to ControllerRegister. 
+// usage: +// default methods is the same name as method +// Add("/user",&UserController{}) +// Add("/api/list",&RestController{},"*:ListFood") +// Add("/api/create",&RestController{},"post:CreateFood") +// Add("/api/update",&RestController{},"put:UpdateFood") +// Add("/api/delete",&RestController{},"delete:DeleteFood") +// Add("/api",&RestController{},"get,post:ApiFunc" +// Add("/simple",&SimpleController{},"get:GetFunc;post:PostFunc") +func (p *ControllerRegister) Add(pattern string, c ControllerInterface, mappingMethods ...string) { + reflectVal := reflect.ValueOf(c) + t := reflect.Indirect(reflectVal).Type() + methods := make(map[string]string) + if len(mappingMethods) > 0 { + semi := strings.Split(mappingMethods[0], ";") + for _, v := range semi { + colon := strings.Split(v, ":") + if len(colon) != 2 { + panic("method mapping format is invalid") + } + comma := strings.Split(colon[0], ",") + for _, m := range comma { + if _, ok := HTTPMETHOD[strings.ToUpper(m)]; m == "*" || ok { + if val := reflectVal.MethodByName(colon[1]); val.IsValid() { + methods[strings.ToUpper(m)] = colon[1] + } else { + panic("'" + colon[1] + "' method doesn't exist in the controller " + t.Name()) + } + } else { + panic(v + " is an invalid method mapping. 
Method doesn't exist " + m) + } + } + } + } + + route := &controllerInfo{} + route.pattern = pattern + route.methods = methods + route.routerType = routerTypeBeego + route.controllerType = t + if len(methods) == 0 { + for _, m := range HTTPMETHOD { + p.addToRouter(m, pattern, route) + } + } else { + for k := range methods { + if k == "*" { + for _, m := range HTTPMETHOD { + p.addToRouter(m, pattern, route) + } + } else { + p.addToRouter(k, pattern, route) + } + } + } +} + +func (p *ControllerRegister) addToRouter(method, pattern string, r *controllerInfo) { + if !BConfig.RouterCaseSensitive { + pattern = strings.ToLower(pattern) + } + if t, ok := p.routers[method]; ok { + t.AddRouter(pattern, r) + } else { + t := NewTree() + t.AddRouter(pattern, r) + p.routers[method] = t + } +} + +// Include only when the Runmode is dev will generate router file in the router/auto.go from the controller +// Include(&BankAccount{}, &OrderController{},&RefundController{},&ReceiptController{}) +func (p *ControllerRegister) Include(cList ...ControllerInterface) { + if BConfig.RunMode == DEV { + skip := make(map[string]bool, 10) + for _, c := range cList { + reflectVal := reflect.ValueOf(c) + t := reflect.Indirect(reflectVal).Type() + gopath := os.Getenv("GOPATH") + if gopath == "" { + panic("you are in dev mode. 
So please set gopath") + } + pkgpath := "" + + wgopath := filepath.SplitList(gopath) + for _, wg := range wgopath { + wg, _ = filepath.EvalSymlinks(filepath.Join(wg, "src", t.PkgPath())) + if utils.FileExists(wg) { + pkgpath = wg + break + } + } + if pkgpath != "" { + if _, ok := skip[pkgpath]; !ok { + skip[pkgpath] = true + parserPkg(pkgpath, t.PkgPath()) + } + } + } + } + for _, c := range cList { + reflectVal := reflect.ValueOf(c) + t := reflect.Indirect(reflectVal).Type() + key := t.PkgPath() + ":" + t.Name() + if comm, ok := GlobalControllerRouter[key]; ok { + for _, a := range comm { + p.Add(a.Router, c, strings.Join(a.AllowHTTPMethods, ",")+":"+a.Method) + } + } + } +} + +// Get add get method +// usage: +// Get("/", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func (p *ControllerRegister) Get(pattern string, f FilterFunc) { + p.AddMethod("get", pattern, f) +} + +// Post add post method +// usage: +// Post("/api", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func (p *ControllerRegister) Post(pattern string, f FilterFunc) { + p.AddMethod("post", pattern, f) +} + +// Put add put method +// usage: +// Put("/api/:id", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func (p *ControllerRegister) Put(pattern string, f FilterFunc) { + p.AddMethod("put", pattern, f) +} + +// Delete add delete method +// usage: +// Delete("/api/:id", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func (p *ControllerRegister) Delete(pattern string, f FilterFunc) { + p.AddMethod("delete", pattern, f) +} + +// Head add head method +// usage: +// Head("/api/:id", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func (p *ControllerRegister) Head(pattern string, f FilterFunc) { + p.AddMethod("head", pattern, f) +} + +// Patch add patch method +// usage: +// Patch("/api/:id", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func (p 
*ControllerRegister) Patch(pattern string, f FilterFunc) { + p.AddMethod("patch", pattern, f) +} + +// Options add options method +// usage: +// Options("/api/:id", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func (p *ControllerRegister) Options(pattern string, f FilterFunc) { + p.AddMethod("options", pattern, f) +} + +// Any add all method +// usage: +// Any("/api/:id", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func (p *ControllerRegister) Any(pattern string, f FilterFunc) { + p.AddMethod("*", pattern, f) +} + +// AddMethod add http method router +// usage: +// AddMethod("get","/api/:id", func(ctx *context.Context){ +// ctx.Output.Body("hello world") +// }) +func (p *ControllerRegister) AddMethod(method, pattern string, f FilterFunc) { + method = strings.ToUpper(method) + if _, ok := HTTPMETHOD[method]; method != "*" && !ok { + panic("not support http method: " + method) + } + route := &controllerInfo{} + route.pattern = pattern + route.routerType = routerTypeRESTFul + route.runFunction = f + methods := make(map[string]string) + if method == "*" { + for _, val := range HTTPMETHOD { + methods[val] = val + } + } else { + methods[method] = method + } + route.methods = methods + for k := range methods { + if k == "*" { + for _, m := range HTTPMETHOD { + p.addToRouter(m, pattern, route) + } + } else { + p.addToRouter(k, pattern, route) + } + } +} + +// Handler add user defined Handler +func (p *ControllerRegister) Handler(pattern string, h http.Handler, options ...interface{}) { + route := &controllerInfo{} + route.pattern = pattern + route.routerType = routerTypeHandler + route.handler = h + if len(options) > 0 { + if _, ok := options[0].(bool); ok { + pattern = path.Join(pattern, "?:all(.*)") + } + } + for _, m := range HTTPMETHOD { + p.addToRouter(m, pattern, route) + } +} + +// AddAuto router to ControllerRegister. +// example beego.AddAuto(&MainContorlller{}), +// MainController has method List and Page. 
+// visit the url /main/list to execute List function +// /main/page to execute Page function. +func (p *ControllerRegister) AddAuto(c ControllerInterface) { + p.AddAutoPrefix("/", c) +} + +// AddAutoPrefix Add auto router to ControllerRegister with prefix. +// example beego.AddAutoPrefix("/admin",&MainContorlller{}), +// MainController has method List and Page. +// visit the url /admin/main/list to execute List function +// /admin/main/page to execute Page function. +func (p *ControllerRegister) AddAutoPrefix(prefix string, c ControllerInterface) { + reflectVal := reflect.ValueOf(c) + rt := reflectVal.Type() + ct := reflect.Indirect(reflectVal).Type() + controllerName := strings.TrimSuffix(ct.Name(), "Controller") + for i := 0; i < rt.NumMethod(); i++ { + if !utils.InSlice(rt.Method(i).Name, exceptMethod) { + route := &controllerInfo{} + route.routerType = routerTypeBeego + route.methods = map[string]string{"*": rt.Method(i).Name} + route.controllerType = ct + pattern := path.Join(prefix, strings.ToLower(controllerName), strings.ToLower(rt.Method(i).Name), "*") + patternInit := path.Join(prefix, controllerName, rt.Method(i).Name, "*") + patternFix := path.Join(prefix, strings.ToLower(controllerName), strings.ToLower(rt.Method(i).Name)) + patternFixInit := path.Join(prefix, controllerName, rt.Method(i).Name) + route.pattern = pattern + for _, m := range HTTPMETHOD { + p.addToRouter(m, pattern, route) + p.addToRouter(m, patternInit, route) + p.addToRouter(m, patternFix, route) + p.addToRouter(m, patternFixInit, route) + } + } + } +} + +// InsertFilter Add a FilterFunc with pattern rule and action constant. 
+// The bool params is for setting the returnOnOutput value (false allows multiple filters to execute) +func (p *ControllerRegister) InsertFilter(pattern string, pos int, filter FilterFunc, params ...bool) error { + + mr := new(FilterRouter) + mr.tree = NewTree() + mr.pattern = pattern + mr.filterFunc = filter + if !BConfig.RouterCaseSensitive { + pattern = strings.ToLower(pattern) + } + if len(params) == 0 { + mr.returnOnOutput = true + } else { + mr.returnOnOutput = params[0] + } + mr.tree.AddRouter(pattern, true) + return p.insertFilterRouter(pos, mr) +} + +// add Filter into +func (p *ControllerRegister) insertFilterRouter(pos int, mr *FilterRouter) error { + p.filters[pos] = append(p.filters[pos], mr) + p.enableFilter = true + return nil +} + +// URLFor does another controller handler in this request function. +// it can access any controller method. +func (p *ControllerRegister) URLFor(endpoint string, values ...interface{}) string { + paths := strings.Split(endpoint, ".") + if len(paths) <= 1 { + Warn("urlfor endpoint must like path.controller.method") + return "" + } + if len(values)%2 != 0 { + Warn("urlfor params must key-value pair") + return "" + } + params := make(map[string]string) + if len(values) > 0 { + key := "" + for k, v := range values { + if k%2 == 0 { + key = fmt.Sprint(v) + } else { + params[key] = fmt.Sprint(v) + } + } + } + controllName := strings.Join(paths[:len(paths)-1], "/") + methodName := paths[len(paths)-1] + for m, t := range p.routers { + ok, url := p.geturl(t, "/", controllName, methodName, params, m) + if ok { + return url + } + } + return "" +} + +func (p *ControllerRegister) geturl(t *Tree, url, controllName, methodName string, params map[string]string, httpMethod string) (bool, string) { + for _, subtree := range t.fixrouters { + u := path.Join(url, subtree.prefix) + ok, u := p.geturl(subtree, u, controllName, methodName, params, httpMethod) + if ok { + return ok, u + } + } + if t.wildcard != nil { + u := path.Join(url, 
urlPlaceholder) + ok, u := p.geturl(t.wildcard, u, controllName, methodName, params, httpMethod) + if ok { + return ok, u + } + } + for _, l := range t.leaves { + if c, ok := l.runObject.(*controllerInfo); ok { + if c.routerType == routerTypeBeego && + strings.HasSuffix(path.Join(c.controllerType.PkgPath(), c.controllerType.Name()), controllName) { + find := false + if _, ok := HTTPMETHOD[strings.ToUpper(methodName)]; ok { + if len(c.methods) == 0 { + find = true + } else if m, ok := c.methods[strings.ToUpper(methodName)]; ok && m == strings.ToUpper(methodName) { + find = true + } else if m, ok = c.methods["*"]; ok && m == methodName { + find = true + } + } + if !find { + for m, md := range c.methods { + if (m == "*" || m == httpMethod) && md == methodName { + find = true + } + } + } + if find { + if l.regexps == nil { + if len(l.wildcards) == 0 { + return true, strings.Replace(url, "/"+urlPlaceholder, "", 1) + toURL(params) + } + if len(l.wildcards) == 1 { + if v, ok := params[l.wildcards[0]]; ok { + delete(params, l.wildcards[0]) + return true, strings.Replace(url, urlPlaceholder, v, 1) + toURL(params) + } + return false, "" + } + if len(l.wildcards) == 3 && l.wildcards[0] == "." 
{ + if p, ok := params[":path"]; ok { + if e, isok := params[":ext"]; isok { + delete(params, ":path") + delete(params, ":ext") + return true, strings.Replace(url, urlPlaceholder, p+"."+e, -1) + toURL(params) + } + } + } + canskip := false + for _, v := range l.wildcards { + if v == ":" { + canskip = true + continue + } + if u, ok := params[v]; ok { + delete(params, v) + url = strings.Replace(url, urlPlaceholder, u, 1) + } else { + if canskip { + canskip = false + continue + } + return false, "" + } + } + return true, url + toURL(params) + } + var i int + var startreg bool + regurl := "" + for _, v := range strings.Trim(l.regexps.String(), "^$") { + if v == '(' { + startreg = true + continue + } else if v == ')' { + startreg = false + if v, ok := params[l.wildcards[i]]; ok { + delete(params, l.wildcards[i]) + regurl = regurl + v + i++ + } else { + break + } + } else if !startreg { + regurl = string(append([]rune(regurl), v)) + } + } + if l.regexps.MatchString(regurl) { + ps := strings.Split(regurl, "/") + for _, p := range ps { + url = strings.Replace(url, urlPlaceholder, p, 1) + } + return true, url + toURL(params) + } + } + } + } + } + + return false, "" +} + +func (p *ControllerRegister) execFilter(context *beecontext.Context, pos int, urlPath string) (started bool) { + if p.enableFilter { + if l, ok := p.filters[pos]; ok { + for _, filterR := range l { + if filterR.returnOnOutput && context.ResponseWriter.Started { + return true + } + if ok := filterR.ValidRouter(urlPath, context); ok { + filterR.filterFunc(context) + } + if filterR.returnOnOutput && context.ResponseWriter.Started { + return true + } + } + } + } + return false +} + +// Implement http.Handler interface. 
+func (p *ControllerRegister) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + startTime := time.Now() + var ( + runRouter reflect.Type + findRouter bool + runMethod string + routerInfo *controllerInfo + ) + context := p.pool.Get().(*beecontext.Context) + context.Reset(rw, r) + defer p.pool.Put(context) + defer p.recoverPanic(context) + + context.Output.EnableGzip = BConfig.EnableGzip + + if BConfig.RunMode == DEV { + context.Output.Header("Server", BConfig.ServerName) + } + + var urlPath string + if !BConfig.RouterCaseSensitive { + urlPath = strings.ToLower(r.URL.Path) + } else { + urlPath = r.URL.Path + } + + // filter wrong http method + if _, ok := HTTPMETHOD[r.Method]; !ok { + http.Error(rw, "Method Not Allowed", 405) + goto Admin + } + + // filter for static file + if p.execFilter(context, BeforeStatic, urlPath) { + goto Admin + } + + serverStaticRouter(context) + if context.ResponseWriter.Started { + findRouter = true + goto Admin + } + + if r.Method != "GET" && r.Method != "HEAD" { + if BConfig.CopyRequestBody && !context.Input.IsUpload() { + context.Input.CopyBody(BConfig.MaxMemory) + } + context.Input.ParseFormOrMulitForm(BConfig.MaxMemory) + } + + // session init + if BConfig.WebConfig.Session.SessionOn { + var err error + context.Input.CruSession, err = GlobalSessions.SessionStart(rw, r) + if err != nil { + Error(err) + exception("503", context) + return + } + defer func() { + if context.Input.CruSession != nil { + context.Input.CruSession.SessionRelease(rw) + } + }() + } + + if p.execFilter(context, BeforeRouter, urlPath) { + goto Admin + } + + if !findRouter { + httpMethod := r.Method + if t, ok := p.routers[httpMethod]; ok { + runObject := t.Match(urlPath, context) + if r, ok := runObject.(*controllerInfo); ok { + routerInfo = r + findRouter = true + if splat := context.Input.Param(":splat"); splat != "" { + for k, v := range strings.Split(splat, "/") { + context.Input.SetParam(strconv.Itoa(k), v) + } + } + } + } + + } + + //if no matches to 
url, throw a not found exception + if !findRouter { + exception("404", context) + goto Admin + } + + if findRouter { + //execute middleware filters + if p.execFilter(context, BeforeExec, urlPath) { + goto Admin + } + isRunnable := false + if routerInfo != nil { + if routerInfo.routerType == routerTypeRESTFul { + if _, ok := routerInfo.methods[r.Method]; ok { + isRunnable = true + routerInfo.runFunction(context) + } else { + exception("405", context) + goto Admin + } + } else if routerInfo.routerType == routerTypeHandler { + isRunnable = true + routerInfo.handler.ServeHTTP(rw, r) + } else { + runRouter = routerInfo.controllerType + method := r.Method + if r.Method == "POST" && context.Input.Query("_method") == "PUT" { + method = "PUT" + } + if r.Method == "POST" && context.Input.Query("_method") == "DELETE" { + method = "DELETE" + } + if m, ok := routerInfo.methods[method]; ok { + runMethod = m + } else if m, ok = routerInfo.methods["*"]; ok { + runMethod = m + } else { + runMethod = method + } + } + } + + // also defined runRouter & runMethod from filter + if !isRunnable { + //Invoke the request handler + vc := reflect.New(runRouter) + execController, ok := vc.Interface().(ControllerInterface) + if !ok { + panic("controller is not ControllerInterface") + } + + //call the controller init function + execController.Init(context, runRouter.Name(), runMethod, vc.Interface()) + + //call prepare function + execController.Prepare() + + //if XSRF is Enable then check cookie where there has any cookie in the request's cookie _csrf + if BConfig.WebConfig.EnableXSRF { + execController.XSRFToken() + if r.Method == "POST" || r.Method == "DELETE" || r.Method == "PUT" || + (r.Method == "POST" && (context.Input.Query("_method") == "DELETE" || context.Input.Query("_method") == "PUT")) { + execController.CheckXSRFCookie() + } + } + + execController.URLMapping() + + if !context.ResponseWriter.Started { + //exec main logic + switch runMethod { + case "GET": + execController.Get() + 
case "POST": + execController.Post() + case "DELETE": + execController.Delete() + case "PUT": + execController.Put() + case "HEAD": + execController.Head() + case "PATCH": + execController.Patch() + case "OPTIONS": + execController.Options() + default: + if !execController.HandlerFunc(runMethod) { + var in []reflect.Value + method := vc.MethodByName(runMethod) + method.Call(in) + } + } + + //render template + if !context.ResponseWriter.Started && context.Output.Status == 0 { + if BConfig.WebConfig.AutoRender { + if err := execController.Render(); err != nil { + panic(err) + } + } + } + } + + // finish all runRouter. release resource + execController.Finish() + } + + //execute middleware filters + if p.execFilter(context, AfterExec, urlPath) { + goto Admin + } + } + + p.execFilter(context, FinishRouter, urlPath) + +Admin: + timeDur := time.Since(startTime) + //admin module record QPS + if BConfig.Listen.EnableAdmin { + if FilterMonitorFunc(r.Method, r.URL.Path, timeDur) { + if runRouter != nil { + go toolbox.StatisticsMap.AddStatistics(r.Method, r.URL.Path, runRouter.Name(), timeDur) + } else { + go toolbox.StatisticsMap.AddStatistics(r.Method, r.URL.Path, "", timeDur) + } + } + } + + if BConfig.RunMode == DEV || BConfig.Log.AccessLogs { + var devInfo string + if findRouter { + if routerInfo != nil { + devInfo = fmt.Sprintf("| % -10s | % -40s | % -16s | % -10s | % -40s |", r.Method, r.URL.Path, timeDur.String(), "match", routerInfo.pattern) + } else { + devInfo = fmt.Sprintf("| % -10s | % -40s | % -16s | % -10s |", r.Method, r.URL.Path, timeDur.String(), "match") + } + } else { + devInfo = fmt.Sprintf("| % -10s | % -40s | % -16s | % -10s |", r.Method, r.URL.Path, timeDur.String(), "notmatch") + } + if DefaultAccessLogFilter == nil || !DefaultAccessLogFilter.Filter(context) { + Debug(devInfo) + } + } + + // Call WriteHeader if status code has been set changed + if context.Output.Status != 0 { + context.ResponseWriter.WriteHeader(context.Output.Status) + } +} + +func 
(p *ControllerRegister) recoverPanic(context *beecontext.Context) { + if err := recover(); err != nil { + if err == ErrAbort { + return + } + if !BConfig.RecoverPanic { + panic(err) + } else { + if BConfig.EnableErrorsShow { + if _, ok := ErrorMaps[fmt.Sprint(err)]; ok { + exception(fmt.Sprint(err), context) + return + } + } + var stack string + Critical("the request url is ", context.Input.URL()) + Critical("Handler crashed with error", err) + for i := 1; ; i++ { + _, file, line, ok := runtime.Caller(i) + if !ok { + break + } + Critical(fmt.Sprintf("%s:%d", file, line)) + stack = stack + fmt.Sprintln(fmt.Sprintf("%s:%d", file, line)) + } + if BConfig.RunMode == DEV { + showErr(err, context, stack) + } + } + } +} + +func toURL(params map[string]string) string { + if len(params) == 0 { + return "" + } + u := "?" + for k, v := range params { + u += k + "=" + v + "&" + } + return strings.TrimRight(u, "&") +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/session/README.md b/Godeps/_workspace/src/github.com/astaxie/beego/session/README.md new file mode 100644 index 000000000..6d0a297e3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/session/README.md @@ -0,0 +1,114 @@ +session +============== + +session is a Go session manager. It can use many session providers. Just like the `database/sql` and `database/sql/driver`. + +## How to install? + + go get github.com/astaxie/beego/session + + +## What providers are supported? + +As of now this session manager support memory, file, Redis and MySQL. + + +## How to use it? 
+ +First you must import it + + import ( + "github.com/astaxie/beego/session" + ) + +Then in you web app init the global session manager + + var globalSessions *session.Manager + +* Use **memory** as provider: + + func init() { + globalSessions, _ = session.NewManager("memory", `{"cookieName":"gosessionid","gclifetime":3600}`) + go globalSessions.GC() + } + +* Use **file** as provider, the last param is the path where you want file to be stored: + + func init() { + globalSessions, _ = session.NewManager("file",`{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"./tmp"}`) + go globalSessions.GC() + } + +* Use **Redis** as provider, the last param is the Redis conn address,poolsize,password: + + func init() { + globalSessions, _ = session.NewManager("redis", `{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"127.0.0.1:6379,100,astaxie"}`) + go globalSessions.GC() + } + +* Use **MySQL** as provider, the last param is the DSN, learn more from [mysql](https://github.com/go-sql-driver/mysql#dsn-data-source-name): + + func init() { + globalSessions, _ = session.NewManager( + "mysql", `{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"username:password@protocol(address)/dbname?param=value"}`) + go globalSessions.GC() + } + +* Use **Cookie** as provider: + + func init() { + globalSessions, _ = session.NewManager( + "cookie", `{"cookieName":"gosessionid","enableSetCookie":false,"gclifetime":3600,"ProviderConfig":"{\"cookieName\":\"gosessionid\",\"securityKey\":\"beegocookiehashkey\"}"}`) + go globalSessions.GC() + } + + +Finally in the handlerfunc you can use it like this + + func login(w http.ResponseWriter, r *http.Request) { + sess := globalSessions.SessionStart(w, r) + defer sess.SessionRelease(w) + username := sess.Get("username") + fmt.Println(username) + if r.Method == "GET" { + t, _ := template.ParseFiles("login.gtpl") + t.Execute(w, nil) + } else { + fmt.Println("username:", r.Form["username"]) + sess.Set("username", 
r.Form["username"]) + fmt.Println("password:", r.Form["password"]) + } + } + + +## How to write own provider? + +When you develop a web app, maybe you want to write own provider because you must meet the requirements. + +Writing a provider is easy. You only need to define two struct types +(Session and Provider), which satisfy the interface definition. +Maybe you will find the **memory** provider is a good example. + + type SessionStore interface { + Set(key, value interface{}) error //set session value + Get(key interface{}) interface{} //get session value + Delete(key interface{}) error //delete session value + SessionID() string //back current sessionID + SessionRelease(w http.ResponseWriter) // release the resource & save data to provider & return the data + Flush() error //delete all data + } + + type Provider interface { + SessionInit(gclifetime int64, config string) error + SessionRead(sid string) (SessionStore, error) + SessionExist(sid string) bool + SessionRegenerate(oldsid, sid string) (SessionStore, error) + SessionDestroy(sid string) error + SessionAll() int //get all active session + SessionGC() + } + + +## LICENSE + +BSD License http://creativecommons.org/licenses/BSD/ diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/session/couchbase/sess_couchbase.go b/Godeps/_workspace/src/github.com/astaxie/beego/session/couchbase/sess_couchbase.go new file mode 100644 index 000000000..d5be11d0b --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/session/couchbase/sess_couchbase.go @@ -0,0 +1,243 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package couchbase for session provider +// +// depend on github.com/couchbaselabs/go-couchbasee +// +// go install github.com/couchbaselabs/go-couchbase +// +// Usage: +// import( +// _ "github.com/astaxie/beego/session/couchbase" +// "github.com/astaxie/beego/session" +// ) +// +// func init() { +// globalSessions, _ = session.NewManager("couchbase", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"http://host:port/, Pool, Bucket"}``) +// go globalSessions.GC() +// } +// +// more docs: http://beego.me/docs/module/session.md +package couchbase + +import ( + "net/http" + "strings" + "sync" + + couchbase "github.com/couchbase/go-couchbase" + + "github.com/astaxie/beego/session" +) + +var couchbpder = &Provider{} + +// SessionStore store each session +type SessionStore struct { + b *couchbase.Bucket + sid string + lock sync.RWMutex + values map[interface{}]interface{} + maxlifetime int64 +} + +// Provider couchabse provided +type Provider struct { + maxlifetime int64 + savePath string + pool string + bucket string + b *couchbase.Bucket +} + +// Set value to couchabse session +func (cs *SessionStore) Set(key, value interface{}) error { + cs.lock.Lock() + defer cs.lock.Unlock() + cs.values[key] = value + return nil +} + +// Get value from couchabse session +func (cs *SessionStore) Get(key interface{}) interface{} { + cs.lock.RLock() + defer cs.lock.RUnlock() + if v, ok := cs.values[key]; ok { + return v + } + return nil +} + +// Delete value in couchbase session by given key +func (cs *SessionStore) Delete(key interface{}) error { 
+ cs.lock.Lock() + defer cs.lock.Unlock() + delete(cs.values, key) + return nil +} + +// Flush Clean all values in couchbase session +func (cs *SessionStore) Flush() error { + cs.lock.Lock() + defer cs.lock.Unlock() + cs.values = make(map[interface{}]interface{}) + return nil +} + +// SessionID Get couchbase session store id +func (cs *SessionStore) SessionID() string { + return cs.sid +} + +// SessionRelease Write couchbase session with Gob string +func (cs *SessionStore) SessionRelease(w http.ResponseWriter) { + defer cs.b.Close() + + bo, err := session.EncodeGob(cs.values) + if err != nil { + return + } + + cs.b.Set(cs.sid, int(cs.maxlifetime), bo) +} + +func (cp *Provider) getBucket() *couchbase.Bucket { + c, err := couchbase.Connect(cp.savePath) + if err != nil { + return nil + } + + pool, err := c.GetPool(cp.pool) + if err != nil { + return nil + } + + bucket, err := pool.GetBucket(cp.bucket) + if err != nil { + return nil + } + + return bucket +} + +// SessionInit init couchbase session +// savepath like couchbase server REST/JSON URL +// e.g. http://host:port/, Pool, Bucket +func (cp *Provider) SessionInit(maxlifetime int64, savePath string) error { + cp.maxlifetime = maxlifetime + configs := strings.Split(savePath, ",") + if len(configs) > 0 { + cp.savePath = configs[0] + } + if len(configs) > 1 { + cp.pool = configs[1] + } + if len(configs) > 2 { + cp.bucket = configs[2] + } + + return nil +} + +// SessionRead read couchbase session by sid +func (cp *Provider) SessionRead(sid string) (session.Store, error) { + cp.b = cp.getBucket() + + var doc []byte + + err := cp.b.Get(sid, &doc) + var kv map[interface{}]interface{} + if doc == nil { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob(doc) + if err != nil { + return nil, err + } + } + + cs := &SessionStore{b: cp.b, sid: sid, values: kv, maxlifetime: cp.maxlifetime} + return cs, nil +} + +// SessionExist Check couchbase session exist. +// it checkes sid exist or not. 
+func (cp *Provider) SessionExist(sid string) bool { + cp.b = cp.getBucket() + defer cp.b.Close() + + var doc []byte + + if err := cp.b.Get(sid, &doc); err != nil || doc == nil { + return false + } + return true +} + +// SessionRegenerate remove oldsid and use sid to generate new session +func (cp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) { + cp.b = cp.getBucket() + + var doc []byte + if err := cp.b.Get(oldsid, &doc); err != nil || doc == nil { + cp.b.Set(sid, int(cp.maxlifetime), "") + } else { + err := cp.b.Delete(oldsid) + if err != nil { + return nil, err + } + _, _ = cp.b.Add(sid, int(cp.maxlifetime), doc) + } + + err := cp.b.Get(sid, &doc) + if err != nil { + return nil, err + } + var kv map[interface{}]interface{} + if doc == nil { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob(doc) + if err != nil { + return nil, err + } + } + + cs := &SessionStore{b: cp.b, sid: sid, values: kv, maxlifetime: cp.maxlifetime} + return cs, nil +} + +// SessionDestroy Remove bucket in this couchbase +func (cp *Provider) SessionDestroy(sid string) error { + cp.b = cp.getBucket() + defer cp.b.Close() + + cp.b.Delete(sid) + return nil +} + +// SessionGC Recycle +func (cp *Provider) SessionGC() { + return +} + +// SessionAll return all active session +func (cp *Provider) SessionAll() int { + return 0 +} + +func init() { + session.Register("couchbase", couchbpder) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/session/ledis/ledis_session.go b/Godeps/_workspace/src/github.com/astaxie/beego/session/ledis/ledis_session.go new file mode 100644 index 000000000..68f37b08c --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/session/ledis/ledis_session.go @@ -0,0 +1,179 @@ +// Package ledis provide session Provider +package ledis + +import ( + "net/http" + "strconv" + "strings" + "sync" + + "github.com/astaxie/beego/session" + "github.com/siddontang/ledisdb/config" + 
"github.com/siddontang/ledisdb/ledis" +) + +var ledispder = &Provider{} +var c *ledis.DB + +// SessionStore ledis session store +type SessionStore struct { + sid string + lock sync.RWMutex + values map[interface{}]interface{} + maxlifetime int64 +} + +// Set value in ledis session +func (ls *SessionStore) Set(key, value interface{}) error { + ls.lock.Lock() + defer ls.lock.Unlock() + ls.values[key] = value + return nil +} + +// Get value in ledis session +func (ls *SessionStore) Get(key interface{}) interface{} { + ls.lock.RLock() + defer ls.lock.RUnlock() + if v, ok := ls.values[key]; ok { + return v + } + return nil +} + +// Delete value in ledis session +func (ls *SessionStore) Delete(key interface{}) error { + ls.lock.Lock() + defer ls.lock.Unlock() + delete(ls.values, key) + return nil +} + +// Flush clear all values in ledis session +func (ls *SessionStore) Flush() error { + ls.lock.Lock() + defer ls.lock.Unlock() + ls.values = make(map[interface{}]interface{}) + return nil +} + +// SessionID get ledis session id +func (ls *SessionStore) SessionID() string { + return ls.sid +} + +// SessionRelease save session values to ledis +func (ls *SessionStore) SessionRelease(w http.ResponseWriter) { + b, err := session.EncodeGob(ls.values) + if err != nil { + return + } + c.Set([]byte(ls.sid), b) + c.Expire([]byte(ls.sid), ls.maxlifetime) +} + +// Provider ledis session provider +type Provider struct { + maxlifetime int64 + savePath string + db int +} + +// SessionInit init ledis session +// savepath like ledis server saveDataPath,pool size +// e.g. 
127.0.0.1:6379,100,astaxie +func (lp *Provider) SessionInit(maxlifetime int64, savePath string) error { + var err error + lp.maxlifetime = maxlifetime + configs := strings.Split(savePath, ",") + if len(configs) == 1 { + lp.savePath = configs[0] + } else if len(configs) == 2 { + lp.savePath = configs[0] + lp.db, err = strconv.Atoi(configs[1]) + if err != nil { + return err + } + } + cfg := new(config.Config) + cfg.DataDir = lp.savePath + nowLedis, err := ledis.Open(cfg) + c, err = nowLedis.Select(lp.db) + if err != nil { + println(err) + return nil + } + return nil +} + +// SessionRead read ledis session by sid +func (lp *Provider) SessionRead(sid string) (session.Store, error) { + kvs, err := c.Get([]byte(sid)) + var kv map[interface{}]interface{} + if len(kvs) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob(kvs) + if err != nil { + return nil, err + } + } + ls := &SessionStore{sid: sid, values: kv, maxlifetime: lp.maxlifetime} + return ls, nil +} + +// SessionExist check ledis session exist by sid +func (lp *Provider) SessionExist(sid string) bool { + count, _ := c.Exists([]byte(sid)) + if count == 0 { + return false + } + return true +} + +// SessionRegenerate generate new sid for ledis session +func (lp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) { + count, _ := c.Exists([]byte(sid)) + if count == 0 { + // oldsid doesn't exists, set the new sid directly + // ignore error here, since if it return error + // the existed value will be 0 + c.Set([]byte(sid), []byte("")) + c.Expire([]byte(sid), lp.maxlifetime) + } else { + data, _ := c.Get([]byte(oldsid)) + c.Set([]byte(sid), data) + c.Expire([]byte(sid), lp.maxlifetime) + } + kvs, err := c.Get([]byte(sid)) + var kv map[interface{}]interface{} + if len(kvs) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob([]byte(kvs)) + if err != nil { + return nil, err + } + } + ls := &SessionStore{sid: sid, values: kv, 
maxlifetime: lp.maxlifetime} + return ls, nil +} + +// SessionDestroy delete ledis session by id +func (lp *Provider) SessionDestroy(sid string) error { + c.Del([]byte(sid)) + return nil +} + +// SessionGC Impelment method, no used. +func (lp *Provider) SessionGC() { + return +} + +// SessionAll return all active session +func (lp *Provider) SessionAll() int { + return 0 +} +func init() { + session.Register("ledis", ledispder) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/session/memcache/sess_memcache.go b/Godeps/_workspace/src/github.com/astaxie/beego/session/memcache/sess_memcache.go new file mode 100644 index 000000000..f1069bc93 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/session/memcache/sess_memcache.go @@ -0,0 +1,232 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package memcache for session provider +// +// depend on github.com/bradfitz/gomemcache/memcache +// +// go install github.com/bradfitz/gomemcache/memcache +// +// Usage: +// import( +// _ "github.com/astaxie/beego/session/memcache" +// "github.com/astaxie/beego/session" +// ) +// +// func init() { +// globalSessions, _ = session.NewManager("memcache", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"127.0.0.1:11211"}``) +// go globalSessions.GC() +// } +// +// more docs: http://beego.me/docs/module/session.md +package memcache + +import ( + "net/http" + "strings" + "sync" + + "github.com/astaxie/beego/session" + + "github.com/bradfitz/gomemcache/memcache" +) + +var mempder = &MemProvider{} +var client *memcache.Client + +// SessionStore memcache session store +type SessionStore struct { + sid string + lock sync.RWMutex + values map[interface{}]interface{} + maxlifetime int64 +} + +// Set value in memcache session +func (rs *SessionStore) Set(key, value interface{}) error { + rs.lock.Lock() + defer rs.lock.Unlock() + rs.values[key] = value + return nil +} + +// Get value in memcache session +func (rs *SessionStore) Get(key interface{}) interface{} { + rs.lock.RLock() + defer rs.lock.RUnlock() + if v, ok := rs.values[key]; ok { + return v + } + return nil +} + +// Delete value in memcache session +func (rs *SessionStore) Delete(key interface{}) error { + rs.lock.Lock() + defer rs.lock.Unlock() + delete(rs.values, key) + return nil +} + +// Flush clear all values in memcache session +func (rs *SessionStore) Flush() error { + rs.lock.Lock() + defer rs.lock.Unlock() + rs.values = make(map[interface{}]interface{}) + return nil +} + +// SessionID get memcache session id +func (rs *SessionStore) SessionID() string { + return rs.sid +} + +// SessionRelease save session values to memcache +func (rs *SessionStore) SessionRelease(w http.ResponseWriter) { + b, err := session.EncodeGob(rs.values) + if err != nil { + return + } + item := memcache.Item{Key: 
rs.sid, Value: b, Expiration: int32(rs.maxlifetime)} + client.Set(&item) +} + +// MemProvider memcache session provider +type MemProvider struct { + maxlifetime int64 + conninfo []string + poolsize int + password string +} + +// SessionInit init memcache session +// savepath like +// e.g. 127.0.0.1:9090 +func (rp *MemProvider) SessionInit(maxlifetime int64, savePath string) error { + rp.maxlifetime = maxlifetime + rp.conninfo = strings.Split(savePath, ";") + client = memcache.New(rp.conninfo...) + return nil +} + +// SessionRead read memcache session by sid +func (rp *MemProvider) SessionRead(sid string) (session.Store, error) { + if client == nil { + if err := rp.connectInit(); err != nil { + return nil, err + } + } + item, err := client.Get(sid) + if err != nil && err == memcache.ErrCacheMiss { + rs := &SessionStore{sid: sid, values: make(map[interface{}]interface{}), maxlifetime: rp.maxlifetime} + return rs, nil + } + var kv map[interface{}]interface{} + if len(item.Value) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob(item.Value) + if err != nil { + return nil, err + } + } + rs := &SessionStore{sid: sid, values: kv, maxlifetime: rp.maxlifetime} + return rs, nil +} + +// SessionExist check memcache session exist by sid +func (rp *MemProvider) SessionExist(sid string) bool { + if client == nil { + if err := rp.connectInit(); err != nil { + return false + } + } + if item, err := client.Get(sid); err != nil || len(item.Value) == 0 { + return false + } + return true +} + +// SessionRegenerate generate new sid for memcache session +func (rp *MemProvider) SessionRegenerate(oldsid, sid string) (session.Store, error) { + if client == nil { + if err := rp.connectInit(); err != nil { + return nil, err + } + } + var contain []byte + if item, err := client.Get(sid); err != nil || len(item.Value) == 0 { + // oldsid doesn't exists, set the new sid directly + // ignore error here, since if it return error + // the existed value will be 
0 + item.Key = sid + item.Value = []byte("") + item.Expiration = int32(rp.maxlifetime) + client.Set(item) + } else { + client.Delete(oldsid) + item.Key = sid + item.Expiration = int32(rp.maxlifetime) + client.Set(item) + contain = item.Value + } + + var kv map[interface{}]interface{} + if len(contain) == 0 { + kv = make(map[interface{}]interface{}) + } else { + var err error + kv, err = session.DecodeGob(contain) + if err != nil { + return nil, err + } + } + + rs := &SessionStore{sid: sid, values: kv, maxlifetime: rp.maxlifetime} + return rs, nil +} + +// SessionDestroy delete memcache session by id +func (rp *MemProvider) SessionDestroy(sid string) error { + if client == nil { + if err := rp.connectInit(); err != nil { + return err + } + } + + err := client.Delete(sid) + if err != nil { + return err + } + return nil +} + +func (rp *MemProvider) connectInit() error { + client = memcache.New(rp.conninfo...) + return nil +} + +// SessionGC Impelment method, no used. +func (rp *MemProvider) SessionGC() { + return +} + +// SessionAll return all activeSession +func (rp *MemProvider) SessionAll() int { + return 0 +} + +func init() { + session.Register("memcache", mempder) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/session/mysql/sess_mysql.go b/Godeps/_workspace/src/github.com/astaxie/beego/session/mysql/sess_mysql.go new file mode 100644 index 000000000..969d26c97 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/session/mysql/sess_mysql.go @@ -0,0 +1,233 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package mysql for session provider +// +// depends on github.com/go-sql-driver/mysql: +// +// go install github.com/go-sql-driver/mysql +// +// mysql session support need create table as sql: +// CREATE TABLE `session` ( +// `session_key` char(64) NOT NULL, +// `session_data` blob, +// `session_expiry` int(11) unsigned NOT NULL, +// PRIMARY KEY (`session_key`) +// ) ENGINE=MyISAM DEFAULT CHARSET=utf8; +// +// Usage: +// import( +// _ "github.com/astaxie/beego/session/mysql" +// "github.com/astaxie/beego/session" +// ) +// +// func init() { +// globalSessions, _ = session.NewManager("mysql", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]"}``) +// go globalSessions.GC() +// } +// +// more docs: http://beego.me/docs/module/session.md +package mysql + +import ( + "database/sql" + "net/http" + "sync" + "time" + + "github.com/astaxie/beego/session" + // import mysql driver + _ "github.com/go-sql-driver/mysql" +) + +var ( + // TableName store the session in MySQL + TableName = "session" + mysqlpder = &Provider{} +) + +// SessionStore mysql session store +type SessionStore struct { + c *sql.DB + sid string + lock sync.RWMutex + values map[interface{}]interface{} +} + +// Set value in mysql session. +// it is temp value in map. 
+func (st *SessionStore) Set(key, value interface{}) error { + st.lock.Lock() + defer st.lock.Unlock() + st.values[key] = value + return nil +} + +// Get value from mysql session +func (st *SessionStore) Get(key interface{}) interface{} { + st.lock.RLock() + defer st.lock.RUnlock() + if v, ok := st.values[key]; ok { + return v + } + return nil +} + +// Delete value in mysql session +func (st *SessionStore) Delete(key interface{}) error { + st.lock.Lock() + defer st.lock.Unlock() + delete(st.values, key) + return nil +} + +// Flush clear all values in mysql session +func (st *SessionStore) Flush() error { + st.lock.Lock() + defer st.lock.Unlock() + st.values = make(map[interface{}]interface{}) + return nil +} + +// SessionID get session id of this mysql session store +func (st *SessionStore) SessionID() string { + return st.sid +} + +// SessionRelease save mysql session values to database. +// must call this method to save values to database. +func (st *SessionStore) SessionRelease(w http.ResponseWriter) { + defer st.c.Close() + b, err := session.EncodeGob(st.values) + if err != nil { + return + } + st.c.Exec("UPDATE "+TableName+" set `session_data`=?, `session_expiry`=? where session_key=?", + b, time.Now().Unix(), st.sid) + +} + +// Provider mysql session provider +type Provider struct { + maxlifetime int64 + savePath string +} + +// connect to mysql +func (mp *Provider) connectInit() *sql.DB { + db, e := sql.Open("mysql", mp.savePath) + if e != nil { + return nil + } + return db +} + +// SessionInit init mysql session. +// savepath is the connection string of mysql. 
+func (mp *Provider) SessionInit(maxlifetime int64, savePath string) error { + mp.maxlifetime = maxlifetime + mp.savePath = savePath + return nil +} + +// SessionRead get mysql session by sid +func (mp *Provider) SessionRead(sid string) (session.Store, error) { + c := mp.connectInit() + row := c.QueryRow("select session_data from "+TableName+" where session_key=?", sid) + var sessiondata []byte + err := row.Scan(&sessiondata) + if err == sql.ErrNoRows { + c.Exec("insert into "+TableName+"(`session_key`,`session_data`,`session_expiry`) values(?,?,?)", + sid, "", time.Now().Unix()) + } + var kv map[interface{}]interface{} + if len(sessiondata) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob(sessiondata) + if err != nil { + return nil, err + } + } + rs := &SessionStore{c: c, sid: sid, values: kv} + return rs, nil +} + +// SessionExist check mysql session exist +func (mp *Provider) SessionExist(sid string) bool { + c := mp.connectInit() + defer c.Close() + row := c.QueryRow("select session_data from "+TableName+" where session_key=?", sid) + var sessiondata []byte + err := row.Scan(&sessiondata) + if err == sql.ErrNoRows { + return false + } + return true +} + +// SessionRegenerate generate new sid for mysql session +func (mp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) { + c := mp.connectInit() + row := c.QueryRow("select session_data from "+TableName+" where session_key=?", oldsid) + var sessiondata []byte + err := row.Scan(&sessiondata) + if err == sql.ErrNoRows { + c.Exec("insert into "+TableName+"(`session_key`,`session_data`,`session_expiry`) values(?,?,?)", oldsid, "", time.Now().Unix()) + } + c.Exec("update "+TableName+" set `session_key`=? 
where session_key=?", sid, oldsid) + var kv map[interface{}]interface{} + if len(sessiondata) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob(sessiondata) + if err != nil { + return nil, err + } + } + rs := &SessionStore{c: c, sid: sid, values: kv} + return rs, nil +} + +// SessionDestroy delete mysql session by sid +func (mp *Provider) SessionDestroy(sid string) error { + c := mp.connectInit() + c.Exec("DELETE FROM "+TableName+" where session_key=?", sid) + c.Close() + return nil +} + +// SessionGC delete expired values in mysql session +func (mp *Provider) SessionGC() { + c := mp.connectInit() + c.Exec("DELETE from "+TableName+" where session_expiry < ?", time.Now().Unix()-mp.maxlifetime) + c.Close() + return +} + +// SessionAll count values in mysql session +func (mp *Provider) SessionAll() int { + c := mp.connectInit() + defer c.Close() + var total int + err := c.QueryRow("SELECT count(*) as num from " + TableName).Scan(&total) + if err != nil { + return 0 + } + return total +} + +func init() { + session.Register("mysql", mysqlpder) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/session/postgres/sess_postgresql.go b/Godeps/_workspace/src/github.com/astaxie/beego/session/postgres/sess_postgresql.go new file mode 100644 index 000000000..73f9c13a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/session/postgres/sess_postgresql.go @@ -0,0 +1,248 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package postgres for session provider +// +// depends on github.com/lib/pq: +// +// go install github.com/lib/pq +// +// +// needs this table in your database: +// +// CREATE TABLE session ( +// session_key char(64) NOT NULL, +// session_data bytea, +// session_expiry timestamp NOT NULL, +// CONSTRAINT session_key PRIMARY KEY(session_key) +// ); +// +// will be activated with these settings in app.conf: +// +// SessionOn = true +// SessionProvider = postgresql +// SessionSavePath = "user=a password=b dbname=c sslmode=disable" +// SessionName = session +// +// +// Usage: +// import( +// _ "github.com/astaxie/beego/session/postgresql" +// "github.com/astaxie/beego/session" +// ) +// +// func init() { +// globalSessions, _ = session.NewManager("postgresql", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"user=pqgotest dbname=pqgotest sslmode=verify-full"}``) +// go globalSessions.GC() +// } +// +// more docs: http://beego.me/docs/module/session.md +package postgres + +import ( + "database/sql" + "net/http" + "sync" + "time" + + "github.com/astaxie/beego/session" + // import postgresql Driver + _ "github.com/lib/pq" +) + +var postgresqlpder = &Provider{} + +// SessionStore postgresql session store +type SessionStore struct { + c *sql.DB + sid string + lock sync.RWMutex + values map[interface{}]interface{} +} + +// Set value in postgresql session. +// it is temp value in map. 
+func (st *SessionStore) Set(key, value interface{}) error { + st.lock.Lock() + defer st.lock.Unlock() + st.values[key] = value + return nil +} + +// Get value from postgresql session +func (st *SessionStore) Get(key interface{}) interface{} { + st.lock.RLock() + defer st.lock.RUnlock() + if v, ok := st.values[key]; ok { + return v + } + return nil +} + +// Delete value in postgresql session +func (st *SessionStore) Delete(key interface{}) error { + st.lock.Lock() + defer st.lock.Unlock() + delete(st.values, key) + return nil +} + +// Flush clear all values in postgresql session +func (st *SessionStore) Flush() error { + st.lock.Lock() + defer st.lock.Unlock() + st.values = make(map[interface{}]interface{}) + return nil +} + +// SessionID get session id of this postgresql session store +func (st *SessionStore) SessionID() string { + return st.sid +} + +// SessionRelease save postgresql session values to database. +// must call this method to save values to database. +func (st *SessionStore) SessionRelease(w http.ResponseWriter) { + defer st.c.Close() + b, err := session.EncodeGob(st.values) + if err != nil { + return + } + st.c.Exec("UPDATE session set session_data=$1, session_expiry=$2 where session_key=$3", + b, time.Now().Format(time.RFC3339), st.sid) + +} + +// Provider postgresql session provider +type Provider struct { + maxlifetime int64 + savePath string +} + +// connect to postgresql +func (mp *Provider) connectInit() *sql.DB { + db, e := sql.Open("postgres", mp.savePath) + if e != nil { + return nil + } + return db +} + +// SessionInit init postgresql session. +// savepath is the connection string of postgresql. 
+func (mp *Provider) SessionInit(maxlifetime int64, savePath string) error { + mp.maxlifetime = maxlifetime + mp.savePath = savePath + return nil +} + +// SessionRead get postgresql session by sid +func (mp *Provider) SessionRead(sid string) (session.Store, error) { + c := mp.connectInit() + row := c.QueryRow("select session_data from session where session_key=$1", sid) + var sessiondata []byte + err := row.Scan(&sessiondata) + if err == sql.ErrNoRows { + _, err = c.Exec("insert into session(session_key,session_data,session_expiry) values($1,$2,$3)", + sid, "", time.Now().Format(time.RFC3339)) + + if err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + + var kv map[interface{}]interface{} + if len(sessiondata) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob(sessiondata) + if err != nil { + return nil, err + } + } + rs := &SessionStore{c: c, sid: sid, values: kv} + return rs, nil +} + +// SessionExist check postgresql session exist +func (mp *Provider) SessionExist(sid string) bool { + c := mp.connectInit() + defer c.Close() + row := c.QueryRow("select session_data from session where session_key=$1", sid) + var sessiondata []byte + err := row.Scan(&sessiondata) + + if err == sql.ErrNoRows { + return false + } + return true +} + +// SessionRegenerate generate new sid for postgresql session +func (mp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) { + c := mp.connectInit() + row := c.QueryRow("select session_data from session where session_key=$1", oldsid) + var sessiondata []byte + err := row.Scan(&sessiondata) + if err == sql.ErrNoRows { + c.Exec("insert into session(session_key,session_data,session_expiry) values($1,$2,$3)", + oldsid, "", time.Now().Format(time.RFC3339)) + } + c.Exec("update session set session_key=$1 where session_key=$2", sid, oldsid) + var kv map[interface{}]interface{} + if len(sessiondata) == 0 { + kv = make(map[interface{}]interface{}) + } else { + 
kv, err = session.DecodeGob(sessiondata) + if err != nil { + return nil, err + } + } + rs := &SessionStore{c: c, sid: sid, values: kv} + return rs, nil +} + +// SessionDestroy delete postgresql session by sid +func (mp *Provider) SessionDestroy(sid string) error { + c := mp.connectInit() + c.Exec("DELETE FROM session where session_key=$1", sid) + c.Close() + return nil +} + +// SessionGC delete expired values in postgresql session +func (mp *Provider) SessionGC() { + c := mp.connectInit() + c.Exec("DELETE from session where EXTRACT(EPOCH FROM (current_timestamp - session_expiry)) > $1", mp.maxlifetime) + c.Close() + return +} + +// SessionAll count values in postgresql session +func (mp *Provider) SessionAll() int { + c := mp.connectInit() + defer c.Close() + var total int + err := c.QueryRow("SELECT count(*) as num from session").Scan(&total) + if err != nil { + return 0 + } + return total +} + +func init() { + session.Register("postgresql", postgresqlpder) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/session/redis/sess_redis.go b/Godeps/_workspace/src/github.com/astaxie/beego/session/redis/sess_redis.go new file mode 100644 index 000000000..c46fa7cdf --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/session/redis/sess_redis.go @@ -0,0 +1,256 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package redis for session provider +// +// depend on github.com/garyburd/redigo/redis +// +// go install github.com/garyburd/redigo/redis +// +// Usage: +// import( +// _ "github.com/astaxie/beego/session/redis" +// "github.com/astaxie/beego/session" +// ) +// +// func init() { +// globalSessions, _ = session.NewManager("redis", ``{"cookieName":"gosessionid","gclifetime":3600,"ProviderConfig":"127.0.0.1:7070"}``) +// go globalSessions.GC() +// } +// +// more docs: http://beego.me/docs/module/session.md +package redis + +import ( + "net/http" + "strconv" + "strings" + "sync" + + "github.com/astaxie/beego/session" + + "github.com/garyburd/redigo/redis" +) + +var redispder = &Provider{} + +// MaxPoolSize redis max pool size +var MaxPoolSize = 100 + +// SessionStore redis session store +type SessionStore struct { + p *redis.Pool + sid string + lock sync.RWMutex + values map[interface{}]interface{} + maxlifetime int64 +} + +// Set value in redis session +func (rs *SessionStore) Set(key, value interface{}) error { + rs.lock.Lock() + defer rs.lock.Unlock() + rs.values[key] = value + return nil +} + +// Get value in redis session +func (rs *SessionStore) Get(key interface{}) interface{} { + rs.lock.RLock() + defer rs.lock.RUnlock() + if v, ok := rs.values[key]; ok { + return v + } + return nil +} + +// Delete value in redis session +func (rs *SessionStore) Delete(key interface{}) error { + rs.lock.Lock() + defer rs.lock.Unlock() + delete(rs.values, key) + return nil +} + +// Flush clear all values in redis session +func (rs *SessionStore) Flush() error { + rs.lock.Lock() + defer rs.lock.Unlock() + rs.values = make(map[interface{}]interface{}) + return nil +} + +// SessionID get redis session id +func (rs *SessionStore) SessionID() string { + return rs.sid +} + +// SessionRelease save session values to redis +func (rs *SessionStore) SessionRelease(w http.ResponseWriter) { + b, err := session.EncodeGob(rs.values) + if err != nil { + return + } + c := rs.p.Get() + defer 
c.Close() + c.Do("SETEX", rs.sid, rs.maxlifetime, string(b)) +} + +// Provider redis session provider +type Provider struct { + maxlifetime int64 + savePath string + poolsize int + password string + dbNum int + poollist *redis.Pool +} + +// SessionInit init redis session +// savepath like redis server addr,pool size,password,dbnum +// e.g. 127.0.0.1:6379,100,astaxie,0 +func (rp *Provider) SessionInit(maxlifetime int64, savePath string) error { + rp.maxlifetime = maxlifetime + configs := strings.Split(savePath, ",") + if len(configs) > 0 { + rp.savePath = configs[0] + } + if len(configs) > 1 { + poolsize, err := strconv.Atoi(configs[1]) + if err != nil || poolsize <= 0 { + rp.poolsize = MaxPoolSize + } else { + rp.poolsize = poolsize + } + } else { + rp.poolsize = MaxPoolSize + } + if len(configs) > 2 { + rp.password = configs[2] + } + if len(configs) > 3 { + dbnum, err := strconv.Atoi(configs[3]) + if err != nil || dbnum < 0 { + rp.dbNum = 0 + } else { + rp.dbNum = dbnum + } + } else { + rp.dbNum = 0 + } + rp.poollist = redis.NewPool(func() (redis.Conn, error) { + c, err := redis.Dial("tcp", rp.savePath) + if err != nil { + return nil, err + } + if rp.password != "" { + if _, err := c.Do("AUTH", rp.password); err != nil { + c.Close() + return nil, err + } + } + _, err = c.Do("SELECT", rp.dbNum) + if err != nil { + c.Close() + return nil, err + } + return c, err + }, rp.poolsize) + + return rp.poollist.Get().Err() +} + +// SessionRead read redis session by sid +func (rp *Provider) SessionRead(sid string) (session.Store, error) { + c := rp.poollist.Get() + defer c.Close() + + kvs, err := redis.String(c.Do("GET", sid)) + var kv map[interface{}]interface{} + if len(kvs) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob([]byte(kvs)) + if err != nil { + return nil, err + } + } + + rs := &SessionStore{p: rp.poollist, sid: sid, values: kv, maxlifetime: rp.maxlifetime} + return rs, nil +} + +// SessionExist check redis session exist by 
sid +func (rp *Provider) SessionExist(sid string) bool { + c := rp.poollist.Get() + defer c.Close() + + if existed, err := redis.Int(c.Do("EXISTS", sid)); err != nil || existed == 0 { + return false + } + return true +} + +// SessionRegenerate generate new sid for redis session +func (rp *Provider) SessionRegenerate(oldsid, sid string) (session.Store, error) { + c := rp.poollist.Get() + defer c.Close() + + if existed, _ := redis.Int(c.Do("EXISTS", oldsid)); existed == 0 { + // oldsid doesn't exists, set the new sid directly + // ignore error here, since if it return error + // the existed value will be 0 + c.Do("SET", sid, "", "EX", rp.maxlifetime) + } else { + c.Do("RENAME", oldsid, sid) + c.Do("EXPIRE", sid, rp.maxlifetime) + } + + kvs, err := redis.String(c.Do("GET", sid)) + var kv map[interface{}]interface{} + if len(kvs) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = session.DecodeGob([]byte(kvs)) + if err != nil { + return nil, err + } + } + + rs := &SessionStore{p: rp.poollist, sid: sid, values: kv, maxlifetime: rp.maxlifetime} + return rs, nil +} + +// SessionDestroy delete redis session by id +func (rp *Provider) SessionDestroy(sid string) error { + c := rp.poollist.Get() + defer c.Close() + + c.Do("DEL", sid) + return nil +} + +// SessionGC Impelment method, no used. +func (rp *Provider) SessionGC() { + return +} + +// SessionAll return all activeSession +func (rp *Provider) SessionAll() int { + return 0 +} + +func init() { + session.Register("redis", redispder) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/session/sess_cookie.go b/Godeps/_workspace/src/github.com/astaxie/beego/session/sess_cookie.go new file mode 100644 index 000000000..3fefa360f --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/session/sess_cookie.go @@ -0,0 +1,184 @@ +// Copyright 2014 beego Author. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package session + +import ( + "crypto/aes" + "crypto/cipher" + "encoding/json" + "net/http" + "net/url" + "sync" +) + +var cookiepder = &CookieProvider{} + +// CookieSessionStore Cookie SessionStore +type CookieSessionStore struct { + sid string + values map[interface{}]interface{} // session data + lock sync.RWMutex +} + +// Set value to cookie session. +// the value are encoded as gob with hash block string. +func (st *CookieSessionStore) Set(key, value interface{}) error { + st.lock.Lock() + defer st.lock.Unlock() + st.values[key] = value + return nil +} + +// Get value from cookie session +func (st *CookieSessionStore) Get(key interface{}) interface{} { + st.lock.RLock() + defer st.lock.RUnlock() + if v, ok := st.values[key]; ok { + return v + } + return nil +} + +// Delete value in cookie session +func (st *CookieSessionStore) Delete(key interface{}) error { + st.lock.Lock() + defer st.lock.Unlock() + delete(st.values, key) + return nil +} + +// Flush Clean all values in cookie session +func (st *CookieSessionStore) Flush() error { + st.lock.Lock() + defer st.lock.Unlock() + st.values = make(map[interface{}]interface{}) + return nil +} + +// SessionID Return id of this cookie session +func (st *CookieSessionStore) SessionID() string { + return st.sid +} + +// SessionRelease Write cookie session to http response cookie +func (st *CookieSessionStore) SessionRelease(w http.ResponseWriter) { + str, err := 
encodeCookie(cookiepder.block, + cookiepder.config.SecurityKey, + cookiepder.config.SecurityName, + st.values) + if err != nil { + return + } + cookie := &http.Cookie{Name: cookiepder.config.CookieName, + Value: url.QueryEscape(str), + Path: "/", + HttpOnly: true, + Secure: cookiepder.config.Secure, + MaxAge: cookiepder.config.Maxage} + http.SetCookie(w, cookie) + return +} + +type cookieConfig struct { + SecurityKey string `json:"securityKey"` + BlockKey string `json:"blockKey"` + SecurityName string `json:"securityName"` + CookieName string `json:"cookieName"` + Secure bool `json:"secure"` + Maxage int `json:"maxage"` +} + +// CookieProvider Cookie session provider +type CookieProvider struct { + maxlifetime int64 + config *cookieConfig + block cipher.Block +} + +// SessionInit Init cookie session provider with max lifetime and config json. +// maxlifetime is ignored. +// json config: +// securityKey - hash string +// blockKey - gob encode hash string. it's saved as aes crypto. +// securityName - recognized name in encoded cookie string +// cookieName - cookie name +// maxage - cookie max life time. +func (pder *CookieProvider) SessionInit(maxlifetime int64, config string) error { + pder.config = &cookieConfig{} + err := json.Unmarshal([]byte(config), pder.config) + if err != nil { + return err + } + if pder.config.BlockKey == "" { + pder.config.BlockKey = string(generateRandomKey(16)) + } + if pder.config.SecurityName == "" { + pder.config.SecurityName = string(generateRandomKey(20)) + } + pder.block, err = aes.NewCipher([]byte(pder.config.BlockKey)) + if err != nil { + return err + } + pder.maxlifetime = maxlifetime + return nil +} + +// SessionRead Get SessionStore in cooke. +// decode cooke string to map and put into SessionStore with sid. 
+func (pder *CookieProvider) SessionRead(sid string) (Store, error) { + maps, _ := decodeCookie(pder.block, + pder.config.SecurityKey, + pder.config.SecurityName, + sid, pder.maxlifetime) + if maps == nil { + maps = make(map[interface{}]interface{}) + } + rs := &CookieSessionStore{sid: sid, values: maps} + return rs, nil +} + +// SessionExist Cookie session is always existed +func (pder *CookieProvider) SessionExist(sid string) bool { + return true +} + +// SessionRegenerate Implement method, no used. +func (pder *CookieProvider) SessionRegenerate(oldsid, sid string) (Store, error) { + return nil, nil +} + +// SessionDestroy Implement method, no used. +func (pder *CookieProvider) SessionDestroy(sid string) error { + return nil +} + +// SessionGC Implement method, no used. +func (pder *CookieProvider) SessionGC() { + return +} + +// SessionAll Implement method, return 0. +func (pder *CookieProvider) SessionAll() int { + return 0 +} + +// SessionUpdate Implement method, no used. +func (pder *CookieProvider) SessionUpdate(sid string) error { + return nil +} + +func init() { + Register("cookie", cookiepder) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/session/sess_file.go b/Godeps/_workspace/src/github.com/astaxie/beego/session/sess_file.go new file mode 100644 index 000000000..9265b0304 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/session/sess_file.go @@ -0,0 +1,283 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package session + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "path/filepath" + "sync" + "time" +) + +var ( + filepder = &FileProvider{} + gcmaxlifetime int64 +) + +// FileSessionStore File session store +type FileSessionStore struct { + sid string + lock sync.RWMutex + values map[interface{}]interface{} +} + +// Set value to file session +func (fs *FileSessionStore) Set(key, value interface{}) error { + fs.lock.Lock() + defer fs.lock.Unlock() + fs.values[key] = value + return nil +} + +// Get value from file session +func (fs *FileSessionStore) Get(key interface{}) interface{} { + fs.lock.RLock() + defer fs.lock.RUnlock() + if v, ok := fs.values[key]; ok { + return v + } + return nil +} + +// Delete value in file session by given key +func (fs *FileSessionStore) Delete(key interface{}) error { + fs.lock.Lock() + defer fs.lock.Unlock() + delete(fs.values, key) + return nil +} + +// Flush Clean all values in file session +func (fs *FileSessionStore) Flush() error { + fs.lock.Lock() + defer fs.lock.Unlock() + fs.values = make(map[interface{}]interface{}) + return nil +} + +// SessionID Get file session store id +func (fs *FileSessionStore) SessionID() string { + return fs.sid +} + +// SessionRelease Write file session to local file with Gob string +func (fs *FileSessionStore) SessionRelease(w http.ResponseWriter) { + b, err := EncodeGob(fs.values) + if err != nil { + return + } + _, err = os.Stat(path.Join(filepder.savePath, string(fs.sid[0]), string(fs.sid[1]), fs.sid)) + var f *os.File + if err == nil { + f, err = os.OpenFile(path.Join(filepder.savePath, string(fs.sid[0]), string(fs.sid[1]), fs.sid), os.O_RDWR, 0777) + } else if os.IsNotExist(err) { + f, err = os.Create(path.Join(filepder.savePath, string(fs.sid[0]), string(fs.sid[1]), fs.sid)) + } else { + return + } + f.Truncate(0) + f.Seek(0, 0) + f.Write(b) + 
f.Close() +} + +// FileProvider File session provider +type FileProvider struct { + lock sync.RWMutex + maxlifetime int64 + savePath string +} + +// SessionInit Init file session provider. +// savePath sets the session files path. +func (fp *FileProvider) SessionInit(maxlifetime int64, savePath string) error { + fp.maxlifetime = maxlifetime + fp.savePath = savePath + return nil +} + +// SessionRead Read file session by sid. +// if file is not exist, create it. +// the file path is generated from sid string. +func (fp *FileProvider) SessionRead(sid string) (Store, error) { + filepder.lock.Lock() + defer filepder.lock.Unlock() + + err := os.MkdirAll(path.Join(fp.savePath, string(sid[0]), string(sid[1])), 0777) + if err != nil { + println(err.Error()) + } + _, err = os.Stat(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid)) + var f *os.File + if err == nil { + f, err = os.OpenFile(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid), os.O_RDWR, 0777) + } else if os.IsNotExist(err) { + f, err = os.Create(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid)) + } else { + return nil, err + } + os.Chtimes(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid), time.Now(), time.Now()) + var kv map[interface{}]interface{} + b, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + if len(b) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = DecodeGob(b) + if err != nil { + return nil, err + } + } + f.Close() + ss := &FileSessionStore{sid: sid, values: kv} + return ss, nil +} + +// SessionExist Check file session exist. +// it checkes the file named from sid exist or not. 
+func (fp *FileProvider) SessionExist(sid string) bool { + filepder.lock.Lock() + defer filepder.lock.Unlock() + + _, err := os.Stat(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid)) + if err == nil { + return true + } + return false +} + +// SessionDestroy Remove all files in this save path +func (fp *FileProvider) SessionDestroy(sid string) error { + filepder.lock.Lock() + defer filepder.lock.Unlock() + os.Remove(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid)) + return nil +} + +// SessionGC Recycle files in save path +func (fp *FileProvider) SessionGC() { + filepder.lock.Lock() + defer filepder.lock.Unlock() + + gcmaxlifetime = fp.maxlifetime + filepath.Walk(fp.savePath, gcpath) +} + +// SessionAll Get active file session number. +// it walks save path to count files. +func (fp *FileProvider) SessionAll() int { + a := &activeSession{} + err := filepath.Walk(fp.savePath, func(path string, f os.FileInfo, err error) error { + return a.visit(path, f, err) + }) + if err != nil { + fmt.Printf("filepath.Walk() returned %v\n", err) + return 0 + } + return a.total +} + +// SessionRegenerate Generate new sid for file session. +// it delete old file and create new file named from new sid. 
+func (fp *FileProvider) SessionRegenerate(oldsid, sid string) (Store, error) { + filepder.lock.Lock() + defer filepder.lock.Unlock() + + err := os.MkdirAll(path.Join(fp.savePath, string(oldsid[0]), string(oldsid[1])), 0777) + if err != nil { + println(err.Error()) + } + err = os.MkdirAll(path.Join(fp.savePath, string(sid[0]), string(sid[1])), 0777) + if err != nil { + println(err.Error()) + } + _, err = os.Stat(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid)) + var newf *os.File + if err == nil { + return nil, errors.New("newsid exist") + } else if os.IsNotExist(err) { + newf, err = os.Create(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid)) + } + + _, err = os.Stat(path.Join(fp.savePath, string(oldsid[0]), string(oldsid[1]), oldsid)) + var f *os.File + if err == nil { + f, err = os.OpenFile(path.Join(fp.savePath, string(oldsid[0]), string(oldsid[1]), oldsid), os.O_RDWR, 0777) + io.Copy(newf, f) + } else if os.IsNotExist(err) { + newf, err = os.Create(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid)) + } else { + return nil, err + } + f.Close() + os.Remove(path.Join(fp.savePath, string(oldsid[0]), string(oldsid[1]))) + os.Chtimes(path.Join(fp.savePath, string(sid[0]), string(sid[1]), sid), time.Now(), time.Now()) + var kv map[interface{}]interface{} + b, err := ioutil.ReadAll(newf) + if err != nil { + return nil, err + } + if len(b) == 0 { + kv = make(map[interface{}]interface{}) + } else { + kv, err = DecodeGob(b) + if err != nil { + return nil, err + } + } + ss := &FileSessionStore{sid: sid, values: kv} + return ss, nil +} + +// remove file in save path if expired +func gcpath(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + if (info.ModTime().Unix() + gcmaxlifetime) < time.Now().Unix() { + os.Remove(path) + } + return nil +} + +type activeSession struct { + total int +} + +func (as *activeSession) visit(paths string, f os.FileInfo, err error) error { + 
if err != nil { + return err + } + if f.IsDir() { + return nil + } + as.total = as.total + 1 + return nil +} + +func init() { + Register("file", filepder) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/session/sess_mem.go b/Godeps/_workspace/src/github.com/astaxie/beego/session/sess_mem.go new file mode 100644 index 000000000..dd61ef573 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/session/sess_mem.go @@ -0,0 +1,196 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package session + +import ( + "container/list" + "net/http" + "sync" + "time" +) + +var mempder = &MemProvider{list: list.New(), sessions: make(map[string]*list.Element)} + +// MemSessionStore memory session store. +// it saved sessions in a map in memory. 
type MemSessionStore struct {
	sid          string                      // session id
	timeAccessed time.Time                   // last access time
	value        map[interface{}]interface{} // session data
	lock         sync.RWMutex
}

// Set stores value under key in this memory session.
func (st *MemSessionStore) Set(key, value interface{}) error {
	st.lock.Lock()
	defer st.lock.Unlock()
	st.value[key] = value
	return nil
}

// Get returns the value stored under key, or nil when the key is absent.
func (st *MemSessionStore) Get(key interface{}) interface{} {
	st.lock.RLock()
	defer st.lock.RUnlock()
	// A missing key yields the zero value of interface{}, which is nil —
	// identical to an explicit ok-check.
	return st.value[key]
}

// Delete removes key from this memory session.
func (st *MemSessionStore) Delete(key interface{}) error {
	st.lock.Lock()
	defer st.lock.Unlock()
	delete(st.value, key)
	return nil
}

// Flush drops every value held by this memory session.
func (st *MemSessionStore) Flush() error {
	st.lock.Lock()
	defer st.lock.Unlock()
	st.value = map[interface{}]interface{}{}
	return nil
}

// SessionID returns the id of this memory session store.
func (st *MemSessionStore) SessionID() string {
	return st.sid
}

// SessionRelease Implement method, not used.
+func (st *MemSessionStore) SessionRelease(w http.ResponseWriter) { +} + +// MemProvider Implement the provider interface +type MemProvider struct { + lock sync.RWMutex // locker + sessions map[string]*list.Element // map in memory + list *list.List // for gc + maxlifetime int64 + savePath string +} + +// SessionInit init memory session +func (pder *MemProvider) SessionInit(maxlifetime int64, savePath string) error { + pder.maxlifetime = maxlifetime + pder.savePath = savePath + return nil +} + +// SessionRead get memory session store by sid +func (pder *MemProvider) SessionRead(sid string) (Store, error) { + pder.lock.RLock() + if element, ok := pder.sessions[sid]; ok { + go pder.SessionUpdate(sid) + pder.lock.RUnlock() + return element.Value.(*MemSessionStore), nil + } + pder.lock.RUnlock() + pder.lock.Lock() + newsess := &MemSessionStore{sid: sid, timeAccessed: time.Now(), value: make(map[interface{}]interface{})} + element := pder.list.PushBack(newsess) + pder.sessions[sid] = element + pder.lock.Unlock() + return newsess, nil +} + +// SessionExist check session store exist in memory session by sid +func (pder *MemProvider) SessionExist(sid string) bool { + pder.lock.RLock() + defer pder.lock.RUnlock() + if _, ok := pder.sessions[sid]; ok { + return true + } + return false +} + +// SessionRegenerate generate new sid for session store in memory session +func (pder *MemProvider) SessionRegenerate(oldsid, sid string) (Store, error) { + pder.lock.RLock() + if element, ok := pder.sessions[oldsid]; ok { + go pder.SessionUpdate(oldsid) + pder.lock.RUnlock() + pder.lock.Lock() + element.Value.(*MemSessionStore).sid = sid + pder.sessions[sid] = element + delete(pder.sessions, oldsid) + pder.lock.Unlock() + return element.Value.(*MemSessionStore), nil + } + pder.lock.RUnlock() + pder.lock.Lock() + newsess := &MemSessionStore{sid: sid, timeAccessed: time.Now(), value: make(map[interface{}]interface{})} + element := pder.list.PushBack(newsess) + pder.sessions[sid] = element 
+ pder.lock.Unlock() + return newsess, nil +} + +// SessionDestroy delete session store in memory session by id +func (pder *MemProvider) SessionDestroy(sid string) error { + pder.lock.Lock() + defer pder.lock.Unlock() + if element, ok := pder.sessions[sid]; ok { + delete(pder.sessions, sid) + pder.list.Remove(element) + return nil + } + return nil +} + +// SessionGC clean expired session stores in memory session +func (pder *MemProvider) SessionGC() { + pder.lock.RLock() + for { + element := pder.list.Back() + if element == nil { + break + } + if (element.Value.(*MemSessionStore).timeAccessed.Unix() + pder.maxlifetime) < time.Now().Unix() { + pder.lock.RUnlock() + pder.lock.Lock() + pder.list.Remove(element) + delete(pder.sessions, element.Value.(*MemSessionStore).sid) + pder.lock.Unlock() + pder.lock.RLock() + } else { + break + } + } + pder.lock.RUnlock() +} + +// SessionAll get count number of memory session +func (pder *MemProvider) SessionAll() int { + return pder.list.Len() +} + +// SessionUpdate expand time of session store by id in memory session +func (pder *MemProvider) SessionUpdate(sid string) error { + pder.lock.Lock() + defer pder.lock.Unlock() + if element, ok := pder.sessions[sid]; ok { + element.Value.(*MemSessionStore).timeAccessed = time.Now() + pder.list.MoveToFront(element) + return nil + } + return nil +} + +func init() { + Register("memory", mempder) +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/session/sess_utils.go b/Godeps/_workspace/src/github.com/astaxie/beego/session/sess_utils.go new file mode 100644 index 000000000..d7db5ba8d --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/session/sess_utils.go @@ -0,0 +1,207 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package session + +import ( + "bytes" + "crypto/cipher" + "crypto/hmac" + "crypto/rand" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "encoding/gob" + "errors" + "fmt" + "io" + "strconv" + "time" + + "github.com/astaxie/beego/utils" +) + +func init() { + gob.Register([]interface{}{}) + gob.Register(map[int]interface{}{}) + gob.Register(map[string]interface{}{}) + gob.Register(map[interface{}]interface{}{}) + gob.Register(map[string]string{}) + gob.Register(map[int]string{}) + gob.Register(map[int]int{}) + gob.Register(map[int]int64{}) +} + +// EncodeGob encode the obj to gob +func EncodeGob(obj map[interface{}]interface{}) ([]byte, error) { + for _, v := range obj { + gob.Register(v) + } + buf := bytes.NewBuffer(nil) + enc := gob.NewEncoder(buf) + err := enc.Encode(obj) + if err != nil { + return []byte(""), err + } + return buf.Bytes(), nil +} + +// DecodeGob decode data to map +func DecodeGob(encoded []byte) (map[interface{}]interface{}, error) { + buf := bytes.NewBuffer(encoded) + dec := gob.NewDecoder(buf) + var out map[interface{}]interface{} + err := dec.Decode(&out) + if err != nil { + return nil, err + } + return out, nil +} + +// generateRandomKey creates a random key with the given strength. 
+func generateRandomKey(strength int) []byte { + k := make([]byte, strength) + if n, err := io.ReadFull(rand.Reader, k); n != strength || err != nil { + return utils.RandomCreateBytes(strength) + } + return k +} + +// Encryption ----------------------------------------------------------------- + +// encrypt encrypts a value using the given block in counter mode. +// +// A random initialization vector (http://goo.gl/zF67k) with the length of the +// block size is prepended to the resulting ciphertext. +func encrypt(block cipher.Block, value []byte) ([]byte, error) { + iv := generateRandomKey(block.BlockSize()) + if iv == nil { + return nil, errors.New("encrypt: failed to generate random iv") + } + // Encrypt it. + stream := cipher.NewCTR(block, iv) + stream.XORKeyStream(value, value) + // Return iv + ciphertext. + return append(iv, value...), nil +} + +// decrypt decrypts a value using the given block in counter mode. +// +// The value to be decrypted must be prepended by a initialization vector +// (http://goo.gl/zF67k) with the length of the block size. +func decrypt(block cipher.Block, value []byte) ([]byte, error) { + size := block.BlockSize() + if len(value) > size { + // Extract iv. + iv := value[:size] + // Extract ciphertext. + value = value[size:] + // Decrypt it. + stream := cipher.NewCTR(block, iv) + stream.XORKeyStream(value, value) + return value, nil + } + return nil, errors.New("decrypt: the value could not be decrypted") +} + +func encodeCookie(block cipher.Block, hashKey, name string, value map[interface{}]interface{}) (string, error) { + var err error + var b []byte + // 1. EncodeGob. + if b, err = EncodeGob(value); err != nil { + return "", err + } + // 2. Encrypt (optional). + if b, err = encrypt(block, b); err != nil { + return "", err + } + b = encode(b) + // 3. Create MAC for "name|date|value". Extra pipe to be used later. 
+ b = []byte(fmt.Sprintf("%s|%d|%s|", name, time.Now().UTC().Unix(), b)) + h := hmac.New(sha1.New, []byte(hashKey)) + h.Write(b) + sig := h.Sum(nil) + // Append mac, remove name. + b = append(b, sig...)[len(name)+1:] + // 4. Encode to base64. + b = encode(b) + // Done. + return string(b), nil +} + +func decodeCookie(block cipher.Block, hashKey, name, value string, gcmaxlifetime int64) (map[interface{}]interface{}, error) { + // 1. Decode from base64. + b, err := decode([]byte(value)) + if err != nil { + return nil, err + } + // 2. Verify MAC. Value is "date|value|mac". + parts := bytes.SplitN(b, []byte("|"), 3) + if len(parts) != 3 { + return nil, errors.New("Decode: invalid value %v") + } + + b = append([]byte(name+"|"), b[:len(b)-len(parts[2])]...) + h := hmac.New(sha1.New, []byte(hashKey)) + h.Write(b) + sig := h.Sum(nil) + if len(sig) != len(parts[2]) || subtle.ConstantTimeCompare(sig, parts[2]) != 1 { + return nil, errors.New("Decode: the value is not valid") + } + // 3. Verify date ranges. + var t1 int64 + if t1, err = strconv.ParseInt(string(parts[0]), 10, 64); err != nil { + return nil, errors.New("Decode: invalid timestamp") + } + t2 := time.Now().UTC().Unix() + if t1 > t2 { + return nil, errors.New("Decode: timestamp is too new") + } + if t1 < t2-gcmaxlifetime { + return nil, errors.New("Decode: expired timestamp") + } + // 4. Decrypt (optional). + b, err = decode(parts[1]) + if err != nil { + return nil, err + } + if b, err = decrypt(block, b); err != nil { + return nil, err + } + // 5. DecodeGob. + dst, err := DecodeGob(b) + if err != nil { + return nil, err + } + return dst, nil +} + +// Encoding ------------------------------------------------------------------- + +// encode encodes a value using base64. +func encode(value []byte) []byte { + encoded := make([]byte, base64.URLEncoding.EncodedLen(len(value))) + base64.URLEncoding.Encode(encoded, value) + return encoded +} + +// decode decodes a cookie using base64. 
+func decode(value []byte) ([]byte, error) { + decoded := make([]byte, base64.URLEncoding.DecodedLen(len(value))) + b, err := base64.URLEncoding.Decode(decoded, value) + if err != nil { + return nil, err + } + return decoded[:b], nil +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/session/session.go b/Godeps/_workspace/src/github.com/astaxie/beego/session/session.go new file mode 100644 index 000000000..39d475fc1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/session/session.go @@ -0,0 +1,296 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package session provider +// +// Usage: +// import( +// "github.com/astaxie/beego/session" +// ) +// +// func init() { +// globalSessions, _ = session.NewManager("memory", `{"cookieName":"gosessionid", "enableSetCookie,omitempty": true, "gclifetime":3600, "maxLifetime": 3600, "secure": false, "cookieLifeTime": 3600, "providerConfig": ""}`) +// go globalSessions.GC() +// } +// +// more docs: http://beego.me/docs/module/session.md +package session + +import ( + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "net/url" + "time" +) + +// Store contains all data for one session process with specific id. 
+type Store interface { + Set(key, value interface{}) error //set session value + Get(key interface{}) interface{} //get session value + Delete(key interface{}) error //delete session value + SessionID() string //back current sessionID + SessionRelease(w http.ResponseWriter) // release the resource & save data to provider & return the data + Flush() error //delete all data +} + +// Provider contains global session methods and saved SessionStores. +// it can operate a SessionStore by its id. +type Provider interface { + SessionInit(gclifetime int64, config string) error + SessionRead(sid string) (Store, error) + SessionExist(sid string) bool + SessionRegenerate(oldsid, sid string) (Store, error) + SessionDestroy(sid string) error + SessionAll() int //get all active session + SessionGC() +} + +var provides = make(map[string]Provider) + +// Register makes a session provide available by the provided name. +// If Register is called twice with the same name or if driver is nil, +// it panics. +func Register(name string, provide Provider) { + if provide == nil { + panic("session: Register provide is nil") + } + if _, dup := provides[name]; dup { + panic("session: Register called twice for provider " + name) + } + provides[name] = provide +} + +type managerConfig struct { + CookieName string `json:"cookieName"` + EnableSetCookie bool `json:"enableSetCookie,omitempty"` + Gclifetime int64 `json:"gclifetime"` + Maxlifetime int64 `json:"maxLifetime"` + Secure bool `json:"secure"` + CookieLifeTime int `json:"cookieLifeTime"` + ProviderConfig string `json:"providerConfig"` + Domain string `json:"domain"` + SessionIDLength int64 `json:"sessionIDLength"` +} + +// Manager contains Provider and its configuration. +type Manager struct { + provider Provider + config *managerConfig +} + +// NewManager Create new Manager with provider name and json config string. +// provider name: +// 1. cookie +// 2. file +// 3. memory +// 4. redis +// 5. mysql +// json config: +// 1. 
is https default false +// 2. hashfunc default sha1 +// 3. hashkey default beegosessionkey +// 4. maxage default is none +func NewManager(provideName, config string) (*Manager, error) { + provider, ok := provides[provideName] + if !ok { + return nil, fmt.Errorf("session: unknown provide %q (forgotten import?)", provideName) + } + cf := new(managerConfig) + cf.EnableSetCookie = true + err := json.Unmarshal([]byte(config), cf) + if err != nil { + return nil, err + } + if cf.Maxlifetime == 0 { + cf.Maxlifetime = cf.Gclifetime + } + err = provider.SessionInit(cf.Maxlifetime, cf.ProviderConfig) + if err != nil { + return nil, err + } + + if cf.SessionIDLength == 0 { + cf.SessionIDLength = 16 + } + + return &Manager{ + provider, + cf, + }, nil +} + +// getSid retrieves session identifier from HTTP Request. +// First try to retrieve id by reading from cookie, session cookie name is configurable, +// if not exist, then retrieve id from querying parameters. +// +// error is not nil when there is anything wrong. +// sid is empty when need to generate a new session id +// otherwise return an valid session id. +func (manager *Manager) getSid(r *http.Request) (string, error) { + cookie, errs := r.Cookie(manager.config.CookieName) + if errs != nil || cookie.Value == "" || cookie.MaxAge < 0 { + errs := r.ParseForm() + if errs != nil { + return "", errs + } + + sid := r.FormValue(manager.config.CookieName) + return sid, nil + } + + // HTTP Request contains cookie for sessionid info. + return url.QueryUnescape(cookie.Value) +} + +// SessionStart generate or read the session id from http request. +// if session id exists, return SessionStore with this id. 
func (manager *Manager) SessionStart(w http.ResponseWriter, r *http.Request) (session Store, err error) {
	sid, errs := manager.getSid(r)
	if errs != nil {
		return nil, errs
	}

	// Reuse the existing session when the request carried a known sid.
	if sid != "" && manager.provider.SessionExist(sid) {
		return manager.provider.SessionRead(sid)
	}

	// Generate a new session
	sid, errs = manager.sessionID()
	if errs != nil {
		return nil, errs
	}

	// NOTE(review): err (a named return) may be non-nil here, yet the cookie
	// is still written below and the possibly-nil session is returned —
	// confirm callers check err before using session.
	session, err = manager.provider.SessionRead(sid)
	cookie := &http.Cookie{
		Name:     manager.config.CookieName,
		Value:    url.QueryEscape(sid),
		Path:     "/",
		HttpOnly: true,
		Secure:   manager.isSecure(r),
		Domain:   manager.config.Domain,
	}
	// A positive CookieLifeTime makes the cookie persistent; otherwise it is
	// a session cookie (expires with the browser session).
	if manager.config.CookieLifeTime > 0 {
		cookie.MaxAge = manager.config.CookieLifeTime
		cookie.Expires = time.Now().Add(time.Duration(manager.config.CookieLifeTime) * time.Second)
	}
	if manager.config.EnableSetCookie {
		http.SetCookie(w, cookie)
	}
	// Attach the cookie to the request too, so later handlers in this same
	// request see the new sid. NOTE(review): AddCookie appends — repeated
	// SessionStart calls on one request would accumulate Cookie headers.
	r.AddCookie(cookie)

	return
}

// SessionDestroy destroys the session whose id is carried in the request
// cookie, and (when cookie writing is enabled) expires the client cookie.
func (manager *Manager) SessionDestroy(w http.ResponseWriter, r *http.Request) {
	cookie, err := r.Cookie(manager.config.CookieName)
	if err != nil || cookie.Value == "" {
		return
	}
	manager.provider.SessionDestroy(cookie.Value)
	if manager.config.EnableSetCookie {
		// Overwrite with an already-expired cookie so the browser drops it.
		expiration := time.Now()
		cookie = &http.Cookie{Name: manager.config.CookieName,
			Path:     "/",
			HttpOnly: true,
			Expires:  expiration,
			MaxAge:   -1}

		http.SetCookie(w, cookie)
	}
}

// GetSessionStore Get SessionStore by its id.
func (manager *Manager) GetSessionStore(sid string) (sessions Store, err error) {
	sessions, err = manager.provider.SessionRead(sid)
	return
}

// GC Start session gc process.
// it can do gc in times after gc lifetime.
+func (manager *Manager) GC() { + manager.provider.SessionGC() + time.AfterFunc(time.Duration(manager.config.Gclifetime)*time.Second, func() { manager.GC() }) +} + +// SessionRegenerateID Regenerate a session id for this SessionStore who's id is saving in http request. +func (manager *Manager) SessionRegenerateID(w http.ResponseWriter, r *http.Request) (session Store) { + sid, err := manager.sessionID() + if err != nil { + return + } + cookie, err := r.Cookie(manager.config.CookieName) + if err != nil || cookie.Value == "" { + //delete old cookie + session, _ = manager.provider.SessionRead(sid) + cookie = &http.Cookie{Name: manager.config.CookieName, + Value: url.QueryEscape(sid), + Path: "/", + HttpOnly: true, + Secure: manager.isSecure(r), + Domain: manager.config.Domain, + } + } else { + oldsid, _ := url.QueryUnescape(cookie.Value) + session, _ = manager.provider.SessionRegenerate(oldsid, sid) + cookie.Value = url.QueryEscape(sid) + cookie.HttpOnly = true + cookie.Path = "/" + } + if manager.config.CookieLifeTime > 0 { + cookie.MaxAge = manager.config.CookieLifeTime + cookie.Expires = time.Now().Add(time.Duration(manager.config.CookieLifeTime) * time.Second) + } + if manager.config.EnableSetCookie { + http.SetCookie(w, cookie) + } + r.AddCookie(cookie) + return +} + +// GetActiveSession Get all active sessions count number. +func (manager *Manager) GetActiveSession() int { + return manager.provider.SessionAll() +} + +// SetSecure Set cookie with https. +func (manager *Manager) SetSecure(secure bool) { + manager.config.Secure = secure +} + +func (manager *Manager) sessionID() (string, error) { + b := make([]byte, manager.config.SessionIDLength) + n, err := rand.Read(b) + if n != len(b) || err != nil { + return "", fmt.Errorf("Could not successfully read from the system CSPRNG.") + } + return hex.EncodeToString(b), nil +} + +// Set cookie with https. 
+func (manager *Manager) isSecure(req *http.Request) bool { + if !manager.config.Secure { + return false + } + if req.URL.Scheme != "" { + return req.URL.Scheme == "https" + } + if req.TLS == nil { + return false + } + return true +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/staticfile.go b/Godeps/_workspace/src/github.com/astaxie/beego/staticfile.go new file mode 100644 index 000000000..9534ce914 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/staticfile.go @@ -0,0 +1,195 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package beego + +import ( + "bytes" + "errors" + "net/http" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/astaxie/beego/context" +) + +var errNotStaticRequest = errors.New("request not a static file request") + +func serverStaticRouter(ctx *context.Context) { + if ctx.Input.Method() != "GET" && ctx.Input.Method() != "HEAD" { + return + } + + forbidden, filePath, fileInfo, err := lookupFile(ctx) + if err == errNotStaticRequest { + return + } + + if forbidden { + exception("403", ctx) + return + } + + if filePath == "" || fileInfo == nil { + if BConfig.RunMode == DEV { + Warn("Can't find/open the file:", filePath, err) + } + http.NotFound(ctx.ResponseWriter, ctx.Request) + return + } + if fileInfo.IsDir() { + //serveFile will list dir + http.ServeFile(ctx.ResponseWriter, ctx.Request, filePath) + return + } + + var enableCompress = BConfig.EnableGzip && isStaticCompress(filePath) + var acceptEncoding string + if enableCompress { + acceptEncoding = context.ParseEncoding(ctx.Request) + } + b, n, sch, err := openFile(filePath, fileInfo, acceptEncoding) + if err != nil { + if BConfig.RunMode == DEV { + Warn("Can't compress the file:", filePath, err) + } + http.NotFound(ctx.ResponseWriter, ctx.Request) + return + } + + if b { + ctx.Output.Header("Content-Encoding", n) + } else { + ctx.Output.Header("Content-Length", strconv.FormatInt(sch.size, 10)) + } + + http.ServeContent(ctx.ResponseWriter, ctx.Request, filePath, sch.modTime, sch) + return + +} + +type serveContentHolder struct { + *bytes.Reader + modTime time.Time + size int64 + encoding string +} + +var ( + staticFileMap = make(map[string]*serveContentHolder) + mapLock sync.Mutex +) + +func openFile(filePath string, fi os.FileInfo, acceptEncoding string) (bool, string, *serveContentHolder, error) { + mapKey := acceptEncoding + ":" + filePath + mapFile, _ := staticFileMap[mapKey] + if isOk(mapFile, fi) { + return mapFile.encoding != "", mapFile.encoding, mapFile, nil + } 
+	mapLock.Lock()
+	defer mapLock.Unlock()
+	if mapFile, _ = staticFileMap[mapKey]; !isOk(mapFile, fi) {
+		file, err := os.Open(filePath)
+		if err != nil {
+			return false, "", nil, err
+		}
+		defer file.Close()
+		var bufferWriter bytes.Buffer
+		_, n, err := context.WriteFile(acceptEncoding, &bufferWriter, file)
+		if err != nil {
+			return false, "", nil, err
+		}
+		mapFile = &serveContentHolder{Reader: bytes.NewReader(bufferWriter.Bytes()), modTime: fi.ModTime(), size: int64(bufferWriter.Len()), encoding: n}
+		staticFileMap[mapKey] = mapFile
+	}
+
+	return mapFile.encoding != "", mapFile.encoding, mapFile, nil
+}
+
+func isOk(s *serveContentHolder, fi os.FileInfo) bool {
+	if s == nil {
+		return false
+	}
+	return s.modTime == fi.ModTime() && s.size == fi.Size()
+}
+
+// isStaticCompress reports whether the static file should be served compressed, based on its extension.
+func isStaticCompress(filePath string) bool {
+	for _, statExtension := range BConfig.WebConfig.StaticExtensionsToGzip {
+		if strings.HasSuffix(strings.ToLower(filePath), strings.ToLower(statExtension)) {
+			return true
+		}
+	}
+	return false
+}
+
+// searchFile searches for the file to serve by URL path.
+// If no static file prefix matches, it returns errNotStaticRequest.
+func searchFile(ctx *context.Context) (string, os.FileInfo, error) {
+	requestPath := filepath.ToSlash(filepath.Clean(ctx.Request.URL.Path))
+	// special processing: favicon.ico and robots.txt may live in any static dir
+	if requestPath == "/favicon.ico" || requestPath == "/robots.txt" {
+		file := path.Join(".", requestPath)
+		if fi, _ := os.Stat(file); fi != nil {
+			return file, fi, nil
+		}
+		for _, staticDir := range BConfig.WebConfig.StaticDir {
+			filePath := path.Join(staticDir, requestPath)
+			if fi, _ := os.Stat(filePath); fi != nil {
+				return filePath, fi, nil
+			}
+		}
+		return "", nil, errors.New(requestPath + " file not find")
+	}
+
+	for prefix, staticDir := range BConfig.WebConfig.StaticDir {
+		if len(prefix) == 0 {
+			continue
+		}
+		if !strings.Contains(requestPath, prefix) {
+			continue
+		}
+		if len(requestPath) > len(prefix) && requestPath[len(prefix)] != '/' {
+			continue
+		}
+		filePath := path.Join(staticDir, requestPath[len(prefix):])
+		if fi, err := os.Stat(filePath); fi != nil {
+			return filePath, fi, err
+		}
+	}
+	return "", nil, errNotStaticRequest
+}
+
+// lookupFile finds the file to serve.
+// If the path is a directory, it searches for index.html inside it as the default file (which must not itself be a directory).
+// If index.html does not exist or is a directory, a forbidden response may be returned depending on DirectoryIndex.
+func lookupFile(ctx *context.Context) (bool, string, os.FileInfo, error) {
+	fp, fi, err := searchFile(ctx)
+	if fp == "" || fi == nil {
+		return false, "", nil, err
+	}
+	if !fi.IsDir() {
+		return false, fp, fi, err
+	}
+	ifp := filepath.Join(fp, "index.html")
+	if ifi, _ := os.Stat(ifp); ifi != nil && ifi.Mode().IsRegular() {
+		return false, ifp, ifi, err
+	}
+	return !BConfig.WebConfig.DirectoryIndex, fp, fi, err
+}
diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/swagger/docs_spec.go b/Godeps/_workspace/src/github.com/astaxie/beego/swagger/docs_spec.go
new file mode 100644
index 000000000..680324dc0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/astaxie/beego/swagger/docs_spec.go
@@ -0,0 +1,160 @@
+// Copyright 2014 beego Author. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +// Package swagger struct definition +package swagger + +// SwaggerVersion show the current swagger version +const SwaggerVersion = "1.2" + +// ResourceListing list the resource +type ResourceListing struct { + APIVersion string `json:"apiVersion"` + SwaggerVersion string `json:"swaggerVersion"` // e.g 1.2 + // BasePath string `json:"basePath"` obsolete in 1.1 + APIs []APIRef `json:"apis"` + Info Information `json:"info"` +} + +// APIRef description the api path and description +type APIRef struct { + Path string `json:"path"` // relative or absolute, must start with / + Description string `json:"description"` +} + +// Information show the API Information +type Information struct { + Title string `json:"title,omitempty"` + Description string `json:"description,omitempty"` + Contact string `json:"contact,omitempty"` + TermsOfServiceURL string `json:"termsOfServiceUrl,omitempty"` + License string `json:"license,omitempty"` + LicenseURL string `json:"licenseUrl,omitempty"` +} + +// APIDeclaration see https://github.com/wordnik/swagger-core/blob/scala_2.10-1.3-RC3/schemas/api-declaration-schema.json +type APIDeclaration struct { + APIVersion string `json:"apiVersion"` + SwaggerVersion string `json:"swaggerVersion"` + BasePath string `json:"basePath"` + ResourcePath string `json:"resourcePath"` // must start with / + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + APIs []API `json:"apis,omitempty"` + Models map[string]Model `json:"models,omitempty"` +} + +// API show tha API struct +type API struct { + Path string `json:"path"` // relative or absolute, must start with / + Description string `json:"description"` + Operations []Operation `json:"operations,omitempty"` +} + +// Operation desc the Operation +type Operation struct { + HTTPMethod string `json:"httpMethod"` + Nickname string `json:"nickname"` + Type string `json:"type"` // in 1.1 = DataType + // ResponseClass string `json:"responseClass"` obsolete in 1.2 + 
Summary string `json:"summary,omitempty"` + Notes string `json:"notes,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + ResponseMessages []ResponseMessage `json:"responseMessages,omitempty"` // optional + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Authorizations []Authorization `json:"authorizations,omitempty"` + Protocols []Protocol `json:"protocols,omitempty"` +} + +// Protocol support which Protocol +type Protocol struct { +} + +// ResponseMessage Show the +type ResponseMessage struct { + Code int `json:"code"` + Message string `json:"message"` + ResponseModel string `json:"responseModel"` +} + +// Parameter desc the request parameters +type Parameter struct { + ParamType string `json:"paramType"` // path,query,body,header,form + Name string `json:"name"` + Description string `json:"description"` + DataType string `json:"dataType"` // 1.2 needed? + Type string `json:"type"` // integer + Format string `json:"format"` // int64 + AllowMultiple bool `json:"allowMultiple"` + Required bool `json:"required"` + Minimum int `json:"minimum"` + Maximum int `json:"maximum"` +} + +// ErrorResponse desc response +type ErrorResponse struct { + Code int `json:"code"` + Reason string `json:"reason"` +} + +// Model define the data model +type Model struct { + ID string `json:"id"` + Required []string `json:"required,omitempty"` + Properties map[string]ModelProperty `json:"properties"` +} + +// ModelProperty define the properties +type ModelProperty struct { + Type string `json:"type"` + Description string `json:"description"` + Items map[string]string `json:"items,omitempty"` + Format string `json:"format"` +} + +// Authorization see https://github.com/wordnik/swagger-core/wiki/authorizations +type Authorization struct { + LocalOAuth OAuth `json:"local-oauth"` + APIKey APIKey `json:"apiKey"` +} + +// OAuth see https://github.com/wordnik/swagger-core/wiki/authorizations +type OAuth struct { + Type string 
`json:"type"` // e.g. oauth2 + Scopes []string `json:"scopes"` // e.g. PUBLIC + GrantTypes map[string]GrantType `json:"grantTypes"` +} + +// GrantType see https://github.com/wordnik/swagger-core/wiki/authorizations +type GrantType struct { + LoginEndpoint Endpoint `json:"loginEndpoint"` + TokenName string `json:"tokenName"` // e.g. access_code + TokenRequestEndpoint Endpoint `json:"tokenRequestEndpoint"` + TokenEndpoint Endpoint `json:"tokenEndpoint"` +} + +// Endpoint see https://github.com/wordnik/swagger-core/wiki/authorizations +type Endpoint struct { + URL string `json:"url"` + ClientIDName string `json:"clientIdName"` + ClientSecretName string `json:"clientSecretName"` + TokenName string `json:"tokenName"` +} + +// APIKey see https://github.com/wordnik/swagger-core/wiki/authorizations +type APIKey struct { + Type string `json:"type"` // e.g. apiKey + PassAs string `json:"passAs"` // e.g. header +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/template.go b/Godeps/_workspace/src/github.com/astaxie/beego/template.go new file mode 100644 index 000000000..363c67541 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/template.go @@ -0,0 +1,288 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package beego + +import ( + "errors" + "fmt" + "html/template" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/astaxie/beego/utils" +) + +var ( + beegoTplFuncMap = make(template.FuncMap) + // BeeTemplates caching map and supported template file extensions. + BeeTemplates = make(map[string]*template.Template) + // BeeTemplateExt stores the template extension which will build + BeeTemplateExt = []string{"tpl", "html"} +) + +func init() { + beegoTplFuncMap["dateformat"] = DateFormat + beegoTplFuncMap["date"] = Date + beegoTplFuncMap["compare"] = Compare + beegoTplFuncMap["compare_not"] = CompareNot + beegoTplFuncMap["not_nil"] = NotNil + beegoTplFuncMap["not_null"] = NotNil + beegoTplFuncMap["substr"] = Substr + beegoTplFuncMap["html2str"] = HTML2str + beegoTplFuncMap["str2html"] = Str2html + beegoTplFuncMap["htmlquote"] = Htmlquote + beegoTplFuncMap["htmlunquote"] = Htmlunquote + beegoTplFuncMap["renderform"] = RenderForm + beegoTplFuncMap["assets_js"] = AssetsJs + beegoTplFuncMap["assets_css"] = AssetsCSS + beegoTplFuncMap["config"] = GetConfig + beegoTplFuncMap["map_get"] = MapGet + + // go1.2 added template funcs + // Comparisons + beegoTplFuncMap["eq"] = eq // == + beegoTplFuncMap["ge"] = ge // >= + beegoTplFuncMap["gt"] = gt // > + beegoTplFuncMap["le"] = le // <= + beegoTplFuncMap["lt"] = lt // < + beegoTplFuncMap["ne"] = ne // != + + beegoTplFuncMap["urlfor"] = URLFor // != +} + +// AddFuncMap let user to register a func in the template. 
+func AddFuncMap(key string, funname interface{}) error { + beegoTplFuncMap[key] = funname + return nil +} + +type templatefile struct { + root string + files map[string][]string +} + +func (tf *templatefile) visit(paths string, f os.FileInfo, err error) error { + if f == nil { + return err + } + if f.IsDir() || (f.Mode()&os.ModeSymlink) > 0 { + return nil + } + if !HasTemplateExt(paths) { + return nil + } + + replace := strings.NewReplacer("\\", "/") + a := []byte(paths) + a = a[len([]byte(tf.root)):] + file := strings.TrimLeft(replace.Replace(string(a)), "/") + subdir := filepath.Dir(file) + if _, ok := tf.files[subdir]; ok { + tf.files[subdir] = append(tf.files[subdir], file) + } else { + m := make([]string, 1) + m[0] = file + tf.files[subdir] = m + } + + return nil +} + +// HasTemplateExt return this path contains supported template extension of beego or not. +func HasTemplateExt(paths string) bool { + for _, v := range BeeTemplateExt { + if strings.HasSuffix(paths, "."+v) { + return true + } + } + return false +} + +// AddTemplateExt add new extension for template. +func AddTemplateExt(ext string) { + for _, v := range BeeTemplateExt { + if v == ext { + return + } + } + BeeTemplateExt = append(BeeTemplateExt, ext) +} + +// BuildTemplate will build all template files in a directory. +// it makes beego can render any template file in view directory. +func BuildTemplate(dir string, files ...string) error { + if _, err := os.Stat(dir); err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.New("dir open err") + } + self := &templatefile{ + root: dir, + files: make(map[string][]string), + } + err := filepath.Walk(dir, func(path string, f os.FileInfo, err error) error { + return self.visit(path, f, err) + }) + if err != nil { + fmt.Printf("filepath.Walk() returned %v\n", err) + return err + } + for _, v := range self.files { + for _, file := range v { + if len(files) == 0 || utils.InSlice(file, files) { + t, err := getTemplate(self.root, file, v...) 
+ if err != nil { + Trace("parse template err:", file, err) + } else { + BeeTemplates[file] = t + } + } + } + } + return nil +} + +func getTplDeep(root, file, parent string, t *template.Template) (*template.Template, [][]string, error) { + var fileabspath string + if filepath.HasPrefix(file, "../") { + fileabspath = filepath.Join(root, filepath.Dir(parent), file) + } else { + fileabspath = filepath.Join(root, file) + } + if e := utils.FileExists(fileabspath); !e { + panic("can't find template file:" + file) + } + data, err := ioutil.ReadFile(fileabspath) + if err != nil { + return nil, [][]string{}, err + } + t, err = t.New(file).Parse(string(data)) + if err != nil { + return nil, [][]string{}, err + } + reg := regexp.MustCompile(BConfig.WebConfig.TemplateLeft + "[ ]*template[ ]+\"([^\"]+)\"") + allsub := reg.FindAllStringSubmatch(string(data), -1) + for _, m := range allsub { + if len(m) == 2 { + tlook := t.Lookup(m[1]) + if tlook != nil { + continue + } + if !HasTemplateExt(m[1]) { + continue + } + t, _, err = getTplDeep(root, m[1], file, t) + if err != nil { + return nil, [][]string{}, err + } + } + } + return t, allsub, nil +} + +func getTemplate(root, file string, others ...string) (t *template.Template, err error) { + t = template.New(file).Delims(BConfig.WebConfig.TemplateLeft, BConfig.WebConfig.TemplateRight).Funcs(beegoTplFuncMap) + var submods [][]string + t, submods, err = getTplDeep(root, file, "", t) + if err != nil { + return nil, err + } + t, err = _getTemplate(t, root, submods, others...) 
+ + if err != nil { + return nil, err + } + return +} + +func _getTemplate(t0 *template.Template, root string, submods [][]string, others ...string) (t *template.Template, err error) { + t = t0 + for _, m := range submods { + if len(m) == 2 { + templ := t.Lookup(m[1]) + if templ != nil { + continue + } + //first check filename + for _, otherfile := range others { + if otherfile == m[1] { + var submods1 [][]string + t, submods1, err = getTplDeep(root, otherfile, "", t) + if err != nil { + Trace("template parse file err:", err) + } else if submods1 != nil && len(submods1) > 0 { + t, err = _getTemplate(t, root, submods1, others...) + } + break + } + } + //second check define + for _, otherfile := range others { + fileabspath := filepath.Join(root, otherfile) + data, err := ioutil.ReadFile(fileabspath) + if err != nil { + continue + } + reg := regexp.MustCompile(BConfig.WebConfig.TemplateLeft + "[ ]*define[ ]+\"([^\"]+)\"") + allsub := reg.FindAllStringSubmatch(string(data), -1) + for _, sub := range allsub { + if len(sub) == 2 && sub[1] == m[1] { + var submods1 [][]string + t, submods1, err = getTplDeep(root, otherfile, "", t) + if err != nil { + Trace("template parse file err:", err) + } else if submods1 != nil && len(submods1) > 0 { + t, err = _getTemplate(t, root, submods1, others...) + } + break + } + } + } + } + + } + return +} + +// SetViewsPath sets view directory path in beego application. +func SetViewsPath(path string) *App { + BConfig.WebConfig.ViewsPath = path + return BeeApp +} + +// SetStaticPath sets static directory path and proper url pattern in beego application. +// if beego.SetStaticPath("static","public"), visit /static/* to load static file in folder "public". 
+func SetStaticPath(url string, path string) *App { + if !strings.HasPrefix(url, "/") { + url = "/" + url + } + url = strings.TrimRight(url, "/") + BConfig.WebConfig.StaticDir[url] = path + return BeeApp +} + +// DelStaticPath removes the static folder setting in this url pattern in beego application. +func DelStaticPath(url string) *App { + if !strings.HasPrefix(url, "/") { + url = "/" + url + } + url = strings.TrimRight(url, "/") + delete(BConfig.WebConfig.StaticDir, url) + return BeeApp +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/templatefunc.go b/Godeps/_workspace/src/github.com/astaxie/beego/templatefunc.go new file mode 100644 index 000000000..8558733f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/templatefunc.go @@ -0,0 +1,729 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package beego + +import ( + "errors" + "fmt" + "html/template" + "net/url" + "reflect" + "regexp" + "strconv" + "strings" + "time" +) + +// Substr returns the substr from start to length. +func Substr(s string, start, length int) string { + bt := []rune(s) + if start < 0 { + start = 0 + } + if start > len(bt) { + start = start % len(bt) + } + var end int + if (start + length) > (len(bt) - 1) { + end = len(bt) + } else { + end = start + length + } + return string(bt[start:end]) +} + +// HTML2str returns escaping text convert from html. 
+func HTML2str(html string) string { + src := string(html) + + re, _ := regexp.Compile("\\<[\\S\\s]+?\\>") + src = re.ReplaceAllStringFunc(src, strings.ToLower) + + //remove STYLE + re, _ = regexp.Compile("\\") + src = re.ReplaceAllString(src, "") + + //remove SCRIPT + re, _ = regexp.Compile("\\") + src = re.ReplaceAllString(src, "") + + re, _ = regexp.Compile("\\<[\\S\\s]+?\\>") + src = re.ReplaceAllString(src, "\n") + + re, _ = regexp.Compile("\\s{2,}") + src = re.ReplaceAllString(src, "\n") + + return strings.TrimSpace(src) +} + +// DateFormat takes a time and a layout string and returns a string with the formatted date. Used by the template parser as "dateformat" +func DateFormat(t time.Time, layout string) (datestring string) { + datestring = t.Format(layout) + return +} + +// DateFormat pattern rules. +var datePatterns = []string{ + // year + "Y", "2006", // A full numeric representation of a year, 4 digits Examples: 1999 or 2003 + "y", "06", //A two digit representation of a year Examples: 99 or 03 + + // month + "m", "01", // Numeric representation of a month, with leading zeros 01 through 12 + "n", "1", // Numeric representation of a month, without leading zeros 1 through 12 + "M", "Jan", // A short textual representation of a month, three letters Jan through Dec + "F", "January", // A full textual representation of a month, such as January or March January through December + + // day + "d", "02", // Day of the month, 2 digits with leading zeros 01 to 31 + "j", "2", // Day of the month without leading zeros 1 to 31 + + // week + "D", "Mon", // A textual representation of a day, three letters Mon through Sun + "l", "Monday", // A full textual representation of the day of the week Sunday through Saturday + + // time + "g", "3", // 12-hour format of an hour without leading zeros 1 through 12 + "G", "15", // 24-hour format of an hour without leading zeros 0 through 23 + "h", "03", // 12-hour format of an hour with leading zeros 01 through 12 + "H", "15", // 
24-hour format of an hour with leading zeros 00 through 23 + + "a", "pm", // Lowercase Ante meridiem and Post meridiem am or pm + "A", "PM", // Uppercase Ante meridiem and Post meridiem AM or PM + + "i", "04", // Minutes with leading zeros 00 to 59 + "s", "05", // Seconds, with leading zeros 00 through 59 + + // time zone + "T", "MST", + "P", "-07:00", + "O", "-0700", + + // RFC 2822 + "r", time.RFC1123Z, +} + +// DateParse Parse Date use PHP time format. +func DateParse(dateString, format string) (time.Time, error) { + replacer := strings.NewReplacer(datePatterns...) + format = replacer.Replace(format) + return time.ParseInLocation(format, dateString, time.Local) +} + +// Date takes a PHP like date func to Go's time format. +func Date(t time.Time, format string) string { + replacer := strings.NewReplacer(datePatterns...) + format = replacer.Replace(format) + return t.Format(format) +} + +// Compare is a quick and dirty comparison function. It will convert whatever you give it to strings and see if the two values are equal. +// Whitespace is trimmed. Used by the template parser as "eq". 
+func Compare(a, b interface{}) (equal bool) { + equal = false + if strings.TrimSpace(fmt.Sprintf("%v", a)) == strings.TrimSpace(fmt.Sprintf("%v", b)) { + equal = true + } + return +} + +// CompareNot !Compare +func CompareNot(a, b interface{}) (equal bool) { + return !Compare(a, b) +} + +// NotNil the same as CompareNot +func NotNil(a interface{}) (isNil bool) { + return CompareNot(a, nil) +} + +// GetConfig get the Appconfig +func GetConfig(returnType, key string, defaultVal interface{}) (value interface{}, err error) { + switch returnType { + case "String": + value = AppConfig.String(key) + case "Bool": + value, err = AppConfig.Bool(key) + case "Int": + value, err = AppConfig.Int(key) + case "Int64": + value, err = AppConfig.Int64(key) + case "Float": + value, err = AppConfig.Float(key) + case "DIY": + value, err = AppConfig.DIY(key) + default: + err = errors.New("Config keys must be of type String, Bool, Int, Int64, Float, or DIY") + } + + if err != nil { + if reflect.TypeOf(returnType) != reflect.TypeOf(defaultVal) { + err = errors.New("defaultVal type does not match returnType") + } else { + value, err = defaultVal, nil + } + } else if reflect.TypeOf(value).Kind() == reflect.String { + if value == "" { + if reflect.TypeOf(defaultVal).Kind() != reflect.String { + err = errors.New("defaultVal type must be a String if the returnType is a String") + } else { + value = defaultVal.(string) + } + } + } + + return +} + +// Str2html Convert string to template.HTML type. +func Str2html(raw string) template.HTML { + return template.HTML(raw) +} + +// Htmlquote returns quoted html string. +func Htmlquote(src string) string { + //HTML编码为实体符号 + /* + Encodes `text` for raw use in HTML. + >>> htmlquote("<'&\\">") + '<'&">' + */ + + text := string(src) + + text = strings.Replace(text, "&", "&", -1) // Must be done first! 
+ text = strings.Replace(text, "<", "<", -1) + text = strings.Replace(text, ">", ">", -1) + text = strings.Replace(text, "'", "'", -1) + text = strings.Replace(text, "\"", """, -1) + text = strings.Replace(text, "“", "“", -1) + text = strings.Replace(text, "”", "”", -1) + text = strings.Replace(text, " ", " ", -1) + + return strings.TrimSpace(text) +} + +// Htmlunquote returns unquoted html string. +func Htmlunquote(src string) string { + //实体符号解释为HTML + /* + Decodes `text` that's HTML quoted. + >>> htmlunquote('<'&">') + '<\\'&">' + */ + + // strings.Replace(s, old, new, n) + // 在s字符串中,把old字符串替换为new字符串,n表示替换的次数,小于0表示全部替换 + + text := string(src) + text = strings.Replace(text, " ", " ", -1) + text = strings.Replace(text, "”", "”", -1) + text = strings.Replace(text, "“", "“", -1) + text = strings.Replace(text, """, "\"", -1) + text = strings.Replace(text, "'", "'", -1) + text = strings.Replace(text, ">", ">", -1) + text = strings.Replace(text, "<", "<", -1) + text = strings.Replace(text, "&", "&", -1) // Must be done last! + + return strings.TrimSpace(text) +} + +// URLFor returns url string with another registered controller handler with params. +// usage: +// +// URLFor(".index") +// print URLFor("index") +// router /login +// print URLFor("login") +// print URLFor("login", "next","/"") +// router /profile/:username +// print UrlFor("profile", ":username","John Doe") +// result: +// / +// /login +// /login?next=/ +// /user/John%20Doe +// +// more detail http://beego.me/docs/mvc/controller/urlbuilding.md +func URLFor(endpoint string, values ...interface{}) string { + return BeeApp.Handlers.URLFor(endpoint, values...) +} + +// AssetsJs returns script tag with src string. +func AssetsJs(src string) template.HTML { + text := string(src) + + text = "" + + return template.HTML(text) +} + +// AssetsCSS returns stylesheet link tag with src string. 
+func AssetsCSS(src string) template.HTML { + text := string(src) + + text = "" + + return template.HTML(text) +} + +// ParseForm will parse form values to struct via tag. +func ParseForm(form url.Values, obj interface{}) error { + objT := reflect.TypeOf(obj) + objV := reflect.ValueOf(obj) + if !isStructPtr(objT) { + return fmt.Errorf("%v must be a struct pointer", obj) + } + objT = objT.Elem() + objV = objV.Elem() + + for i := 0; i < objT.NumField(); i++ { + fieldV := objV.Field(i) + if !fieldV.CanSet() { + continue + } + + fieldT := objT.Field(i) + tags := strings.Split(fieldT.Tag.Get("form"), ",") + var tag string + if len(tags) == 0 || len(tags[0]) == 0 { + tag = fieldT.Name + } else if tags[0] == "-" { + continue + } else { + tag = tags[0] + } + + value := form.Get(tag) + if len(value) == 0 { + continue + } + + switch fieldT.Type.Kind() { + case reflect.Bool: + if strings.ToLower(value) == "on" || strings.ToLower(value) == "1" || strings.ToLower(value) == "yes" { + fieldV.SetBool(true) + continue + } + if strings.ToLower(value) == "off" || strings.ToLower(value) == "0" || strings.ToLower(value) == "no" { + fieldV.SetBool(false) + continue + } + b, err := strconv.ParseBool(value) + if err != nil { + return err + } + fieldV.SetBool(b) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + fieldV.SetInt(x) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + x, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return err + } + fieldV.SetUint(x) + case reflect.Float32, reflect.Float64: + x, err := strconv.ParseFloat(value, 64) + if err != nil { + return err + } + fieldV.SetFloat(x) + case reflect.Interface: + fieldV.Set(reflect.ValueOf(value)) + case reflect.String: + fieldV.SetString(value) + case reflect.Struct: + switch fieldT.Type.String() { + case "time.Time": + format := time.RFC3339 + if len(tags) > 1 { + format 
= tags[1] + } + t, err := time.ParseInLocation(format, value, time.Local) + if err != nil { + return err + } + fieldV.Set(reflect.ValueOf(t)) + } + case reflect.Slice: + if fieldT.Type == sliceOfInts { + formVals := form[tag] + fieldV.Set(reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf(int(1))), len(formVals), len(formVals))) + for i := 0; i < len(formVals); i++ { + val, err := strconv.Atoi(formVals[i]) + if err != nil { + return err + } + fieldV.Index(i).SetInt(int64(val)) + } + } else if fieldT.Type == sliceOfStrings { + formVals := form[tag] + fieldV.Set(reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf("")), len(formVals), len(formVals))) + for i := 0; i < len(formVals); i++ { + fieldV.Index(i).SetString(formVals[i]) + } + } + } + } + return nil +} + +var sliceOfInts = reflect.TypeOf([]int(nil)) +var sliceOfStrings = reflect.TypeOf([]string(nil)) + +var unKind = map[reflect.Kind]bool{ + reflect.Uintptr: true, + reflect.Complex64: true, + reflect.Complex128: true, + reflect.Array: true, + reflect.Chan: true, + reflect.Func: true, + reflect.Map: true, + reflect.Ptr: true, + reflect.Slice: true, + reflect.Struct: true, + reflect.UnsafePointer: true, +} + +// RenderForm will render object to form html. +// obj must be a struct pointer. +func RenderForm(obj interface{}) template.HTML { + objT := reflect.TypeOf(obj) + objV := reflect.ValueOf(obj) + if !isStructPtr(objT) { + return template.HTML("") + } + objT = objT.Elem() + objV = objV.Elem() + + var raw []string + for i := 0; i < objT.NumField(); i++ { + fieldV := objV.Field(i) + if !fieldV.CanSet() || unKind[fieldV.Kind()] { + continue + } + + fieldT := objT.Field(i) + + label, name, fType, id, class, ignored := parseFormTag(fieldT) + if ignored { + continue + } + + raw = append(raw, renderFormField(label, name, fType, fieldV.Interface(), id, class)) + } + return template.HTML(strings.Join(raw, "
")) +} + +// renderFormField returns a string containing HTML of a single form field. +func renderFormField(label, name, fType string, value interface{}, id string, class string) string { + if id != "" { + id = " id=\"" + id + "\"" + } + + if class != "" { + class = " class=\"" + class + "\"" + } + + if isValidForInput(fType) { + return fmt.Sprintf(`%v`, label, id, class, name, fType, value) + } + + return fmt.Sprintf(`%v<%v%v%v name="%v">%v`, label, fType, id, class, name, value, fType) +} + +// isValidForInput checks if fType is a valid value for the `type` property of an HTML input element. +func isValidForInput(fType string) bool { + validInputTypes := strings.Fields("text password checkbox radio submit reset hidden image file button search email url tel number range date month week time datetime datetime-local color") + for _, validType := range validInputTypes { + if fType == validType { + return true + } + } + return false +} + +// parseFormTag takes the stuct-tag of a StructField and parses the `form` value. +// returned are the form label, name-property, type and wether the field should be ignored. +func parseFormTag(fieldT reflect.StructField) (label, name, fType string, id string, class string, ignored bool) { + tags := strings.Split(fieldT.Tag.Get("form"), ",") + label = fieldT.Name + ": " + name = fieldT.Name + fType = "text" + ignored = false + id = fieldT.Tag.Get("id") + class = fieldT.Tag.Get("class") + + switch len(tags) { + case 1: + if tags[0] == "-" { + ignored = true + } + if len(tags[0]) > 0 { + name = tags[0] + } + case 2: + if len(tags[0]) > 0 { + name = tags[0] + } + if len(tags[1]) > 0 { + fType = tags[1] + } + case 3: + if len(tags[0]) > 0 { + name = tags[0] + } + if len(tags[1]) > 0 { + fType = tags[1] + } + if len(tags[2]) > 0 { + label = tags[2] + } + } + return +} + +func isStructPtr(t reflect.Type) bool { + return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct +} + +// go1.2 added template funcs. 
// go1.2 added template funcs. begin
var (
	errBadComparisonType = errors.New("invalid type for comparison")
	errBadComparison     = errors.New("incompatible types for comparison")
	errNoComparison      = errors.New("missing argument for comparison")
)

// kind classifies reflect values into the coarse categories that the
// template comparison helpers know how to compare.
type kind int

const (
	invalidKind kind = iota
	boolKind
	complexKind
	intKind
	floatKind
	stringKind
	uintKind
)

// basicKind maps a reflect.Value onto its comparison category, or reports
// errBadComparisonType for values that cannot be compared at all.
func basicKind(v reflect.Value) (kind, error) {
	switch v.Kind() {
	case reflect.Bool:
		return boolKind, nil
	case reflect.String:
		return stringKind, nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return intKind, nil
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return uintKind, nil
	case reflect.Float32, reflect.Float64:
		return floatKind, nil
	case reflect.Complex64, reflect.Complex128:
		return complexKind, nil
	}
	return invalidKind, errBadComparisonType
}

// eq evaluates the comparison a == b || a == c || ...
// All operands must share the same basic kind.
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
	lhs := reflect.ValueOf(arg1)
	lhsKind, err := basicKind(lhs)
	if err != nil {
		return false, err
	}
	if len(arg2) == 0 {
		return false, errNoComparison
	}
	for _, candidate := range arg2 {
		rhs := reflect.ValueOf(candidate)
		rhsKind, err := basicKind(rhs)
		if err != nil {
			return false, err
		}
		if lhsKind != rhsKind {
			return false, errBadComparison
		}
		var same bool
		switch lhsKind {
		case boolKind:
			same = lhs.Bool() == rhs.Bool()
		case complexKind:
			same = lhs.Complex() == rhs.Complex()
		case floatKind:
			same = lhs.Float() == rhs.Float()
		case intKind:
			same = lhs.Int() == rhs.Int()
		case stringKind:
			same = lhs.String() == rhs.String()
		case uintKind:
			same = lhs.Uint() == rhs.Uint()
		default:
			panic("invalid kind")
		}
		if same {
			return true, nil
		}
	}
	return false, nil
}

// ne evaluates the comparison a != b.
func ne(arg1, arg2 interface{}) (bool, error) {
	// != is the inverse of ==.
	equal, err := eq(arg1, arg2)
	return !equal, err
}

// lt evaluates the comparison a < b.
// Booleans and complex numbers have no ordering and yield an error.
func lt(arg1, arg2 interface{}) (bool, error) {
	lhs := reflect.ValueOf(arg1)
	lhsKind, err := basicKind(lhs)
	if err != nil {
		return false, err
	}
	rhs := reflect.ValueOf(arg2)
	rhsKind, err := basicKind(rhs)
	if err != nil {
		return false, err
	}
	if lhsKind != rhsKind {
		return false, errBadComparison
	}
	switch lhsKind {
	case boolKind, complexKind:
		return false, errBadComparisonType
	case floatKind:
		return lhs.Float() < rhs.Float(), nil
	case intKind:
		return lhs.Int() < rhs.Int(), nil
	case stringKind:
		return lhs.String() < rhs.String(), nil
	case uintKind:
		return lhs.Uint() < rhs.Uint(), nil
	}
	panic("invalid kind")
}

// le evaluates the comparison a <= b.
func le(arg1, arg2 interface{}) (bool, error) {
	// <= is < or ==.
	lessThan, err := lt(arg1, arg2)
	if lessThan || err != nil {
		return lessThan, err
	}
	return eq(arg1, arg2)
}

// gt evaluates the comparison a > b.
func gt(arg1, arg2 interface{}) (bool, error) {
	// > is the inverse of <=.
	lessOrEqual, err := le(arg1, arg2)
	if err != nil {
		return false, err
	}
	return !lessOrEqual, nil
}

// ge evaluates the comparison a >= b.
func ge(arg1, arg2 interface{}) (bool, error) {
	// >= is the inverse of <.
	lessThan, err := lt(arg1, arg2)
	if err != nil {
		return false, err
	}
	return !lessThan, nil
}

// MapGet getting value from map by keys
// usage:
// Data["m"] = map[string]interface{} {
//     "a": 1,
//     "1": map[string]float64{
//         "c": 4,
//     },
// }
//
// {{ map_get m "a" }} // return 1
// {{ map_get m 1 "c" }} // return 4
//
// Keys whose reflect kind differs from the map's key kind are round-tripped
// through their string representation and re-parsed into the key kind.
func MapGet(arg1 interface{}, arg2 ...interface{}) (interface{}, error) {
	srcType := reflect.TypeOf(arg1)
	srcVal := reflect.ValueOf(arg1)

	if srcType.Kind() != reflect.Map || len(arg2) == 0 {
		return nil, nil
	}

	keyVal := reflect.ValueOf(arg2[0])
	if reflect.TypeOf(arg2[0]).Kind() != srcType.Key().Kind() {
		keyStr := fmt.Sprintf("%v", arg2[0])
		var converted interface{}
		switch srcType.Key().Kind() {
		case reflect.Bool:
			converted, _ = strconv.ParseBool(keyStr)
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			converted, _ = strconv.ParseInt(keyStr, 0, 64)
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
			converted, _ = strconv.ParseUint(keyStr, 0, 64)
		case reflect.Float32, reflect.Float64:
			converted, _ = strconv.ParseFloat(keyStr, 64)
		case reflect.String:
			converted = keyStr
		default:
			converted = keyVal.Interface()
		}
		keyVal = reflect.ValueOf(converted)
	}

	stored := srcVal.MapIndex(keyVal)
	if !stored.IsValid() {
		return nil, nil
	}

	// Unbox the stored value into a plain interface{} so the recursive
	// call (and template callers) see an ordinary Go value.
	var result interface{}
	switch srcType.Elem().Kind() {
	case reflect.Bool:
		result = stored.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		result = stored.Int()
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		result = stored.Uint()
	case reflect.Float32, reflect.Float64:
		result = stored.Float()
	case reflect.String:
		result = stored.String()
	default:
		result = stored.Interface()
	}

	// If there are more keys, descend recursively.
	if len(arg2) > 1 {
		return MapGet(result, arg2[1:]...)
	}
	return result, nil
}
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testing + +import ( + "github.com/astaxie/beego/config" + "github.com/astaxie/beego/httplib" +) + +var port = "" +var baseURL = "http://localhost:" + +// TestHTTPRequest beego test request client +type TestHTTPRequest struct { + httplib.BeegoHTTPRequest +} + +func getPort() string { + if port == "" { + config, err := config.NewConfig("ini", "../conf/app.conf") + if err != nil { + return "8080" + } + port = config.String("httpport") + return port + } + return port +} + +// Get returns test client in GET method +func Get(path string) *TestHTTPRequest { + return &TestHTTPRequest{*httplib.Get(baseURL + getPort() + path)} +} + +// Post returns test client in POST method +func Post(path string) *TestHTTPRequest { + return &TestHTTPRequest{*httplib.Post(baseURL + getPort() + path)} +} + +// Put returns test client in PUT method +func Put(path string) *TestHTTPRequest { + return &TestHTTPRequest{*httplib.Put(baseURL + getPort() + path)} +} + +// Delete returns test client in DELETE method +func Delete(path string) *TestHTTPRequest { + return &TestHTTPRequest{*httplib.Delete(baseURL + getPort() + path)} +} + +// Head returns test client in HEAD method +func Head(path string) *TestHTTPRequest { + return &TestHTTPRequest{*httplib.Head(baseURL + getPort() + path)} +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/toolbox/healthcheck.go b/Godeps/_workspace/src/github.com/astaxie/beego/toolbox/healthcheck.go new file mode 100644 index 000000000..e3544b3ad --- /dev/null +++ 
// AdminCheckList holds health checker map
var AdminCheckList map[string]HealthChecker

// HealthChecker health checker interface
type HealthChecker interface {
	Check() error
}

// AddHealthCheck add health checker with name string.
// Registering the same name twice replaces the previous checker.
func AddHealthCheck(name string, hc HealthChecker) {
	AdminCheckList[name] = hc
}

func init() {
	AdminCheckList = map[string]HealthChecker{}
}
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package toolbox + +import ( + "fmt" + "io" + "log" + "os" + "path" + "runtime" + "runtime/debug" + "runtime/pprof" + "strconv" + "time" +) + +var startTime = time.Now() +var pid int + +func init() { + pid = os.Getpid() +} + +// ProcessInput parse input command string +func ProcessInput(input string, w io.Writer) { + switch input { + case "lookup goroutine": + p := pprof.Lookup("goroutine") + p.WriteTo(w, 2) + case "lookup heap": + p := pprof.Lookup("heap") + p.WriteTo(w, 2) + case "lookup threadcreate": + p := pprof.Lookup("threadcreate") + p.WriteTo(w, 2) + case "lookup block": + p := pprof.Lookup("block") + p.WriteTo(w, 2) + case "get cpuprof": + GetCPUProfile(w) + case "get memprof": + MemProf(w) + case "gc summary": + PrintGCSummary(w) + } +} + +// MemProf record memory profile in pprof +func MemProf(w io.Writer) { + filename := "mem-" + strconv.Itoa(pid) + ".memprof" + if f, err := os.Create(filename); err != nil { + fmt.Fprintf(w, "create file %s error %s\n", filename, err.Error()) + log.Fatal("record heap profile failed: ", err) + } else { + runtime.GC() + pprof.WriteHeapProfile(f) + f.Close() + fmt.Fprintf(w, "create heap profile %s \n", filename) + _, fl := path.Split(os.Args[0]) + fmt.Fprintf(w, "Now you can use this to check it: go tool pprof %s %s\n", fl, filename) + } +} + +// GetCPUProfile start cpu profile monitor +func GetCPUProfile(w io.Writer) { + sec := 30 + filename := "cpu-" + strconv.Itoa(pid) + ".pprof" + f, err := os.Create(filename) + if err != nil { + fmt.Fprintf(w, "Could not enable CPU profiling: %s\n", err) + 
log.Fatal("record cpu profile failed: ", err) + } + pprof.StartCPUProfile(f) + time.Sleep(time.Duration(sec) * time.Second) + pprof.StopCPUProfile() + + fmt.Fprintf(w, "create cpu profile %s \n", filename) + _, fl := path.Split(os.Args[0]) + fmt.Fprintf(w, "Now you can use this to check it: go tool pprof %s %s\n", fl, filename) +} + +// PrintGCSummary print gc information to io.Writer +func PrintGCSummary(w io.Writer) { + memStats := &runtime.MemStats{} + runtime.ReadMemStats(memStats) + gcstats := &debug.GCStats{PauseQuantiles: make([]time.Duration, 100)} + debug.ReadGCStats(gcstats) + + printGC(memStats, gcstats, w) +} + +func printGC(memStats *runtime.MemStats, gcstats *debug.GCStats, w io.Writer) { + + if gcstats.NumGC > 0 { + lastPause := gcstats.Pause[0] + elapsed := time.Now().Sub(startTime) + overhead := float64(gcstats.PauseTotal) / float64(elapsed) * 100 + allocatedRate := float64(memStats.TotalAlloc) / elapsed.Seconds() + + fmt.Fprintf(w, "NumGC:%d Pause:%s Pause(Avg):%s Overhead:%3.2f%% Alloc:%s Sys:%s Alloc(Rate):%s/s Histogram:%s %s %s \n", + gcstats.NumGC, + toS(lastPause), + toS(avg(gcstats.Pause)), + overhead, + toH(memStats.Alloc), + toH(memStats.Sys), + toH(uint64(allocatedRate)), + toS(gcstats.PauseQuantiles[94]), + toS(gcstats.PauseQuantiles[98]), + toS(gcstats.PauseQuantiles[99])) + } else { + // while GC has disabled + elapsed := time.Now().Sub(startTime) + allocatedRate := float64(memStats.TotalAlloc) / elapsed.Seconds() + + fmt.Fprintf(w, "Alloc:%s Sys:%s Alloc(Rate):%s/s\n", + toH(memStats.Alloc), + toH(memStats.Sys), + toH(uint64(allocatedRate))) + } +} + +func avg(items []time.Duration) time.Duration { + var sum time.Duration + for _, item := range items { + sum += item + } + return time.Duration(int64(sum) / int64(len(items))) +} + +// format bytes number friendly +func toH(bytes uint64) string { + switch { + case bytes < 1024: + return fmt.Sprintf("%dB", bytes) + case bytes < 1024*1024: + return fmt.Sprintf("%.2fK", 
float64(bytes)/1024) + case bytes < 1024*1024*1024: + return fmt.Sprintf("%.2fM", float64(bytes)/1024/1024) + default: + return fmt.Sprintf("%.2fG", float64(bytes)/1024/1024/1024) + } +} + +// short string format +func toS(d time.Duration) string { + + u := uint64(d) + if u < uint64(time.Second) { + switch { + case u == 0: + return "0" + case u < uint64(time.Microsecond): + return fmt.Sprintf("%.2fns", float64(u)) + case u < uint64(time.Millisecond): + return fmt.Sprintf("%.2fus", float64(u)/1000) + default: + return fmt.Sprintf("%.2fms", float64(u)/1000/1000) + } + } else { + switch { + case u < uint64(time.Minute): + return fmt.Sprintf("%.2fs", float64(u)/1000/1000/1000) + case u < uint64(time.Hour): + return fmt.Sprintf("%.2fm", float64(u)/1000/1000/1000/60) + default: + return fmt.Sprintf("%.2fh", float64(u)/1000/1000/1000/60/60) + } + } + +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/toolbox/statistics.go b/Godeps/_workspace/src/github.com/astaxie/beego/toolbox/statistics.go new file mode 100644 index 000000000..32eb7e23a --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/toolbox/statistics.go @@ -0,0 +1,143 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package toolbox + +import ( + "fmt" + "sync" + "time" +) + +// Statistics struct +type Statistics struct { + RequestURL string + RequestController string + RequestNum int64 + MinTime time.Duration + MaxTime time.Duration + TotalTime time.Duration +} + +// URLMap contains several statistics struct to log different data +type URLMap struct { + lock sync.RWMutex + LengthLimit int //limit the urlmap's length if it's equal to 0 there's no limit + urlmap map[string]map[string]*Statistics +} + +// AddStatistics add statistics task. +// it needs request method, request url, request controller and statistics time duration +func (m *URLMap) AddStatistics(requestMethod, requestURL, requestController string, requesttime time.Duration) { + m.lock.Lock() + defer m.lock.Unlock() + if method, ok := m.urlmap[requestURL]; ok { + if s, ok := method[requestMethod]; ok { + s.RequestNum++ + if s.MaxTime < requesttime { + s.MaxTime = requesttime + } + if s.MinTime > requesttime { + s.MinTime = requesttime + } + s.TotalTime += requesttime + } else { + nb := &Statistics{ + RequestURL: requestURL, + RequestController: requestController, + RequestNum: 1, + MinTime: requesttime, + MaxTime: requesttime, + TotalTime: requesttime, + } + m.urlmap[requestURL][requestMethod] = nb + } + + } else { + if m.LengthLimit > 0 && m.LengthLimit <= len(m.urlmap) { + return + } + methodmap := make(map[string]*Statistics) + nb := &Statistics{ + RequestURL: requestURL, + RequestController: requestController, + RequestNum: 1, + MinTime: requesttime, + MaxTime: requesttime, + TotalTime: requesttime, + } + methodmap[requestMethod] = nb + m.urlmap[requestURL] = methodmap + } +} + +// GetMap put url statistics result in io.Writer +func (m *URLMap) GetMap() map[string]interface{} { + m.lock.RLock() + defer m.lock.RUnlock() + + var fields = []string{"requestUrl", "method", "times", "used", "max used", "min used", "avg used"} + + var resultLists [][]string + content := make(map[string]interface{}) + 
content["Fields"] = fields + + for k, v := range m.urlmap { + for kk, vv := range v { + result := []string{ + fmt.Sprintf("% -50s", k), + fmt.Sprintf("% -10s", kk), + fmt.Sprintf("% -16d", vv.RequestNum), + fmt.Sprintf("% -16s", toS(vv.TotalTime)), + fmt.Sprintf("% -16s", toS(vv.MaxTime)), + fmt.Sprintf("% -16s", toS(vv.MinTime)), + fmt.Sprintf("% -16s", toS(time.Duration(int64(vv.TotalTime)/vv.RequestNum))), + } + resultLists = append(resultLists, result) + } + } + content["Data"] = resultLists + return content +} + +// GetMapData return all mapdata +func (m *URLMap) GetMapData() []map[string]interface{} { + + var resultLists []map[string]interface{} + + for k, v := range m.urlmap { + for kk, vv := range v { + result := map[string]interface{}{ + "request_url": k, + "method": kk, + "times": vv.RequestNum, + "total_time": toS(vv.TotalTime), + "max_time": toS(vv.MaxTime), + "min_time": toS(vv.MinTime), + "avg_time": toS(time.Duration(int64(vv.TotalTime) / vv.RequestNum)), + } + resultLists = append(resultLists, result) + } + } + return resultLists +} + +// StatisticsMap hosld global statistics data map +var StatisticsMap *URLMap + +func init() { + StatisticsMap = &URLMap{ + urlmap: make(map[string]map[string]*Statistics), + } +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/toolbox/task.go b/Godeps/_workspace/src/github.com/astaxie/beego/toolbox/task.go new file mode 100644 index 000000000..537de4288 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/toolbox/task.go @@ -0,0 +1,607 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package toolbox + +import ( + "log" + "math" + "sort" + "strconv" + "strings" + "time" +) + +// bounds provides a range of acceptable values (plus a map of name to value). +type bounds struct { + min, max uint + names map[string]uint +} + +// The bounds for each field. +var ( + AdminTaskList map[string]Tasker + stop chan bool + changed chan bool + isstart bool + seconds = bounds{0, 59, nil} + minutes = bounds{0, 59, nil} + hours = bounds{0, 23, nil} + days = bounds{1, 31, nil} + months = bounds{1, 12, map[string]uint{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "oct": 10, + "nov": 11, + "dec": 12, + }} + weeks = bounds{0, 6, map[string]uint{ + "sun": 0, + "mon": 1, + "tue": 2, + "wed": 3, + "thu": 4, + "fri": 5, + "sat": 6, + }} +) + +const ( + // Set the top bit if a star was included in the expression. 
+ starBit = 1 << 63 +) + +// Schedule time taks schedule +type Schedule struct { + Second uint64 + Minute uint64 + Hour uint64 + Day uint64 + Month uint64 + Week uint64 +} + +// TaskFunc task func type +type TaskFunc func() error + +// Tasker task interface +type Tasker interface { + GetSpec() string + GetStatus() string + Run() error + SetNext(time.Time) + GetNext() time.Time + SetPrev(time.Time) + GetPrev() time.Time +} + +// task error +type taskerr struct { + t time.Time + errinfo string +} + +// Task task struct +type Task struct { + Taskname string + Spec *Schedule + SpecStr string + DoFunc TaskFunc + Prev time.Time + Next time.Time + Errlist []*taskerr // like errtime:errinfo + ErrLimit int // max length for the errlist, 0 stand for no limit +} + +// NewTask add new task with name, time and func +func NewTask(tname string, spec string, f TaskFunc) *Task { + + task := &Task{ + Taskname: tname, + DoFunc: f, + ErrLimit: 100, + SpecStr: spec, + } + task.SetCron(spec) + return task +} + +// GetSpec get spec string +func (t *Task) GetSpec() string { + return t.SpecStr +} + +// GetStatus get current task status +func (t *Task) GetStatus() string { + var str string + for _, v := range t.Errlist { + str += v.t.String() + ":" + v.errinfo + "
" + } + return str +} + +// Run run all tasks +func (t *Task) Run() error { + err := t.DoFunc() + if err != nil { + if t.ErrLimit > 0 && t.ErrLimit > len(t.Errlist) { + t.Errlist = append(t.Errlist, &taskerr{t: t.Next, errinfo: err.Error()}) + } + } + return err +} + +// SetNext set next time for this task +func (t *Task) SetNext(now time.Time) { + t.Next = t.Spec.Next(now) +} + +// GetNext get the next call time of this task +func (t *Task) GetNext() time.Time { + return t.Next +} + +// SetPrev set prev time of this task +func (t *Task) SetPrev(now time.Time) { + t.Prev = now +} + +// GetPrev get prev time of this task +func (t *Task) GetPrev() time.Time { + return t.Prev +} + +// six columns mean: +// second:0-59 +// minute:0-59 +// hour:1-23 +// day:1-31 +// month:1-12 +// week:0-6(0 means Sunday) + +// SetCron some signals: +// *: any time +// ,:  separate signal +//   -:duration +// /n : do as n times of time duration +///////////////////////////////////////////////////////// +// 0/30 * * * * * every 30s +// 0 43 21 * * * 21:43 +// 0 15 05 * * *    05:15 +// 0 0 17 * * * 17:00 +// 0 0 17 * * 1 17:00 in every Monday +// 0 0,10 17 * * 0,2,3 17:00 and 17:10 in every Sunday, Tuesday and Wednesday +// 0 0-10 17 1 * * 17:00 to 17:10 in 1 min duration each time on the first day of month +// 0 0 0 1,15 * 1 0:00 on the 1st day and 15th day of month +// 0 42 4 1 * *     4:42 on the 1st day of month +// 0 0 21 * * 1-6   21:00 from Monday to Saturday +// 0 0,10,20,30,40,50 * * * *  every 10 min duration +// 0 */10 * * * *        every 10 min duration +// 0 * 1 * * *         1:00 to 1:59 in 1 min duration each time +// 0 0 1 * * *         1:00 +// 0 0 */1 * * *        0 min of hour in 1 hour duration +// 0 0 * * * *         0 min of hour in 1 hour duration +// 0 2 8-20/3 * * *       8:02, 11:02, 14:02, 17:02, 20:02 +// 0 30 5 1,15 * *       5:30 on the 1st day and 15th day of month +func (t *Task) SetCron(spec string) { + t.Spec = t.parse(spec) +} + +func (t *Task) 
parse(spec string) *Schedule { + if len(spec) > 0 && spec[0] == '@' { + return t.parseSpec(spec) + } + // Split on whitespace. We require 5 or 6 fields. + // (second) (minute) (hour) (day of month) (month) (day of week, optional) + fields := strings.Fields(spec) + if len(fields) != 5 && len(fields) != 6 { + log.Panicf("Expected 5 or 6 fields, found %d: %s", len(fields), spec) + } + + // If a sixth field is not provided (DayOfWeek), then it is equivalent to star. + if len(fields) == 5 { + fields = append(fields, "*") + } + + schedule := &Schedule{ + Second: getField(fields[0], seconds), + Minute: getField(fields[1], minutes), + Hour: getField(fields[2], hours), + Day: getField(fields[3], days), + Month: getField(fields[4], months), + Week: getField(fields[5], weeks), + } + + return schedule +} + +func (t *Task) parseSpec(spec string) *Schedule { + switch spec { + case "@yearly", "@annually": + return &Schedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Day: 1 << days.min, + Month: 1 << months.min, + Week: all(weeks), + } + + case "@monthly": + return &Schedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Day: 1 << days.min, + Month: all(months), + Week: all(weeks), + } + + case "@weekly": + return &Schedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Day: all(days), + Month: all(months), + Week: 1 << weeks.min, + } + + case "@daily", "@midnight": + return &Schedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Day: all(days), + Month: all(months), + Week: all(weeks), + } + + case "@hourly": + return &Schedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: all(hours), + Day: all(days), + Month: all(months), + Week: all(weeks), + } + } + log.Panicf("Unrecognized descriptor: %s", spec) + return nil +} + +// Next set schedule to next time +func (s *Schedule) Next(t time.Time) time.Time { + + // Start 
at the earliest possible time (the upcoming second). + t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) + + // This flag indicates whether a field has been incremented. + added := false + + // If no time is found within five years, return zero. + yearLimit := t.Year() + 5 + +WRAP: + if t.Year() > yearLimit { + return time.Time{} + } + + // Find the first applicable month. + // If it's this month, then do nothing. + for 1< 0 + dowMatch = 1< 0 + ) + + if s.Day&starBit > 0 || s.Week&starBit > 0 { + return domMatch && dowMatch + } + return domMatch || dowMatch +} + +// StartTask start all tasks +func StartTask() { + isstart = true + go run() +} + +func run() { + now := time.Now().Local() + for _, t := range AdminTaskList { + t.SetNext(now) + } + + for { + sortList := NewMapSorter(AdminTaskList) + sortList.Sort() + var effective time.Time + if len(AdminTaskList) == 0 || sortList.Vals[0].GetNext().IsZero() { + // If there are no entries yet, just sleep - it still handles new entries + // and stop requests. + effective = now.AddDate(10, 0, 0) + } else { + effective = sortList.Vals[0].GetNext() + } + select { + case now = <-time.After(effective.Sub(now)): + // Run every entry whose next time was this effective time. 
+ for _, e := range sortList.Vals { + if e.GetNext() != effective { + break + } + go e.Run() + e.SetPrev(e.GetNext()) + e.SetNext(effective) + } + continue + case <-changed: + continue + case <-stop: + return + } + } +} + +// StopTask stop all tasks +func StopTask() { + isstart = false + stop <- true +} + +// AddTask add task with name +func AddTask(taskname string, t Tasker) { + AdminTaskList[taskname] = t + if isstart { + changed <- true + } +} + +// DeleteTask delete task with name +func DeleteTask(taskname string) { + delete(AdminTaskList, taskname) + if isstart { + changed <- true + } +} + +// MapSorter sort map for tasker +type MapSorter struct { + Keys []string + Vals []Tasker +} + +// NewMapSorter create new tasker map +func NewMapSorter(m map[string]Tasker) *MapSorter { + ms := &MapSorter{ + Keys: make([]string, 0, len(m)), + Vals: make([]Tasker, 0, len(m)), + } + for k, v := range m { + ms.Keys = append(ms.Keys, k) + ms.Vals = append(ms.Vals, v) + } + return ms +} + +// Sort sort tasker map +func (ms *MapSorter) Sort() { + sort.Sort(ms) +} + +func (ms *MapSorter) Len() int { return len(ms.Keys) } +func (ms *MapSorter) Less(i, j int) bool { + if ms.Vals[i].GetNext().IsZero() { + return false + } + if ms.Vals[j].GetNext().IsZero() { + return true + } + return ms.Vals[i].GetNext().Before(ms.Vals[j].GetNext()) +} +func (ms *MapSorter) Swap(i, j int) { + ms.Vals[i], ms.Vals[j] = ms.Vals[j], ms.Vals[i] + ms.Keys[i], ms.Keys[j] = ms.Keys[j], ms.Keys[i] +} + +func getField(field string, r bounds) uint64 { + // list = range {"," range} + var bits uint64 + ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) + for _, expr := range ranges { + bits |= getRange(expr, r) + } + return bits +} + +// getRange returns the bits indicated by the given expression: +// number | number "-" number [ "/" number ] +func getRange(expr string, r bounds) uint64 { + + var ( + start, end, step uint + rangeAndStep = strings.Split(expr, "/") + lowAndHigh = 
// getRange returns the bits indicated by the given expression:
//   number | number "-" number [ "/" number ]
// A leading "*" or "?" selects the field's full range and additionally sets
// starBit, which the day-of-month/day-of-week matching logic (elsewhere in
// this file) uses to tell "*" apart from an explicit full range.
// Malformed expressions panic via log.Panicf — this is parse-time input
// validation of the cron spec, not a runtime error path.
func getRange(expr string, r bounds) uint64 {

	var (
		start, end, step uint
		rangeAndStep     = strings.Split(expr, "/")
		lowAndHigh       = strings.Split(rangeAndStep[0], "-")
		singleDigit      = len(lowAndHigh) == 1
	)

	var extrastar uint64
	if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
		start = r.min
		end = r.max
		extrastar = starBit
	} else {
		start = parseIntOrName(lowAndHigh[0], r.names)
		switch len(lowAndHigh) {
		case 1:
			// Plain "N": a single value unless a step widens it below.
			end = start
		case 2:
			end = parseIntOrName(lowAndHigh[1], r.names)
		default:
			log.Panicf("Too many hyphens: %s", expr)
		}
	}

	switch len(rangeAndStep) {
	case 1:
		step = 1
	case 2:
		step = mustParseInt(rangeAndStep[1])

		// Special handling: "N/step" means "N-max/step".
		if singleDigit {
			end = r.max
		}
	default:
		log.Panicf("Too many slashes: %s", expr)
	}

	// Bounds-check against the field's legal range before building bits.
	if start < r.min {
		log.Panicf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
	}
	if end > r.max {
		log.Panicf("End of range (%d) above maximum (%d): %s", end, r.max, expr)
	}
	if start > end {
		log.Panicf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
	}

	return getBits(start, end, step) | extrastar
}

// parseIntOrName returns the (possibly-named) integer contained in expr.
// Names (e.g. month/day abbreviations) are looked up case-insensitively.
func parseIntOrName(expr string, names map[string]uint) uint {
	if names != nil {
		if namedInt, ok := names[strings.ToLower(expr)]; ok {
			return namedInt
		}
	}
	return mustParseInt(expr)
}

// mustParseInt parses the given expression as a non-negative int or panics.
func mustParseInt(expr string) uint {
	num, err := strconv.Atoi(expr)
	if err != nil {
		log.Panicf("Failed to parse int from %s: %s", expr, err)
	}
	if num < 0 {
		log.Panicf("Negative number (%d) not allowed: %s", num, expr)
	}

	return uint(num)
}

// getBits sets all bits in the range [min, max], modulo the given step size.
func getBits(min, max, step uint) uint64 {
	var bits uint64

	// If step is 1, use shifts. Note: when max is 63, the shift count is 64,
	// which the Go spec defines to yield 0 for unsigned operands, so the
	// mask correctly becomes all-ones.
	if step == 1 {
		return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
	}

	// Else, use a simple loop.
	for i := min; i <= max; i += step {
		bits |= 1 << i
	}
	return bits
}

// all returns all bits within the given bounds. (plus the star bit)
func all(r bounds) uint64 {
	return getBits(r.min, r.max, 1) | starBit
}

// init allocates the global task registry and the scheduler's control
// channels before any caller can invoke StartTask/StopTask/AddTask.
func init() {
	AdminTaskList = make(map[string]Tasker)
	stop = make(chan bool)
	changed = make(chan bool)
}
// allowSuffixExt lists extensions that may be appended to a fixed route and
// still match it; the matched suffix is exposed as the ":ext" param.
var (
	allowSuffixExt = []string{".json", ".xml", ".html"}
)

// Tree has three elements: FixRouter/wildcard/leaves
// fixRouter stores Fixed Router
// wildcard stores params
// leaves store the endpoint information
type Tree struct {
	//prefix set for static router
	prefix string
	//search fix route first
	fixrouters []*Tree
	//if set, failure to match fixrouters search then search wildcard
	wildcard *Tree
	//if set, failure to match wildcard search
	leaves []*leafInfo
}

// NewTree return a new Tree
func NewTree() *Tree {
	return &Tree{}
}

// AddTree will add tree to the exist Tree
// prefix should has no params
func (t *Tree) AddTree(prefix string, tree *Tree) {
	t.addtree(splitPath(prefix), tree, nil, "")
}

// addtree grafts tree under t at the position described by segments,
// rewriting the grafted subtree's leaves (via filterTreeWithPrefix) so their
// wildcard lists and regexps also account for params occurring in the prefix.
func (t *Tree) addtree(segments []string, tree *Tree, wildcards []string, reg string) {
	if len(segments) == 0 {
		panic("prefix should has path")
	}
	seg := segments[0]
	iswild, params, regexpStr := splitSegment(seg)
	// if it's "?" the segment can be ignored, so add one more rule without it
	if len(params) > 0 && params[0] == ":" {
		params = params[1:]
		if len(segments[1:]) > 0 {
			t.addtree(segments[1:], tree, append(wildcards, params...), reg)
		} else {
			filterTreeWithPrefix(tree, wildcards, reg)
		}
	}
	//Rule: /login/*/access match /login/2009/11/access
	//if already has *, and when loop the access, should as a regexpStr
	if !iswild && utils.InSlice(":splat", wildcards) {
		iswild = true
		regexpStr = seg
	}
	//Rule: /user/:id/*
	if seg == "*" && len(wildcards) > 0 && reg == "" {
		regexpStr = "(.+)"
	}
	if len(segments) == 1 {
		// Last prefix segment: attach tree here, as the wildcard child or a
		// fixed-prefix child depending on the segment kind.
		if iswild {
			if regexpStr != "" {
				if reg == "" {
					// No accumulated regexp yet: prepend one capture group
					// per wildcard already collected on the path.
					rr := ""
					for _, w := range wildcards {
						if w == ":splat" {
							rr = rr + "(.+)/"
						} else {
							rr = rr + "([^/]+)/"
						}
					}
					regexpStr = rr + regexpStr
				} else {
					regexpStr = "/" + regexpStr
				}
			} else if reg != "" {
				if seg == "*.*" {
					regexpStr = "([^.]+).(.+)"
				} else {
					for _, w := range params {
						if w == "." || w == ":" {
							continue
						}
						regexpStr = "([^/]+)/" + regexpStr
					}
				}
			}
			reg = strings.Trim(reg+"/"+regexpStr, "/")
			filterTreeWithPrefix(tree, append(wildcards, params...), reg)
			t.wildcard = tree
		} else {
			reg = strings.Trim(reg+"/"+regexpStr, "/")
			filterTreeWithPrefix(tree, append(wildcards, params...), reg)
			tree.prefix = seg
			t.fixrouters = append(t.fixrouters, tree)
		}
		return
	}

	// More prefix segments remain: descend, creating intermediate nodes.
	if iswild {
		if t.wildcard == nil {
			t.wildcard = NewTree()
		}
		if regexpStr != "" {
			if reg == "" {
				rr := ""
				for _, w := range wildcards {
					if w == ":splat" {
						rr = rr + "(.+)/"
					} else {
						rr = rr + "([^/]+)/"
					}
				}
				regexpStr = rr + regexpStr
			} else {
				regexpStr = "/" + regexpStr
			}
		} else if reg != "" {
			if seg == "*.*" {
				regexpStr = "([^.]+).(.+)"
				params = params[1:]
			} else {
				for range params {
					regexpStr = "([^/]+)/" + regexpStr
				}
			}
		} else {
			if seg == "*.*" {
				params = params[1:]
			}
		}
		reg = strings.TrimRight(strings.TrimRight(reg, "/")+"/"+regexpStr, "/")
		t.wildcard.addtree(segments[1:], tree, append(wildcards, params...), reg)
	} else {
		subTree := NewTree()
		subTree.prefix = seg
		t.fixrouters = append(t.fixrouters, subTree)
		subTree.addtree(segments[1:], tree, append(wildcards, params...), reg)
	}
}

// filterTreeWithPrefix recursively prepends the prefix's wildcards (and, when
// reg is non-empty, the matching capture groups) to every leaf of t, so the
// grafted subtree's leaves match against the full joined path.
func filterTreeWithPrefix(t *Tree, wildcards []string, reg string) {
	for _, v := range t.fixrouters {
		filterTreeWithPrefix(v, wildcards, reg)
	}
	if t.wildcard != nil {
		filterTreeWithPrefix(t.wildcard, wildcards, reg)
	}
	for _, l := range t.leaves {
		if reg != "" {
			if l.regexps != nil {
				// Merge the prefix regexp in front of the leaf's own pattern.
				l.wildcards = append(wildcards, l.wildcards...)
				l.regexps = regexp.MustCompile("^" + reg + "/" + strings.Trim(l.regexps.String(), "^$") + "$")
			} else {
				for _, v := range l.wildcards {
					if v == ":splat" {
						reg = reg + "/(.+)"
					} else {
						reg = reg + "/([^/]+)"
					}
				}
				l.regexps = regexp.MustCompile("^" + reg + "$")
				l.wildcards = append(wildcards, l.wildcards...)
			}
		} else {
			l.wildcards = append(wildcards, l.wildcards...)
			if l.regexps != nil {
				for _, w := range wildcards {
					if w == ":splat" {
						reg = "(.+)/" + reg
					} else {
						reg = "([^/]+)/" + reg
					}
				}
				l.regexps = regexp.MustCompile("^" + reg + strings.Trim(l.regexps.String(), "^$") + "$")
			}
		}
	}
}

// AddRouter call addseg function
func (t *Tree) AddRouter(pattern string, runObject interface{}) {
	t.addseg(splitPath(pattern), runObject, nil, "")
}
meaning can igone this, so add one more rule for it + if len(params) > 0 && params[0] == ":" { + t.addseg(segments[1:], route, wildcards, reg) + params = params[1:] + } + //Rule: /login/*/access match /login/2009/11/access + //if already has *, and when loop the access, should as a regexpStr + if !iswild && utils.InSlice(":splat", wildcards) { + iswild = true + regexpStr = seg + } + //Rule: /user/:id/* + if seg == "*" && len(wildcards) > 0 && reg == "" { + regexpStr = "(.+)" + } + if iswild { + if t.wildcard == nil { + t.wildcard = NewTree() + } + if regexpStr != "" { + if reg == "" { + rr := "" + for _, w := range wildcards { + if w == ":splat" { + rr = rr + "(.+)/" + } else { + rr = rr + "([^/]+)/" + } + } + regexpStr = rr + regexpStr + } else { + regexpStr = "/" + regexpStr + } + } else if reg != "" { + if seg == "*.*" { + regexpStr = "/([^.]+).(.+)" + params = params[1:] + } else { + for range params { + regexpStr = "/([^/]+)" + regexpStr + } + } + } else { + if seg == "*.*" { + params = params[1:] + } + } + t.wildcard.addseg(segments[1:], route, append(wildcards, params...), reg+regexpStr) + } else { + var ok bool + var subTree *Tree + for _, subTree = range t.fixrouters { + if t.prefix == seg { + ok = true + break + } + } + if !ok { + subTree = NewTree() + subTree.prefix = seg + t.fixrouters = append(t.fixrouters, subTree) + } + subTree.addseg(segments[1:], route, wildcards, reg) + } + } +} + +// Match router to runObject & params +func (t *Tree) Match(pattern string, ctx *context.Context) (runObject interface{}) { + if len(pattern) == 0 || pattern[0] != '/' { + return nil + } + w := make([]string, 0, 20) + return t.match(pattern, w, ctx) +} + +func (t *Tree) match(pattern string, wildcardValues []string, ctx *context.Context) (runObject interface{}) { + if len(pattern) > 0 { + i := 0 + for ; i < len(pattern) && pattern[i] == '/'; i++ { + } + pattern = pattern[i:] + } + // Handle leaf nodes: + if len(pattern) == 0 { + for _, l := range t.leaves { + if ok := 
l.match(wildcardValues, ctx); ok { + return l.runObject + } + } + if t.wildcard != nil { + for _, l := range t.wildcard.leaves { + if ok := l.match(wildcardValues, ctx); ok { + return l.runObject + } + } + } + return nil + } + var seg string + i, l := 0, len(pattern) + for ; i < l && pattern[i] != '/'; i++ { + } + if i == 0 { + seg = pattern + pattern = "" + } else { + seg = pattern[:i] + pattern = pattern[i:] + } + for _, subTree := range t.fixrouters { + if subTree.prefix == seg { + runObject = subTree.match(pattern, wildcardValues, ctx) + if runObject != nil { + break + } + } + } + if runObject == nil && len(t.fixrouters) > 0 { + // Filter the .json .xml .html extension + for _, str := range allowSuffixExt { + if strings.HasSuffix(seg, str) { + for _, subTree := range t.fixrouters { + if subTree.prefix == seg[:len(seg)-len(str)] { + runObject = subTree.match(pattern, wildcardValues, ctx) + if runObject != nil { + ctx.Input.SetParam(":ext", str[1:]) + } + } + } + } + } + } + if runObject == nil && t.wildcard != nil { + runObject = t.wildcard.match(pattern, append(wildcardValues, seg), ctx) + } + + if runObject == nil && len(t.leaves) > 0 { + wildcardValues = append(wildcardValues, seg) + start, i := 0, 0 + for ; i < len(pattern); i++ { + if pattern[i] == '/' { + if i != 0 && start < len(pattern) { + wildcardValues = append(wildcardValues, pattern[start:i]) + } + start = i + 1 + continue + } + } + if start > 0 { + wildcardValues = append(wildcardValues, pattern[start:i]) + } + for _, l := range t.leaves { + if ok := l.match(wildcardValues, ctx); ok { + return l.runObject + } + } + } + return runObject +} + +type leafInfo struct { + // names of wildcards that lead to this leaf. 
// leafInfo describes one registered endpoint: the wildcard names that must be
// bound for it, an optional compiled regexp, and the handler object.
type leafInfo struct {
	// names of wildcards that lead to this leaf. eg, ["id" "name"] for the wildcard ":id" and ":name"
	wildcards []string

	// if the leaf is regexp
	regexps *regexp.Regexp

	runObject interface{}
}

// match reports whether the collected wildcard values satisfy this leaf and,
// on success, binds them into ctx.Input as request params.
func (leaf *leafInfo) match(wildcardValues []string, ctx *context.Context) (ok bool) {
	//fmt.Println("Leaf:", wildcardValues, leaf.wildcards, leaf.regexps)
	if leaf.regexps == nil {
		if len(wildcardValues) == 0 { // static path
			return true
		}
		// match *
		if len(leaf.wildcards) == 1 && leaf.wildcards[0] == ":splat" {
			ctx.Input.SetParam(":splat", path.Join(wildcardValues...))
			return true
		}
		// match *.* or :id
		if len(leaf.wildcards) >= 2 && leaf.wildcards[len(leaf.wildcards)-2] == ":path" && leaf.wildcards[len(leaf.wildcards)-1] == ":ext" {
			if len(leaf.wildcards) == 2 {
				// Pure "*.*": split the last segment on the first dot into
				// :path / :ext.
				lastone := wildcardValues[len(wildcardValues)-1]
				strs := strings.SplitN(lastone, ".", 2)
				if len(strs) == 2 {
					ctx.Input.SetParam(":ext", strs[1])
				}
				ctx.Input.SetParam(":path", path.Join(path.Join(wildcardValues[:len(wildcardValues)-1]...), strs[0]))
				return true
			} else if len(wildcardValues) < 2 {
				return false
			}
			// Named wildcards followed by "*.*": bind the leading params
			// positionally, then split the last segment as above.
			var index int
			for index = 0; index < len(leaf.wildcards)-2; index++ {
				ctx.Input.SetParam(leaf.wildcards[index], wildcardValues[index])
			}
			lastone := wildcardValues[len(wildcardValues)-1]
			strs := strings.SplitN(lastone, ".", 2)
			if len(strs) == 2 {
				ctx.Input.SetParam(":ext", strs[1])
			}
			ctx.Input.SetParam(":path", path.Join(path.Join(wildcardValues[index:len(wildcardValues)-1]...), strs[0]))
			return true
		}
		// match :id — requires exactly one value per wildcard.
		if len(leaf.wildcards) != len(wildcardValues) {
			return false
		}
		for j, v := range leaf.wildcards {
			ctx.Input.SetParam(v, wildcardValues[j])
		}
		return true
	}

	// Regexp leaf: the joined values must match, and each capture group binds
	// the corresponding wildcard name.
	if !leaf.regexps.MatchString(path.Join(wildcardValues...)) {
		return false
	}
	matches := leaf.regexps.FindStringSubmatch(path.Join(wildcardValues...))
	for i, match := range matches[1:] {
		ctx.Input.SetParam(leaf.wildcards[i], match)
	}
	return true
}
// splitPath breaks a route pattern into its path segments, discarding at
// most one empty element produced by a leading slash and one produced by a
// trailing slash (interior empty segments from "//" are preserved).
//
// "/" -> []
// "/admin" -> ["admin"]
// "/admin/" -> ["admin"]
// "/admin/users" -> ["admin", "users"]
func splitPath(key string) []string {
	if len(key) == 0 {
		return []string{}
	}
	parts := strings.Split(key, "/")
	if parts[0] == "" {
		parts = parts[1:]
	}
	if last := len(parts) - 1; parts[last] == "" {
		parts = parts[:last]
	}
	return parts
}
// splitSegment scans one route segment and reports (iswild, params, regexp):
// whether the segment contains wildcards, the param names it binds, and the
// regexp text it expands to.
//
// "admin" -> false, nil, ""
// ":id" -> true, [:id], ""
// "?:id" -> true, [: :id], "" : meaning can empty
// ":id:int" -> true, [:id], ([0-9]+)
// ":name:string" -> true, [:name], ([\w]+)
// ":id([0-9]+)" -> true, [:id], ([0-9]+)
// ":id([0-9]+)_:name" -> true, [:id :name], ([0-9]+)_(.+)
// "cms_:id_:page.html" -> true, [:id_ :page], cms_(.+)(.+).html
// "cms_:id(.+)_:page.html" -> true, [:id :page], cms_(.+)_(.+).html
// "*" -> true, [:splat], ""
// "*.*" -> true,[. :path :ext], "" . meaning separator
func splitSegment(key string) (bool, []string, string) {
	if strings.HasPrefix(key, "*") {
		if key == "*.*" {
			return true, []string{".", ":path", ":ext"}, ""
		}
		return true, []string{":splat"}, ""
	}
	if strings.ContainsAny(key, ":") {
		// Hand-rolled scanner state:
		//   start    — inside a ":name" param
		//   startexp — inside a "(...)" regexp group
		//   skipnum  — runes still to skip after consuming ":int"/":string"
		var paramsNum int
		var out []rune
		var start bool
		var startexp bool
		var param []rune
		var expt []rune
		var skipnum int
		params := []string{}
		// NOTE(review): compiled per call; could live at package level, but
		// left as-is to keep behavior and ordering identical.
		reg := regexp.MustCompile(`[a-zA-Z0-9_]+`)
		for i, v := range key {
			if skipnum > 0 {
				skipnum--
				continue
			}
			if start {
				//:id:int and :name:string
				if v == ':' {
					if len(key) >= i+4 {
						if key[i+1:i+4] == "int" {
							out = append(out, []rune("([0-9]+)")...)
							params = append(params, ":"+string(param))
							start = false
							startexp = false
							skipnum = 3
							param = make([]rune, 0)
							paramsNum++
							continue
						}
					}
					if len(key) >= i+7 {
						if key[i+1:i+7] == "string" {
							out = append(out, []rune(`([\w]+)`)...)
							params = append(params, ":"+string(param))
							paramsNum++
							start = false
							startexp = false
							skipnum = 6
							param = make([]rune, 0)
							continue
						}
					}
				}
				// params only support a-zA-Z0-9
				if reg.MatchString(string(v)) {
					param = append(param, v)
					continue
				}
				if v != '(' {
					// Param ended without an explicit regexp: default to (.+).
					out = append(out, []rune(`(.+)`)...)
					params = append(params, ":"+string(param))
					param = make([]rune, 0)
					paramsNum++
					start = false
					startexp = false
				}
			}
			if startexp {
				if v != ')' {
					expt = append(expt, v)
					continue
				}
			}
			if v == ':' {
				param = make([]rune, 0)
				start = true
			} else if v == '(' {
				startexp = true
				start = false
				params = append(params, ":"+string(param))
				paramsNum++
				expt = make([]rune, 0)
				expt = append(expt, '(')
			} else if v == ')' {
				startexp = false
				expt = append(expt, ')')
				out = append(out, expt...)
				param = make([]rune, 0)
			} else if v == '?' {
				// "?" marks the segment optional; recorded as a bare ":".
				params = append(params, ":")
			} else {
				out = append(out, v)
			}
		}
		// Flush a trailing param left open at end of segment.
		if len(param) > 0 {
			if paramsNum > 0 {
				out = append(out, []rune(`(.+)`)...)
			}
			params = append(params, ":"+string(param))
		}
		return true, params, string(out)
	}
	return false, nil, ""
}
+// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "reflect" + "runtime" +) + +// GetFuncName get function name +func GetFuncName(i interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/LICENSE b/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/LICENSE new file mode 100644 index 000000000..0ad73ae0e --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2011-2014 Dmitry Chestnykh + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/README.md b/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/README.md new file mode 100644 index 000000000..dbc2026b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/README.md @@ -0,0 +1,45 @@ +# Captcha + +an example for use captcha + +``` +package controllers + +import ( + "github.com/astaxie/beego" + "github.com/astaxie/beego/cache" + "github.com/astaxie/beego/utils/captcha" +) + +var cpt *captcha.Captcha + +func init() { + // use beego cache system store the captcha data + store := cache.NewMemoryCache() + cpt = captcha.NewWithFilter("/captcha/", store) +} + +type MainController struct { + beego.Controller +} + +func (this *MainController) Get() { + this.TplName = "index.tpl" +} + +func (this *MainController) Post() { + this.TplName = "index.tpl" + + this.Data["Success"] = cpt.VerifyReq(this.Ctx.Request) +} +``` + +template usage + +``` +{{.Success}} +
+ {{create_captcha}} + +
+``` diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/captcha.go b/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/captcha.go new file mode 100644 index 000000000..1a4a6edcb --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/captcha.go @@ -0,0 +1,269 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package captcha implements generation and verification of image CAPTCHAs. +// an example for use captcha +// +// ``` +// package controllers +// +// import ( +// "github.com/astaxie/beego" +// "github.com/astaxie/beego/cache" +// "github.com/astaxie/beego/utils/captcha" +// ) +// +// var cpt *captcha.Captcha +// +// func init() { +// // use beego cache system store the captcha data +// store := cache.NewMemoryCache() +// cpt = captcha.NewWithFilter("/captcha/", store) +// } +// +// type MainController struct { +// beego.Controller +// } +// +// func (this *MainController) Get() { +// this.TplName = "index.tpl" +// } +// +// func (this *MainController) Post() { +// this.TplName = "index.tpl" +// +// this.Data["Success"] = cpt.VerifyReq(this.Ctx.Request) +// } +// ``` +// +// template usage +// +// ``` +// {{.Success}} +//
+// {{create_captcha}} +// +//
+// ``` +package captcha + +import ( + "fmt" + "html/template" + "net/http" + "path" + "strings" + "time" + + "github.com/astaxie/beego" + "github.com/astaxie/beego/cache" + "github.com/astaxie/beego/context" + "github.com/astaxie/beego/utils" +) + +var ( + defaultChars = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} +) + +const ( + // default captcha attributes + challengeNums = 6 + expiration = 600 * time.Second + fieldIDName = "captcha_id" + fieldCaptchaName = "captcha" + cachePrefix = "captcha_" + defaultURLPrefix = "/captcha/" +) + +// Captcha struct +type Captcha struct { + // beego cache store + store cache.Cache + + // url prefix for captcha image + URLPrefix string + + // specify captcha id input field name + FieldIDName string + // specify captcha result input field name + FieldCaptchaName string + + // captcha image width and height + StdWidth int + StdHeight int + + // captcha chars nums + ChallengeNums int + + // captcha expiration seconds + Expiration time.Duration + + // cache key prefix + CachePrefix string +} + +// generate key string +func (c *Captcha) key(id string) string { + return c.CachePrefix + id +} + +// generate rand chars with default chars +func (c *Captcha) genRandChars() []byte { + return utils.RandomCreateBytes(c.ChallengeNums, defaultChars...) 
+} + +// Handler beego filter handler for serve captcha image +func (c *Captcha) Handler(ctx *context.Context) { + var chars []byte + + id := path.Base(ctx.Request.RequestURI) + if i := strings.Index(id, "."); i != -1 { + id = id[:i] + } + + key := c.key(id) + + if len(ctx.Input.Query("reload")) > 0 { + chars = c.genRandChars() + if err := c.store.Put(key, chars, c.Expiration); err != nil { + ctx.Output.SetStatus(500) + ctx.WriteString("captcha reload error") + beego.Error("Reload Create Captcha Error:", err) + return + } + } else { + if v, ok := c.store.Get(key).([]byte); ok { + chars = v + } else { + ctx.Output.SetStatus(404) + ctx.WriteString("captcha not found") + return + } + } + + img := NewImage(chars, c.StdWidth, c.StdHeight) + if _, err := img.WriteTo(ctx.ResponseWriter); err != nil { + beego.Error("Write Captcha Image Error:", err) + } +} + +// CreateCaptchaHTML template func for output html +func (c *Captcha) CreateCaptchaHTML() template.HTML { + value, err := c.CreateCaptcha() + if err != nil { + beego.Error("Create Captcha Error:", err) + return "" + } + + // create html + return template.HTML(fmt.Sprintf(``+ + ``+ + ``+ + ``, c.FieldIDName, value, c.URLPrefix, value, c.URLPrefix, value)) +} + +// CreateCaptcha create a new captcha id +func (c *Captcha) CreateCaptcha() (string, error) { + // generate captcha id + id := string(utils.RandomCreateBytes(15)) + + // get the captcha chars + chars := c.genRandChars() + + // save to store + if err := c.store.Put(c.key(id), chars, c.Expiration); err != nil { + return "", err + } + + return id, nil +} + +// VerifyReq verify from a request +func (c *Captcha) VerifyReq(req *http.Request) bool { + req.ParseForm() + return c.Verify(req.Form.Get(c.FieldIDName), req.Form.Get(c.FieldCaptchaName)) +} + +// Verify direct verify id and challenge string +func (c *Captcha) Verify(id string, challenge string) (success bool) { + if len(challenge) == 0 || len(id) == 0 { + return + } + + var chars []byte + + key := c.key(id) + 
+ if v, ok := c.store.Get(key).([]byte); ok { + chars = v + } else { + return + } + + defer func() { + // finally remove it + c.store.Delete(key) + }() + + if len(chars) != len(challenge) { + return + } + // verify challenge + for i, c := range chars { + if c != challenge[i]-48 { + return + } + } + + return true +} + +// NewCaptcha create a new captcha.Captcha +func NewCaptcha(urlPrefix string, store cache.Cache) *Captcha { + cpt := &Captcha{} + cpt.store = store + cpt.FieldIDName = fieldIDName + cpt.FieldCaptchaName = fieldCaptchaName + cpt.ChallengeNums = challengeNums + cpt.Expiration = expiration + cpt.CachePrefix = cachePrefix + cpt.StdWidth = stdWidth + cpt.StdHeight = stdHeight + + if len(urlPrefix) == 0 { + urlPrefix = defaultURLPrefix + } + + if urlPrefix[len(urlPrefix)-1] != '/' { + urlPrefix += "/" + } + + cpt.URLPrefix = urlPrefix + + return cpt +} + +// NewWithFilter create a new captcha.Captcha and auto AddFilter for serve captacha image +// and add a template func for output html +func NewWithFilter(urlPrefix string, store cache.Cache) *Captcha { + cpt := NewCaptcha(urlPrefix, store) + + // create filter for serve captcha image + beego.InsertFilter(cpt.URLPrefix+"*", beego.BeforeRouter, cpt.Handler) + + // add to template func map + beego.AddFuncMap("create_captcha", cpt.CreateCaptchaHTML) + + return cpt +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/image.go b/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/image.go new file mode 100644 index 000000000..1057192aa --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/image.go @@ -0,0 +1,498 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package captcha + +import ( + "bytes" + "image" + "image/color" + "image/png" + "io" + "math" +) + +const ( + fontWidth = 11 + fontHeight = 18 + blackChar = 1 + + // Standard width and height of a captcha image. + stdWidth = 240 + stdHeight = 80 + // Maximum absolute skew factor of a single digit. + maxSkew = 0.7 + // Number of background circles. + circleCount = 20 +) + +var font = [][]byte{ + { // 0 + 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, + 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, + 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, + }, + { // 1 + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 
0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + }, + { // 2 + 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, + 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + }, + { // 3 + 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, + }, + { // 4 + 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, + 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, + 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, + 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 
0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, + }, + { // 5 + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, + }, + { // 6 + 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, + 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, + 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, + 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, + 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, + 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, + }, + { // 7 + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, + }, + { // 8 + 0, 0, 0, 
1, 1, 1, 1, 1, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, + 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, + 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, + 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, + 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, + 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, + }, + { // 9 + 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, + 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, + 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, + 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, + 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, + 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, + }, +} + +// Image struct +type Image struct { + *image.Paletted + numWidth int + numHeight int + dotSize int +} + +var prng = &siprng{} + +// randIntn returns a pseudorandom non-negative int in range [0, n). +func randIntn(n int) int { + return prng.Intn(n) +} + +// randInt returns a pseudorandom int in range [from, to]. +func randInt(from, to int) int { + return prng.Intn(to+1-from) + from +} + +// randFloat returns a pseudorandom float64 in range [from, to]. +func randFloat(from, to float64) float64 { + return (to-from)*prng.Float64() + from +} + +func randomPalette() color.Palette { + p := make([]color.Color, circleCount+1) + // Transparent color. + p[0] = color.RGBA{0xFF, 0xFF, 0xFF, 0x00} + // Primary color. 
+ prim := color.RGBA{ + uint8(randIntn(129)), + uint8(randIntn(129)), + uint8(randIntn(129)), + 0xFF, + } + p[1] = prim + // Circle colors. + for i := 2; i <= circleCount; i++ { + p[i] = randomBrightness(prim, 255) + } + return p +} + +// NewImage returns a new captcha image of the given width and height with the +// given digits, where each digit must be in range 0-9. +func NewImage(digits []byte, width, height int) *Image { + m := new(Image) + m.Paletted = image.NewPaletted(image.Rect(0, 0, width, height), randomPalette()) + m.calculateSizes(width, height, len(digits)) + // Randomly position captcha inside the image. + maxx := width - (m.numWidth+m.dotSize)*len(digits) - m.dotSize + maxy := height - m.numHeight - m.dotSize*2 + var border int + if width > height { + border = height / 5 + } else { + border = width / 5 + } + x := randInt(border, maxx-border) + y := randInt(border, maxy-border) + // Draw digits. + for _, n := range digits { + m.drawDigit(font[n], x, y) + x += m.numWidth + m.dotSize + } + // Draw strike-through line. + m.strikeThrough() + // Apply wave distortion. + m.distort(randFloat(5, 10), randFloat(100, 200)) + // Fill image with random circles. + m.fillWithCircles(circleCount, m.dotSize) + return m +} + +// encodedPNG encodes an image to PNG and returns +// the result as a byte slice. +func (m *Image) encodedPNG() []byte { + var buf bytes.Buffer + if err := png.Encode(&buf, m.Paletted); err != nil { + panic(err.Error()) + } + return buf.Bytes() +} + +// WriteTo writes captcha image in PNG format into the given writer. +func (m *Image) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(m.encodedPNG()) + return int64(n), err +} + +func (m *Image) calculateSizes(width, height, ncount int) { + // Goal: fit all digits inside the image. + var border int + if width > height { + border = height / 4 + } else { + border = width / 4 + } + // Convert everything to floats for calculations. 
+ w := float64(width - border*2) + h := float64(height - border*2) + // fw takes into account 1-dot spacing between digits. + fw := float64(fontWidth + 1) + fh := float64(fontHeight) + nc := float64(ncount) + // Calculate the width of a single digit taking into account only the + // width of the image. + nw := w / nc + // Calculate the height of a digit from this width. + nh := nw * fh / fw + // Digit too high? + if nh > h { + // Fit digits based on height. + nh = h + nw = fw / fh * nh + } + // Calculate dot size. + m.dotSize = int(nh / fh) + // Save everything, making the actual width smaller by 1 dot to account + // for spacing between digits. + m.numWidth = int(nw) - m.dotSize + m.numHeight = int(nh) +} + +func (m *Image) drawHorizLine(fromX, toX, y int, colorIdx uint8) { + for x := fromX; x <= toX; x++ { + m.SetColorIndex(x, y, colorIdx) + } +} + +func (m *Image) drawCircle(x, y, radius int, colorIdx uint8) { + f := 1 - radius + dfx := 1 + dfy := -2 * radius + xo := 0 + yo := radius + + m.SetColorIndex(x, y+radius, colorIdx) + m.SetColorIndex(x, y-radius, colorIdx) + m.drawHorizLine(x-radius, x+radius, y, colorIdx) + + for xo < yo { + if f >= 0 { + yo-- + dfy += 2 + f += dfy + } + xo++ + dfx += 2 + f += dfx + m.drawHorizLine(x-xo, x+xo, y+yo, colorIdx) + m.drawHorizLine(x-xo, x+xo, y-yo, colorIdx) + m.drawHorizLine(x-yo, x+yo, y+xo, colorIdx) + m.drawHorizLine(x-yo, x+yo, y-xo, colorIdx) + } +} + +func (m *Image) fillWithCircles(n, maxradius int) { + maxx := m.Bounds().Max.X + maxy := m.Bounds().Max.Y + for i := 0; i < n; i++ { + colorIdx := uint8(randInt(1, circleCount-1)) + r := randInt(1, maxradius) + m.drawCircle(randInt(r, maxx-r), randInt(r, maxy-r), r, colorIdx) + } +} + +func (m *Image) strikeThrough() { + maxx := m.Bounds().Max.X + maxy := m.Bounds().Max.Y + y := randInt(maxy/3, maxy-maxy/3) + amplitude := randFloat(5, 20) + period := randFloat(80, 180) + dx := 2.0 * math.Pi / period + for x := 0; x < maxx; x++ { + xo := amplitude * 
math.Cos(float64(y)*dx) + yo := amplitude * math.Sin(float64(x)*dx) + for yn := 0; yn < m.dotSize; yn++ { + r := randInt(0, m.dotSize) + m.drawCircle(x+int(xo), y+int(yo)+(yn*m.dotSize), r/2, 1) + } + } +} + +func (m *Image) drawDigit(digit []byte, x, y int) { + skf := randFloat(-maxSkew, maxSkew) + xs := float64(x) + r := m.dotSize / 2 + y += randInt(-r, r) + for yo := 0; yo < fontHeight; yo++ { + for xo := 0; xo < fontWidth; xo++ { + if digit[yo*fontWidth+xo] != blackChar { + continue + } + m.drawCircle(x+xo*m.dotSize, y+yo*m.dotSize, r, 1) + } + xs += skf + x = int(xs) + } +} + +func (m *Image) distort(amplude float64, period float64) { + w := m.Bounds().Max.X + h := m.Bounds().Max.Y + + oldm := m.Paletted + newm := image.NewPaletted(image.Rect(0, 0, w, h), oldm.Palette) + + dx := 2.0 * math.Pi / period + for x := 0; x < w; x++ { + for y := 0; y < h; y++ { + xo := amplude * math.Sin(float64(y)*dx) + yo := amplude * math.Cos(float64(x)*dx) + newm.SetColorIndex(x, y, oldm.ColorIndexAt(x+int(xo), y+int(yo))) + } + } + m.Paletted = newm +} + +func randomBrightness(c color.RGBA, max uint8) color.RGBA { + minc := min3(c.R, c.G, c.B) + maxc := max3(c.R, c.G, c.B) + if maxc > max { + return c + } + n := randIntn(int(max-maxc)) - int(minc) + return color.RGBA{ + uint8(int(c.R) + n), + uint8(int(c.G) + n), + uint8(int(c.B) + n), + uint8(c.A), + } +} + +func min3(x, y, z uint8) (m uint8) { + m = x + if y < m { + m = y + } + if z < m { + m = z + } + return +} + +func max3(x, y, z uint8) (m uint8) { + m = x + if y > m { + m = y + } + if z > m { + m = z + } + return +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/siprng.go b/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/siprng.go new file mode 100644 index 000000000..5e256cf93 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/captcha/siprng.go @@ -0,0 +1,277 @@ +// Copyright 2014 beego Author. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package captcha + +import ( + "crypto/rand" + "encoding/binary" + "io" + "sync" +) + +// siprng is PRNG based on SipHash-2-4. +type siprng struct { + mu sync.Mutex + k0, k1, ctr uint64 +} + +// siphash implements SipHash-2-4, accepting a uint64 as a message. +func siphash(k0, k1, m uint64) uint64 { + // Initialization. + v0 := k0 ^ 0x736f6d6570736575 + v1 := k1 ^ 0x646f72616e646f6d + v2 := k0 ^ 0x6c7967656e657261 + v3 := k1 ^ 0x7465646279746573 + t := uint64(8) << 56 + + // Compression. + v3 ^= m + + // Round 1. + v0 += v1 + v1 = v1<<13 | v1>>(64-13) + v1 ^= v0 + v0 = v0<<32 | v0>>(64-32) + + v2 += v3 + v3 = v3<<16 | v3>>(64-16) + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>(64-21) + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>(64-17) + v1 ^= v2 + v2 = v2<<32 | v2>>(64-32) + + // Round 2. + v0 += v1 + v1 = v1<<13 | v1>>(64-13) + v1 ^= v0 + v0 = v0<<32 | v0>>(64-32) + + v2 += v3 + v3 = v3<<16 | v3>>(64-16) + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>(64-21) + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>(64-17) + v1 ^= v2 + v2 = v2<<32 | v2>>(64-32) + + v0 ^= m + + // Compress last block. + v3 ^= t + + // Round 1. + v0 += v1 + v1 = v1<<13 | v1>>(64-13) + v1 ^= v0 + v0 = v0<<32 | v0>>(64-32) + + v2 += v3 + v3 = v3<<16 | v3>>(64-16) + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>(64-21) + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>(64-17) + v1 ^= v2 + v2 = v2<<32 | v2>>(64-32) + + // Round 2. 
+ v0 += v1 + v1 = v1<<13 | v1>>(64-13) + v1 ^= v0 + v0 = v0<<32 | v0>>(64-32) + + v2 += v3 + v3 = v3<<16 | v3>>(64-16) + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>(64-21) + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>(64-17) + v1 ^= v2 + v2 = v2<<32 | v2>>(64-32) + + v0 ^= t + + // Finalization. + v2 ^= 0xff + + // Round 1. + v0 += v1 + v1 = v1<<13 | v1>>(64-13) + v1 ^= v0 + v0 = v0<<32 | v0>>(64-32) + + v2 += v3 + v3 = v3<<16 | v3>>(64-16) + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>(64-21) + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>(64-17) + v1 ^= v2 + v2 = v2<<32 | v2>>(64-32) + + // Round 2. + v0 += v1 + v1 = v1<<13 | v1>>(64-13) + v1 ^= v0 + v0 = v0<<32 | v0>>(64-32) + + v2 += v3 + v3 = v3<<16 | v3>>(64-16) + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>(64-21) + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>(64-17) + v1 ^= v2 + v2 = v2<<32 | v2>>(64-32) + + // Round 3. + v0 += v1 + v1 = v1<<13 | v1>>(64-13) + v1 ^= v0 + v0 = v0<<32 | v0>>(64-32) + + v2 += v3 + v3 = v3<<16 | v3>>(64-16) + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>(64-21) + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>(64-17) + v1 ^= v2 + v2 = v2<<32 | v2>>(64-32) + + // Round 4. + v0 += v1 + v1 = v1<<13 | v1>>(64-13) + v1 ^= v0 + v0 = v0<<32 | v0>>(64-32) + + v2 += v3 + v3 = v3<<16 | v3>>(64-16) + v3 ^= v2 + + v0 += v3 + v3 = v3<<21 | v3>>(64-21) + v3 ^= v0 + + v2 += v1 + v1 = v1<<17 | v1>>(64-17) + v1 ^= v2 + v2 = v2<<32 | v2>>(64-32) + + return v0 ^ v1 ^ v2 ^ v3 +} + +// rekey sets a new PRNG key, which is read from crypto/rand. +func (p *siprng) rekey() { + var k [16]byte + if _, err := io.ReadFull(rand.Reader, k[:]); err != nil { + panic(err.Error()) + } + p.k0 = binary.LittleEndian.Uint64(k[0:8]) + p.k1 = binary.LittleEndian.Uint64(k[8:16]) + p.ctr = 1 +} + +// Uint64 returns a new pseudorandom uint64. +// It rekeys PRNG on the first call and every 64 MB of generated data. 
+func (p *siprng) Uint64() uint64 { + p.mu.Lock() + if p.ctr == 0 || p.ctr > 8*1024*1024 { + p.rekey() + } + v := siphash(p.k0, p.k1, p.ctr) + p.ctr++ + p.mu.Unlock() + return v +} + +func (p *siprng) Int63() int64 { + return int64(p.Uint64() & 0x7fffffffffffffff) +} + +func (p *siprng) Uint32() uint32 { + return uint32(p.Uint64()) +} + +func (p *siprng) Int31() int32 { + return int32(p.Uint32() & 0x7fffffff) +} + +func (p *siprng) Intn(n int) int { + if n <= 0 { + panic("invalid argument to Intn") + } + if n <= 1<<31-1 { + return int(p.Int31n(int32(n))) + } + return int(p.Int63n(int64(n))) +} + +func (p *siprng) Int63n(n int64) int64 { + if n <= 0 { + panic("invalid argument to Int63n") + } + max := int64((1 << 63) - 1 - (1<<63)%uint64(n)) + v := p.Int63() + for v > max { + v = p.Int63() + } + return v % n +} + +func (p *siprng) Int31n(n int32) int32 { + if n <= 0 { + panic("invalid argument to Int31n") + } + max := int32((1 << 31) - 1 - (1<<31)%uint32(n)) + v := p.Int31() + for v > max { + v = p.Int31() + } + return v % n +} + +func (p *siprng) Float64() float64 { return float64(p.Int63()) / (1 << 63) } diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/debug.go b/Godeps/_workspace/src/github.com/astaxie/beego/utils/debug.go new file mode 100644 index 000000000..93c27b70d --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/debug.go @@ -0,0 +1,478 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "bytes" + "fmt" + "log" + "reflect" + "runtime" +) + +var ( + dunno = []byte("???") + centerDot = []byte("·") + dot = []byte(".") +) + +type pointerInfo struct { + prev *pointerInfo + n int + addr uintptr + pos int + used []int +} + +// Display print the data in console +func Display(data ...interface{}) { + display(true, data...) +} + +// GetDisplayString return data print string +func GetDisplayString(data ...interface{}) string { + return display(false, data...) +} + +func display(displayed bool, data ...interface{}) string { + var pc, file, line, ok = runtime.Caller(2) + + if !ok { + return "" + } + + var buf = new(bytes.Buffer) + + fmt.Fprintf(buf, "[Debug] at %s() [%s:%d]\n", function(pc), file, line) + + fmt.Fprintf(buf, "\n[Variables]\n") + + for i := 0; i < len(data); i += 2 { + var output = fomateinfo(len(data[i].(string))+3, data[i+1]) + fmt.Fprintf(buf, "%s = %s", data[i], output) + } + + if displayed { + log.Print(buf) + } + return buf.String() +} + +// return data dump and format bytes +func fomateinfo(headlen int, data ...interface{}) []byte { + var buf = new(bytes.Buffer) + + if len(data) > 1 { + fmt.Fprint(buf, " ") + + fmt.Fprint(buf, "[") + + fmt.Fprintln(buf) + } + + for k, v := range data { + var buf2 = new(bytes.Buffer) + var pointers *pointerInfo + var interfaces = make([]reflect.Value, 0, 10) + + printKeyValue(buf2, reflect.ValueOf(v), &pointers, &interfaces, nil, true, " ", 1) + + if k < len(data)-1 { + fmt.Fprint(buf2, ", ") + } + + fmt.Fprintln(buf2) + + buf.Write(buf2.Bytes()) + } + + if len(data) > 1 { + fmt.Fprintln(buf) + + fmt.Fprint(buf, " ") + + fmt.Fprint(buf, "]") + } + + return buf.Bytes() +} + +// check data is golang basic type +func isSimpleType(val reflect.Value, kind reflect.Kind, pointers **pointerInfo, interfaces *[]reflect.Value) bool { + switch kind { + case reflect.Bool: + return 
true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.Complex64, reflect.Complex128: + return true + case reflect.String: + return true + case reflect.Chan: + return true + case reflect.Invalid: + return true + case reflect.Interface: + for _, in := range *interfaces { + if reflect.DeepEqual(in, val) { + return true + } + } + return false + case reflect.UnsafePointer: + if val.IsNil() { + return true + } + + var elem = val.Elem() + + if isSimpleType(elem, elem.Kind(), pointers, interfaces) { + return true + } + + var addr = val.Elem().UnsafeAddr() + + for p := *pointers; p != nil; p = p.prev { + if addr == p.addr { + return true + } + } + + return false + } + + return false +} + +// dump value +func printKeyValue(buf *bytes.Buffer, val reflect.Value, pointers **pointerInfo, interfaces *[]reflect.Value, structFilter func(string, string) bool, formatOutput bool, indent string, level int) { + var t = val.Kind() + + switch t { + case reflect.Bool: + fmt.Fprint(buf, val.Bool()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fmt.Fprint(buf, val.Int()) + case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64: + fmt.Fprint(buf, val.Uint()) + case reflect.Float32, reflect.Float64: + fmt.Fprint(buf, val.Float()) + case reflect.Complex64, reflect.Complex128: + fmt.Fprint(buf, val.Complex()) + case reflect.UnsafePointer: + fmt.Fprintf(buf, "unsafe.Pointer(0x%X)", val.Pointer()) + case reflect.Ptr: + if val.IsNil() { + fmt.Fprint(buf, "nil") + return + } + + var addr = val.Elem().UnsafeAddr() + + for p := *pointers; p != nil; p = p.prev { + if addr == p.addr { + p.used = append(p.used, buf.Len()) + fmt.Fprintf(buf, "0x%X", addr) + return + } + } + + *pointers = &pointerInfo{ + prev: *pointers, + addr: addr, + 
pos: buf.Len(), + used: make([]int, 0), + } + + fmt.Fprint(buf, "&") + + printKeyValue(buf, val.Elem(), pointers, interfaces, structFilter, formatOutput, indent, level) + case reflect.String: + fmt.Fprint(buf, "\"", val.String(), "\"") + case reflect.Interface: + var value = val.Elem() + + if !value.IsValid() { + fmt.Fprint(buf, "nil") + } else { + for _, in := range *interfaces { + if reflect.DeepEqual(in, val) { + fmt.Fprint(buf, "repeat") + return + } + } + + *interfaces = append(*interfaces, val) + + printKeyValue(buf, value, pointers, interfaces, structFilter, formatOutput, indent, level+1) + } + case reflect.Struct: + var t = val.Type() + + fmt.Fprint(buf, t) + fmt.Fprint(buf, "{") + + for i := 0; i < val.NumField(); i++ { + if formatOutput { + fmt.Fprintln(buf) + } else { + fmt.Fprint(buf, " ") + } + + var name = t.Field(i).Name + + if formatOutput { + for ind := 0; ind < level; ind++ { + fmt.Fprint(buf, indent) + } + } + + fmt.Fprint(buf, name) + fmt.Fprint(buf, ": ") + + if structFilter != nil && structFilter(t.String(), name) { + fmt.Fprint(buf, "ignore") + } else { + printKeyValue(buf, val.Field(i), pointers, interfaces, structFilter, formatOutput, indent, level+1) + } + + fmt.Fprint(buf, ",") + } + + if formatOutput { + fmt.Fprintln(buf) + + for ind := 0; ind < level-1; ind++ { + fmt.Fprint(buf, indent) + } + } else { + fmt.Fprint(buf, " ") + } + + fmt.Fprint(buf, "}") + case reflect.Array, reflect.Slice: + fmt.Fprint(buf, val.Type()) + fmt.Fprint(buf, "{") + + var allSimple = true + + for i := 0; i < val.Len(); i++ { + var elem = val.Index(i) + + var isSimple = isSimpleType(elem, elem.Kind(), pointers, interfaces) + + if !isSimple { + allSimple = false + } + + if formatOutput && !isSimple { + fmt.Fprintln(buf) + } else { + fmt.Fprint(buf, " ") + } + + if formatOutput && !isSimple { + for ind := 0; ind < level; ind++ { + fmt.Fprint(buf, indent) + } + } + + printKeyValue(buf, elem, pointers, interfaces, structFilter, formatOutput, indent, level+1) + + if 
i != val.Len()-1 || !allSimple { + fmt.Fprint(buf, ",") + } + } + + if formatOutput && !allSimple { + fmt.Fprintln(buf) + + for ind := 0; ind < level-1; ind++ { + fmt.Fprint(buf, indent) + } + } else { + fmt.Fprint(buf, " ") + } + + fmt.Fprint(buf, "}") + case reflect.Map: + var t = val.Type() + var keys = val.MapKeys() + + fmt.Fprint(buf, t) + fmt.Fprint(buf, "{") + + var allSimple = true + + for i := 0; i < len(keys); i++ { + var elem = val.MapIndex(keys[i]) + + var isSimple = isSimpleType(elem, elem.Kind(), pointers, interfaces) + + if !isSimple { + allSimple = false + } + + if formatOutput && !isSimple { + fmt.Fprintln(buf) + } else { + fmt.Fprint(buf, " ") + } + + if formatOutput && !isSimple { + for ind := 0; ind <= level; ind++ { + fmt.Fprint(buf, indent) + } + } + + printKeyValue(buf, keys[i], pointers, interfaces, structFilter, formatOutput, indent, level+1) + fmt.Fprint(buf, ": ") + printKeyValue(buf, elem, pointers, interfaces, structFilter, formatOutput, indent, level+1) + + if i != val.Len()-1 || !allSimple { + fmt.Fprint(buf, ",") + } + } + + if formatOutput && !allSimple { + fmt.Fprintln(buf) + + for ind := 0; ind < level-1; ind++ { + fmt.Fprint(buf, indent) + } + } else { + fmt.Fprint(buf, " ") + } + + fmt.Fprint(buf, "}") + case reflect.Chan: + fmt.Fprint(buf, val.Type()) + case reflect.Invalid: + fmt.Fprint(buf, "invalid") + default: + fmt.Fprint(buf, "unknow") + } +} + +// PrintPointerInfo dump pointer value +func PrintPointerInfo(buf *bytes.Buffer, headlen int, pointers *pointerInfo) { + var anyused = false + var pointerNum = 0 + + for p := pointers; p != nil; p = p.prev { + if len(p.used) > 0 { + anyused = true + } + pointerNum++ + p.n = pointerNum + } + + if anyused { + var pointerBufs = make([][]rune, pointerNum+1) + + for i := 0; i < len(pointerBufs); i++ { + var pointerBuf = make([]rune, buf.Len()+headlen) + + for j := 0; j < len(pointerBuf); j++ { + pointerBuf[j] = ' ' + } + + pointerBufs[i] = pointerBuf + } + + for pn := 0; pn <= 
pointerNum; pn++ { + for p := pointers; p != nil; p = p.prev { + if len(p.used) > 0 && p.n >= pn { + if pn == p.n { + pointerBufs[pn][p.pos+headlen] = '└' + + var maxpos = 0 + + for i, pos := range p.used { + if i < len(p.used)-1 { + pointerBufs[pn][pos+headlen] = '┴' + } else { + pointerBufs[pn][pos+headlen] = '┘' + } + + maxpos = pos + } + + for i := 0; i < maxpos-p.pos-1; i++ { + if pointerBufs[pn][i+p.pos+headlen+1] == ' ' { + pointerBufs[pn][i+p.pos+headlen+1] = '─' + } + } + } else { + pointerBufs[pn][p.pos+headlen] = '│' + + for _, pos := range p.used { + if pointerBufs[pn][pos+headlen] == ' ' { + pointerBufs[pn][pos+headlen] = '│' + } else { + pointerBufs[pn][pos+headlen] = '┼' + } + } + } + } + } + + buf.WriteString(string(pointerBufs[pn]) + "\n") + } + } +} + +// Stack get stack bytes +func Stack(skip int, indent string) []byte { + var buf = new(bytes.Buffer) + + for i := skip; ; i++ { + var pc, file, line, ok = runtime.Caller(i) + + if !ok { + break + } + + buf.WriteString(indent) + + fmt.Fprintf(buf, "at %s() [%s:%d]\n", function(pc), file, line) + } + + return buf.Bytes() +} + +// return the name of the function containing the PC if possible, +func function(pc uintptr) []byte { + fn := runtime.FuncForPC(pc) + if fn == nil { + return dunno + } + name := []byte(fn.Name()) + // The name includes the path name to the package, which is unnecessary + // since the file name is already included. Plus, it has center dots. + // That is, we see + // runtime/debug.*T·ptrmethod + // and want + // *T.ptrmethod + if period := bytes.Index(name, dot); period >= 0 { + name = name[period+1:] + } + name = bytes.Replace(name, centerDot, dot, -1) + return name +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/file.go b/Godeps/_workspace/src/github.com/astaxie/beego/utils/file.go new file mode 100644 index 000000000..db1978828 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/file.go @@ -0,0 +1,101 @@ +// Copyright 2014 beego Author. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "bufio" + "errors" + "io" + "os" + "path/filepath" + "regexp" +) + +// SelfPath gets compiled executable file absolute path +func SelfPath() string { + path, _ := filepath.Abs(os.Args[0]) + return path +} + +// SelfDir gets compiled executable file directory +func SelfDir() string { + return filepath.Dir(SelfPath()) +} + +// FileExists reports whether the named file or directory exists. +func FileExists(name string) bool { + if _, err := os.Stat(name); err != nil { + if os.IsNotExist(err) { + return false + } + } + return true +} + +// SearchFile Search a file in paths. 
+// this is often used in search config file in /etc ~/ +func SearchFile(filename string, paths ...string) (fullpath string, err error) { + for _, path := range paths { + if fullpath = filepath.Join(path, filename); FileExists(fullpath) { + return + } + } + err = errors.New(fullpath + " not found in paths") + return +} + +// GrepFile like command grep -E +// for example: GrepFile(`^hello`, "hello.txt") +// \n is striped while read +func GrepFile(patten string, filename string) (lines []string, err error) { + re, err := regexp.Compile(patten) + if err != nil { + return + } + + fd, err := os.Open(filename) + if err != nil { + return + } + lines = make([]string, 0) + reader := bufio.NewReader(fd) + prefix := "" + isLongLine := false + for { + byteLine, isPrefix, er := reader.ReadLine() + if er != nil && er != io.EOF { + return nil, er + } + if er == io.EOF { + break + } + line := string(byteLine) + if isPrefix { + prefix += line + continue + } else { + isLongLine = true + } + + line = prefix + line + if isLongLine { + prefix = "" + } + if re.MatchString(line) { + lines = append(lines, line) + } + } + return lines, nil +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/mail.go b/Godeps/_workspace/src/github.com/astaxie/beego/utils/mail.go new file mode 100644 index 000000000..1d80a0391 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/mail.go @@ -0,0 +1,344 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "mime" + "mime/multipart" + "net/mail" + "net/smtp" + "net/textproto" + "os" + "path" + "path/filepath" + "strconv" + "strings" +) + +const ( + maxLineLength = 76 +) + +// Email is the type used for email messages +type Email struct { + Auth smtp.Auth + Identity string `json:"identity"` + Username string `json:"username"` + Password string `json:"password"` + Host string `json:"host"` + Port int `json:"port"` + From string `json:"from"` + To []string + Bcc []string + Cc []string + Subject string + Text string // Plaintext message (optional) + HTML string // Html message (optional) + Headers textproto.MIMEHeader + Attachments []*Attachment + ReadReceipt []string +} + +// Attachment is a struct representing an email attachment. +// Based on the mime/multipart.FileHeader struct, Attachment contains the name, MIMEHeader, and content of the attachment in question +type Attachment struct { + Filename string + Header textproto.MIMEHeader + Content []byte +} + +// NewEMail create new Email struct with config json. +// config json is followed from Email struct fields. 
+func NewEMail(config string) *Email { + e := new(Email) + e.Headers = textproto.MIMEHeader{} + err := json.Unmarshal([]byte(config), e) + if err != nil { + return nil + } + if e.From == "" { + e.From = e.Username + } + return e +} + +// Bytes Make all send information to byte +func (e *Email) Bytes() ([]byte, error) { + buff := &bytes.Buffer{} + w := multipart.NewWriter(buff) + // Set the appropriate headers (overwriting any conflicts) + // Leave out Bcc (only included in envelope headers) + e.Headers.Set("To", strings.Join(e.To, ",")) + if e.Cc != nil { + e.Headers.Set("Cc", strings.Join(e.Cc, ",")) + } + e.Headers.Set("From", e.From) + e.Headers.Set("Subject", e.Subject) + if len(e.ReadReceipt) != 0 { + e.Headers.Set("Disposition-Notification-To", strings.Join(e.ReadReceipt, ",")) + } + e.Headers.Set("MIME-Version", "1.0") + + // Write the envelope headers (including any custom headers) + if err := headerToBytes(buff, e.Headers); err != nil { + return nil, fmt.Errorf("Failed to render message headers: %s", err) + } + + e.Headers.Set("Content-Type", fmt.Sprintf("multipart/mixed;\r\n boundary=%s\r\n", w.Boundary())) + fmt.Fprintf(buff, "%s:", "Content-Type") + fmt.Fprintf(buff, " %s\r\n", fmt.Sprintf("multipart/mixed;\r\n boundary=%s\r\n", w.Boundary())) + + // Start the multipart/mixed part + fmt.Fprintf(buff, "--%s\r\n", w.Boundary()) + header := textproto.MIMEHeader{} + // Check to see if there is a Text or HTML field + if e.Text != "" || e.HTML != "" { + subWriter := multipart.NewWriter(buff) + // Create the multipart alternative part + header.Set("Content-Type", fmt.Sprintf("multipart/alternative;\r\n boundary=%s\r\n", subWriter.Boundary())) + // Write the header + if err := headerToBytes(buff, header); err != nil { + return nil, fmt.Errorf("Failed to render multipart message headers: %s", err) + } + // Create the body sections + if e.Text != "" { + header.Set("Content-Type", fmt.Sprintf("text/plain; charset=UTF-8")) + header.Set("Content-Transfer-Encoding", 
"quoted-printable") + if _, err := subWriter.CreatePart(header); err != nil { + return nil, err + } + // Write the text + if err := quotePrintEncode(buff, e.Text); err != nil { + return nil, err + } + } + if e.HTML != "" { + header.Set("Content-Type", fmt.Sprintf("text/html; charset=UTF-8")) + header.Set("Content-Transfer-Encoding", "quoted-printable") + if _, err := subWriter.CreatePart(header); err != nil { + return nil, err + } + // Write the text + if err := quotePrintEncode(buff, e.HTML); err != nil { + return nil, err + } + } + if err := subWriter.Close(); err != nil { + return nil, err + } + } + // Create attachment part, if necessary + for _, a := range e.Attachments { + ap, err := w.CreatePart(a.Header) + if err != nil { + return nil, err + } + // Write the base64Wrapped content to the part + base64Wrap(ap, a.Content) + } + if err := w.Close(); err != nil { + return nil, err + } + return buff.Bytes(), nil +} + +// AttachFile Add attach file to the send mail +func (e *Email) AttachFile(args ...string) (a *Attachment, err error) { + if len(args) < 1 && len(args) > 2 { + err = errors.New("Must specify a file name and number of parameters can not exceed at least two") + return + } + filename := args[0] + id := "" + if len(args) > 1 { + id = args[1] + } + f, err := os.Open(filename) + if err != nil { + return + } + ct := mime.TypeByExtension(filepath.Ext(filename)) + basename := path.Base(filename) + return e.Attach(f, basename, ct, id) +} + +// Attach is used to attach content from an io.Reader to the email. +// Parameters include an io.Reader, the desired filename for the attachment, and the Content-Type. 
+func (e *Email) Attach(r io.Reader, filename string, args ...string) (a *Attachment, err error) { + if len(args) < 1 && len(args) > 2 { + err = errors.New("Must specify the file type and number of parameters can not exceed at least two") + return + } + c := args[0] //Content-Type + id := "" + if len(args) > 1 { + id = args[1] //Content-ID + } + var buffer bytes.Buffer + if _, err = io.Copy(&buffer, r); err != nil { + return + } + at := &Attachment{ + Filename: filename, + Header: textproto.MIMEHeader{}, + Content: buffer.Bytes(), + } + // Get the Content-Type to be used in the MIMEHeader + if c != "" { + at.Header.Set("Content-Type", c) + } else { + // If the Content-Type is blank, set the Content-Type to "application/octet-stream" + at.Header.Set("Content-Type", "application/octet-stream") + } + if id != "" { + at.Header.Set("Content-Disposition", fmt.Sprintf("inline;\r\n filename=\"%s\"", filename)) + at.Header.Set("Content-ID", fmt.Sprintf("<%s>", id)) + } else { + at.Header.Set("Content-Disposition", fmt.Sprintf("attachment;\r\n filename=\"%s\"", filename)) + } + at.Header.Set("Content-Transfer-Encoding", "base64") + e.Attachments = append(e.Attachments, at) + return at, nil +} + +// Send will send out the mail +func (e *Email) Send() error { + if e.Auth == nil { + e.Auth = smtp.PlainAuth(e.Identity, e.Username, e.Password, e.Host) + } + // Merge the To, Cc, and Bcc fields + to := make([]string, 0, len(e.To)+len(e.Cc)+len(e.Bcc)) + to = append(append(append(to, e.To...), e.Cc...), e.Bcc...) 
+ // Check to make sure there is at least one recipient and one "From" address + if e.From == "" || len(to) == 0 { + return errors.New("Must specify at least one From address and one To address") + } + from, err := mail.ParseAddress(e.From) + if err != nil { + return err + } + e.From = from.String() + raw, err := e.Bytes() + if err != nil { + return err + } + return smtp.SendMail(e.Host+":"+strconv.Itoa(e.Port), e.Auth, from.Address, to, raw) +} + +// quotePrintEncode writes the quoted-printable text to the IO Writer (according to RFC 2045) +func quotePrintEncode(w io.Writer, s string) error { + var buf [3]byte + mc := 0 + for i := 0; i < len(s); i++ { + c := s[i] + // We're assuming Unix style text formats as input (LF line break), and + // quoted-printble uses CRLF line breaks. (Literal CRs will become + // "=0D", but probably shouldn't be there to begin with!) + if c == '\n' { + io.WriteString(w, "\r\n") + mc = 0 + continue + } + + var nextOut []byte + if isPrintable(c) { + nextOut = append(buf[:0], c) + } else { + nextOut = buf[:] + qpEscape(nextOut, c) + } + + // Add a soft line break if the next (encoded) byte would push this line + // to or past the limit. + if mc+len(nextOut) >= maxLineLength { + if _, err := io.WriteString(w, "=\r\n"); err != nil { + return err + } + mc = 0 + } + + if _, err := w.Write(nextOut); err != nil { + return err + } + mc += len(nextOut) + } + // No trailing end-of-line?? Soft line break, then. TODO: is this sane? + if mc > 0 { + io.WriteString(w, "=\r\n") + } + return nil +} + +// isPrintable returns true if the rune given is "printable" according to RFC 2045, false otherwise +func isPrintable(c byte) bool { + return (c >= '!' && c <= '<') || (c >= '>' && c <= '~') || (c == ' ' || c == '\n' || c == '\t') +} + +// qpEscape is a helper function for quotePrintEncode which escapes a +// non-printable byte. Expects len(dest) == 3. 
+func qpEscape(dest []byte, c byte) { + const nums = "0123456789ABCDEF" + dest[0] = '=' + dest[1] = nums[(c&0xf0)>>4] + dest[2] = nums[(c & 0xf)] +} + +// headerToBytes enumerates the key and values in the header, and writes the results to the IO Writer +func headerToBytes(w io.Writer, t textproto.MIMEHeader) error { + for k, v := range t { + // Write the header key + _, err := fmt.Fprintf(w, "%s:", k) + if err != nil { + return err + } + // Write each value in the header + for _, c := range v { + _, err := fmt.Fprintf(w, " %s\r\n", c) + if err != nil { + return err + } + } + } + return nil +} + +// base64Wrap encodes the attachment content, and wraps it according to RFC 2045 standards (every 76 chars) +// The output is then written to the specified io.Writer +func base64Wrap(w io.Writer, b []byte) { + // 57 raw bytes per 76-byte base64 line. + const maxRaw = 57 + // Buffer for each line, including trailing CRLF. + var buffer [maxLineLength + len("\r\n")]byte + copy(buffer[maxLineLength:], "\r\n") + // Process raw chunks until there's no longer enough to fill a line. + for len(b) >= maxRaw { + base64.StdEncoding.Encode(buffer[:], b[:maxRaw]) + w.Write(buffer[:]) + b = b[maxRaw:] + } + // Handle the last chunk of bytes. + if len(b) > 0 { + out := buffer[:base64.StdEncoding.EncodedLen(len(b))] + base64.StdEncoding.Encode(out, b) + out = append(out, "\r\n"...) + w.Write(out) + } +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/controller.go b/Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/controller.go new file mode 100644 index 000000000..1d99cac5c --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/controller.go @@ -0,0 +1,26 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pagination + +import ( + "github.com/astaxie/beego/context" +) + +// SetPaginator Instantiates a Paginator and assigns it to context.Input.Data["paginator"]. +func SetPaginator(context *context.Context, per int, nums int64) (paginator *Paginator) { + paginator = NewPaginator(context.Request, per, nums) + context.Input.SetData("paginator", &paginator) + return +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/doc.go b/Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/doc.go new file mode 100644 index 000000000..9abc6d782 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/doc.go @@ -0,0 +1,58 @@ +/* +Package pagination provides utilities to setup a paginator within the +context of a http request. 

Usage

In your beego.Controller:

	package controllers

	import "github.com/astaxie/beego/utils/pagination"

	type PostsController struct {
		beego.Controller
	}

	func (this *PostsController) ListAllPosts() {
		// sets this.Data["paginator"] with the current offset (from the url query param)
		postsPerPage := 20
		paginator := pagination.SetPaginator(this.Ctx, postsPerPage, CountPosts())

		// fetch the next 20 posts
		this.Data["posts"] = ListPostsByOffsetAndLimit(paginator.Offset(), postsPerPage)
	}


In your view templates:

	{{if .paginator.HasPages}}
	{{end}}

See also

http://beego.me/docs/mvc/view/page.md

*/
package pagination
diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/paginator.go b/Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/paginator.go
new file mode 100644
index 000000000..c6db31e08
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/paginator.go
@@ -0,0 +1,189 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pagination

import (
	"math"
	"net/http"
	"net/url"
	"strconv"
)

// Paginator within the state of a http request.
type Paginator struct {
	Request     *http.Request // request the page index ("p" query param) is read from
	PerPageNums int           // number of items per page
	MaxPages    int           // optional cap on the total page count (0 = no cap)

	// Lazily computed, cached state (zero means "not yet computed").
	nums      int64 // total number of items
	pageRange []int // cached result of Pages()
	pageNums  int   // cached result of PageNums()
	page      int   // cached current page (1-based)
}

// PageNums Returns the total number of pages.
func (p *Paginator) PageNums() int {
	// Return the cached value when already computed (0 means "not yet").
	if p.pageNums != 0 {
		return p.pageNums
	}
	pageNums := math.Ceil(float64(p.nums) / float64(p.PerPageNums))
	if p.MaxPages > 0 {
		pageNums = math.Min(pageNums, float64(p.MaxPages))
	}
	p.pageNums = int(pageNums)
	return p.pageNums
}

// Nums Returns the total number of items (e.g. from doing SQL count).
func (p *Paginator) Nums() int64 {
	return p.nums
}

// SetNums Sets the total number of items.
// Any numeric type is accepted; a conversion failure is silently ignored
// and leaves nums at zero.
func (p *Paginator) SetNums(nums interface{}) {
	p.nums, _ = toInt64(nums)
}

// Page Returns the current page.
// The index is read from the "p" form/query parameter, clamped to
// [1, PageNums()], and cached for subsequent calls.
func (p *Paginator) Page() int {
	if p.page != 0 {
		return p.page
	}
	if p.Request.Form == nil {
		p.Request.ParseForm()
	}
	p.page, _ = strconv.Atoi(p.Request.Form.Get("p"))
	if p.page > p.PageNums() {
		p.page = p.PageNums()
	}
	if p.page <= 0 {
		p.page = 1
	}
	return p.page
}

// Pages Returns a list of all pages.
// At most 9 page indices are returned; when there are more than 9 pages
// in total the window is positioned around the current page.
//
// Usage (in a view template):
//
//	{{range $index, $page := .paginator.Pages}}
//	  {{$page}}
//	{{end}}
func (p *Paginator) Pages() []int {
	if p.pageRange == nil && p.nums > 0 {
		var pages []int
		pageNums := p.PageNums()
		page := p.Page()
		switch {
		case page >= pageNums-4 && pageNums > 9:
			// Near the end: show the last 9 pages.
			start := pageNums - 9 + 1
			pages = make([]int, 9)
			for i := range pages {
				pages[i] = start + i
			}
		case page >= 5 && pageNums > 9:
			// In the middle: 4 pages either side of the current one.
			// (Here page >= 5, so page+5 >= 10 and the Min always yields 9.)
			start := page - 5 + 1
			pages = make([]int, int(math.Min(9, float64(page+4+1))))
			for i := range pages {
				pages[i] = start + i
			}
		default:
			// 9 pages or fewer in total, or near the start: count from page 1.
			pages = make([]int, int(math.Min(9, float64(pageNums))))
			for i := range pages {
				pages[i] = i + 1
			}
		}
		p.pageRange = pages
	}
	return p.pageRange
}

// PageLink Returns URL for a given page index.
+func (p *Paginator) PageLink(page int) string { + link, _ := url.ParseRequestURI(p.Request.URL.String()) + values := link.Query() + if page == 1 { + values.Del("p") + } else { + values.Set("p", strconv.Itoa(page)) + } + link.RawQuery = values.Encode() + return link.String() +} + +// PageLinkPrev Returns URL to the previous page. +func (p *Paginator) PageLinkPrev() (link string) { + if p.HasPrev() { + link = p.PageLink(p.Page() - 1) + } + return +} + +// PageLinkNext Returns URL to the next page. +func (p *Paginator) PageLinkNext() (link string) { + if p.HasNext() { + link = p.PageLink(p.Page() + 1) + } + return +} + +// PageLinkFirst Returns URL to the first page. +func (p *Paginator) PageLinkFirst() (link string) { + return p.PageLink(1) +} + +// PageLinkLast Returns URL to the last page. +func (p *Paginator) PageLinkLast() (link string) { + return p.PageLink(p.PageNums()) +} + +// HasPrev Returns true if the current page has a predecessor. +func (p *Paginator) HasPrev() bool { + return p.Page() > 1 +} + +// HasNext Returns true if the current page has a successor. +func (p *Paginator) HasNext() bool { + return p.Page() < p.PageNums() +} + +// IsActive Returns true if the given page index points to the current page. +func (p *Paginator) IsActive(page int) bool { + return p.Page() == page +} + +// Offset Returns the current offset. +func (p *Paginator) Offset() int { + return (p.Page() - 1) * p.PerPageNums +} + +// HasPages Returns true if there is more than one page. +func (p *Paginator) HasPages() bool { + return p.PageNums() > 1 +} + +// NewPaginator Instantiates a paginator struct for the current http request. 
func NewPaginator(req *http.Request, per int, nums interface{}) *Paginator {
	p := Paginator{}
	p.Request = req
	// Fall back to a sane default page size when given a non-positive one.
	if per <= 0 {
		per = 10
	}
	p.PerPageNums = per
	p.SetNums(nums)
	return &p
}
diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/utils.go b/Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/utils.go
new file mode 100644
index 000000000..686e68b0d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/pagination/utils.go
@@ -0,0 +1,34 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package pagination

import (
	"fmt"
	"reflect"
)

// toInt64 converts any numeric value to int64.
// A non-numeric value yields a zero result and a descriptive error.
func toInt64(value interface{}) (d int64, err error) {
	val := reflect.ValueOf(value)
	switch value.(type) {
	case int, int8, int16, int32, int64:
		d = val.Int()
	case uint, uint8, uint16, uint32, uint64:
		d = int64(val.Uint())
	default:
		err = fmt.Errorf("ToInt64 need numeric not `%T`", value)
	}
	return
}
diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/rand.go b/Godeps/_workspace/src/github.com/astaxie/beego/utils/rand.go
new file mode 100644
index 000000000..74bb41210
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/rand.go
@@ -0,0 +1,48 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package utils

import (
	"crypto/rand"
	r "math/rand"
	"time"
)

// RandomCreateBytes generate random []byte by specify chars.
// n is the length of the returned slice; alphabets, when given, is the set
// of candidate bytes (defaults to [0-9A-Za-z]). crypto/rand is preferred;
// math/rand is used as a fallback when the entropy source fails.
// NOTE(review): selecting via b % len(alphabet) has a slight modulo bias
// unless the alphabet size divides 256 — fine for tokens, not for keys.
func RandomCreateBytes(n int, alphabets ...byte) []byte {
	const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
	var bytes = make([]byte, n)
	var randby bool
	// Fall back to math/rand only if crypto/rand cannot fill the buffer.
	if num, err := rand.Read(bytes); num != n || err != nil {
		r.Seed(time.Now().UnixNano())
		randby = true
	}
	for i, b := range bytes {
		if len(alphabets) == 0 {
			if randby {
				bytes[i] = alphanum[r.Intn(len(alphanum))]
			} else {
				bytes[i] = alphanum[b%byte(len(alphanum))]
			}
		} else {
			if randby {
				bytes[i] = alphabets[r.Intn(len(alphabets))]
			} else {
				bytes[i] = alphabets[b%byte(len(alphabets))]
			}
		}
	}
	return bytes
}
diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/safemap.go b/Godeps/_workspace/src/github.com/astaxie/beego/utils/safemap.go
new file mode 100644
index 000000000..2e438f2c3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/safemap.go
@@ -0,0 +1,86 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "sync" +) + +// BeeMap is a map with lock +type BeeMap struct { + lock *sync.RWMutex + bm map[interface{}]interface{} +} + +// NewBeeMap return new safemap +func NewBeeMap() *BeeMap { + return &BeeMap{ + lock: new(sync.RWMutex), + bm: make(map[interface{}]interface{}), + } +} + +// Get from maps return the k's value +func (m *BeeMap) Get(k interface{}) interface{} { + m.lock.RLock() + defer m.lock.RUnlock() + if val, ok := m.bm[k]; ok { + return val + } + return nil +} + +// Set Maps the given key and value. Returns false +// if the key is already in the map and changes nothing. +func (m *BeeMap) Set(k interface{}, v interface{}) bool { + m.lock.Lock() + defer m.lock.Unlock() + if val, ok := m.bm[k]; !ok { + m.bm[k] = v + } else if val != v { + m.bm[k] = v + } else { + return false + } + return true +} + +// Check Returns true if k is exist in the map. +func (m *BeeMap) Check(k interface{}) bool { + m.lock.RLock() + defer m.lock.RUnlock() + if _, ok := m.bm[k]; !ok { + return false + } + return true +} + +// Delete the given key and value. +func (m *BeeMap) Delete(k interface{}) { + m.lock.Lock() + defer m.lock.Unlock() + delete(m.bm, k) +} + +// Items returns all items in safemap. 
+func (m *BeeMap) Items() map[interface{}]interface{} { + m.lock.RLock() + defer m.lock.RUnlock() + r := make(map[interface{}]interface{}) + for k, v := range m.bm { + r[k] = v + } + return r +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/utils/slice.go b/Godeps/_workspace/src/github.com/astaxie/beego/utils/slice.go new file mode 100644 index 000000000..8f2cef980 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/utils/slice.go @@ -0,0 +1,170 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "math/rand" + "time" +) + +type reducetype func(interface{}) interface{} +type filtertype func(interface{}) bool + +// InSlice checks given string in string slice or not. +func InSlice(v string, sl []string) bool { + for _, vv := range sl { + if vv == v { + return true + } + } + return false +} + +// InSliceIface checks given interface in interface slice. +func InSliceIface(v interface{}, sl []interface{}) bool { + for _, vv := range sl { + if vv == v { + return true + } + } + return false +} + +// SliceRandList generate an int slice from min to max. 
+func SliceRandList(min, max int) []int { + if max < min { + min, max = max, min + } + length := max - min + 1 + t0 := time.Now() + rand.Seed(int64(t0.Nanosecond())) + list := rand.Perm(length) + for index := range list { + list[index] += min + } + return list +} + +// SliceMerge merges interface slices to one slice. +func SliceMerge(slice1, slice2 []interface{}) (c []interface{}) { + c = append(slice1, slice2...) + return +} + +// SliceReduce generates a new slice after parsing every value by reduce function +func SliceReduce(slice []interface{}, a reducetype) (dslice []interface{}) { + for _, v := range slice { + dslice = append(dslice, a(v)) + } + return +} + +// SliceRand returns random one from slice. +func SliceRand(a []interface{}) (b interface{}) { + randnum := rand.Intn(len(a)) + b = a[randnum] + return +} + +// SliceSum sums all values in int64 slice. +func SliceSum(intslice []int64) (sum int64) { + for _, v := range intslice { + sum += v + } + return +} + +// SliceFilter generates a new slice after filter function. +func SliceFilter(slice []interface{}, a filtertype) (ftslice []interface{}) { + for _, v := range slice { + if a(v) { + ftslice = append(ftslice, v) + } + } + return +} + +// SliceDiff returns diff slice of slice1 - slice2. +func SliceDiff(slice1, slice2 []interface{}) (diffslice []interface{}) { + for _, v := range slice1 { + if !InSliceIface(v, slice2) { + diffslice = append(diffslice, v) + } + } + return +} + +// SliceIntersect returns slice that are present in all the slice1 and slice2. +func SliceIntersect(slice1, slice2 []interface{}) (diffslice []interface{}) { + for _, v := range slice1 { + if InSliceIface(v, slice2) { + diffslice = append(diffslice, v) + } + } + return +} + +// SliceChunk separates one slice to some sized slice. 
+func SliceChunk(slice []interface{}, size int) (chunkslice [][]interface{}) { + if size >= len(slice) { + chunkslice = append(chunkslice, slice) + return + } + end := size + for i := 0; i <= (len(slice) - size); i += size { + chunkslice = append(chunkslice, slice[i:end]) + end += size + } + return +} + +// SliceRange generates a new slice from begin to end with step duration of int64 number. +func SliceRange(start, end, step int64) (intslice []int64) { + for i := start; i <= end; i += step { + intslice = append(intslice, i) + } + return +} + +// SlicePad prepends size number of val into slice. +func SlicePad(slice []interface{}, size int, val interface{}) []interface{} { + if size <= len(slice) { + return slice + } + for i := 0; i < (size - len(slice)); i++ { + slice = append(slice, val) + } + return slice +} + +// SliceUnique cleans repeated values in slice. +func SliceUnique(slice []interface{}) (uniqueslice []interface{}) { + for _, v := range slice { + if !InSliceIface(v, uniqueslice) { + uniqueslice = append(uniqueslice, v) + } + } + return +} + +// SliceShuffle shuffles a slice. +func SliceShuffle(slice []interface{}) []interface{} { + for i := 0; i < len(slice); i++ { + a := rand.Intn(len(slice)) + b := rand.Intn(len(slice)) + slice[a], slice[b] = slice[b], slice[a] + } + return slice +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/validation/README.md b/Godeps/_workspace/src/github.com/astaxie/beego/validation/README.md new file mode 100644 index 000000000..5c3212b0c --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/validation/README.md @@ -0,0 +1,144 @@ +validation +============== + +validation is a form validation for a data validation and error collecting using Go. 
+ +## Installation and tests + +Install: + + go get github.com/astaxie/beego/validation + +Test: + + go test github.com/astaxie/beego/validation + +## Example + +Direct Use: + + import ( + "github.com/astaxie/beego/validation" + "log" + ) + + type User struct { + Name string + Age int + } + + func main() { + u := User{"man", 40} + valid := validation.Validation{} + valid.Required(u.Name, "name") + valid.MaxSize(u.Name, 15, "nameMax") + valid.Range(u.Age, 0, 140, "age") + if valid.HasErrors() { + // validation does not pass + // print invalid message + for _, err := range valid.Errors { + log.Println(err.Key, err.Message) + } + } + // or use like this + if v := valid.Max(u.Age, 140, "ageMax"); !v.Ok { + log.Println(v.Error.Key, v.Error.Message) + } + } + +Struct Tag Use: + + import ( + "github.com/astaxie/beego/validation" + ) + + // validation function follow with "valid" tag + // functions divide with ";" + // parameters in parentheses "()" and divide with "," + // Match function's pattern string must in "//" + type user struct { + Id int + Name string `valid:"Required;Match(/^(test)?\\w*@;com$/)"` + Age int `valid:"Required;Range(1, 140)"` + } + + func main() { + valid := validation.Validation{} + u := user{Name: "test", Age: 40} + b, err := valid.Valid(u) + if err != nil { + // handle error + } + if !b { + // validation does not pass + // blabla... + } + } + +Use custom function: + + import ( + "github.com/astaxie/beego/validation" + ) + + type user struct { + Id int + Name string `valid:"Required;IsMe"` + Age int `valid:"Required;Range(1, 140)"` + } + + func IsMe(v *validation.Validation, obj interface{}, key string) { + name, ok:= obj.(string) + if !ok { + // wrong use case? 
+ return + } + + if name != "me" { + // valid false + v.SetError("Name", "is not me!") + } + } + + func main() { + valid := validation.Validation{} + if err := validation.AddCustomFunc("IsMe", IsMe); err != nil { + // hadle error + } + u := user{Name: "test", Age: 40} + b, err := valid.Valid(u) + if err != nil { + // handle error + } + if !b { + // validation does not pass + // blabla... + } + } + +Struct Tag Functions: + + Required + Min(min int) + Max(max int) + Range(min, max int) + MinSize(min int) + MaxSize(max int) + Length(length int) + Alpha + Numeric + AlphaNumeric + Match(pattern string) + AlphaDash + Email + IP + Base64 + Mobile + Tel + Phone + ZipCode + + +## LICENSE + +BSD License http://creativecommons.org/licenses/BSD/ diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/validation/util.go b/Godeps/_workspace/src/github.com/astaxie/beego/validation/util.go new file mode 100644 index 000000000..9e7460a68 --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/validation/util.go @@ -0,0 +1,268 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package validation + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +const ( + // ValidTag struct tag + ValidTag = "valid" +) + +var ( + // key: function name + // value: the number of parameters + funcs = make(Funcs) + + // doesn't belong to validation functions + unFuncs = map[string]bool{ + "Clear": true, + "HasErrors": true, + "ErrorMap": true, + "Error": true, + "apply": true, + "Check": true, + "Valid": true, + "NoMatch": true, + } +) + +func init() { + v := &Validation{} + t := reflect.TypeOf(v) + for i := 0; i < t.NumMethod(); i++ { + m := t.Method(i) + if !unFuncs[m.Name] { + funcs[m.Name] = m.Func + } + } +} + +// CustomFunc is for custom validate function +type CustomFunc func(v *Validation, obj interface{}, key string) + +// AddCustomFunc Add a custom function to validation +// The name can not be: +// Clear +// HasErrors +// ErrorMap +// Error +// Check +// Valid +// NoMatch +// If the name is same with exists function, it will replace the origin valid function +func AddCustomFunc(name string, f CustomFunc) error { + if unFuncs[name] { + return fmt.Errorf("invalid function name: %s", name) + } + + funcs[name] = reflect.ValueOf(f) + return nil +} + +// ValidFunc Valid function type +type ValidFunc struct { + Name string + Params []interface{} +} + +// Funcs Validate function map +type Funcs map[string]reflect.Value + +// Call validate values with named type string +func (f Funcs) Call(name string, params ...interface{}) (result []reflect.Value, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%v", r) + } + }() + if _, ok := f[name]; !ok { + err = fmt.Errorf("%s does not exist", name) + return + } + if len(params) != f[name].Type().NumIn() { + err = fmt.Errorf("The number of params is not adapted") + return + } + in := make([]reflect.Value, len(params)) + for k, param := range params { + in[k] = reflect.ValueOf(param) + } + result = f[name].Call(in) + return +} + +func isStruct(t reflect.Type) 
bool { + return t.Kind() == reflect.Struct +} + +func isStructPtr(t reflect.Type) bool { + return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct +} + +func getValidFuncs(f reflect.StructField) (vfs []ValidFunc, err error) { + tag := f.Tag.Get(ValidTag) + if len(tag) == 0 { + return + } + if vfs, tag, err = getRegFuncs(tag, f.Name); err != nil { + return + } + fs := strings.Split(tag, ";") + for _, vfunc := range fs { + var vf ValidFunc + if len(vfunc) == 0 { + continue + } + vf, err = parseFunc(vfunc, f.Name) + if err != nil { + return + } + vfs = append(vfs, vf) + } + return +} + +// Get Match function +// May be get NoMatch function in the future +func getRegFuncs(tag, key string) (vfs []ValidFunc, str string, err error) { + tag = strings.TrimSpace(tag) + index := strings.Index(tag, "Match(/") + if index == -1 { + str = tag + return + } + end := strings.LastIndex(tag, "/)") + if end < index { + err = fmt.Errorf("invalid Match function") + return + } + reg, err := regexp.Compile(tag[index+len("Match(/") : end]) + if err != nil { + return + } + vfs = []ValidFunc{{"Match", []interface{}{reg, key + ".Match"}}} + str = strings.TrimSpace(tag[:index]) + strings.TrimSpace(tag[end+len("/)"):]) + return +} + +func parseFunc(vfunc, key string) (v ValidFunc, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%v", r) + } + }() + + vfunc = strings.TrimSpace(vfunc) + start := strings.Index(vfunc, "(") + var num int + + // doesn't need parameter valid function + if start == -1 { + if num, err = numIn(vfunc); err != nil { + return + } + if num != 0 { + err = fmt.Errorf("%s require %d parameters", vfunc, num) + return + } + v = ValidFunc{vfunc, []interface{}{key + "." 
+ vfunc}} + return + } + + end := strings.Index(vfunc, ")") + if end == -1 { + err = fmt.Errorf("invalid valid function") + return + } + + name := strings.TrimSpace(vfunc[:start]) + if num, err = numIn(name); err != nil { + return + } + + params := strings.Split(vfunc[start+1:end], ",") + // the num of param must be equal + if num != len(params) { + err = fmt.Errorf("%s require %d parameters", name, num) + return + } + + tParams, err := trim(name, key+"."+name, params) + if err != nil { + return + } + v = ValidFunc{name, tParams} + return +} + +func numIn(name string) (num int, err error) { + fn, ok := funcs[name] + if !ok { + err = fmt.Errorf("doesn't exsits %s valid function", name) + return + } + // sub *Validation obj and key + num = fn.Type().NumIn() - 3 + return +} + +func trim(name, key string, s []string) (ts []interface{}, err error) { + ts = make([]interface{}, len(s), len(s)+1) + fn, ok := funcs[name] + if !ok { + err = fmt.Errorf("doesn't exsits %s valid function", name) + return + } + for i := 0; i < len(s); i++ { + var param interface{} + // skip *Validation and obj params + if param, err = parseParam(fn.Type().In(i+2), strings.TrimSpace(s[i])); err != nil { + return + } + ts[i] = param + } + ts = append(ts, key) + return +} + +// modify the parameters's type to adapt the function input parameters' type +func parseParam(t reflect.Type, s string) (i interface{}, err error) { + switch t.Kind() { + case reflect.Int: + i, err = strconv.Atoi(s) + case reflect.String: + i = s + case reflect.Ptr: + if t.Elem().String() != "regexp.Regexp" { + err = fmt.Errorf("does not support %s", t.Elem().String()) + return + } + i, err = regexp.Compile(s) + default: + err = fmt.Errorf("does not support %s", t.Kind().String()) + } + return +} + +func mergeParam(v *Validation, obj interface{}, params []interface{}) []interface{} { + return append([]interface{}{v, obj}, params...) 
+} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/validation/validation.go b/Godeps/_workspace/src/github.com/astaxie/beego/validation/validation.go new file mode 100644 index 000000000..2b020aa8d --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/validation/validation.go @@ -0,0 +1,374 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package validation for validations +// +// import ( +// "github.com/astaxie/beego/validation" +// "log" +// ) +// +// type User struct { +// Name string +// Age int +// } +// +// func main() { +// u := User{"man", 40} +// valid := validation.Validation{} +// valid.Required(u.Name, "name") +// valid.MaxSize(u.Name, 15, "nameMax") +// valid.Range(u.Age, 0, 140, "age") +// if valid.HasErrors() { +// // validation does not pass +// // print invalid message +// for _, err := range valid.Errors { +// log.Println(err.Key, err.Message) +// } +// } +// // or use like this +// if v := valid.Max(u.Age, 140, "ageMax"); !v.Ok { +// log.Println(v.Error.Key, v.Error.Message) +// } +// } +// +// more info: http://beego.me/docs/mvc/controller/validation.md +package validation + +import ( + "fmt" + "reflect" + "regexp" + "strings" +) + +// ValidFormer valid interface +type ValidFormer interface { + Valid(*Validation) +} + +// Error show the error +type Error struct { + Message, Key, Name, Field, Tmpl string + Value interface{} + LimitValue interface{} +} 
+ +// String Returns the Message. +func (e *Error) String() string { + if e == nil { + return "" + } + return e.Message +} + +// Result is returned from every validation method. +// It provides an indication of success, and a pointer to the Error (if any). +type Result struct { + Error *Error + Ok bool +} + +// Key Get Result by given key string. +func (r *Result) Key(key string) *Result { + if r.Error != nil { + r.Error.Key = key + } + return r +} + +// Message Set Result message by string or format string with args +func (r *Result) Message(message string, args ...interface{}) *Result { + if r.Error != nil { + if len(args) == 0 { + r.Error.Message = message + } else { + r.Error.Message = fmt.Sprintf(message, args...) + } + } + return r +} + +// A Validation context manages data validation and error messages. +type Validation struct { + Errors []*Error + ErrorsMap map[string]*Error +} + +// Clear Clean all ValidationError. +func (v *Validation) Clear() { + v.Errors = []*Error{} + v.ErrorsMap = nil +} + +// HasErrors Has ValidationError nor not. +func (v *Validation) HasErrors() bool { + return len(v.Errors) > 0 +} + +// ErrorMap Return the errors mapped by key. +// If there are multiple validation errors associated with a single key, the +// first one "wins". (Typically the first validation will be the more basic). +func (v *Validation) ErrorMap() map[string]*Error { + return v.ErrorsMap +} + +// Error Add an error to the validation context. +func (v *Validation) Error(message string, args ...interface{}) *Result { + result := (&Result{ + Ok: false, + Error: &Error{}, + }).Message(message, args...) 
+ v.Errors = append(v.Errors, result.Error) + return result +} + +// Required Test that the argument is non-nil and non-empty (if string or list) +func (v *Validation) Required(obj interface{}, key string) *Result { + return v.apply(Required{key}, obj) +} + +// Min Test that the obj is greater than min if obj's type is int +func (v *Validation) Min(obj interface{}, min int, key string) *Result { + return v.apply(Min{min, key}, obj) +} + +// Max Test that the obj is less than max if obj's type is int +func (v *Validation) Max(obj interface{}, max int, key string) *Result { + return v.apply(Max{max, key}, obj) +} + +// Range Test that the obj is between mni and max if obj's type is int +func (v *Validation) Range(obj interface{}, min, max int, key string) *Result { + return v.apply(Range{Min{Min: min}, Max{Max: max}, key}, obj) +} + +// MinSize Test that the obj is longer than min size if type is string or slice +func (v *Validation) MinSize(obj interface{}, min int, key string) *Result { + return v.apply(MinSize{min, key}, obj) +} + +// MaxSize Test that the obj is shorter than max size if type is string or slice +func (v *Validation) MaxSize(obj interface{}, max int, key string) *Result { + return v.apply(MaxSize{max, key}, obj) +} + +// Length Test that the obj is same length to n if type is string or slice +func (v *Validation) Length(obj interface{}, n int, key string) *Result { + return v.apply(Length{n, key}, obj) +} + +// Alpha Test that the obj is [a-zA-Z] if type is string +func (v *Validation) Alpha(obj interface{}, key string) *Result { + return v.apply(Alpha{key}, obj) +} + +// Numeric Test that the obj is [0-9] if type is string +func (v *Validation) Numeric(obj interface{}, key string) *Result { + return v.apply(Numeric{key}, obj) +} + +// AlphaNumeric Test that the obj is [0-9a-zA-Z] if type is string +func (v *Validation) AlphaNumeric(obj interface{}, key string) *Result { + return v.apply(AlphaNumeric{key}, obj) +} + +// Match Test that the obj 
matches regexp if type is string +func (v *Validation) Match(obj interface{}, regex *regexp.Regexp, key string) *Result { + return v.apply(Match{regex, key}, obj) +} + +// NoMatch Test that the obj doesn't match regexp if type is string +func (v *Validation) NoMatch(obj interface{}, regex *regexp.Regexp, key string) *Result { + return v.apply(NoMatch{Match{Regexp: regex}, key}, obj) +} + +// AlphaDash Test that the obj is [0-9a-zA-Z_-] if type is string +func (v *Validation) AlphaDash(obj interface{}, key string) *Result { + return v.apply(AlphaDash{NoMatch{Match: Match{Regexp: alphaDashPattern}}, key}, obj) +} + +// Email Test that the obj is email address if type is string +func (v *Validation) Email(obj interface{}, key string) *Result { + return v.apply(Email{Match{Regexp: emailPattern}, key}, obj) +} + +// IP Test that the obj is IP address if type is string +func (v *Validation) IP(obj interface{}, key string) *Result { + return v.apply(IP{Match{Regexp: ipPattern}, key}, obj) +} + +// Base64 Test that the obj is base64 encoded if type is string +func (v *Validation) Base64(obj interface{}, key string) *Result { + return v.apply(Base64{Match{Regexp: base64Pattern}, key}, obj) +} + +// Mobile Test that the obj is chinese mobile number if type is string +func (v *Validation) Mobile(obj interface{}, key string) *Result { + return v.apply(Mobile{Match{Regexp: mobilePattern}, key}, obj) +} + +// Tel Test that the obj is chinese telephone number if type is string +func (v *Validation) Tel(obj interface{}, key string) *Result { + return v.apply(Tel{Match{Regexp: telPattern}, key}, obj) +} + +// Phone Test that the obj is chinese mobile or telephone number if type is string +func (v *Validation) Phone(obj interface{}, key string) *Result { + return v.apply(Phone{Mobile{Match: Match{Regexp: mobilePattern}}, + Tel{Match: Match{Regexp: telPattern}}, key}, obj) +} + +// ZipCode Test that the obj is chinese zip code if type is string +func (v *Validation) ZipCode(obj 
interface{}, key string) *Result { + return v.apply(ZipCode{Match{Regexp: zipCodePattern}, key}, obj) +} + +func (v *Validation) apply(chk Validator, obj interface{}) *Result { + if chk.IsSatisfied(obj) { + return &Result{Ok: true} + } + + // Add the error to the validation context. + key := chk.GetKey() + Name := key + Field := "" + + parts := strings.Split(key, ".") + if len(parts) == 2 { + Field = parts[0] + Name = parts[1] + } + + err := &Error{ + Message: chk.DefaultMessage(), + Key: key, + Name: Name, + Field: Field, + Value: obj, + Tmpl: MessageTmpls[Name], + LimitValue: chk.GetLimitValue(), + } + v.setError(err) + + // Also return it in the result. + return &Result{ + Ok: false, + Error: err, + } +} + +func (v *Validation) setError(err *Error) { + v.Errors = append(v.Errors, err) + if v.ErrorsMap == nil { + v.ErrorsMap = make(map[string]*Error) + } + if _, ok := v.ErrorsMap[err.Field]; !ok { + v.ErrorsMap[err.Field] = err + } +} + +// SetError Set error message for one field in ValidationError +func (v *Validation) SetError(fieldName string, errMsg string) *Error { + err := &Error{Key: fieldName, Field: fieldName, Tmpl: errMsg, Message: errMsg} + v.setError(err) + return err +} + +// Check Apply a group of validators to a field, in order, and return the +// ValidationResult from the first one that fails, or the last one that +// succeeds. +func (v *Validation) Check(obj interface{}, checks ...Validator) *Result { + var result *Result + for _, check := range checks { + result = v.apply(check, obj) + if !result.Ok { + return result + } + } + return result +} + +// Valid Validate a struct. 
+// the obj parameter must be a struct or a struct pointer +func (v *Validation) Valid(obj interface{}) (b bool, err error) { + objT := reflect.TypeOf(obj) + objV := reflect.ValueOf(obj) + switch { + case isStruct(objT): + case isStructPtr(objT): + objT = objT.Elem() + objV = objV.Elem() + default: + err = fmt.Errorf("%v must be a struct or a struct pointer", obj) + return + } + + for i := 0; i < objT.NumField(); i++ { + var vfs []ValidFunc + if vfs, err = getValidFuncs(objT.Field(i)); err != nil { + return + } + for _, vf := range vfs { + if _, err = funcs.Call(vf.Name, + mergeParam(v, objV.Field(i).Interface(), vf.Params)...); err != nil { + return + } + } + } + + if !v.HasErrors() { + if form, ok := obj.(ValidFormer); ok { + form.Valid(v) + } + } + + return !v.HasErrors(), nil +} + +// RecursiveValid Recursively validate a struct. +// Step1: Validate by v.Valid +// Step2: If pass on step1, then reflect obj's fields +// Step3: Do the Recursively validation to all struct or struct pointer fields +func (v *Validation) RecursiveValid(objc interface{}) (bool, error) { + //Step 1: validate obj itself firstly + // fails if objc is not struct + pass, err := v.Valid(objc) + if err != nil || false == pass { + return pass, err // Stop recursive validation + } + // Step 2: Validate struct's struct fields + objT := reflect.TypeOf(objc) + objV := reflect.ValueOf(objc) + + if isStructPtr(objT) { + objT = objT.Elem() + objV = objV.Elem() + } + + for i := 0; i < objT.NumField(); i++ { + + t := objT.Field(i).Type + + // Recursive applies to struct or pointer to structs fields + if isStruct(t) || isStructPtr(t) { + // Step 3: do the recursive validation + // Only valid the Public field recursively + if objV.Field(i).CanInterface() { + pass, err = v.RecursiveValid(objV.Field(i).Interface()) + } + } + } + return pass, err +} diff --git a/Godeps/_workspace/src/github.com/astaxie/beego/validation/validators.go b/Godeps/_workspace/src/github.com/astaxie/beego/validation/validators.go 
new file mode 100644 index 000000000..ebff2191a --- /dev/null +++ b/Godeps/_workspace/src/github.com/astaxie/beego/validation/validators.go @@ -0,0 +1,687 @@ +// Copyright 2014 beego Author. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package validation + +import ( + "fmt" + "reflect" + "regexp" + "time" + "unicode/utf8" +) + +// MessageTmpls store commond validate template +var MessageTmpls = map[string]string{ + "Required": "Can not be empty", + "Min": "Minimum is %d", + "Max": "Maximum is %d", + "Range": "Range is %d to %d", + "MinSize": "Minimum size is %d", + "MaxSize": "Maximum size is %d", + "Length": "Required length is %d", + "Alpha": "Must be valid alpha characters", + "Numeric": "Must be valid numeric characters", + "AlphaNumeric": "Must be valid alpha or numeric characters", + "Match": "Must match %s", + "NoMatch": "Must not match %s", + "AlphaDash": "Must be valid alpha or numeric or dash(-_) characters", + "Email": "Must be a valid email address", + "IP": "Must be a valid ip address", + "Base64": "Must be valid base64 characters", + "Mobile": "Must be valid mobile number", + "Tel": "Must be valid telephone number", + "Phone": "Must be valid telephone or mobile phone number", + "ZipCode": "Must be valid zipcode", +} + +// SetDefaultMessage set default messages +// if not set, the default messages are +// "Required": "Can not be empty", +// "Min": "Minimum is %d", +// "Max": "Maximum is %d", +// "Range": "Range is %d 
to %d", +// "MinSize": "Minimum size is %d", +// "MaxSize": "Maximum size is %d", +// "Length": "Required length is %d", +// "Alpha": "Must be valid alpha characters", +// "Numeric": "Must be valid numeric characters", +// "AlphaNumeric": "Must be valid alpha or numeric characters", +// "Match": "Must match %s", +// "NoMatch": "Must not match %s", +// "AlphaDash": "Must be valid alpha or numeric or dash(-_) characters", +// "Email": "Must be a valid email address", +// "IP": "Must be a valid ip address", +// "Base64": "Must be valid base64 characters", +// "Mobile": "Must be valid mobile number", +// "Tel": "Must be valid telephone number", +// "Phone": "Must be valid telephone or mobile phone number", +// "ZipCode": "Must be valid zipcode", +func SetDefaultMessage(msg map[string]string) { + if len(msg) == 0 { + return + } + + for name := range msg { + MessageTmpls[name] = msg[name] + } +} + +// Validator interface +type Validator interface { + IsSatisfied(interface{}) bool + DefaultMessage() string + GetKey() string + GetLimitValue() interface{} +} + +// Required struct +type Required struct { + Key string +} + +// IsSatisfied judge whether obj has value +func (r Required) IsSatisfied(obj interface{}) bool { + if obj == nil { + return false + } + + if str, ok := obj.(string); ok { + return len(str) > 0 + } + if _, ok := obj.(bool); ok { + return true + } + if i, ok := obj.(int); ok { + return i != 0 + } + if i, ok := obj.(uint); ok { + return i != 0 + } + if i, ok := obj.(int8); ok { + return i != 0 + } + if i, ok := obj.(uint8); ok { + return i != 0 + } + if i, ok := obj.(int16); ok { + return i != 0 + } + if i, ok := obj.(uint16); ok { + return i != 0 + } + if i, ok := obj.(uint32); ok { + return i != 0 + } + if i, ok := obj.(int32); ok { + return i != 0 + } + if i, ok := obj.(int64); ok { + return i != 0 + } + if i, ok := obj.(uint64); ok { + return i != 0 + } + if t, ok := obj.(time.Time); ok { + return !t.IsZero() + } + v := reflect.ValueOf(obj) + if v.Kind() 
== reflect.Slice { + return v.Len() > 0 + } + return true +} + +// DefaultMessage return the default error message +func (r Required) DefaultMessage() string { + return fmt.Sprint(MessageTmpls["Required"]) +} + +// GetKey return the r.Key +func (r Required) GetKey() string { + return r.Key +} + +// GetLimitValue return nil now +func (r Required) GetLimitValue() interface{} { + return nil +} + +// Min check struct +type Min struct { + Min int + Key string +} + +// IsSatisfied judge whether obj is valid +func (m Min) IsSatisfied(obj interface{}) bool { + num, ok := obj.(int) + if ok { + return num >= m.Min + } + return false +} + +// DefaultMessage return the default min error message +func (m Min) DefaultMessage() string { + return fmt.Sprintf(MessageTmpls["Min"], m.Min) +} + +// GetKey return the m.Key +func (m Min) GetKey() string { + return m.Key +} + +// GetLimitValue return the limit value, Min +func (m Min) GetLimitValue() interface{} { + return m.Min +} + +// Max validate struct +type Max struct { + Max int + Key string +} + +// IsSatisfied judge whether obj is valid +func (m Max) IsSatisfied(obj interface{}) bool { + num, ok := obj.(int) + if ok { + return num <= m.Max + } + return false +} + +// DefaultMessage return the default max error message +func (m Max) DefaultMessage() string { + return fmt.Sprintf(MessageTmpls["Max"], m.Max) +} + +// GetKey return the m.Key +func (m Max) GetKey() string { + return m.Key +} + +// GetLimitValue return the limit value, Max +func (m Max) GetLimitValue() interface{} { + return m.Max +} + +// Range Requires an integer to be within Min, Max inclusive. 
+type Range struct { + Min + Max + Key string +} + +// IsSatisfied judge whether obj is valid +func (r Range) IsSatisfied(obj interface{}) bool { + return r.Min.IsSatisfied(obj) && r.Max.IsSatisfied(obj) +} + +// DefaultMessage return the default Range error message +func (r Range) DefaultMessage() string { + return fmt.Sprintf(MessageTmpls["Range"], r.Min.Min, r.Max.Max) +} + +// GetKey return the m.Key +func (r Range) GetKey() string { + return r.Key +} + +// GetLimitValue return the limit value, Max +func (r Range) GetLimitValue() interface{} { + return []int{r.Min.Min, r.Max.Max} +} + +// MinSize Requires an array or string to be at least a given length. +type MinSize struct { + Min int + Key string +} + +// IsSatisfied judge whether obj is valid +func (m MinSize) IsSatisfied(obj interface{}) bool { + if str, ok := obj.(string); ok { + return utf8.RuneCountInString(str) >= m.Min + } + v := reflect.ValueOf(obj) + if v.Kind() == reflect.Slice { + return v.Len() >= m.Min + } + return false +} + +// DefaultMessage return the default MinSize error message +func (m MinSize) DefaultMessage() string { + return fmt.Sprintf(MessageTmpls["MinSize"], m.Min) +} + +// GetKey return the m.Key +func (m MinSize) GetKey() string { + return m.Key +} + +// GetLimitValue return the limit value +func (m MinSize) GetLimitValue() interface{} { + return m.Min +} + +// MaxSize Requires an array or string to be at most a given length. 
+type MaxSize struct { + Max int + Key string +} + +// IsSatisfied judge whether obj is valid +func (m MaxSize) IsSatisfied(obj interface{}) bool { + if str, ok := obj.(string); ok { + return utf8.RuneCountInString(str) <= m.Max + } + v := reflect.ValueOf(obj) + if v.Kind() == reflect.Slice { + return v.Len() <= m.Max + } + return false +} + +// DefaultMessage return the default MaxSize error message +func (m MaxSize) DefaultMessage() string { + return fmt.Sprintf(MessageTmpls["MaxSize"], m.Max) +} + +// GetKey return the m.Key +func (m MaxSize) GetKey() string { + return m.Key +} + +// GetLimitValue return the limit value +func (m MaxSize) GetLimitValue() interface{} { + return m.Max +} + +// Length Requires an array or string to be exactly a given length. +type Length struct { + N int + Key string +} + +// IsSatisfied judge whether obj is valid +func (l Length) IsSatisfied(obj interface{}) bool { + if str, ok := obj.(string); ok { + return utf8.RuneCountInString(str) == l.N + } + v := reflect.ValueOf(obj) + if v.Kind() == reflect.Slice { + return v.Len() == l.N + } + return false +} + +// DefaultMessage return the default Length error message +func (l Length) DefaultMessage() string { + return fmt.Sprintf(MessageTmpls["Length"], l.N) +} + +// GetKey return the m.Key +func (l Length) GetKey() string { + return l.Key +} + +// GetLimitValue return the limit value +func (l Length) GetLimitValue() interface{} { + return l.N +} + +// Alpha check the alpha +type Alpha struct { + Key string +} + +// IsSatisfied judge whether obj is valid +func (a Alpha) IsSatisfied(obj interface{}) bool { + if str, ok := obj.(string); ok { + for _, v := range str { + if ('Z' < v || v < 'A') && ('z' < v || v < 'a') { + return false + } + } + return true + } + return false +} + +// DefaultMessage return the default Length error message +func (a Alpha) DefaultMessage() string { + return fmt.Sprint(MessageTmpls["Alpha"]) +} + +// GetKey return the m.Key +func (a Alpha) GetKey() string { + 
return a.Key +} + +// GetLimitValue return the limit value +func (a Alpha) GetLimitValue() interface{} { + return nil +} + +// Numeric check number +type Numeric struct { + Key string +} + +// IsSatisfied judge whether obj is valid +func (n Numeric) IsSatisfied(obj interface{}) bool { + if str, ok := obj.(string); ok { + for _, v := range str { + if '9' < v || v < '0' { + return false + } + } + return true + } + return false +} + +// DefaultMessage return the default Length error message +func (n Numeric) DefaultMessage() string { + return fmt.Sprint(MessageTmpls["Numeric"]) +} + +// GetKey return the n.Key +func (n Numeric) GetKey() string { + return n.Key +} + +// GetLimitValue return the limit value +func (n Numeric) GetLimitValue() interface{} { + return nil +} + +// AlphaNumeric check alpha and number +type AlphaNumeric struct { + Key string +} + +// IsSatisfied judge whether obj is valid +func (a AlphaNumeric) IsSatisfied(obj interface{}) bool { + if str, ok := obj.(string); ok { + for _, v := range str { + if ('Z' < v || v < 'A') && ('z' < v || v < 'a') && ('9' < v || v < '0') { + return false + } + } + return true + } + return false +} + +// DefaultMessage return the default Length error message +func (a AlphaNumeric) DefaultMessage() string { + return fmt.Sprint(MessageTmpls["AlphaNumeric"]) +} + +// GetKey return the a.Key +func (a AlphaNumeric) GetKey() string { + return a.Key +} + +// GetLimitValue return the limit value +func (a AlphaNumeric) GetLimitValue() interface{} { + return nil +} + +// Match Requires a string to match a given regex. 
+type Match struct { + Regexp *regexp.Regexp + Key string +} + +// IsSatisfied judge whether obj is valid +func (m Match) IsSatisfied(obj interface{}) bool { + return m.Regexp.MatchString(fmt.Sprintf("%v", obj)) +} + +// DefaultMessage return the default Match error message +func (m Match) DefaultMessage() string { + return fmt.Sprintf(MessageTmpls["Match"], m.Regexp.String()) +} + +// GetKey return the m.Key +func (m Match) GetKey() string { + return m.Key +} + +// GetLimitValue return the limit value +func (m Match) GetLimitValue() interface{} { + return m.Regexp.String() +} + +// NoMatch Requires a string to not match a given regex. +type NoMatch struct { + Match + Key string +} + +// IsSatisfied judge whether obj is valid +func (n NoMatch) IsSatisfied(obj interface{}) bool { + return !n.Match.IsSatisfied(obj) +} + +// DefaultMessage return the default NoMatch error message +func (n NoMatch) DefaultMessage() string { + return fmt.Sprintf(MessageTmpls["NoMatch"], n.Regexp.String()) +} + +// GetKey return the n.Key +func (n NoMatch) GetKey() string { + return n.Key +} + +// GetLimitValue return the limit value +func (n NoMatch) GetLimitValue() interface{} { + return n.Regexp.String() +} + +var alphaDashPattern = regexp.MustCompile("[^\\d\\w-_]") + +// AlphaDash check not Alpha +type AlphaDash struct { + NoMatch + Key string +} + +// DefaultMessage return the default AlphaDash error message +func (a AlphaDash) DefaultMessage() string { + return fmt.Sprint(MessageTmpls["AlphaDash"]) +} + +// GetKey return the n.Key +func (a AlphaDash) GetKey() string { + return a.Key +} + +// GetLimitValue return the limit value +func (a AlphaDash) GetLimitValue() interface{} { + return nil +} + +var emailPattern = regexp.MustCompile("[\\w!#$%&'*+/=?^_`{|}~-]+(?:\\.[\\w!#$%&'*+/=?^_`{|}~-]+)*@(?:[\\w](?:[\\w-]*[\\w])?\\.)+[a-zA-Z0-9](?:[\\w-]*[\\w])?") + +// Email check struct +type Email struct { + Match + Key string +} + +// DefaultMessage return the default Email error message 
+func (e Email) DefaultMessage() string { + return fmt.Sprint(MessageTmpls["Email"]) +} + +// GetKey return the n.Key +func (e Email) GetKey() string { + return e.Key +} + +// GetLimitValue return the limit value +func (e Email) GetLimitValue() interface{} { + return nil +} + +var ipPattern = regexp.MustCompile("^((2[0-4]\\d|25[0-5]|[01]?\\d\\d?)\\.){3}(2[0-4]\\d|25[0-5]|[01]?\\d\\d?)$") + +// IP check struct +type IP struct { + Match + Key string +} + +// DefaultMessage return the default IP error message +func (i IP) DefaultMessage() string { + return fmt.Sprint(MessageTmpls["IP"]) +} + +// GetKey return the i.Key +func (i IP) GetKey() string { + return i.Key +} + +// GetLimitValue return the limit value +func (i IP) GetLimitValue() interface{} { + return nil +} + +var base64Pattern = regexp.MustCompile("^(?:[A-Za-z0-99+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?$") + +// Base64 check struct +type Base64 struct { + Match + Key string +} + +// DefaultMessage return the default Base64 error message +func (b Base64) DefaultMessage() string { + return fmt.Sprint(MessageTmpls["Base64"]) +} + +// GetKey return the b.Key +func (b Base64) GetKey() string { + return b.Key +} + +// GetLimitValue return the limit value +func (b Base64) GetLimitValue() interface{} { + return nil +} + +// just for chinese mobile phone number +var mobilePattern = regexp.MustCompile("^((\\+86)|(86))?(1(([35][0-9])|[8][0-9]|[7][0679]|[4][579]))\\d{8}$") + +// Mobile check struct +type Mobile struct { + Match + Key string +} + +// DefaultMessage return the default Mobile error message +func (m Mobile) DefaultMessage() string { + return fmt.Sprint(MessageTmpls["Mobile"]) +} + +// GetKey return the m.Key +func (m Mobile) GetKey() string { + return m.Key +} + +// GetLimitValue return the limit value +func (m Mobile) GetLimitValue() interface{} { + return nil +} + +// just for chinese telephone number +var telPattern = regexp.MustCompile("^(0\\d{2,3}(\\-)?)?\\d{7,8}$") + +// Tel check telephone 
struct +type Tel struct { + Match + Key string +} + +// DefaultMessage return the default Tel error message +func (t Tel) DefaultMessage() string { + return fmt.Sprint(MessageTmpls["Tel"]) +} + +// GetKey return the t.Key +func (t Tel) GetKey() string { + return t.Key +} + +// GetLimitValue return the limit value +func (t Tel) GetLimitValue() interface{} { + return nil +} + +// Phone just for chinese telephone or mobile phone number +type Phone struct { + Mobile + Tel + Key string +} + +// IsSatisfied judge whether obj is valid +func (p Phone) IsSatisfied(obj interface{}) bool { + return p.Mobile.IsSatisfied(obj) || p.Tel.IsSatisfied(obj) +} + +// DefaultMessage return the default Phone error message +func (p Phone) DefaultMessage() string { + return fmt.Sprint(MessageTmpls["Phone"]) +} + +// GetKey return the p.Key +func (p Phone) GetKey() string { + return p.Key +} + +// GetLimitValue return the limit value +func (p Phone) GetLimitValue() interface{} { + return nil +} + +// just for chinese zipcode +var zipCodePattern = regexp.MustCompile("^[1-9]\\d{5}$") + +// ZipCode check the zip struct +type ZipCode struct { + Match + Key string +} + +// DefaultMessage return the default Zip error message +func (z ZipCode) DefaultMessage() string { + return fmt.Sprint(MessageTmpls["ZipCode"]) +} + +// GetKey return the z.Key +func (z ZipCode) GetKey() string { + return z.Key +} + +// GetLimitValue return the limit value +func (z ZipCode) GetLimitValue() interface{} { + return nil +} diff --git a/Godeps/_workspace/src/github.com/beego/i18n/.gitignore b/Godeps/_workspace/src/github.com/beego/i18n/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/Godeps/_workspace/src/github.com/beego/i18n/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go 
+_cgo_export.* + +_testmain.go + +*.exe diff --git a/Godeps/_workspace/src/github.com/beego/i18n/LICENSE b/Godeps/_workspace/src/github.com/beego/i18n/LICENSE new file mode 100644 index 000000000..8405e89a0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/beego/i18n/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file 
distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. 
+ +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/beego/i18n/README.md b/Godeps/_workspace/src/github.com/beego/i18n/README.md new file mode 100644 index 000000000..b1cf49670 --- /dev/null +++ b/Godeps/_workspace/src/github.com/beego/i18n/README.md @@ -0,0 +1,6 @@ +i18n +==== + +Package i18n is for app Internationalization and Localization. 
+ +[Documentation](http://beego.me/docs/module/i18n.md) \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/beego/i18n/beei18n/beei18n.go b/Godeps/_workspace/src/github.com/beego/i18n/beei18n/beei18n.go new file mode 100644 index 000000000..487f792fc --- /dev/null +++ b/Godeps/_workspace/src/github.com/beego/i18n/beei18n/beei18n.go @@ -0,0 +1,166 @@ +// Copyright 2013-2014 beego authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// beei18n is a helper tool for beego/i18n package. +package main + +import ( + "flag" + "fmt" + "html/template" + "io" + "log" + "os" + "strings" +) + +type Command struct { + // Run runs the command. + // The args are the arguments after the command name. + Run func(cmd *Command, args []string) + + // UsageLine is the one-line usage message. + // The first word in the line is taken to be the command name. + UsageLine string + + // Short is the short description shown in the 'go help' output. + Short string + + // Long is the long message shown in the 'go help ' output. + Long string + + // Flag is a set of flags specific to this command. + Flag flag.FlagSet + + // CustomFlags indicates that the command will do its own + // flag parsing. + CustomFlags bool +} + +// Name returns the command's name: the first word in the usage line. 
+func (c *Command) Name() string { + name := c.UsageLine + i := strings.Index(name, " ") + if i >= 0 { + name = name[:i] + } + return name +} + +func (c *Command) Usage() { + fmt.Fprintf(os.Stderr, "usage: %s\n\n", c.UsageLine) + fmt.Fprintf(os.Stderr, "%s\n", strings.TrimSpace(c.Long)) + os.Exit(2) +} + +// Runnable reports whether the command can be run; otherwise +// it is a documentation pseudo-command such as importpath. +func (c *Command) Runnable() bool { + return c.Run != nil +} + +var commands = []*Command{ + cmdSync, +} + +func main() { + flag.Usage = usage + flag.Parse() + log.SetFlags(0) + + args := flag.Args() + if len(args) < 1 { + usage() + } + + if args[0] == "help" { + help(args[1:]) + return + } + + for _, cmd := range commands { + if cmd.Name() == args[0] && cmd.Run != nil { + cmd.Flag.Usage = func() { cmd.Usage() } + if cmd.CustomFlags { + args = args[1:] + } else { + cmd.Flag.Parse(args[1:]) + args = cmd.Flag.Args() + } + cmd.Run(cmd, args) + os.Exit(2) + return + } + } + + fmt.Fprintf(os.Stderr, "beei18n: unknown subcommand %q\nRun 'bee help' for usage.\n", args[0]) + os.Exit(2) +} + +var usageTemplate = `beei18n is a helper tool for beego/i18n package. + +Usage: + + beei18n command [arguments] + +The commands are: +{{range .}}{{if .Runnable}} + {{.Name | printf "%-11s"}} {{.Short}}{{end}}{{end}} + +Use "beei18n help [command]" for more information about a command. +` + +var helpTemplate = `{{if .Runnable}}usage: beei18n {{.UsageLine}} + +{{end}}{{.Long | trim}} +` + +func usage() { + tmpl(os.Stdout, usageTemplate, commands) + os.Exit(2) +} + +func tmpl(w io.Writer, text string, data interface{}) { + t := template.New("top") + t.Funcs(template.FuncMap{"trim": strings.TrimSpace}) + template.Must(t.Parse(text)) + if err := t.Execute(w, data); err != nil { + panic(err) + } +} + +func help(args []string) { + if len(args) == 0 { + usage() + // not exit 2: succeeded at 'go help'. 
+ return + } + if len(args) != 1 { + fmt.Fprintf(os.Stdout, "usage: beei18n help command\n\nToo many arguments given.\n") + os.Exit(2) // failed at 'bee help' + } + + arg := args[0] + + for _, cmd := range commands { + if cmd.Name() == arg { + tmpl(os.Stdout, helpTemplate, cmd) + // not exit 2: succeeded at 'go help cmd'. + return + } + } + + fmt.Fprintf(os.Stdout, "Unknown help topic %#q. Run 'beei18n help'.\n", arg) + os.Exit(2) // failed at 'bee help cmd' +} diff --git a/Godeps/_workspace/src/github.com/beego/i18n/beei18n/sync.go b/Godeps/_workspace/src/github.com/beego/i18n/beei18n/sync.go new file mode 100644 index 000000000..324444bb3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/beego/i18n/beei18n/sync.go @@ -0,0 +1,86 @@ +// Copyright 2013-2014 beego authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package main + +import ( + "log" + "os" + + "github.com/Unknwon/goconfig" +) + +var cmdSync = &Command{ + UsageLine: "sync [source file] [target files]", + Short: "sync keys for locale files", + Long: `to quickly sync keys for one or more locale files +based on the one you already have +`, +} + +func init() { + cmdSync.Run = syncLocales +} + +func syncLocales(cmd *Command, args []string) { + switch len(args) { + case 0: + log.Fatalln("No source locale file is specified") + case 1: + log.Fatalln("No target locale file is specified") + } + + srcLocale, err := goconfig.LoadConfigFile(args[0]) + if err != nil { + log.Fatalln(err) + } + + // Load or create target locales. + targets := args[1:] + targetLocales := make([]*goconfig.ConfigFile, len(targets)) + for i, target := range targets { + if !isExist(target) { + os.Create(target) + } + + targetLocales[i], err = goconfig.LoadConfigFile(target) + if err != nil { + log.Fatalln(err) + } + } + + for _, secName := range srcLocale.GetSectionList() { + keyList := srcLocale.GetKeyList(secName) + for _, target := range targetLocales { + for _, k := range keyList { + if _, err = target.GetValue(secName, k); err != nil { + target.SetValue(secName, k, "") + } + } + } + } + + for i, target := range targetLocales { + err = goconfig.SaveConfigFile(target, targets[i]) + if err != nil { + log.Fatalln(err) + } + } +} + +// IsExist returns whether a file or directory exists. +func isExist(path string) bool { + _, err := os.Stat(path) + return err == nil || os.IsExist(err) +} diff --git a/Godeps/_workspace/src/github.com/beego/i18n/i18n.go b/Godeps/_workspace/src/github.com/beego/i18n/i18n.go new file mode 100644 index 000000000..c3653e35e --- /dev/null +++ b/Godeps/_workspace/src/github.com/beego/i18n/i18n.go @@ -0,0 +1,212 @@ +// Copyright 2013-2014 beego authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package i18n is for app Internationalization and Localization. +package i18n + +import ( + "fmt" + "reflect" + "strings" + + "github.com/Unknwon/goconfig" +) + +var ( + locales = &localeStore{store: make(map[string]*locale)} +) + +type locale struct { + id int + lang string + langDesc string + message *goconfig.ConfigFile +} + +type localeStore struct { + langs []string + langDescs []string + store map[string]*locale +} + +// Get target language string +func (d *localeStore) Get(lang, section, format string) (string, bool) { + if locale, ok := d.store[lang]; ok { + if section == "" { + section = goconfig.DEFAULT_SECTION + } + + if value, err := locale.message.GetValue(section, format); err == nil { + return value, true + } + } + + return "", false +} + +func (d *localeStore) Add(lc *locale) bool { + if _, ok := d.store[lc.lang]; ok { + return false + } + + lc.id = len(d.langs) + d.langs = append(d.langs, lc.lang) + d.langDescs = append(d.langDescs, lc.langDesc) + d.store[lc.lang] = lc + + return true +} + +func (d *localeStore) Reload(langs ...string) error { + if len(langs) == 0 { + for _, lc := range d.store { + err := lc.message.Reload() + if err != nil { + return err + } + } + } else { + for _, lang := range langs { + if lc, ok := d.store[lang]; ok { + err := lc.message.Reload() + if err != nil { + return err + } + } + } + } + + return nil +} + +// Reload locales +func ReloadLangs(langs ...string) error { + return locales.Reload(langs...) 
+} + +// List all locale languages +func ListLangs() []string { + langs := make([]string, len(locales.langs)) + copy(langs, locales.langs) + return langs +} + +func ListLangDescs() []string { + langDescs := make([]string, len(locales.langDescs)) + copy(langDescs, locales.langDescs) + return langDescs +} + +// Check language name if exist +func IsExist(lang string) bool { + _, ok := locales.store[lang] + return ok +} + +// Check language name if exist +func IndexLang(lang string) int { + if lc, ok := locales.store[lang]; ok { + return lc.id + } + return -1 +} + +// Get language by index id +func GetLangByIndex(index int) string { + if index < 0 || index >= len(locales.langs) { + return "" + } + return locales.langs[index] +} + +func GetDescriptionByIndex(index int) string { + if index < 0 || index >= len(locales.langDescs) { + return "" + } + + return locales.langDescs[index] +} + +func GetDescriptionByLang(lang string) string { + return GetDescriptionByIndex(IndexLang(lang)) +} + +func SetMessageWithDesc(lang, langDesc, filePath string, appendFiles ...string) error { + message, err := goconfig.LoadConfigFile(filePath, appendFiles...) + if err == nil { + message.BlockMode = false + lc := new(locale) + lc.lang = lang + lc.langDesc = langDesc + lc.message = message + + if locales.Add(lc) == false { + return fmt.Errorf("Lang %s alread exist", lang) + } + } + return err +} + +// SetMessage sets the message file for localization. +func SetMessage(lang, filePath string, appendFiles ...string) error { + return SetMessageWithDesc(lang, lang, filePath, appendFiles...) +} + +// A Locale describles the information of localization. +type Locale struct { + Lang string +} + +// Tr translate content to target language. +func (l Locale) Tr(format string, args ...interface{}) string { + return Tr(l.Lang, format, args...) +} + +// Index get lang index of LangStore +func (l Locale) Index() int { + return IndexLang(l.Lang) +} + +// Tr translate content to target language. 
+func Tr(lang, format string, args ...interface{}) string { + var section string + parts := strings.SplitN(format, ".", 2) + if len(parts) == 2 { + section = parts[0] + format = parts[1] + } + + value, ok := locales.Get(lang, section, format) + if ok { + format = value + } + + if len(args) > 0 { + params := make([]interface{}, 0, len(args)) + for _, arg := range args { + if arg != nil { + val := reflect.ValueOf(arg) + if val.Kind() == reflect.Slice { + for i := 0; i < val.Len(); i++ { + params = append(params, val.Index(i).Interface()) + } + } else { + params = append(params, arg) + } + } + } + return fmt.Sprintf(format, params...) + } + return fmt.Sprintf(format) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/context.go b/Godeps/_workspace/src/github.com/docker/distribution/context/context.go new file mode 100644 index 000000000..23cbf5b54 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/context.go @@ -0,0 +1,85 @@ +package context + +import ( + "sync" + + "github.com/docker/distribution/uuid" + "golang.org/x/net/context" +) + +// Context is a copy of Context from the golang.org/x/net/context package. +type Context interface { + context.Context +} + +// instanceContext is a context that provides only an instance id. It is +// provided as the main background context. +type instanceContext struct { + Context + id string // id of context, logged as "instance.id" + once sync.Once // once protect generation of the id +} + +func (ic *instanceContext) Value(key interface{}) interface{} { + if key == "instance.id" { + ic.once.Do(func() { + // We want to lazy initialize the UUID such that we don't + // call a random generator from the package initialization + // code. 
For various reasons random could not be available + // https://github.com/docker/distribution/issues/782 + ic.id = uuid.Generate().String() + }) + return ic.id + } + + return ic.Context.Value(key) +} + +var background = &instanceContext{ + Context: context.Background(), +} + +// Background returns a non-nil, empty Context. The background context +// provides a single key, "instance.id" that is globally unique to the +// process. +func Background() Context { + return background +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. Use context Values only for request-scoped data that transits processes +// and APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} + +// stringMapContext is a simple context implementation that checks a map for a +// key, falling back to a parent if not present. +type stringMapContext struct { + context.Context + m map[string]interface{} +} + +// WithValues returns a context that proxies lookups through a map. Only +// supports string keys. +func WithValues(ctx context.Context, m map[string]interface{}) context.Context { + mo := make(map[string]interface{}, len(m)) // make our own copy. + for k, v := range m { + mo[k] = v + } + + return stringMapContext{ + Context: ctx, + m: mo, + } +} + +func (smc stringMapContext) Value(key interface{}) interface{} { + if ks, ok := key.(string); ok { + if v, ok := smc.m[ks]; ok { + return v + } + } + + return smc.Context.Value(key) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go b/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go new file mode 100644 index 000000000..6fe1f817d --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/doc.go @@ -0,0 +1,89 @@ +// Package context provides several utilities for working with +// golang.org/x/net/context in http requests. 
// Primarily, the focus is on
// logging relevant request information but this package is not limited to
// that purpose.
//
// The easiest way to get started is to get the background context:
//
//	ctx := context.Background()
//
// The returned context should be passed around your application and be the
// root of all other context instances. If the application has a version, this
// line should be called before anything else:
//
//	ctx := context.WithVersion(context.Background(), version)
//
// The above will store the version in the context and will be available to
// the logger.
//
// Logging
//
// The most useful aspect of this package is GetLogger. This function takes
// any context.Context interface and returns the current logger from the
// context. Canonical usage looks like this:
//
//	GetLogger(ctx).Infof("something interesting happened")
//
// GetLogger also takes optional key arguments. The keys will be looked up in
// the context and reported with the logger. The following example would
// return a logger that prints the version with each log message:
//
//	ctx := context.Context(context.Background(), "version", version)
//	GetLogger(ctx, "version").Infof("this log message has a version field")
//
// The above would print out a log message like this:
//
//	INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
//
// When used with WithLogger, we gain the ability to decorate the context with
// loggers that have information from disparate parts of the call stack.
// Following from the version example, we can build a new context with the
// configured logger such that we always print the version field:
//
//	ctx = WithLogger(ctx, GetLogger(ctx, "version"))
//
// Since the logger has been pushed to the context, we can now get the version
// field for free with our log messages. Future calls to GetLogger on the new
// context will have the version field:
//
//	GetLogger(ctx).Infof("this log message has a version field")
//
// This becomes more powerful when we start stacking loggers. Let's say we
// have the version logger from above but also want a request id. Using the
// context above, in our request scoped function, we place another logger in
// the context:
//
//	ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
//	ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
//
// When GetLogger is called on the new context, "http.request.id" will be
// included as a logger field, along with the original "version" field:
//
//	INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
//
// Note that this only affects the new context, the previous context, with the
// version field, can be used independently. Put another way, the new logger,
// added to the request context, is unique to that context and can have
// request scoped variables.
//
// HTTP Requests
//
// This package also contains several methods for working with http requests.
// The concepts are very similar to those described above. We simply place the
// request in the context using WithRequest. This makes the request variables
// available. GetRequestLogger can then be called to get request specific
// variables in a log line:
//
//	ctx = WithRequest(ctx, req)
//	GetRequestLogger(ctx).Infof("request variables")
//
// Like above, if we want to include the request data in all log messages in
// the context, we push the logger to a new context and use that one:
//
//	ctx = WithLogger(ctx, GetRequestLogger(ctx))
//
// The concept is fairly powerful and ensures that calls throughout the stack
Using the fields like "http.request.id", one +// can analyze call flow for a particular request with a simple grep of the +// logs. +package context diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/http.go b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go new file mode 100644 index 000000000..2cb1d0417 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/http.go @@ -0,0 +1,364 @@ +package context + +import ( + "errors" + "net" + "net/http" + "strings" + "sync" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/distribution/uuid" + "github.com/gorilla/mux" +) + +// Common errors used with this package. +var ( + ErrNoRequestContext = errors.New("no http request in context") + ErrNoResponseWriterContext = errors.New("no http response in context") +) + +func parseIP(ipStr string) net.IP { + ip := net.ParseIP(ipStr) + if ip == nil { + log.Warnf("invalid remote IP address: %q", ipStr) + } + return ip +} + +// RemoteAddr extracts the remote address of the request, taking into +// account proxy headers. +func RemoteAddr(r *http.Request) string { + if prior := r.Header.Get("X-Forwarded-For"); prior != "" { + proxies := strings.Split(prior, ",") + if len(proxies) > 0 { + remoteAddr := strings.Trim(proxies[0], " ") + if parseIP(remoteAddr) != nil { + return remoteAddr + } + } + } + // X-Real-Ip is less supported, but worth checking in the + // absence of X-Forwarded-For + if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { + if parseIP(realIP) != nil { + return realIP + } + } + + return r.RemoteAddr +} + +// RemoteIP extracts the remote IP of the request, taking into +// account proxy headers. +func RemoteIP(r *http.Request) string { + addr := RemoteAddr(r) + + // Try parsing it as "IP:port" + if ip, _, err := net.SplitHostPort(addr); err == nil { + return ip + } + + return addr +} + +// WithRequest places the request on the context. 
The context of the request +// is assigned a unique id, available at "http.request.id". The request itself +// is available at "http.request". Other common attributes are available under +// the prefix "http.request.". If a request is already present on the context, +// this method will panic. +func WithRequest(ctx Context, r *http.Request) Context { + if ctx.Value("http.request") != nil { + // NOTE(stevvooe): This needs to be considered a programming error. It + // is unlikely that we'd want to have more than one request in + // context. + panic("only one request per context") + } + + return &httpRequestContext{ + Context: ctx, + startedAt: time.Now(), + id: uuid.Generate().String(), + r: r, + } +} + +// GetRequest returns the http request in the given context. Returns +// ErrNoRequestContext if the context does not have an http request associated +// with it. +func GetRequest(ctx Context) (*http.Request, error) { + if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { + return r, nil + } + return nil, ErrNoRequestContext +} + +// GetRequestID attempts to resolve the current request id, if possible. An +// error is return if it is not available on the context. +func GetRequestID(ctx Context) string { + return GetStringValue(ctx, "http.request.id") +} + +// WithResponseWriter returns a new context and response writer that makes +// interesting response statistics available within the context. +func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { + irw := instrumentedResponseWriter{ + ResponseWriter: w, + Context: ctx, + } + + if closeNotifier, ok := w.(http.CloseNotifier); ok { + irwCN := &instrumentedResponseWriterCN{ + instrumentedResponseWriter: irw, + CloseNotifier: closeNotifier, + } + + return irwCN, irwCN + } + + return &irw, &irw +} + +// GetResponseWriter returns the http.ResponseWriter from the provided +// context. If not present, ErrNoResponseWriterContext is returned. 
The +// returned instance provides instrumentation in the context. +func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { + v := ctx.Value("http.response") + + rw, ok := v.(http.ResponseWriter) + if !ok || rw == nil { + return nil, ErrNoResponseWriterContext + } + + return rw, nil +} + +// getVarsFromRequest let's us change request vars implementation for testing +// and maybe future changes. +var getVarsFromRequest = mux.Vars + +// WithVars extracts gorilla/mux vars and makes them available on the returned +// context. Variables are available at keys with the prefix "vars.". For +// example, if looking for the variable "name", it can be accessed as +// "vars.name". Implementations that are accessing values need not know that +// the underlying context is implemented with gorilla/mux vars. +func WithVars(ctx Context, r *http.Request) Context { + return &muxVarsContext{ + Context: ctx, + vars: getVarsFromRequest(r), + } +} + +// GetRequestLogger returns a logger that contains fields from the request in +// the current context. If the request is not available in the context, no +// fields will display. Request loggers can safely be pushed onto the context. +func GetRequestLogger(ctx Context) Logger { + return GetLogger(ctx, + "http.request.id", + "http.request.method", + "http.request.host", + "http.request.uri", + "http.request.referer", + "http.request.useragent", + "http.request.remoteaddr", + "http.request.contenttype") +} + +// GetResponseLogger reads the current response stats and builds a logger. +// Because the values are read at call time, pushing a logger returned from +// this function on the context will lead to missing or invalid data. Only +// call this at the end of a request, after the response has been written. 
+func GetResponseLogger(ctx Context) Logger { + l := getLogrusLogger(ctx, + "http.response.written", + "http.response.status", + "http.response.contenttype") + + duration := Since(ctx, "http.request.startedat") + + if duration > 0 { + l = l.WithField("http.response.duration", duration.String()) + } + + return l +} + +// httpRequestContext makes information about a request available to context. +type httpRequestContext struct { + Context + + startedAt time.Time + id string + r *http.Request +} + +// Value returns a keyed element of the request for use in the context. To get +// the request itself, query "request". For other components, access them as +// "request.". For example, r.RequestURI +func (ctx *httpRequestContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.request" { + return ctx.r + } + + if !strings.HasPrefix(keyStr, "http.request.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + switch parts[2] { + case "uri": + return ctx.r.RequestURI + case "remoteaddr": + return RemoteAddr(ctx.r) + case "method": + return ctx.r.Method + case "host": + return ctx.r.Host + case "referer": + referer := ctx.r.Referer() + if referer != "" { + return referer + } + case "useragent": + return ctx.r.UserAgent() + case "id": + return ctx.id + case "startedat": + return ctx.startedAt + case "contenttype": + ct := ctx.r.Header.Get("Content-Type") + if ct != "" { + return ct + } + } + } + +fallback: + return ctx.Context.Value(key) +} + +type muxVarsContext struct { + Context + vars map[string]string +} + +func (ctx *muxVarsContext) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "vars" { + return ctx.vars + } + + if strings.HasPrefix(keyStr, "vars.") { + keyStr = strings.TrimPrefix(keyStr, "vars.") + } + + if v, ok := ctx.vars[keyStr]; ok { + return v + } + } + + return ctx.Context.Value(key) +} + +// 
instrumentedResponseWriterCN provides response writer information in a +// context. It implements http.CloseNotifier so that users can detect +// early disconnects. +type instrumentedResponseWriterCN struct { + instrumentedResponseWriter + http.CloseNotifier +} + +// instrumentedResponseWriter provides response writer information in a +// context. This variant is only used in the case where CloseNotifier is not +// implemented by the parent ResponseWriter. +type instrumentedResponseWriter struct { + http.ResponseWriter + Context + + mu sync.Mutex + status int + written int64 +} + +func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { + n, err = irw.ResponseWriter.Write(p) + + irw.mu.Lock() + irw.written += int64(n) + + // Guess the likely status if not set. + if irw.status == 0 { + irw.status = http.StatusOK + } + + irw.mu.Unlock() + + return +} + +func (irw *instrumentedResponseWriter) WriteHeader(status int) { + irw.ResponseWriter.WriteHeader(status) + + irw.mu.Lock() + irw.status = status + irw.mu.Unlock() +} + +func (irw *instrumentedResponseWriter) Flush() { + if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } +} + +func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } + + if !strings.HasPrefix(keyStr, "http.response.") { + goto fallback + } + + parts := strings.Split(keyStr, ".") + + if len(parts) != 3 { + goto fallback + } + + irw.mu.Lock() + defer irw.mu.Unlock() + + switch parts[2] { + case "written": + return irw.written + case "status": + return irw.status + case "contenttype": + contentType := irw.Header().Get("Content-Type") + if contentType != "" { + return contentType + } + } + } + +fallback: + return irw.Context.Value(key) +} + +func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { + if keyStr, ok := key.(string); ok { + if keyStr == "http.response" { + return irw + } 
+ } + + return irw.instrumentedResponseWriter.Value(key) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go b/Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go new file mode 100644 index 000000000..3d4b3c8eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/http_test.go @@ -0,0 +1,285 @@ +package context + +import ( + "net/http" + "net/http/httptest" + "net/http/httputil" + "net/url" + "reflect" + "testing" + "time" +) + +func TestWithRequest(t *testing.T) { + var req http.Request + + start := time.Now() + req.Method = "GET" + req.Host = "example.com" + req.RequestURI = "/test-test" + req.Header = make(http.Header) + req.Header.Set("Referer", "foo.com/referer") + req.Header.Set("User-Agent", "test/0.1") + + ctx := WithRequest(Background(), &req) + for _, testcase := range []struct { + key string + expected interface{} + }{ + { + key: "http.request", + expected: &req, + }, + { + key: "http.request.id", + }, + { + key: "http.request.method", + expected: req.Method, + }, + { + key: "http.request.host", + expected: req.Host, + }, + { + key: "http.request.uri", + expected: req.RequestURI, + }, + { + key: "http.request.referer", + expected: req.Referer(), + }, + { + key: "http.request.useragent", + expected: req.UserAgent(), + }, + { + key: "http.request.remoteaddr", + expected: req.RemoteAddr, + }, + { + key: "http.request.startedat", + }, + } { + v := ctx.Value(testcase.key) + + if v == nil { + t.Fatalf("value not found for %q", testcase.key) + } + + if testcase.expected != nil && v != testcase.expected { + t.Fatalf("%s: %v != %v", testcase.key, v, testcase.expected) + } + + // Key specific checks! 
+ switch testcase.key { + case "http.request.id": + if _, ok := v.(string); !ok { + t.Fatalf("request id not a string: %v", v) + } + case "http.request.startedat": + vt, ok := v.(time.Time) + if !ok { + t.Fatalf("value not a time: %v", v) + } + + now := time.Now() + if vt.After(now) { + t.Fatalf("time generated too late: %v > %v", vt, now) + } + + if vt.Before(start) { + t.Fatalf("time generated too early: %v < %v", vt, start) + } + } + } +} + +type testResponseWriter struct { + flushed bool + status int + written int64 + header http.Header +} + +func (trw *testResponseWriter) Header() http.Header { + if trw.header == nil { + trw.header = make(http.Header) + } + + return trw.header +} + +func (trw *testResponseWriter) Write(p []byte) (n int, err error) { + if trw.status == 0 { + trw.status = http.StatusOK + } + + n = len(p) + trw.written += int64(n) + return +} + +func (trw *testResponseWriter) WriteHeader(status int) { + trw.status = status +} + +func (trw *testResponseWriter) Flush() { + trw.flushed = true +} + +func TestWithResponseWriter(t *testing.T) { + trw := testResponseWriter{} + ctx, rw := WithResponseWriter(Background(), &trw) + + if ctx.Value("http.response") != rw { + t.Fatalf("response not available in context: %v != %v", ctx.Value("http.response"), rw) + } + + grw, err := GetResponseWriter(ctx) + if err != nil { + t.Fatalf("error getting response writer: %v", err) + } + + if grw != rw { + t.Fatalf("unexpected response writer returned: %#v != %#v", grw, rw) + } + + if ctx.Value("http.response.status") != 0 { + t.Fatalf("response status should always be a number and should be zero here: %v != 0", ctx.Value("http.response.status")) + } + + if n, err := rw.Write(make([]byte, 1024)); err != nil { + t.Fatalf("unexpected error writing: %v", err) + } else if n != 1024 { + t.Fatalf("unexpected number of bytes written: %v != %v", n, 1024) + } + + if ctx.Value("http.response.status") != http.StatusOK { + t.Fatalf("unexpected response status in context: %v != 
%v", ctx.Value("http.response.status"), http.StatusOK) + } + + if ctx.Value("http.response.written") != int64(1024) { + t.Fatalf("unexpected number reported bytes written: %v != %v", ctx.Value("http.response.written"), 1024) + } + + // Make sure flush propagates + rw.(http.Flusher).Flush() + + if !trw.flushed { + t.Fatalf("response writer not flushed") + } + + // Write another status and make sure context is correct. This normally + // wouldn't work except for in this contrived testcase. + rw.WriteHeader(http.StatusBadRequest) + + if ctx.Value("http.response.status") != http.StatusBadRequest { + t.Fatalf("unexpected response status in context: %v != %v", ctx.Value("http.response.status"), http.StatusBadRequest) + } +} + +func TestWithVars(t *testing.T) { + var req http.Request + vars := map[string]string{ + "foo": "asdf", + "bar": "qwer", + } + + getVarsFromRequest = func(r *http.Request) map[string]string { + if r != &req { + t.Fatalf("unexpected request: %v != %v", r, req) + } + + return vars + } + + ctx := WithVars(Background(), &req) + for _, testcase := range []struct { + key string + expected interface{} + }{ + { + key: "vars", + expected: vars, + }, + { + key: "vars.foo", + expected: "asdf", + }, + { + key: "vars.bar", + expected: "qwer", + }, + } { + v := ctx.Value(testcase.key) + + if !reflect.DeepEqual(v, testcase.expected) { + t.Fatalf("%q: %v != %v", testcase.key, v, testcase.expected) + } + } +} + +// SingleHostReverseProxy will insert an X-Forwarded-For header, and can be used to test +// RemoteAddr(). A fake RemoteAddr cannot be set on the HTTP request - it is overwritten +// at the transport layer to 127.0.0.1: . However, as the X-Forwarded-For header +// just contains the IP address, it is different enough for testing. 
+func TestRemoteAddr(t *testing.T) { + var expectedRemote string + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + + if r.RemoteAddr == expectedRemote { + t.Errorf("Unexpected matching remote addresses") + } + + actualRemote := RemoteAddr(r) + if expectedRemote != actualRemote { + t.Errorf("Mismatching remote hosts: %v != %v", expectedRemote, actualRemote) + } + + w.WriteHeader(200) + })) + + defer backend.Close() + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + + proxy := httputil.NewSingleHostReverseProxy(backendURL) + frontend := httptest.NewServer(proxy) + defer frontend.Close() + + // X-Forwarded-For set by proxy + expectedRemote = "127.0.0.1" + proxyReq, err := http.NewRequest("GET", frontend.URL, nil) + if err != nil { + t.Fatal(err) + } + + _, err = http.DefaultClient.Do(proxyReq) + if err != nil { + t.Fatal(err) + } + + // RemoteAddr in X-Real-Ip + getReq, err := http.NewRequest("GET", backend.URL, nil) + if err != nil { + t.Fatal(err) + } + + expectedRemote = "1.2.3.4" + getReq.Header["X-Real-ip"] = []string{expectedRemote} + _, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } + + // Valid X-Real-Ip and invalid X-Forwarded-For + getReq.Header["X-forwarded-for"] = []string{"1.2.3"} + _, err = http.DefaultClient.Do(getReq) + if err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go b/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go new file mode 100644 index 000000000..fbb6a0511 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/logger.go @@ -0,0 +1,116 @@ +package context + +import ( + "fmt" + + "github.com/Sirupsen/logrus" + "runtime" +) + +// Logger provides a leveled-logging interface. 
+type Logger interface { + // standard logger methods + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) + + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + + Panic(args ...interface{}) + Panicf(format string, args ...interface{}) + Panicln(args ...interface{}) + + // Leveled methods, from logrus + Debug(args ...interface{}) + Debugf(format string, args ...interface{}) + Debugln(args ...interface{}) + + Error(args ...interface{}) + Errorf(format string, args ...interface{}) + Errorln(args ...interface{}) + + Info(args ...interface{}) + Infof(format string, args ...interface{}) + Infoln(args ...interface{}) + + Warn(args ...interface{}) + Warnf(format string, args ...interface{}) + Warnln(args ...interface{}) +} + +// WithLogger creates a new context with provided logger. +func WithLogger(ctx Context, logger Logger) Context { + return WithValue(ctx, "logger", logger) +} + +// GetLoggerWithField returns a logger instance with the specified field key +// and value without affecting the context. Extra specified keys will be +// resolved from the context. +func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) +} + +// GetLoggerWithFields returns a logger instance with the specified fields +// without affecting the context. Extra specified keys will be resolved from +// the context. +func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { + // must convert from interface{} -> interface{} to string -> interface{} for logrus. + lfields := make(logrus.Fields, len(fields)) + for key, value := range fields { + lfields[fmt.Sprint(key)] = value + } + + return getLogrusLogger(ctx, keys...).WithFields(lfields) +} + +// GetLogger returns the logger from the current context, if present. 
If one +// or more keys are provided, they will be resolved on the context and +// included in the logger. While context.Value takes an interface, any key +// argument passed to GetLogger will be passed to fmt.Sprint when expanded as +// a logging key field. If context keys are integer constants, for example, +// it's recommended that a String method is implemented. +func GetLogger(ctx Context, keys ...interface{}) Logger { + return getLogrusLogger(ctx, keys...) +} + +// GetLogrusLogger returns the logrus logger for the context. If one or more keys +// are provided, they will be resolved on the context and included in the +// logger. Only use this function if specific logrus functionality is +// required. +func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry { + var logger *logrus.Entry + + // Get a logger, if it is present. + loggerInterface := ctx.Value("logger") + if loggerInterface != nil { + if lgr, ok := loggerInterface.(*logrus.Entry); ok { + logger = lgr + } + } + + if logger == nil { + fields := logrus.Fields{} + + // Fill in the instance id, if we have it. + instanceID := ctx.Value("instance.id") + if instanceID != nil { + fields["instance.id"] = instanceID + } + + fields["go.version"] = runtime.Version() + // If no logger is found, just return the standard logger. 
+ logger = logrus.StandardLogger().WithFields(fields) + } + + fields := logrus.Fields{} + for _, key := range keys { + v := ctx.Value(key) + if v != nil { + fields[fmt.Sprint(key)] = v + } + } + + return logger.WithFields(fields) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go new file mode 100644 index 000000000..af4f1351e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/trace.go @@ -0,0 +1,104 @@ +package context + +import ( + "runtime" + "time" + + "github.com/docker/distribution/uuid" +) + +// WithTrace allocates a traced timing span in a new context. This allows a +// caller to track the time between calling WithTrace and the returned done +// function. When the done function is called, a log message is emitted with a +// "trace.duration" field, corresponding to the elapased time and a +// "trace.func" field, corresponding to the function that called WithTrace. +// +// The logging keys "trace.id" and "trace.parent.id" are provided to implement +// dapper-like tracing. This function should be complemented with a WithSpan +// method that could be used for tracing distributed RPC calls. +// +// The main benefit of this function is to post-process log messages or +// intercept them in a hook to provide timing data. Trace ids and parent ids +// can also be linked to provide call tracing, if so required. +// +// Here is an example of the usage: +// +// func timedOperation(ctx Context) { +// ctx, done := WithTrace(ctx) +// defer done("this will be the log message") +// // ... function body ... +// } +// +// If the function ran for roughly 1s, such a usage would emit a log message +// as follows: +// +// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ... 
+// +// Notice that the function name is automatically resolved, along with the +// package and a trace id is emitted that can be linked with parent ids. +func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { + if ctx == nil { + ctx = Background() + } + + pc, file, line, _ := runtime.Caller(1) + f := runtime.FuncForPC(pc) + ctx = &traced{ + Context: ctx, + id: uuid.Generate().String(), + start: time.Now(), + parent: GetStringValue(ctx, "trace.id"), + fnname: f.Name(), + file: file, + line: line, + } + + return ctx, func(format string, a ...interface{}) { + GetLogger(ctx, + "trace.duration", + "trace.id", + "trace.parent.id", + "trace.func", + "trace.file", + "trace.line"). + Debugf(format, a...) + } +} + +// traced represents a context that is traced for function call timing. It +// also provides fast lookup for the various attributes that are available on +// the trace. +type traced struct { + Context + id string + parent string + start time.Time + fnname string + file string + line int +} + +func (ts *traced) Value(key interface{}) interface{} { + switch key { + case "trace.start": + return ts.start + case "trace.duration": + return time.Since(ts.start) + case "trace.id": + return ts.id + case "trace.parent.id": + if ts.parent == "" { + return nil // must return nil to signal no parent. + } + + return ts.parent + case "trace.func": + return ts.fnname + case "trace.file": + return ts.file + case "trace.line": + return ts.line + } + + return ts.Context.Value(key) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go b/Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go new file mode 100644 index 000000000..4b969fbb0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/trace_test.go @@ -0,0 +1,85 @@ +package context + +import ( + "runtime" + "testing" + "time" +) + +// TestWithTrace ensures that tracing has the expected values in the context. 
+func TestWithTrace(t *testing.T) { + pc, file, _, _ := runtime.Caller(0) // get current caller. + f := runtime.FuncForPC(pc) + + base := []valueTestCase{ + { + key: "trace.id", + notnilorempty: true, + }, + + { + key: "trace.file", + expected: file, + notnilorempty: true, + }, + { + key: "trace.line", + notnilorempty: true, + }, + { + key: "trace.start", + notnilorempty: true, + }, + } + + ctx, done := WithTrace(Background()) + defer done("this will be emitted at end of test") + + checkContextForValues(t, ctx, append(base, valueTestCase{ + key: "trace.func", + expected: f.Name(), + })) + + traced := func() { + parentID := ctx.Value("trace.id") // ensure the parent trace id is correct. + + pc, _, _, _ := runtime.Caller(0) // get current caller. + f := runtime.FuncForPC(pc) + ctx, done := WithTrace(ctx) + defer done("this should be subordinate to the other trace") + time.Sleep(time.Second) + checkContextForValues(t, ctx, append(base, valueTestCase{ + key: "trace.func", + expected: f.Name(), + }, valueTestCase{ + key: "trace.parent.id", + expected: parentID, + })) + } + traced() + + time.Sleep(time.Second) +} + +type valueTestCase struct { + key string + expected interface{} + notnilorempty bool // just check not empty/not nil +} + +func checkContextForValues(t *testing.T, ctx Context, values []valueTestCase) { + + for _, testcase := range values { + v := ctx.Value(testcase.key) + if testcase.notnilorempty { + if v == nil || v == "" { + t.Fatalf("value was nil or empty for %q: %#v", testcase.key, v) + } + continue + } + + if v != testcase.expected { + t.Fatalf("unexpected value for key %q: %v != %v", testcase.key, v, testcase.expected) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/util.go b/Godeps/_workspace/src/github.com/docker/distribution/context/util.go new file mode 100644 index 000000000..299edc004 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/util.go @@ -0,0 +1,32 @@ +package context + 
+import ( + "time" +) + +// Since looks up key, which should be a time.Time, and returns the duration +// since that time. If the key is not found, the value returned will be zero. +// This is helpful when inferring metrics related to context execution times. +func Since(ctx Context, key interface{}) time.Duration { + startedAtI := ctx.Value(key) + if startedAtI != nil { + if startedAt, ok := startedAtI.(time.Time); ok { + return time.Since(startedAt) + } + } + + return 0 +} + +// GetStringValue returns a string value from the context. The empty string +// will be returned if not found. +func GetStringValue(ctx Context, key interface{}) (value string) { + stringi := ctx.Value(key) + if stringi != nil { + if valuev, ok := stringi.(string); ok { + value = valuev + } + } + + return value +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/version.go b/Godeps/_workspace/src/github.com/docker/distribution/context/version.go new file mode 100644 index 000000000..746cda02e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/version.go @@ -0,0 +1,16 @@ +package context + +// WithVersion stores the application version in the context. The new context +// gets a logger to ensure log messages are marked with the application +// version. +func WithVersion(ctx Context, version string) Context { + ctx = WithValue(ctx, "version", version) + // push a new logger onto the stack + return WithLogger(ctx, GetLogger(ctx, "version")) +} + +// GetVersion returns the application version from the context. An empty +// string may be returned if the version was not set on the context. 
+func GetVersion(ctx Context) string { + return GetStringValue(ctx, "version") +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/context/version_test.go b/Godeps/_workspace/src/github.com/docker/distribution/context/version_test.go new file mode 100644 index 000000000..b81652691 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/context/version_test.go @@ -0,0 +1,19 @@ +package context + +import "testing" + +func TestVersionContext(t *testing.T) { + ctx := Background() + + if GetVersion(ctx) != "" { + t.Fatalf("context should not yet have a version") + } + + expected := "2.1-whatever" + ctx = WithVersion(ctx, expected) + version := GetVersion(ctx) + + if version != expected { + t.Fatalf("version was not set: %q != %q", version, expected) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go new file mode 100644 index 000000000..b3bb580d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/auth.go @@ -0,0 +1,144 @@ +// Package auth defines a standard interface for request access controllers. +// +// An access controller has a simple interface with a single `Authorized` +// method which checks that a given request is authorized to perform one or +// more actions on one or more resources. This method should return a non-nil +// error if the request is not authorized. +// +// An implementation registers its access controller by name with a constructor +// which accepts an options map for configuring the access controller. 
+// +// options := map[string]interface{}{"sillySecret": "whysosilly?"} +// accessController, _ := auth.GetAccessController("silly", options) +// +// This `accessController` can then be used in a request handler like so: +// +// func updateOrder(w http.ResponseWriter, r *http.Request) { +// orderNumber := r.FormValue("orderNumber") +// resource := auth.Resource{Type: "customerOrder", Name: orderNumber} +// access := auth.Access{Resource: resource, Action: "update"} +// +// if ctx, err := accessController.Authorized(ctx, access); err != nil { +// if challenge, ok := err.(auth.Challenge) { +// // Let the challenge write the response. +// challenge.SetHeaders(w) +// w.WriteHeader(http.StatusUnauthorized) +// return +// } else { +// // Some other error. +// } +// } +// } +// +package auth + +import ( + "fmt" + "net/http" + + "github.com/docker/distribution/context" +) + +// UserInfo carries information about +// an autenticated/authorized client. +type UserInfo struct { + Name string +} + +// Resource describes a resource by type and name. +type Resource struct { + Type string + Name string +} + +// Access describes a specific action that is +// requested or allowed for a given resource. +type Access struct { + Resource + Action string +} + +// Challenge is a special error type which is used for HTTP 401 Unauthorized +// responses and is able to write the response with WWW-Authenticate challenge +// header values based on the error. +type Challenge interface { + error + + // SetHeaders prepares the request to conduct a challenge response by + // adding the an HTTP challenge header on the response message. Callers + // are expected to set the appropriate HTTP status code (e.g. 401) + // themselves. + SetHeaders(w http.ResponseWriter) +} + +// AccessController controls access to registry resources based on a request +// and required access levels for a request. Implementations can support both +// complete denial and http authorization challenges. 
+type AccessController interface { + // Authorized returns a non-nil error if the context is granted access and + // returns a new authorized context. If one or more Access structs are + // provided, the requested access will be compared with what is available + // to the context. The given context will contain a "http.request" key with + // a `*http.Request` value. If the error is non-nil, access should always + // be denied. The error may be of type Challenge, in which case the caller + // may have the Challenge handle the request or choose what action to take + // based on the Challenge header or response status. The returned context + // object should have a "auth.user" value set to a UserInfo struct. + Authorized(ctx context.Context, access ...Access) (context.Context, error) +} + +// WithUser returns a context with the authorized user info. +func WithUser(ctx context.Context, user UserInfo) context.Context { + return userInfoContext{ + Context: ctx, + user: user, + } +} + +type userInfoContext struct { + context.Context + user UserInfo +} + +func (uic userInfoContext) Value(key interface{}) interface{} { + switch key { + case "auth.user": + return uic.user + case "auth.user.name": + return uic.user.Name + } + + return uic.Context.Value(key) +} + +// InitFunc is the type of an AccessController factory function and is used +// to register the constructor for different AccesController backends. +type InitFunc func(options map[string]interface{}) (AccessController, error) + +var accessControllers map[string]InitFunc + +func init() { + accessControllers = make(map[string]InitFunc) +} + +// Register is used to register an InitFunc for +// an AccessController backend with the given name. 
+func Register(name string, initFunc InitFunc) error { + if _, exists := accessControllers[name]; exists { + return fmt.Errorf("name already registered: %s", name) + } + + accessControllers[name] = initFunc + + return nil +} + +// GetAccessController constructs an AccessController +// with the given options using the named backend. +func GetAccessController(name string, options map[string]interface{}) (AccessController, error) { + if initFunc, exists := accessControllers[name]; exists { + return initFunc(options) + } + + return nil, fmt.Errorf("no access controller registered with name: %s", name) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go new file mode 100644 index 000000000..82d3556dc --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access.go @@ -0,0 +1,102 @@ +// Package htpasswd provides a simple authentication scheme that checks for the +// user credential hash in an htpasswd formatted file in a configuration-determined +// location. +// +// This authentication method MUST be used under TLS, as simple token-replay attack is possible. +package htpasswd + +import ( + "errors" + "fmt" + "net/http" + "os" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +var ( + // ErrInvalidCredential is returned when the auth token does not authenticate correctly. + ErrInvalidCredential = errors.New("invalid authorization credential") + + // ErrAuthenticationFailure returned when authentication failure to be presented to agent. 
+ ErrAuthenticationFailure = errors.New("authentication failured") +) + +type accessController struct { + realm string + htpasswd *htpasswd +} + +var _ auth.AccessController = &accessController{} + +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + realm, present := options["realm"] + if _, ok := realm.(string); !present || !ok { + return nil, fmt.Errorf(`"realm" must be set for htpasswd access controller`) + } + + path, present := options["path"] + if _, ok := path.(string); !present || !ok { + return nil, fmt.Errorf(`"path" must be set for htpasswd access controller`) + } + + f, err := os.Open(path.(string)) + if err != nil { + return nil, err + } + defer f.Close() + + h, err := newHTPasswd(f) + if err != nil { + return nil, err + } + + return &accessController{realm: realm.(string), htpasswd: h}, nil +} + +func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + username, password, ok := req.BasicAuth() + if !ok { + return nil, &challenge{ + realm: ac.realm, + err: ErrInvalidCredential, + } + } + + if err := ac.htpasswd.authenticateUser(username, password); err != nil { + context.GetLogger(ctx).Errorf("error authenticating user %q: %v", username, err) + return nil, &challenge{ + realm: ac.realm, + err: ErrAuthenticationFailure, + } + } + + return auth.WithUser(ctx, auth.UserInfo{Name: username}), nil +} + +// challenge implements the auth.Challenge interface. +type challenge struct { + realm string + err error +} + +var _ auth.Challenge = challenge{} + +// SetHeaders sets the basic challenge header on the response. 
+func (ch challenge) SetHeaders(w http.ResponseWriter) { + w.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", ch.realm)) +} + +func (ch challenge) Error() string { + return fmt.Sprintf("basic authentication challenge for realm %q: %s", ch.realm, ch.err) +} + +func init() { + auth.Register("htpasswd", auth.InitFunc(newAccessController)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go new file mode 100644 index 000000000..db0405475 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/access_test.go @@ -0,0 +1,122 @@ +package htpasswd + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +func TestBasicAccessController(t *testing.T) { + testRealm := "The-Shire" + testUsers := []string{"bilbo", "frodo", "MiShil", "DeokMan"} + testPasswords := []string{"baggins", "baggins", "새주", "공주님"} + testHtpasswdContent := `bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= + frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W + MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 + DeokMan:공주님` + + tempFile, err := ioutil.TempFile("", "htpasswd-test") + if err != nil { + t.Fatal("could not create temporary htpasswd file") + } + if _, err = tempFile.WriteString(testHtpasswdContent); err != nil { + t.Fatal("could not write temporary htpasswd file") + } + + options := map[string]interface{}{ + "realm": testRealm, + "path": tempFile.Name(), + } + ctx := context.Background() + + accessController, err := newAccessController(options) + if err != nil { + t.Fatal("error creating access controller") + } + + tempFile.Close() + + var userNumber = 0 + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := 
context.WithRequest(ctx, r) + authCtx, err := accessController.Authorized(ctx) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + err.SetHeaders(w) + w.WriteHeader(http.StatusUnauthorized) + return + default: + t.Fatalf("unexpected error authorizing request: %v", err) + } + } + + userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + if !ok { + t.Fatal("basic accessController did not set auth.user context") + } + + if userInfo.Name != testUsers[userNumber] { + t.Fatalf("expected user name %q, got %q", testUsers[userNumber], userInfo.Name) + } + + w.WriteHeader(http.StatusNoContent) + })) + + client := &http.Client{ + CheckRedirect: nil, + } + + req, _ := http.NewRequest("GET", server.URL, nil) + resp, err := client.Do(req) + + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected non-fail response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) + } + + nonbcrypt := map[string]struct{}{ + "bilbo": {}, + "DeokMan": {}, + } + + for i := 0; i < len(testUsers); i++ { + userNumber = i + req, err := http.NewRequest("GET", server.URL, nil) + if err != nil { + t.Fatalf("error allocating new request: %v", err) + } + + req.SetBasicAuth(testUsers[i], testPasswords[i]) + + resp, err = client.Do(req) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + if _, ok := nonbcrypt[testUsers[i]]; ok { + // these are not allowed. 
+ // Request should be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusUnauthorized, testUsers[i], testPasswords[i]) + } + } else { + // Request should be authorized + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected non-success response status: %v != %v for %s %s", resp.StatusCode, http.StatusNoContent, testUsers[i], testPasswords[i]) + } + } + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go new file mode 100644 index 000000000..494ad0a76 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd.go @@ -0,0 +1,80 @@ +package htpasswd + +import ( + "bufio" + "fmt" + "io" + "strings" + + "golang.org/x/crypto/bcrypt" +) + +// htpasswd holds a path to a system .htpasswd file and the machinery to parse +// it. Only bcrypt hash entries are supported. +type htpasswd struct { + entries map[string][]byte // maps username to password byte slice. +} + +// newHTPasswd parses the reader and returns an htpasswd or an error. +func newHTPasswd(rd io.Reader) (*htpasswd, error) { + entries, err := parseHTPasswd(rd) + if err != nil { + return nil, err + } + + return &htpasswd{entries: entries}, nil +} + +// AuthenticateUser checks a given user:password credential against the +// receiving HTPasswd's file. If the check passes, nil is returned. 
+func (htpasswd *htpasswd) authenticateUser(username string, password string) error { + credentials, ok := htpasswd.entries[username] + if !ok { + // timing attack paranoia + bcrypt.CompareHashAndPassword([]byte{}, []byte(password)) + + return ErrAuthenticationFailure + } + + err := bcrypt.CompareHashAndPassword([]byte(credentials), []byte(password)) + if err != nil { + return ErrAuthenticationFailure + } + + return nil +} + +// parseHTPasswd parses the contents of htpasswd. This will read all the +// entries in the file, whether or not they are needed. An error is returned +// if an syntax errors are encountered or if the reader fails. +func parseHTPasswd(rd io.Reader) (map[string][]byte, error) { + entries := map[string][]byte{} + scanner := bufio.NewScanner(rd) + var line int + for scanner.Scan() { + line++ // 1-based line numbering + t := strings.TrimSpace(scanner.Text()) + + if len(t) < 1 { + continue + } + + // lines that *begin* with a '#' are considered comments + if t[0] == '#' { + continue + } + + i := strings.Index(t, ":") + if i < 0 || i >= len(t) { + return nil, fmt.Errorf("htpasswd: invalid entry at line %d: %q", line, scanner.Text()) + } + + entries[t[:i]] = []byte(t[i+1:]) + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + return entries, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go new file mode 100644 index 000000000..309c359ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/htpasswd/htpasswd_test.go @@ -0,0 +1,85 @@ +package htpasswd + +import ( + "fmt" + "reflect" + "strings" + "testing" +) + +func TestParseHTPasswd(t *testing.T) { + + for _, tc := range []struct { + desc string + input string + err error + entries map[string][]byte + }{ + { + desc: "basic example", + input: ` +# This is a comment in a basic example. 
+bilbo:{SHA}5siv5c0SHx681xU6GiSx9ZQryqs= +frodo:$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W +MiShil:$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2 +DeokMan:공주님 +`, + entries: map[string][]byte{ + "bilbo": []byte("{SHA}5siv5c0SHx681xU6GiSx9ZQryqs="), + "frodo": []byte("$2y$05$926C3y10Quzn/LnqQH86VOEVh/18T6RnLaS.khre96jLNL/7e.K5W"), + "MiShil": []byte("$2y$05$0oHgwMehvoe8iAWS8I.7l.KoECXrwVaC16RPfaSCU5eVTFrATuMI2"), + "DeokMan": []byte("공주님"), + }, + }, + { + desc: "ensures comments are filtered", + input: ` +# asdf:asdf +`, + }, + { + desc: "ensure midline hash is not comment", + input: ` +asdf:as#df +`, + entries: map[string][]byte{ + "asdf": []byte("as#df"), + }, + }, + { + desc: "ensure midline hash is not comment", + input: ` +# A valid comment +valid:entry +asdf +`, + err: fmt.Errorf(`htpasswd: invalid entry at line 4: "asdf"`), + }, + } { + + entries, err := parseHTPasswd(strings.NewReader(tc.input)) + if err != tc.err { + if tc.err == nil { + t.Fatalf("%s: unexpected error: %v", tc.desc, err) + } else { + if err.Error() != tc.err.Error() { // use string equality here. 
+ t.Fatalf("%s: expected error not returned: %v != %v", tc.desc, err, tc.err) + } + } + } + + if tc.err != nil { + continue // don't test output + } + + // allow empty and nil to be equal + if tc.entries == nil { + tc.entries = map[string][]byte{} + } + + if !reflect.DeepEqual(entries, tc.entries) { + t.Fatalf("%s: entries not parsed correctly: %v != %v", tc.desc, entries, tc.entries) + } + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go new file mode 100644 index 000000000..2b801d946 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access.go @@ -0,0 +1,97 @@ +// Package silly provides a simple authentication scheme that checks for the +// existence of an Authorization header and issues access if is present and +// non-empty. +// +// This package is present as an example implementation of a minimal +// auth.AccessController and for testing. This is not suitable for any kind of +// production security. +package silly + +import ( + "fmt" + "net/http" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +// accessController provides a simple implementation of auth.AccessController +// that simply checks for a non-empty Authorization header. It is useful for +// demonstration and testing. 
+type accessController struct { + realm string + service string +} + +var _ auth.AccessController = &accessController{} + +func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + realm, present := options["realm"] + if _, ok := realm.(string); !present || !ok { + return nil, fmt.Errorf(`"realm" must be set for silly access controller`) + } + + service, present := options["service"] + if _, ok := service.(string); !present || !ok { + return nil, fmt.Errorf(`"service" must be set for silly access controller`) + } + + return &accessController{realm: realm.(string), service: service.(string)}, nil +} + +// Authorized simply checks for the existence of the authorization header, +// responding with a bearer challenge if it doesn't exist. +func (ac *accessController) Authorized(ctx context.Context, accessRecords ...auth.Access) (context.Context, error) { + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + if req.Header.Get("Authorization") == "" { + challenge := challenge{ + realm: ac.realm, + service: ac.service, + } + + if len(accessRecords) > 0 { + var scopes []string + for _, access := range accessRecords { + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action)) + } + challenge.scope = strings.Join(scopes, " ") + } + + return nil, &challenge + } + + return auth.WithUser(ctx, auth.UserInfo{Name: "silly"}), nil +} + +type challenge struct { + realm string + service string + scope string +} + +var _ auth.Challenge = challenge{} + +// SetHeaders sets a simple bearer challenge on the response. 
+func (ch challenge) SetHeaders(w http.ResponseWriter) { + header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service) + + if ch.scope != "" { + header = fmt.Sprintf("%s,scope=%q", header, ch.scope) + } + + w.Header().Set("WWW-Authenticate", header) +} + +func (ch challenge) Error() string { + return fmt.Sprintf("silly authentication challenge: %#v", ch) +} + +// init registers the silly auth backend. +func init() { + auth.Register("silly", auth.InitFunc(newAccessController)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go new file mode 100644 index 000000000..ff2155b18 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/silly/access_test.go @@ -0,0 +1,71 @@ +package silly + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" +) + +func TestSillyAccessController(t *testing.T) { + ac := &accessController{ + realm: "test-realm", + service: "test-service", + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := context.WithValue(nil, "http.request", r) + authCtx, err := ac.Authorized(ctx) + if err != nil { + switch err := err.(type) { + case auth.Challenge: + err.SetHeaders(w) + w.WriteHeader(http.StatusUnauthorized) + return + default: + t.Fatalf("unexpected error authorizing request: %v", err) + } + } + + userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + if !ok { + t.Fatal("silly accessController did not set auth.user context") + } + + if userInfo.Name != "silly" { + t.Fatalf("expected user name %q, got %q", "silly", userInfo.Name) + } + + w.WriteHeader(http.StatusNoContent) + })) + + resp, err := http.Get(server.URL) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer 
resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusUnauthorized) + } + + req, err := http.NewRequest("GET", server.URL, nil) + if err != nil { + t.Fatalf("unexpected error creating new request: %v", err) + } + req.Header.Set("Authorization", "seriously, anything") + + resp, err = http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("unexpected error during GET: %v", err) + } + defer resp.Body.Close() + + // Request should not be authorized + if resp.StatusCode != http.StatusNoContent { + t.Fatalf("unexpected response status: %v != %v", resp.StatusCode, http.StatusNoContent) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go new file mode 100644 index 000000000..5b1ff7caa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/accesscontroller.go @@ -0,0 +1,268 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "github.com/docker/libtrust" +) + +// accessSet maps a typed, named resource to +// a set of actions requested or authorized. +type accessSet map[auth.Resource]actionSet + +// newAccessSet constructs an accessSet from +// a variable number of auth.Access items. 
+func newAccessSet(accessItems ...auth.Access) accessSet { + accessSet := make(accessSet, len(accessItems)) + + for _, access := range accessItems { + resource := auth.Resource{ + Type: access.Type, + Name: access.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + set.add(access.Action) + } + + return accessSet +} + +// contains returns whether or not the given access is in this accessSet. +func (s accessSet) contains(access auth.Access) bool { + actionSet, ok := s[access.Resource] + if ok { + return actionSet.contains(access.Action) + } + + return false +} + +// scopeParam returns a collection of scopes which can +// be used for a WWW-Authenticate challenge parameter. +// See https://tools.ietf.org/html/rfc6750#section-3 +func (s accessSet) scopeParam() string { + scopes := make([]string, 0, len(s)) + + for resource, actionSet := range s { + actions := strings.Join(actionSet.keys(), ",") + scopes = append(scopes, fmt.Sprintf("%s:%s:%s", resource.Type, resource.Name, actions)) + } + + return strings.Join(scopes, " ") +} + +// Errors used and exported by this package. +var ( + ErrInsufficientScope = errors.New("insufficient scope") + ErrTokenRequired = errors.New("authorization token required") +) + +// authChallenge implements the auth.Challenge interface. +type authChallenge struct { + err error + realm string + service string + accessSet accessSet +} + +var _ auth.Challenge = authChallenge{} + +// Error returns the internal error string for this authChallenge. +func (ac authChallenge) Error() string { + return ac.err.Error() +} + +// Status returns the HTTP Response Status Code for this authChallenge. +func (ac authChallenge) Status() int { + return http.StatusUnauthorized +} + +// challengeParams constructs the value to be used in +// the WWW-Authenticate response challenge header. 
+// See https://tools.ietf.org/html/rfc6750#section-3 +func (ac authChallenge) challengeParams() string { + str := fmt.Sprintf("Bearer realm=%q,service=%q", ac.realm, ac.service) + + if scope := ac.accessSet.scopeParam(); scope != "" { + str = fmt.Sprintf("%s,scope=%q", str, scope) + } + + if ac.err == ErrInvalidToken || ac.err == ErrMalformedToken { + str = fmt.Sprintf("%s,error=%q", str, "invalid_token") + } else if ac.err == ErrInsufficientScope { + str = fmt.Sprintf("%s,error=%q", str, "insufficient_scope") + } + + return str +} + +// SetChallenge sets the WWW-Authenticate value for the response. +func (ac authChallenge) SetHeaders(w http.ResponseWriter) { + w.Header().Add("WWW-Authenticate", ac.challengeParams()) +} + +// accessController implements the auth.AccessController interface. +type accessController struct { + realm string + issuer string + service string + rootCerts *x509.CertPool + trustedKeys map[string]libtrust.PublicKey +} + +// tokenAccessOptions is a convenience type for handling +// options to the contstructor of an accessController. +type tokenAccessOptions struct { + realm string + issuer string + service string + rootCertBundle string +} + +// checkOptions gathers the necessary options +// for an accessController from the given map. +func checkOptions(options map[string]interface{}) (tokenAccessOptions, error) { + var opts tokenAccessOptions + + keys := []string{"realm", "issuer", "service", "rootcertbundle"} + vals := make([]string, 0, len(keys)) + for _, key := range keys { + val, ok := options[key].(string) + if !ok { + return opts, fmt.Errorf("token auth requires a valid option string: %q", key) + } + vals = append(vals, val) + } + + opts.realm, opts.issuer, opts.service, opts.rootCertBundle = vals[0], vals[1], vals[2], vals[3] + + return opts, nil +} + +// newAccessController creates an accessController using the given options. 
+func newAccessController(options map[string]interface{}) (auth.AccessController, error) { + config, err := checkOptions(options) + if err != nil { + return nil, err + } + + fp, err := os.Open(config.rootCertBundle) + if err != nil { + return nil, fmt.Errorf("unable to open token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + defer fp.Close() + + rawCertBundle, err := ioutil.ReadAll(fp) + if err != nil { + return nil, fmt.Errorf("unable to read token auth root certificate bundle file %q: %s", config.rootCertBundle, err) + } + + var rootCerts []*x509.Certificate + pemBlock, rawCertBundle := pem.Decode(rawCertBundle) + for pemBlock != nil { + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to parse token auth root certificate: %s", err) + } + + rootCerts = append(rootCerts, cert) + + pemBlock, rawCertBundle = pem.Decode(rawCertBundle) + } + + if len(rootCerts) == 0 { + return nil, errors.New("token auth requires at least one token signing root certificate") + } + + rootPool := x509.NewCertPool() + trustedKeys := make(map[string]libtrust.PublicKey, len(rootCerts)) + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + pubKey, err := libtrust.FromCryptoPublicKey(crypto.PublicKey(rootCert.PublicKey)) + if err != nil { + return nil, fmt.Errorf("unable to get public key from token auth root certificate: %s", err) + } + trustedKeys[pubKey.KeyID()] = pubKey + } + + return &accessController{ + realm: config.realm, + issuer: config.issuer, + service: config.service, + rootCerts: rootPool, + trustedKeys: trustedKeys, + }, nil +} + +// Authorized handles checking whether the given request is authorized +// for actions on resources described by the given access items. 
+func (ac *accessController) Authorized(ctx context.Context, accessItems ...auth.Access) (context.Context, error) { + challenge := &authChallenge{ + realm: ac.realm, + service: ac.service, + accessSet: newAccessSet(accessItems...), + } + + req, err := context.GetRequest(ctx) + if err != nil { + return nil, err + } + + parts := strings.Split(req.Header.Get("Authorization"), " ") + + if len(parts) != 2 || strings.ToLower(parts[0]) != "bearer" { + challenge.err = ErrTokenRequired + return nil, challenge + } + + rawToken := parts[1] + + token, err := NewToken(rawToken) + if err != nil { + challenge.err = err + return nil, challenge + } + + verifyOpts := VerifyOptions{ + TrustedIssuers: []string{ac.issuer}, + AcceptedAudiences: []string{ac.service}, + Roots: ac.rootCerts, + TrustedKeys: ac.trustedKeys, + } + + if err = token.Verify(verifyOpts); err != nil { + challenge.err = err + return nil, challenge + } + + accessSet := token.accessSet() + for _, access := range accessItems { + if !accessSet.contains(access) { + challenge.err = ErrInsufficientScope + return nil, challenge + } + } + + return auth.WithUser(ctx, auth.UserInfo{Name: token.Claims.Subject}), nil +} + +// init handles registering the token auth backend. +func init() { + auth.Register("token", auth.InitFunc(newAccessController)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go new file mode 100644 index 000000000..1d04f104c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/stringset.go @@ -0,0 +1,35 @@ +package token + +// StringSet is a useful type for looking up strings. +type stringSet map[string]struct{} + +// NewStringSet creates a new StringSet with the given strings. +func newStringSet(keys ...string) stringSet { + ss := make(stringSet, len(keys)) + ss.add(keys...) 
+ return ss +} + +// Add inserts the given keys into this StringSet. +func (ss stringSet) add(keys ...string) { + for _, key := range keys { + ss[key] = struct{}{} + } +} + +// Contains returns whether the given key is in this StringSet. +func (ss stringSet) contains(key string) bool { + _, ok := ss[key] + return ok +} + +// Keys returns a slice of all keys in this StringSet. +func (ss stringSet) keys() []string { + keys := make([]string, 0, len(ss)) + + for key := range ss { + keys = append(keys, key) + } + + return keys +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go new file mode 100644 index 000000000..166816eea --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token.go @@ -0,0 +1,343 @@ +package token + +import ( + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" + + log "github.com/Sirupsen/logrus" + "github.com/docker/libtrust" + + "github.com/docker/distribution/registry/auth" +) + +const ( + // TokenSeparator is the value which separates the header, claims, and + // signature in the compact serialization of a JSON Web Token. + TokenSeparator = "." +) + +// Errors used by token parsing and verification. +var ( + ErrMalformedToken = errors.New("malformed token") + ErrInvalidToken = errors.New("invalid token") +) + +// ResourceActions stores allowed actions on a named and typed resource. +type ResourceActions struct { + Type string `json:"type"` + Name string `json:"name"` + Actions []string `json:"actions"` +} + +// ClaimSet describes the main section of a JSON Web Token. 
+type ClaimSet struct { + // Public claims + Issuer string `json:"iss"` + Subject string `json:"sub"` + Audience string `json:"aud"` + Expiration int64 `json:"exp"` + NotBefore int64 `json:"nbf"` + IssuedAt int64 `json:"iat"` + JWTID string `json:"jti"` + + // Private claims + Access []*ResourceActions `json:"access"` +} + +// Header describes the header section of a JSON Web Token. +type Header struct { + Type string `json:"typ"` + SigningAlg string `json:"alg"` + KeyID string `json:"kid,omitempty"` + X5c []string `json:"x5c,omitempty"` + RawJWK json.RawMessage `json:"jwk,omitempty"` +} + +// Token describes a JSON Web Token. +type Token struct { + Raw string + Header *Header + Claims *ClaimSet + Signature []byte +} + +// VerifyOptions is used to specify +// options when verifying a JSON Web Token. +type VerifyOptions struct { + TrustedIssuers []string + AcceptedAudiences []string + Roots *x509.CertPool + TrustedKeys map[string]libtrust.PublicKey +} + +// NewToken parses the given raw token string +// and constructs an unverified JSON Web Token. 
+func NewToken(rawToken string) (*Token, error) { + parts := strings.Split(rawToken, TokenSeparator) + if len(parts) != 3 { + return nil, ErrMalformedToken + } + + var ( + rawHeader, rawClaims = parts[0], parts[1] + headerJSON, claimsJSON []byte + err error + ) + + defer func() { + if err != nil { + log.Errorf("error while unmarshalling raw token: %s", err) + } + }() + + if headerJSON, err = joseBase64UrlDecode(rawHeader); err != nil { + err = fmt.Errorf("unable to decode header: %s", err) + return nil, ErrMalformedToken + } + + if claimsJSON, err = joseBase64UrlDecode(rawClaims); err != nil { + err = fmt.Errorf("unable to decode claims: %s", err) + return nil, ErrMalformedToken + } + + token := new(Token) + token.Header = new(Header) + token.Claims = new(ClaimSet) + + token.Raw = strings.Join(parts[:2], TokenSeparator) + if token.Signature, err = joseBase64UrlDecode(parts[2]); err != nil { + err = fmt.Errorf("unable to decode signature: %s", err) + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(headerJSON, token.Header); err != nil { + return nil, ErrMalformedToken + } + + if err = json.Unmarshal(claimsJSON, token.Claims); err != nil { + return nil, ErrMalformedToken + } + + return token, nil +} + +// Verify attempts to verify this token using the given options. +// Returns a nil error if the token is valid. +func (t *Token) Verify(verifyOpts VerifyOptions) error { + // Verify that the Issuer claim is a trusted authority. + if !contains(verifyOpts.TrustedIssuers, t.Claims.Issuer) { + log.Errorf("token from untrusted issuer: %q", t.Claims.Issuer) + return ErrInvalidToken + } + + // Verify that the Audience claim is allowed. + if !contains(verifyOpts.AcceptedAudiences, t.Claims.Audience) { + log.Errorf("token intended for another audience: %q", t.Claims.Audience) + return ErrInvalidToken + } + + // Verify that the token is currently usable and not expired. 
+ currentUnixTime := time.Now().Unix() + if !(t.Claims.NotBefore <= currentUnixTime && currentUnixTime <= t.Claims.Expiration) { + log.Errorf("token not to be used before %d or after %d - currently %d", t.Claims.NotBefore, t.Claims.Expiration, currentUnixTime) + return ErrInvalidToken + } + + // Verify the token signature. + if len(t.Signature) == 0 { + log.Error("token has no signature") + return ErrInvalidToken + } + + // Verify that the signing key is trusted. + signingKey, err := t.VerifySigningKey(verifyOpts) + if err != nil { + log.Error(err) + return ErrInvalidToken + } + + // Finally, verify the signature of the token using the key which signed it. + if err := signingKey.Verify(strings.NewReader(t.Raw), t.Header.SigningAlg, t.Signature); err != nil { + log.Errorf("unable to verify token signature: %s", err) + return ErrInvalidToken + } + + return nil +} + +// VerifySigningKey attempts to get the key which was used to sign this token. +// The token header should contain either of these 3 fields: +// `x5c` - The x509 certificate chain for the signing key. Needs to be +// verified. +// `jwk` - The JSON Web Key representation of the signing key. +// May contain its own `x5c` field which needs to be verified. +// `kid` - The unique identifier for the key. This library interprets it +// as a libtrust fingerprint. The key itself can be looked up in +// the trustedKeys field of the given verify options. +// Each of these methods are tried in that order of preference until the +// signing key is found or an error is returned. +func (t *Token) VerifySigningKey(verifyOpts VerifyOptions) (signingKey libtrust.PublicKey, err error) { + // First attempt to get an x509 certificate chain from the header. 
+ var ( + x5c = t.Header.X5c + rawJWK = t.Header.RawJWK + keyID = t.Header.KeyID + ) + + switch { + case len(x5c) > 0: + signingKey, err = parseAndVerifyCertChain(x5c, verifyOpts.Roots) + case len(rawJWK) > 0: + signingKey, err = parseAndVerifyRawJWK(rawJWK, verifyOpts) + case len(keyID) > 0: + signingKey = verifyOpts.TrustedKeys[keyID] + if signingKey == nil { + err = fmt.Errorf("token signed by untrusted key with ID: %q", keyID) + } + default: + err = errors.New("unable to get token signing key") + } + + return +} + +func parseAndVerifyCertChain(x5c []string, roots *x509.CertPool) (leafKey libtrust.PublicKey, err error) { + if len(x5c) == 0 { + return nil, errors.New("empty x509 certificate chain") + } + + // Ensure the first element is encoded correctly. + leafCertDer, err := base64.StdEncoding.DecodeString(x5c[0]) + if err != nil { + return nil, fmt.Errorf("unable to decode leaf certificate: %s", err) + } + + // And that it is a valid x509 certificate. + leafCert, err := x509.ParseCertificate(leafCertDer) + if err != nil { + return nil, fmt.Errorf("unable to parse leaf certificate: %s", err) + } + + // The rest of the certificate chain are intermediate certificates. + intermediates := x509.NewCertPool() + for i := 1; i < len(x5c); i++ { + intermediateCertDer, err := base64.StdEncoding.DecodeString(x5c[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode intermediate certificate: %s", err) + } + + intermediateCert, err := x509.ParseCertificate(intermediateCertDer) + if err != nil { + return nil, fmt.Errorf("unable to parse intermediate certificate: %s", err) + } + + intermediates.AddCert(intermediateCert) + } + + verifyOpts := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: roots, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + } + + // TODO: this call returns certificate chains which we ignore for now, but + // we should check them for revocations if we have the ability later. 
+ if _, err = leafCert.Verify(verifyOpts); err != nil { + return nil, fmt.Errorf("unable to verify certificate chain: %s", err) + } + + // Get the public key from the leaf certificate. + leafCryptoKey, ok := leafCert.PublicKey.(crypto.PublicKey) + if !ok { + return nil, errors.New("unable to get leaf cert public key value") + } + + leafKey, err = libtrust.FromCryptoPublicKey(leafCryptoKey) + if err != nil { + return nil, fmt.Errorf("unable to make libtrust public key from leaf certificate: %s", err) + } + + return +} + +func parseAndVerifyRawJWK(rawJWK json.RawMessage, verifyOpts VerifyOptions) (pubKey libtrust.PublicKey, err error) { + pubKey, err = libtrust.UnmarshalPublicKeyJWK([]byte(rawJWK)) + if err != nil { + return nil, fmt.Errorf("unable to decode raw JWK value: %s", err) + } + + // Check to see if the key includes a certificate chain. + x5cVal, ok := pubKey.GetExtendedField("x5c").([]interface{}) + if !ok { + // The JWK should be one of the trusted root keys. + if _, trusted := verifyOpts.TrustedKeys[pubKey.KeyID()]; !trusted { + return nil, errors.New("untrusted JWK with no certificate chain") + } + + // The JWK is one of the trusted keys. + return + } + + // Ensure each item in the chain is of the correct type. + x5c := make([]string, len(x5cVal)) + for i, val := range x5cVal { + certString, ok := val.(string) + if !ok || len(certString) == 0 { + return nil, errors.New("malformed certificate chain") + } + x5c[i] = certString + } + + // Ensure that the x509 certificate chain can + // be verified up to one of our trusted roots. + leafKey, err := parseAndVerifyCertChain(x5c, verifyOpts.Roots) + if err != nil { + return nil, fmt.Errorf("could not verify JWK certificate chain: %s", err) + } + + // Verify that the public key in the leaf cert *is* the signing key. 
+ if pubKey.KeyID() != leafKey.KeyID() { + return nil, errors.New("leaf certificate public key ID does not match JWK key ID") + } + + return +} + +// accessSet returns a set of actions available for the resource +// actions listed in the `access` section of this token. +func (t *Token) accessSet() accessSet { + if t.Claims == nil { + return nil + } + + accessSet := make(accessSet, len(t.Claims.Access)) + + for _, resourceActions := range t.Claims.Access { + resource := auth.Resource{ + Type: resourceActions.Type, + Name: resourceActions.Name, + } + + set, exists := accessSet[resource] + if !exists { + set = newActionSet() + accessSet[resource] = set + } + + for _, action := range resourceActions.Actions { + set.add(action) + } + } + + return accessSet +} + +func (t *Token) compactRaw() string { + return fmt.Sprintf("%s.%s", t.Raw, joseBase64UrlEncode(t.Signature)) +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go new file mode 100644 index 000000000..119aa738a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/token_test.go @@ -0,0 +1,386 @@ +package token + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/distribution/context" + "github.com/docker/distribution/registry/auth" + "github.com/docker/libtrust" +) + +func makeRootKeys(numKeys int) ([]libtrust.PrivateKey, error) { + keys := make([]libtrust.PrivateKey, 0, numKeys) + + for i := 0; i < numKeys; i++ { + key, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, err + } + keys = append(keys, key) + } + + return keys, nil +} + +func makeSigningKeyWithChain(rootKey libtrust.PrivateKey, depth int) (libtrust.PrivateKey, error) { + if depth == 0 { + // Don't 
need to build a chain. + return rootKey, nil + } + + var ( + x5c = make([]string, depth) + parentKey = rootKey + key libtrust.PrivateKey + cert *x509.Certificate + err error + ) + + for depth > 0 { + if key, err = libtrust.GenerateECP256PrivateKey(); err != nil { + return nil, err + } + + if cert, err = libtrust.GenerateCACert(parentKey, key); err != nil { + return nil, err + } + + depth-- + x5c[depth] = base64.StdEncoding.EncodeToString(cert.Raw) + parentKey = key + } + + key.AddExtendedField("x5c", x5c) + + return key, nil +} + +func makeRootCerts(rootKeys []libtrust.PrivateKey) ([]*x509.Certificate, error) { + certs := make([]*x509.Certificate, 0, len(rootKeys)) + + for _, key := range rootKeys { + cert, err := libtrust.GenerateCACert(key, key) + if err != nil { + return nil, err + } + certs = append(certs, cert) + } + + return certs, nil +} + +func makeTrustedKeyMap(rootKeys []libtrust.PrivateKey) map[string]libtrust.PublicKey { + trustedKeys := make(map[string]libtrust.PublicKey, len(rootKeys)) + + for _, key := range rootKeys { + trustedKeys[key.KeyID()] = key.PublicKey() + } + + return trustedKeys +} + +func makeTestToken(issuer, audience string, access []*ResourceActions, rootKey libtrust.PrivateKey, depth int) (*Token, error) { + signingKey, err := makeSigningKeyWithChain(rootKey, depth) + if err != nil { + return nil, fmt.Errorf("unable to amke signing key with chain: %s", err) + } + + rawJWK, err := signingKey.PublicKey().MarshalJSON() + if err != nil { + return nil, fmt.Errorf("unable to marshal signing key to JSON: %s", err) + } + + joseHeader := &Header{ + Type: "JWT", + SigningAlg: "ES256", + RawJWK: json.RawMessage(rawJWK), + } + + now := time.Now() + + randomBytes := make([]byte, 15) + if _, err = rand.Read(randomBytes); err != nil { + return nil, fmt.Errorf("unable to read random bytes for jwt id: %s", err) + } + + claimSet := &ClaimSet{ + Issuer: issuer, + Subject: "foo", + Audience: audience, + Expiration: now.Add(5 * time.Minute).Unix(), + 
NotBefore: now.Unix(), + IssuedAt: now.Unix(), + JWTID: base64.URLEncoding.EncodeToString(randomBytes), + Access: access, + } + + var joseHeaderBytes, claimSetBytes []byte + + if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil { + return nil, fmt.Errorf("unable to marshal jose header: %s", err) + } + if claimSetBytes, err = json.Marshal(claimSet); err != nil { + return nil, fmt.Errorf("unable to marshal claim set: %s", err) + } + + encodedJoseHeader := joseBase64UrlEncode(joseHeaderBytes) + encodedClaimSet := joseBase64UrlEncode(claimSetBytes) + encodingToSign := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet) + + var signatureBytes []byte + if signatureBytes, _, err = signingKey.Sign(strings.NewReader(encodingToSign), crypto.SHA256); err != nil { + return nil, fmt.Errorf("unable to sign jwt payload: %s", err) + } + + signature := joseBase64UrlEncode(signatureBytes) + tokenString := fmt.Sprintf("%s.%s", encodingToSign, signature) + + return NewToken(tokenString) +} + +// This test makes 4 tokens with a varying number of intermediate +// certificates ranging from no intermediate chain to a length of 3 +// intermediates. 
+func TestTokenVerify(t *testing.T) { + var ( + numTokens = 4 + issuer = "test-issuer" + audience = "test-audience" + access = []*ResourceActions{ + { + Type: "repository", + Name: "foo/bar", + Actions: []string{"pull", "push"}, + }, + } + ) + + rootKeys, err := makeRootKeys(numTokens) + if err != nil { + t.Fatal(err) + } + + rootCerts, err := makeRootCerts(rootKeys) + if err != nil { + t.Fatal(err) + } + + rootPool := x509.NewCertPool() + for _, rootCert := range rootCerts { + rootPool.AddCert(rootCert) + } + + trustedKeys := makeTrustedKeyMap(rootKeys) + + tokens := make([]*Token, 0, numTokens) + + for i := 0; i < numTokens; i++ { + token, err := makeTestToken(issuer, audience, access, rootKeys[i], i) + if err != nil { + t.Fatal(err) + } + tokens = append(tokens, token) + } + + verifyOps := VerifyOptions{ + TrustedIssuers: []string{issuer}, + AcceptedAudiences: []string{audience}, + Roots: rootPool, + TrustedKeys: trustedKeys, + } + + for _, token := range tokens { + if err := token.Verify(verifyOps); err != nil { + t.Fatal(err) + } + } +} + +func writeTempRootCerts(rootKeys []libtrust.PrivateKey) (filename string, err error) { + rootCerts, err := makeRootCerts(rootKeys) + if err != nil { + return "", err + } + + tempFile, err := ioutil.TempFile("", "rootCertBundle") + if err != nil { + return "", err + } + defer tempFile.Close() + + for _, cert := range rootCerts { + if err = pem.Encode(tempFile, &pem.Block{ + Type: "CERTIFICATE", + Bytes: cert.Raw, + }); err != nil { + os.Remove(tempFile.Name()) + return "", err + } + } + + return tempFile.Name(), nil +} + +// TestAccessController tests complete integration of the token auth package. +// It starts by mocking the options for a token auth accessController which +// it creates. 
It then tries a few mock requests: +// - don't supply a token; should error with challenge +// - supply an invalid token; should error with challenge +// - supply a token with insufficient access; should error with challenge +// - supply a valid token; should not error +func TestAccessController(t *testing.T) { + // Make 2 keys; only the first is to be a trusted root key. + rootKeys, err := makeRootKeys(2) + if err != nil { + t.Fatal(err) + } + + rootCertBundleFilename, err := writeTempRootCerts(rootKeys[:1]) + if err != nil { + t.Fatal(err) + } + defer os.Remove(rootCertBundleFilename) + + realm := "https://auth.example.com/token/" + issuer := "test-issuer.example.com" + service := "test-service.example.com" + + options := map[string]interface{}{ + "realm": realm, + "issuer": issuer, + "service": service, + "rootcertbundle": rootCertBundleFilename, + } + + accessController, err := newAccessController(options) + if err != nil { + t.Fatal(err) + } + + // 1. Make a mock http.Request with no token. + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + + testAccess := auth.Access{ + Resource: auth.Resource{ + Type: "foo", + Name: "bar", + }, + Action: "baz", + } + + ctx := context.WithValue(nil, "http.request", req) + authCtx, err := accessController.Authorized(ctx, testAccess) + challenge, ok := err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrTokenRequired.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 2. Supply an invalid token. + token, err := makeTestToken( + issuer, service, + []*ResourceActions{{ + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{testAccess.Action}, + }}, + rootKeys[1], 1, // Everything is valid except the key which signed it. 
+ ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + challenge, ok = err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrInvalidToken.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrTokenRequired) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 3. Supply a token with insufficient access. + token, err = makeTestToken( + issuer, service, + []*ResourceActions{}, // No access specified. + rootKeys[0], 1, + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + challenge, ok = err.(auth.Challenge) + if !ok { + t.Fatal("accessController did not return a challenge") + } + + if challenge.Error() != ErrInsufficientScope.Error() { + t.Fatalf("accessControler did not get expected error - got %s - expected %s", challenge, ErrInsufficientScope) + } + + if authCtx != nil { + t.Fatalf("expected nil auth context but got %s", authCtx) + } + + // 4. Supply the token we need, or deserve, or whatever. 
+ token, err = makeTestToken( + issuer, service, + []*ResourceActions{{ + Type: testAccess.Type, + Name: testAccess.Name, + Actions: []string{testAccess.Action}, + }}, + rootKeys[0], 1, + ) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.compactRaw())) + + authCtx, err = accessController.Authorized(ctx, testAccess) + if err != nil { + t.Fatalf("accessController returned unexpected error: %s", err) + } + + userInfo, ok := authCtx.Value("auth.user").(auth.UserInfo) + if !ok { + t.Fatal("token accessController did not set auth.user context") + } + + if userInfo.Name != "foo" { + t.Fatalf("expected user name %q, got %q", "foo", userInfo.Name) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go new file mode 100644 index 000000000..d7f95be42 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/registry/auth/token/util.go @@ -0,0 +1,58 @@ +package token + +import ( + "encoding/base64" + "errors" + "strings" +) + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters omitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +// actionSet is a special type of stringSet. +type actionSet struct { + stringSet +} + +func newActionSet(actions ...string) actionSet { + return actionSet{newStringSet(actions...)} +} + +// Contains calls StringSet.Contains() for +// either "*" or the given action string. +func (s actionSet) contains(action string) bool { + return s.stringSet.contains("*") || s.stringSet.contains(action) +} + +// contains returns true if q is found in ss. +func contains(ss []string, q string) bool { + for _, s := range ss { + if s == q { + return true + } + } + + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go new file mode 100644 index 000000000..d433ccaf5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid.go @@ -0,0 +1,126 @@ +// Package uuid provides simple UUID generation. Only version 4 style UUIDs +// can be generated. +// +// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs. +package uuid + +import ( + "crypto/rand" + "fmt" + "io" + "os" + "syscall" + "time" +) + +const ( + // Bits is the number of bits in a UUID + Bits = 128 + + // Size is the number of bytes in a UUID + Size = Bits / 8 + + format = "%08x-%04x-%04x-%04x-%012x" +) + +var ( + // ErrUUIDInvalid indicates a parsed string is not a valid uuid. + ErrUUIDInvalid = fmt.Errorf("invalid uuid") + + // Loggerf can be used to override the default logging destination. Such + // log messages in this library should be logged at warning or higher. + Loggerf = func(format string, args ...interface{}) {} +) + +// UUID represents a UUID value. 
UUIDs can be compared and set to other values +// and accessed by byte. +type UUID [Size]byte + +// Generate creates a new, version 4 uuid. +func Generate() (u UUID) { + const ( + // ensures we backoff for less than 450ms total. Use the following to + // select new value, in units of 10ms: + // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 + maxretries = 9 + backoff = time.Millisecond * 10 + ) + + var ( + totalBackoff time.Duration + count int + retries int + ) + + for { + // This should never block but the read may fail. Because of this, + // we just try to read the random number generator until we get + // something. This is a very rare condition but may happen. + b := time.Duration(retries) * backoff + time.Sleep(b) + totalBackoff += b + + n, err := io.ReadFull(rand.Reader, u[count:]) + if err != nil { + if retryOnError(err) && retries < maxretries { + count += n + retries++ + Loggerf("error generating version 4 uuid, retrying: %v", err) + continue + } + + // Any other errors represent a system problem. What did someone + // do to /dev/urandom? + panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) + } + + break + } + + u[6] = (u[6] & 0x0f) | 0x40 // set version byte + u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} + + return u +} + +// Parse attempts to extract a uuid from the string or returns an error. +func Parse(s string) (u UUID, err error) { + if len(s) != 36 { + return UUID{}, ErrUUIDInvalid + } + + // create stack addresses for each section of the uuid. 
+ p := make([][]byte, 5) + + if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { + return u, err + } + + copy(u[0:4], p[0]) + copy(u[4:6], p[1]) + copy(u[6:8], p[2]) + copy(u[8:10], p[3]) + copy(u[10:16], p[4]) + + return +} + +func (u UUID) String() string { + return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// retryOnError tries to detect whether or not retrying would be fruitful. +func retryOnError(err error) bool { + switch err := err.(type) { + case *os.PathError: + return retryOnError(err.Err) // unpack the target error + case syscall.Errno: + if err == syscall.EPERM { + // EPERM represents an entropy pool exhaustion, a condition under + // which we backoff and retry. + return true + } + } + + return false +} diff --git a/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go new file mode 100644 index 000000000..09c3a7bb4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/distribution/uuid/uuid_test.go @@ -0,0 +1,48 @@ +package uuid + +import ( + "testing" +) + +const iterations = 1000 + +func TestUUID4Generation(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + if u[6]&0xf0 != 0x40 { + t.Fatalf("version byte not correctly set: %v, %08b %08b", u, u[6], u[6]&0xf0) + } + + if u[8]&0xc0 != 0x80 { + t.Fatalf("top order 8th byte not correctly set: %v, %b", u, u[8]) + } + } +} + +func TestParseAndEquality(t *testing.T) { + for i := 0; i < iterations; i++ { + u := Generate() + + parsed, err := Parse(u.String()) + if err != nil { + t.Fatalf("error parsing uuid %v: %v", u, err) + } + + if parsed != u { + t.Fatalf("parsing round trip failed: %v != %v", parsed, u) + } + } + + for _, c := range []string{ + "bad", + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // correct length, incorrect format + " 20cc7775-2671-43c7-8742-51d1cfa23258", // leading space + "20cc7775-2671-43c7-8742-51d1cfa23258 ", // 
trailing space + "00000000-0000-0000-0000-x00000000000", // out of range character + } { + if _, err := Parse(c); err == nil { + t.Fatalf("parsing %q should have failed", c) + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md new file mode 100644 index 000000000..05be0f8ab --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to libtrust + +Want to hack on libtrust? Awesome! Here are instructions to get you +started. + +libtrust is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE b/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE new file mode 100644 index 000000000..27448585a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS b/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS new file mode 100644 index 000000000..9768175fe --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/MAINTAINERS @@ -0,0 +1,3 @@ +Solomon Hykes +Josh Hawn (github: jlhawn) +Derek McGowan (github: dmcgowan) diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/README.md b/Godeps/_workspace/src/github.com/docker/libtrust/README.md new file mode 100644 index 000000000..8e7db3818 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/README.md @@ -0,0 +1,18 @@ +# libtrust + +Libtrust is library for managing authentication and authorization using public key cryptography. + +Authentication is handled using the identity attached to the public key. +Libtrust provides multiple methods to prove possession of the private key associated with an identity. + - TLS x509 certificates + - Signature verification + - Key Challenge + +Authorization and access control is managed through a distributed trust graph. +Trust servers are used as the authorities of the trust graph and allow caching portions of the graph for faster access. + +## Copyright and license + +Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license. +Docs released under Creative commons. 
+ diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go b/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go new file mode 100644 index 000000000..3dcca33cb --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/certificates.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "io/ioutil" + "math/big" + "net" + "time" +) + +type certTemplateInfo struct { + commonName string + domains []string + ipAddresses []net.IP + isCA bool + clientAuth bool + serverAuth bool +} + +func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { + // Generate a certificate template which is valid from the past week to + // 10 years from now. The usage of the certificate depends on the + // specified fields in the given certTempInfo object. + var ( + keyUsage x509.KeyUsage + extKeyUsage []x509.ExtKeyUsage + ) + + if info.isCA { + keyUsage = x509.KeyUsageCertSign + } + + if info.clientAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) + } + + if info.serverAuth { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + return &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: info.commonName, + }, + NotBefore: time.Now().Add(-time.Hour * 24 * 7), + NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), + DNSNames: info.domains, + IPAddresses: info.ipAddresses, + IsCA: info.isCA, + KeyUsage: keyUsage, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: info.isCA, + } +} + +func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { + pubCertTemplate := generateCertTemplate(subInfo) + privCertTemplate := generateCertTemplate(issInfo) + + certDER, err := x509.CreateCertificate( + rand.Reader, pubCertTemplate, privCertTemplate, + pub.CryptoPublicKey(), priv.CryptoPrivateKey(), + ) + if err != nil { + return nil, 
fmt.Errorf("failed to create certificate: %s", err) + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate: %s", err) + } + + return +} + +// GenerateSelfSignedServerCert creates a self-signed certificate for the +// given key which is to be used for TLS servers with the given domains and +// IP addresses. +func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + domains: domains, + ipAddresses: ipAddresses, + serverAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateSelfSignedClientCert creates a self-signed certificate for the +// given key which is to be used for TLS clients. +func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { + info := &certTemplateInfo{ + commonName: key.KeyID(), + clientAuth: true, + } + + return generateCert(key.PublicKey(), key, info, info) +} + +// GenerateCACert creates a certificate which can be used as a trusted +// certificate authority. +func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { + subjectInfo := &certTemplateInfo{ + commonName: trustedKey.KeyID(), + isCA: true, + } + issuerInfo := &certTemplateInfo{ + commonName: signer.KeyID(), + } + + return generateCert(trustedKey, signer, subjectInfo, issuerInfo) +} + +// GenerateCACertPool creates a certificate authority pool to be used for a +// TLS configuration. 
Any self-signed certificates issued by the specified +// trusted keys will be verified during a TLS handshake +func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { + certPool := x509.NewCertPool() + + for _, trustedKey := range trustedKeys { + cert, err := GenerateCACert(signer, trustedKey) + if err != nil { + return nil, fmt.Errorf("failed to generate CA certificate: %s", err) + } + + certPool.AddCert(cert) + } + + return certPool, nil +} + +// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". +func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + certificates := []*x509.Certificate{} + var block *pem.Block + block, b = pem.Decode(b) + for ; block != nil; block, b = pem.Decode(b) { + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certificates = append(certificates, cert) + } else { + return nil, fmt.Errorf("invalid pem block type: %s", block.Type) + } + } + + return certificates, nil +} + +// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded +// containing one or more certificates. The expected pem type is "CERTIFICATE". 
+func LoadCertificatePool(filename string) (*x509.CertPool, error) { + certs, err := LoadCertificateBundle(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go new file mode 100644 index 000000000..c111f3531 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/certificates_test.go @@ -0,0 +1,111 @@ +package libtrust + +import ( + "encoding/pem" + "io/ioutil" + "net" + "os" + "path" + "testing" +) + +func TestGenerateCertificates(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + _, err = GenerateSelfSignedServerCert(key, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")}) + if err != nil { + t.Fatal(err) + } + + _, err = GenerateSelfSignedClientCert(key) + if err != nil { + t.Fatal(err) + } +} + +func TestGenerateCACertPool(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + caKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + caKey2, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + _, err = GenerateCACertPool(key, []PublicKey{caKey1.PublicKey(), caKey2.PublicKey()}) + if err != nil { + t.Fatal(err) + } +} + +func TestLoadCertificates(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + caKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + caKey2, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + cert1, err := GenerateCACert(caKey1, key) + if err != nil { + t.Fatal(err) + } + cert2, err := GenerateCACert(caKey2, key) + if err != nil { + t.Fatal(err) + } + + d, err := ioutil.TempDir("/tmp", "cert-test") + if err != nil { + t.Fatal(err) + } + caFile 
:= path.Join(d, "ca.pem") + f, err := os.OpenFile(caFile, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + t.Fatal(err) + } + + err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert1.Raw}) + if err != nil { + t.Fatal(err) + } + err = pem.Encode(f, &pem.Block{Type: "CERTIFICATE", Bytes: cert2.Raw}) + if err != nil { + t.Fatal(err) + } + f.Close() + + certs, err := LoadCertificateBundle(caFile) + if err != nil { + t.Fatal(err) + } + if len(certs) != 2 { + t.Fatalf("Wrong number of certs received, expected: %d, received %d", 2, len(certs)) + } + + pool, err := LoadCertificatePool(caFile) + if err != nil { + t.Fatal(err) + } + + if len(pool.Subjects()) != 2 { + t.Fatalf("Invalid certificate pool") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/doc.go b/Godeps/_workspace/src/github.com/docker/libtrust/doc.go new file mode 100644 index 000000000..ec5d2159c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/doc.go @@ -0,0 +1,9 @@ +/* +Package libtrust provides an interface for managing authentication and +authorization using public key cryptography. Authentication is handled +using the identity attached to the public key and verified through TLS +x509 certificates, a key challenge, or signature. Authorization and +access control is managed through a trust graph distributed between +both remote trust servers and locally cached and managed data. 
+*/ +package libtrust diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go new file mode 100644 index 000000000..00bbe4b3c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key.go @@ -0,0 +1,428 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" +) + +/* + * EC DSA PUBLIC KEY + */ + +// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital +// signature algorithms. +type ecPublicKey struct { + *ecdsa.PublicKey + curveName string + signatureAlgorithm *signatureAlgorithm + extended map[string]interface{} +} + +func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { + curve := cryptoPublicKey.Curve + + switch { + case curve == elliptic.P256(): + return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil + case curve == elliptic.P384(): + return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil + case curve == elliptic.P521(): + return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil + default: + return nil, errors.New("unsupported elliptic curve") + } +} + +// KeyType returns the key type for elliptic curve keys, i.e., "EC". +func (k *ecPublicKey) KeyType() string { + return "EC" +} + +// CurveName returns the elliptic curve identifier. +// Possible values are "P-256", "P-384", and "P-521". +func (k *ecPublicKey) CurveName() string { + return k.curveName +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *ecPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *ecPublicKey) String() string { + return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this +// PublicKey. 
The alg parameter should identify the digital signature +// algorithm which was used to produce the signature and should be supported +// by this public key. Returns a nil error if the signature is valid. +func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // For EC keys there is only one supported signature algorithm depending + // on the curve parameters. + if k.signatureAlgorithm.HeaderParam() != alg { + return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) + } + + // signature is the concatenation of (r, s), base64Url encoded. + sigLength := len(signature) + expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) + if sigLength != expectedOctetLength { + return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) + } + + rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] + r := new(big.Int).SetBytes(rBytes) + s := new(big.Int).SetBytes(sBytes) + + hasher := k.signatureAlgorithm.HashID().New() + _, err := io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + if !ecdsa.Verify(k.PublicKey, hash, r, s) { + return errors.New("invalid signature") + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. 
The type +// is either *rsa.PublicKey or *ecdsa.PublicKey +func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { + return k.PublicKey +} + +func (k *ecPublicKey) toMap() map[string]interface{} { + jwk := make(map[string]interface{}) + for k, v := range k.extended { + jwk[k] = v + } + jwk["kty"] = k.KeyType() + jwk["kid"] = k.KeyID() + jwk["crv"] = k.CurveName() + + xBytes := k.X.Bytes() + yBytes := k.Y.Bytes() + octetLength := (k.Params().BitSize + 7) >> 3 + // MUST include leading zeros in the output so that x, y are each + // *octetLength* bytes long. + xBuf := make([]byte, octetLength-len(xBytes), octetLength) + yBuf := make([]byte, octetLength-len(yBytes), octetLength) + xBuf = append(xBuf, xBytes...) + yBuf = append(yBuf, yBytes...) + + jwk["x"] = joseBase64UrlEncode(xBuf) + jwk["y"] = joseBase64UrlEncode(yBuf) + + return jwk +} + +// MarshalJSON serializes this Public Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Public Key to DER-encoded PKIX format. +func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) + } + k.extended["kid"] = k.KeyID() // For display purposes. + return createPemBlock("PUBLIC KEY", derBytes, k.extended) +} + +func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { + k.extended[field] = value +} + +func (k *ecPublicKey) GetExtendedField(field string) interface{} { + v, ok := k.extended[field] + if !ok { + return nil + } + return v +} + +func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { + // JWK key type (kty) has already been determined to be "EC". + // Need to extract 'crv', 'x', 'y', and 'kid' and check for + // consistency. + + // Get the curve identifier value. 
+ crv, err := stringFromMap(jwk, "crv") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) + } + + var ( + curve elliptic.Curve + sigAlg *signatureAlgorithm + ) + + switch { + case crv == "P-256": + curve = elliptic.P256() + sigAlg = es256 + case crv == "P-384": + curve = elliptic.P384() + sigAlg = es384 + case crv == "P-521": + curve = elliptic.P521() + sigAlg = es512 + default: + return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) + } + + // Get the X and Y coordinates for the public key point. + xB64Url, err := stringFromMap(jwk, "x") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + x, err := parseECCoordinate(xB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) + } + + yB64Url, err := stringFromMap(jwk, "y") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + y, err := parseECCoordinate(yB64Url, curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) + } + + key := &ecPublicKey{ + PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, + curveName: crv, signatureAlgorithm: sigAlg, + } + + // Key ID is optional too, but if it exists, it should match the key. + _, ok := jwk["kid"] + if ok { + kid, err := stringFromMap(jwk, "kid") + if err != nil { + return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) + } + if kid != key.KeyID() { + return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) + } + } + + key.extended = jwk + + return key, nil +} + +/* + * EC DSA PRIVATE KEY + */ + +// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature +// algorithms. 
+type ecPrivateKey struct {
+	ecPublicKey
+	*ecdsa.PrivateKey
+}
+
+func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) {
+	publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil
+}
+
+// PublicKey returns the Public Key data associated with this Private Key.
+func (k *ecPrivateKey) PublicKey() PublicKey {
+	return &k.ecPublicKey
+}
+
+func (k *ecPrivateKey) String() string {
+	return fmt.Sprintf("EC Private Key <%s>", k.KeyID())
+}
+
+// Sign signs the data read from the io.Reader using a signature algorithm supported
+// by the elliptic curve private key. If the specified hashing algorithm is
+// supported by this key, that hash function is used to generate the signature
+// otherwise the default hashing algorithm for this key is used. Returns
+// the signature and the name of the JWK signature algorithm used, e.g.,
+// "ES256", "ES384", "ES512".
+func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
+	// Generate a signature of the data using the internal alg.
+	// The given hashId is only a suggestion, and since EC keys only support
+	// one signature/hash algorithm given the curve name, we disregard it for
+	// the elliptic curve JWK signature implementation.
+	hasher := k.signatureAlgorithm.HashID().New()
+	_, err = io.Copy(hasher, data)
+	if err != nil {
+		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
+	}
+	hash := hasher.Sum(nil)
+
+	r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash)
+	if err != nil {
+		return nil, "", fmt.Errorf("error producing signature: %s", err)
+	}
+	rBytes, sBytes := r.Bytes(), s.Bytes()
+	octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3
+	// MUST include leading zeros in the output
+	rBuf := make([]byte, octetLength-len(rBytes), octetLength)
+	sBuf := make([]byte, octetLength-len(sBytes), octetLength)
+
+	rBuf = append(rBuf, rBytes...)
+	sBuf = append(sBuf, sBytes...)
+
+	signature = append(rBuf, sBuf...)
+	alg = k.signatureAlgorithm.HeaderParam()
+
+	return
+}
+
+// CryptoPrivateKey returns the internal object which can be used as a
+// crypto.PrivateKey for use with other standard library operations. The type
+// is either *rsa.PrivateKey or *ecdsa.PrivateKey
+func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
+	return k.PrivateKey
+}
+
+func (k *ecPrivateKey) toMap() map[string]interface{} {
+	jwk := k.ecPublicKey.toMap()
+
+	dBytes := k.D.Bytes()
+	// The length of this octet string MUST be ceiling(log-base-2(n)/8)
+	// octets (where n is the order of the curve). This is because the private
+	// key d must be in the interval [1, n-1] so the bitlength of d should be
+	// no larger than the bitlength of n-1. The easiest way to find the octet
+	// length is to take bitlength(n-1), add 7 to force a carry, and shift this
+	// bit sequence right by 3, which is essentially dividing by 8 and adding
+	// 1 if there is any remainder. Thus, the private key value d should be
+	// output to (bitlength(n-1)+7)>>3 octets.
+	n := k.ecPublicKey.Params().N
+	octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3
+	// Create a buffer with the necessary zero-padding.
+ dBuf := make([]byte, octetLength-len(dBytes), octetLength) + dBuf = append(dBuf, dBytes...) + + jwk["d"] = joseBase64UrlEncode(dBuf) + + return jwk +} + +// MarshalJSON serializes this Private Key using the JWK JSON serialization format for +// elliptic curve keys. +func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { + return json.Marshal(k.toMap()) +} + +// PEMBlock serializes this Private Key to DER-encoded PKIX format. +func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { + derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) + if err != nil { + return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err) + } + k.extended["keyID"] = k.KeyID() // For display purposes. + return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) +} + +func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { + dB64Url, err := stringFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key: %s", err) + } + + // JWK key type (kty) has already been determined to be "EC". + // Need to extract the public key information, then extract the private + // key value 'd'. + publicKey, err := ecPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + d, err := parseECPrivateParam(dB64Url, publicKey.Curve) + if err != nil { + return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) + } + + key := &ecPrivateKey{ + ecPublicKey: *publicKey, + PrivateKey: &ecdsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: d, + }, + } + + return key, nil +} + +/* + * Key Generation Functions. + */ + +func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { + k = new(ecPrivateKey) + k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return nil, err + } + + k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. 
+func GenerateECP256PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P256())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-256 key: %s", err)
+	}
+
+	k.curveName = "P-256"
+	k.signatureAlgorithm = es256
+
+	return k, nil
+}
+
+// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384.
+func GenerateECP384PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P384())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-384 key: %s", err)
+	}
+
+	k.curveName = "P-384"
+	k.signatureAlgorithm = es384
+
+	return k, nil
+}
+
+// GenerateECP521PrivateKey generates a key pair using elliptic curve P-521.
+func GenerateECP521PrivateKey() (PrivateKey, error) {
+	k, err := generateECPrivateKey(elliptic.P521())
+	if err != nil {
+		return nil, fmt.Errorf("error generating EC P-521 key: %s", err)
+	}
+
+	k.curveName = "P-521"
+	k.signatureAlgorithm = es512
+
+	return k, nil
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go
new file mode 100644
index 000000000..26ac38149
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/ec_key_test.go
@@ -0,0 +1,157 @@
+package libtrust
+
+import (
+	"bytes"
+	"encoding/json"
+	"testing"
+)
+
+func generateECTestKeys(t *testing.T) []PrivateKey {
+	p256Key, err := GenerateECP256PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p384Key, err := GenerateECP384PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	p521Key, err := GenerateECP521PrivateKey()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return []PrivateKey{p256Key, p384Key, p521Key}
+}
+
+func TestECKeys(t *testing.T) {
+	ecKeys := generateECTestKeys(t)
+
+	for _, ecKey := range ecKeys {
+		if ecKey.KeyType() != "EC" {
+			t.Fatalf("key type must be %q, instead got %q", "EC", ecKey.KeyType())
+		}
+	}
+}
+
+func TestECSignVerify(t *testing.T) {
+	ecKeys := generateECTestKeys(t)
+
+ message := "Hello, World!" + data := bytes.NewReader([]byte(message)) + + sigAlgs := []*signatureAlgorithm{es256, es384, es512} + + for i, ecKey := range ecKeys { + sigAlg := sigAlgs[i] + + t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, ecKey.KeyID()) + + data.Seek(0, 0) // Reset the byte reader + + // Sign + sig, alg, err := ecKey.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + + // Verify + err = ecKey.Verify(data, alg, sig) + if err != nil { + t.Fatal(err) + } + } +} + +func TestMarshalUnmarshalECKeys(t *testing.T) { + ecKeys := generateECTestKeys(t) + data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test.")) + sigAlgs := []*signatureAlgorithm{es256, es384, es512} + + for i, ecKey := range ecKeys { + sigAlg := sigAlgs[i] + privateJWKJSON, err := json.MarshalIndent(ecKey, "", " ") + if err != nil { + t.Fatal(err) + } + + publicJWKJSON, err := json.MarshalIndent(ecKey.PublicKey(), "", " ") + if err != nil { + t.Fatal(err) + } + + t.Logf("JWK Private Key: %s", string(privateJWKJSON)) + t.Logf("JWK Public Key: %s", string(publicJWKJSON)) + + privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) + if err != nil { + t.Fatal(err) + } + + pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) + if err != nil { + t.Fatal(err) + } + + // Ensure we can sign/verify a message with the unmarshalled keys. 
+ data.Seek(0, 0) // Reset the byte reader + signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + err = pubKey2.Verify(data, alg, signature) + if err != nil { + t.Fatal(err) + } + } +} + +func TestFromCryptoECKeys(t *testing.T) { + ecKeys := generateECTestKeys(t) + + for _, ecKey := range ecKeys { + cryptoPrivateKey := ecKey.CryptoPrivateKey() + cryptoPublicKey := ecKey.CryptoPublicKey() + + pubKey, err := FromCryptoPublicKey(cryptoPublicKey) + if err != nil { + t.Fatal(err) + } + + if pubKey.KeyID() != ecKey.KeyID() { + t.Fatal("public key key ID mismatch") + } + + privKey, err := FromCryptoPrivateKey(cryptoPrivateKey) + if err != nil { + t.Fatal(err) + } + + if privKey.KeyID() != ecKey.KeyID() { + t.Fatal("public key key ID mismatch") + } + } +} + +func TestExtendedFields(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + key.AddExtendedField("test", "foobar") + val := key.GetExtendedField("test") + + gotVal, ok := val.(string) + if !ok { + t.Fatalf("value is not a string") + } else if gotVal != val { + t.Fatalf("value %q is not equal to %q", gotVal, val) + } + +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/filter.go b/Godeps/_workspace/src/github.com/docker/libtrust/filter.go new file mode 100644 index 000000000..5b2b4fca6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/filter.go @@ -0,0 +1,50 @@ +package libtrust + +import ( + "path/filepath" +) + +// FilterByHosts filters the list of PublicKeys to only those which contain a +// 'hosts' pattern which matches the given host. If *includeEmpty* is true, +// then keys which do not specify any hosts are also returned. 
+func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { + filtered := make([]PublicKey, 0, len(keys)) + + for _, pubKey := range keys { + var hosts []string + switch v := pubKey.GetExtendedField("hosts").(type) { + case []string: + hosts = v + case []interface{}: + for _, value := range v { + h, ok := value.(string) + if !ok { + continue + } + hosts = append(hosts, h) + } + } + + if len(hosts) == 0 { + if includeEmpty { + filtered = append(filtered, pubKey) + } + continue + } + + // Check if any hosts match pattern + for _, hostPattern := range hosts { + match, err := filepath.Match(hostPattern, host) + if err != nil { + return nil, err + } + + if match { + filtered = append(filtered, pubKey) + continue + } + } + } + + return filtered, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go new file mode 100644 index 000000000..997e554c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/filter_test.go @@ -0,0 +1,81 @@ +package libtrust + +import ( + "testing" +) + +func compareKeySlices(t *testing.T, sliceA, sliceB []PublicKey) { + if len(sliceA) != len(sliceB) { + t.Fatalf("slice size %d, expected %d", len(sliceA), len(sliceB)) + } + + for i, itemA := range sliceA { + itemB := sliceB[i] + if itemA != itemB { + t.Fatalf("slice index %d not equal: %#v != %#v", i, itemA, itemB) + } + } +} + +func TestFilter(t *testing.T) { + keys := make([]PublicKey, 0, 8) + + // Create 8 keys and add host entries. + for i := 0; i < cap(keys); i++ { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + // we use both []interface{} and []string here because jwt uses + // []interface{} format, while PEM uses []string + switch { + case i == 0: + // Don't add entries for this key, key 0. + break + case i%2 == 0: + // Should catch keys 2, 4, and 6. 
+ key.AddExtendedField("hosts", []interface{}{"*.even.example.com"}) + case i == 7: + // Should catch only the last key, and make it match any hostname. + key.AddExtendedField("hosts", []string{"*"}) + default: + // should catch keys 1, 3, 5. + key.AddExtendedField("hosts", []string{"*.example.com"}) + } + + keys = append(keys, key) + } + + // Should match 2 keys, the empty one, and the one that matches all hosts. + matchedKeys, err := FilterByHosts(keys, "foo.bar.com", true) + if err != nil { + t.Fatal(err) + } + expectedMatch := []PublicKey{keys[0], keys[7]} + compareKeySlices(t, expectedMatch, matchedKeys) + + // Should match 1 key, the one that matches any host. + matchedKeys, err = FilterByHosts(keys, "foo.bar.com", false) + if err != nil { + t.Fatal(err) + } + expectedMatch = []PublicKey{keys[7]} + compareKeySlices(t, expectedMatch, matchedKeys) + + // Should match keys that end in "example.com", and the key that matches anything. + matchedKeys, err = FilterByHosts(keys, "foo.example.com", false) + if err != nil { + t.Fatal(err) + } + expectedMatch = []PublicKey{keys[1], keys[3], keys[5], keys[7]} + compareKeySlices(t, expectedMatch, matchedKeys) + + // Should match all of the keys except the empty key. 
+	matchedKeys, err = FilterByHosts(keys, "foo.even.example.com", false)
+	if err != nil {
+		t.Fatal(err)
+	}
+	expectedMatch = keys[1:]
+	compareKeySlices(t, expectedMatch, matchedKeys)
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/hash.go b/Godeps/_workspace/src/github.com/docker/libtrust/hash.go
new file mode 100644
index 000000000..a2df787dd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/docker/libtrust/hash.go
@@ -0,0 +1,56 @@
+package libtrust
+
+import (
+	"crypto"
+	_ "crypto/sha256" // Register SHA224 and SHA256
+	_ "crypto/sha512" // Register SHA384 and SHA512
+	"fmt"
+)
+
+type signatureAlgorithm struct {
+	algHeaderParam string
+	hashID         crypto.Hash
+}
+
+func (h *signatureAlgorithm) HeaderParam() string {
+	return h.algHeaderParam
+}
+
+func (h *signatureAlgorithm) HashID() crypto.Hash {
+	return h.hashID
+}
+
+var (
+	rs256 = &signatureAlgorithm{"RS256", crypto.SHA256}
+	rs384 = &signatureAlgorithm{"RS384", crypto.SHA384}
+	rs512 = &signatureAlgorithm{"RS512", crypto.SHA512}
+	es256 = &signatureAlgorithm{"ES256", crypto.SHA256}
+	es384 = &signatureAlgorithm{"ES384", crypto.SHA384}
+	es512 = &signatureAlgorithm{"ES512", crypto.SHA512}
+)
+
+func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) {
+	switch {
+	case alg == "RS256":
+		return rs256, nil
+	case alg == "RS384":
+		return rs384, nil
+	case alg == "RS512":
+		return rs512, nil
+	default:
+		return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg)
+	}
+}
+
+func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm {
+	switch {
+	case hashID == crypto.SHA512:
+		return rs512
+	case hashID == crypto.SHA384:
+		return rs384
+	case hashID == crypto.SHA256:
+		fallthrough
+	default:
+		return rs256
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go
new file mode 100644
index 000000000..cb2ca9a76
--- /dev/null
+++ 
b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign.go @@ -0,0 +1,657 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "sort" + "time" + "unicode" +) + +var ( + // ErrInvalidSignContent is used when the content to be signed is invalid. + ErrInvalidSignContent = errors.New("invalid sign content") + + // ErrInvalidJSONContent is used when invalid json is encountered. + ErrInvalidJSONContent = errors.New("invalid json content") + + // ErrMissingSignatureKey is used when the specified signature key + // does not exist in the JSON content. + ErrMissingSignatureKey = errors.New("missing signature key") +) + +type jsHeader struct { + JWK PublicKey `json:"jwk,omitempty"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c,omitempty"` +} + +type jsSignature struct { + Header jsHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected,omitempty"` +} + +type jsSignaturesSorted []jsSignature + +func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } +func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } + +func (jsbkid jsSignaturesSorted) Less(i, j int) bool { + ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() + si, sj := jsbkid[i].Signature, jsbkid[j].Signature + + if ki == kj { + return si < sj + } + + return ki < kj +} + +type signKey struct { + PrivateKey + Chain []*x509.Certificate +} + +// JSONSignature represents a signature of a json object. +type JSONSignature struct { + payload string + signatures []jsSignature + indent string + formatLength int + formatTail []byte +} + +func newJSONSignature() *JSONSignature { + return &JSONSignature{ + signatures: make([]jsSignature, 0, 1), + } +} + +// Payload returns the encoded payload of the signature. 
This +// payload should not be signed directly +func (js *JSONSignature) Payload() ([]byte, error) { + return joseBase64UrlDecode(js.payload) +} + +func (js *JSONSignature) protectedHeader() (string, error) { + protected := map[string]interface{}{ + "formatLength": js.formatLength, + "formatTail": joseBase64UrlEncode(js.formatTail), + "time": time.Now().UTC().Format(time.RFC3339), + } + protectedBytes, err := json.Marshal(protected) + if err != nil { + return "", err + } + + return joseBase64UrlEncode(protectedBytes), nil +} + +func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { + buf := make([]byte, len(js.payload)+len(protectedHeader)+1) + copy(buf, protectedHeader) + buf[len(protectedHeader)] = '.' + copy(buf[len(protectedHeader)+1:], js.payload) + return buf, nil +} + +// Sign adds a signature using the given private key. +func (js *JSONSignature) Sign(key PrivateKey) error { + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + js.signatures = append(js.signatures, jsSignature{ + Header: jsHeader{ + JWK: key.PublicKey(), + Algorithm: algorithm, + }, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// SignWithChain adds a signature using the given private key +// and setting the x509 chain. The public key of the first element +// in the chain must be the public key corresponding with the sign key. 
+func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { + // Ensure key.Chain[0] is public key for key + //key.Chain.PublicKey + //key.PublicKey().CryptoPublicKey() + + // Verify chain + protected, err := js.protectedHeader() + if err != nil { + return err + } + signBytes, err := js.signBytes(protected) + if err != nil { + return err + } + sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) + if err != nil { + return err + } + + header := jsHeader{ + Chain: make([]string, len(chain)), + Algorithm: algorithm, + } + + for i, cert := range chain { + header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) + } + + js.signatures = append(js.signatures, jsSignature{ + Header: header, + Signature: joseBase64UrlEncode(sigBytes), + Protected: protected, + }) + + return nil +} + +// Verify verifies all the signatures and returns the list of +// public keys used to sign. Any x509 chains are not checked. +func (js *JSONSignature) Verify() ([]PublicKey, error) { + keys := make([]PublicKey, len(js.signatures)) + for i, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + } else if signature.Header.JWK != nil { + publicKey = signature.Header.JWK + } else { + return nil, errors.New("missing public key") + } + + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + + keys[i] = publicKey + } + return 
keys, nil +} + +// VerifyChains verifies all the signatures and the chains associated +// with each signature and returns the list of verified chains. +// Signatures without an x509 chain are not checked. +func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { + chains := make([][]*x509.Certificate, 0, len(js.signatures)) + for _, signature := range js.signatures { + signBytes, err := js.signBytes(signature.Protected) + if err != nil { + return nil, err + } + var publicKey PublicKey + if len(signature.Header.Chain) > 0 { + certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) + if err != nil { + return nil, err + } + cert, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + publicKey, err = FromCryptoPublicKey(cert.PublicKey) + if err != nil { + return nil, err + } + intermediates := x509.NewCertPool() + if len(signature.Header.Chain) > 1 { + intermediateChain := signature.Header.Chain[1:] + for i := range intermediateChain { + certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) + if err != nil { + return nil, err + } + intermediate, err := x509.ParseCertificate(certBytes) + if err != nil { + return nil, err + } + intermediates.AddCert(intermediate) + } + } + + verifyOptions := x509.VerifyOptions{ + Intermediates: intermediates, + Roots: ca, + } + + verifiedChains, err := cert.Verify(verifyOptions) + if err != nil { + return nil, err + } + chains = append(chains, verifiedChains...) 
+ + sigBytes, err := joseBase64UrlDecode(signature.Signature) + if err != nil { + return nil, err + } + + err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) + if err != nil { + return nil, err + } + } + + } + return chains, nil +} + +// JWS returns JSON serialized JWS according to +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 +func (js *JSONSignature) JWS() ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("missing signature") + } + + sort.Sort(jsSignaturesSorted(js.signatures)) + + jsonMap := map[string]interface{}{ + "payload": js.payload, + "signatures": js.signatures, + } + + return json.MarshalIndent(jsonMap, "", " ") +} + +func notSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +func detectJSONIndent(jsonContent []byte) (indent string) { + if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { + quoteIndex := bytes.IndexRune(jsonContent[1:], '"') + if quoteIndex > 0 { + indent = string(jsonContent[2 : quoteIndex+1]) + } + } + return +} + +type jsParsedHeader struct { + JWK json.RawMessage `json:"jwk"` + Algorithm string `json:"alg"` + Chain []string `json:"x5c"` +} + +type jsParsedSignature struct { + Header jsParsedHeader `json:"header"` + Signature string `json:"signature"` + Protected string `json:"protected"` +} + +// ParseJWS parses a JWS serialized JSON object into a Json Signature. 
+func ParseJWS(content []byte) (*JSONSignature, error) { + type jsParsed struct { + Payload string `json:"payload"` + Signatures []jsParsedSignature `json:"signatures"` + } + parsed := &jsParsed{} + err := json.Unmarshal(content, parsed) + if err != nil { + return nil, err + } + if len(parsed.Signatures) == 0 { + return nil, errors.New("missing signatures") + } + payload, err := joseBase64UrlDecode(parsed.Payload) + if err != nil { + return nil, err + } + + js, err := NewJSONSignature(payload) + if err != nil { + return nil, err + } + js.signatures = make([]jsSignature, len(parsed.Signatures)) + for i, signature := range parsed.Signatures { + header := jsHeader{ + Algorithm: signature.Header.Algorithm, + } + if signature.Header.Chain != nil { + header.Chain = signature.Header.Chain + } + if signature.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) + if err != nil { + return nil, err + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signature.Signature, + Protected: signature.Protected, + } + } + + return js, nil +} + +// NewJSONSignature returns a new unsigned JWS from a json byte array. +// JSONSignature will need to be signed before serializing or storing. +// Optionally, one or more signatures can be provided as byte buffers, +// containing serialized JWS signatures, to assemble a fully signed JWS +// package. It is the callers responsibility to ensure uniqueness of the +// provided signatures. 
+func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { + var dataMap map[string]interface{} + err := json.Unmarshal(content, &dataMap) + if err != nil { + return nil, err + } + + js := newJSONSignature() + js.indent = detectJSONIndent(content) + + js.payload = joseBase64UrlEncode(content) + + // Find trailing } and whitespace, put in protected header + closeIndex := bytes.LastIndexFunc(content, notSpace) + if content[closeIndex] != '}' { + return nil, ErrInvalidJSONContent + } + lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) + if content[lastRuneIndex] == ',' { + return nil, ErrInvalidJSONContent + } + js.formatLength = lastRuneIndex + 1 + js.formatTail = content[js.formatLength:] + + if len(signatures) > 0 { + for _, signature := range signatures { + var parsedJSig jsParsedSignature + + if err := json.Unmarshal(signature, &parsedJSig); err != nil { + return nil, err + } + + // TODO(stevvooe): A lot of the code below is repeated in + // ParseJWS. It will require more refactoring to fix that. + jsig := jsSignature{ + Header: jsHeader{ + Algorithm: parsedJSig.Header.Algorithm, + }, + Signature: parsedJSig.Signature, + Protected: parsedJSig.Protected, + } + + if parsedJSig.Header.Chain != nil { + jsig.Header.Chain = parsedJSig.Header.Chain + } + + if parsedJSig.Header.JWK != nil { + publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) + if err != nil { + return nil, err + } + jsig.Header.JWK = publicKey + } + + js.signatures = append(js.signatures, jsig) + } + } + + return js, nil +} + +// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or +// struct. JWS will need to be signed before serializing or storing. 
+func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { + switch content.(type) { + case map[string]interface{}: + case struct{}: + default: + return nil, errors.New("invalid data type") + } + + js := newJSONSignature() + js.indent = " " + + payload, err := json.MarshalIndent(content, "", js.indent) + if err != nil { + return nil, err + } + js.payload = joseBase64UrlEncode(payload) + + // Remove '\n}' from formatted section, put in protected header + js.formatLength = len(payload) - 2 + js.formatTail = payload[js.formatLength:] + + return js, nil +} + +func readIntFromMap(key string, m map[string]interface{}) (int, bool) { + value, ok := m[key] + if !ok { + return 0, false + } + switch v := value.(type) { + case int: + return v, true + case float64: + return int(v), true + default: + return 0, false + } +} + +func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { + value, ok := m[key] + if !ok { + return "", false + } + v, ok = value.(string) + return +} + +// ParsePrettySignature parses a formatted signature into a +// JSON signature. If the signatures are missing the format information +// an error is thrown. The formatted signature must be created by +// the same method as format signature. 
+func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { + var contentMap map[string]json.RawMessage + err := json.Unmarshal(content, &contentMap) + if err != nil { + return nil, fmt.Errorf("error unmarshalling content: %s", err) + } + sigMessage, ok := contentMap[signatureKey] + if !ok { + return nil, ErrMissingSignatureKey + } + + var signatureBlocks []jsParsedSignature + err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) + if err != nil { + return nil, fmt.Errorf("error unmarshalling signatures: %s", err) + } + + js := newJSONSignature() + js.signatures = make([]jsSignature, len(signatureBlocks)) + + for i, signatureBlock := range signatureBlocks { + protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) + if err != nil { + return nil, fmt.Errorf("base64 decode error: %s", err) + } + var protectedHeader map[string]interface{} + err = json.Unmarshal(protectedBytes, &protectedHeader) + if err != nil { + return nil, fmt.Errorf("error unmarshalling protected header: %s", err) + } + + formatLength, ok := readIntFromMap("formatLength", protectedHeader) + if !ok { + return nil, errors.New("missing formatted length") + } + encodedTail, ok := readStringFromMap("formatTail", protectedHeader) + if !ok { + return nil, errors.New("missing formatted tail") + } + formatTail, err := joseBase64UrlDecode(encodedTail) + if err != nil { + return nil, fmt.Errorf("base64 decode error on tail: %s", err) + } + if js.formatLength == 0 { + js.formatLength = formatLength + } else if js.formatLength != formatLength { + return nil, errors.New("conflicting format length") + } + if len(js.formatTail) == 0 { + js.formatTail = formatTail + } else if bytes.Compare(js.formatTail, formatTail) != 0 { + return nil, errors.New("conflicting format tail") + } + + header := jsHeader{ + Algorithm: signatureBlock.Header.Algorithm, + Chain: signatureBlock.Header.Chain, + } + if signatureBlock.Header.JWK != nil { + publicKey, err := 
UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) + if err != nil { + return nil, fmt.Errorf("error unmarshalling public key: %s", err) + } + header.JWK = publicKey + } + js.signatures[i] = jsSignature{ + Header: header, + Signature: signatureBlock.Signature, + Protected: signatureBlock.Protected, + } + } + if js.formatLength > len(content) { + return nil, errors.New("invalid format length") + } + formatted := make([]byte, js.formatLength+len(js.formatTail)) + copy(formatted, content[:js.formatLength]) + copy(formatted[js.formatLength:], js.formatTail) + js.indent = detectJSONIndent(formatted) + js.payload = joseBase64UrlEncode(formatted) + + return js, nil +} + +// PrettySignature formats a json signature into an easy to read +// single json serialized object. +func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { + if len(js.signatures) == 0 { + return nil, errors.New("no signatures") + } + payload, err := joseBase64UrlDecode(js.payload) + if err != nil { + return nil, err + } + payload = payload[:js.formatLength] + + sort.Sort(jsSignaturesSorted(js.signatures)) + + var marshalled []byte + var marshallErr error + if js.indent != "" { + marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) + } else { + marshalled, marshallErr = json.Marshal(js.signatures) + } + if marshallErr != nil { + return nil, marshallErr + } + + buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) + buf.Write(payload) + buf.WriteByte(',') + if js.indent != "" { + buf.WriteByte('\n') + buf.WriteString(js.indent) + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\": ") + buf.Write(marshalled) + buf.WriteByte('\n') + } else { + buf.WriteByte('"') + buf.WriteString(signatureKey) + buf.WriteString("\":") + buf.Write(marshalled) + } + buf.WriteByte('}') + + return buf.Bytes(), nil +} + +// Signatures provides the signatures on this JWS as opaque blobs, sorted by +// keyID. 
These blobs can be stored and reassembled with payloads. Internally, +// they are simply marshaled json web signatures but implementations should +// not rely on this. +func (js *JSONSignature) Signatures() ([][]byte, error) { + sort.Sort(jsSignaturesSorted(js.signatures)) + + var sb [][]byte + for _, jsig := range js.signatures { + p, err := json.Marshal(jsig) + if err != nil { + return nil, err + } + + sb = append(sb, p) + } + + return sb, nil +} + +// Merge combines the signatures from one or more other signatures into the +// method receiver. If the payloads differ for any argument, an error will be +// returned and the receiver will not be modified. +func (js *JSONSignature) Merge(others ...*JSONSignature) error { + merged := js.signatures + for _, other := range others { + if js.payload != other.payload { + return fmt.Errorf("payloads differ from merge target") + } + merged = append(merged, other.signatures...) + } + + js.signatures = merged + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go new file mode 100644 index 000000000..b4f269798 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/jsonsign_test.go @@ -0,0 +1,380 @@ +package libtrust + +import ( + "bytes" + "crypto/rand" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "testing" + + "github.com/docker/libtrust/testutil" +) + +func createTestJSON(sigKey string, indent string) (map[string]interface{}, []byte) { + testMap := map[string]interface{}{ + "name": "dmcgowan/mycontainer", + "config": map[string]interface{}{ + "ports": []int{9101, 9102}, + "run": "/bin/echo \"Hello\"", + }, + "layers": []string{ + "2893c080-27f5-11e4-8c21-0800200c9a66", + "c54bc25b-fbb2-497b-a899-a8bc1b5b9d55", + "4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4", + "0b6da891-7f7f-4abf-9c97-7887549e696c", + "1d960389-ae4f-4011-85fd-18d0f96a67ad", + }, + } + formattedSection := 
`{"config":{"ports":[9101,9102],"run":"/bin/echo \"Hello\""},"layers":["2893c080-27f5-11e4-8c21-0800200c9a66","c54bc25b-fbb2-497b-a899-a8bc1b5b9d55","4d5d7e03-f908-49f3-a7f6-9ba28dfe0fb4","0b6da891-7f7f-4abf-9c97-7887549e696c","1d960389-ae4f-4011-85fd-18d0f96a67ad"],"name":"dmcgowan/mycontainer","%s":[{"header":{` + formattedSection = fmt.Sprintf(formattedSection, sigKey) + if indent != "" { + buf := bytes.NewBuffer(nil) + json.Indent(buf, []byte(formattedSection), "", indent) + return testMap, buf.Bytes() + } + return testMap, []byte(formattedSection) + +} + +func TestSignJSON(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating EC key: %s", err) + } + + testMap, _ := createTestJSON("buildSignatures", " ") + indented, err := json.MarshalIndent(testMap, "", " ") + if err != nil { + t.Fatalf("Marshall error: %s", err) + } + + js, err := NewJSONSignature(indented) + if err != nil { + t.Fatalf("Error creating JSON signature: %s", err) + } + err = js.Sign(key) + if err != nil { + t.Fatalf("Error signing content: %s", err) + } + + keys, err := js.Verify() + if err != nil { + t.Fatalf("Error verifying signature: %s", err) + } + if len(keys) != 1 { + t.Fatalf("Error wrong number of keys returned") + } + if keys[0].KeyID() != key.KeyID() { + t.Fatalf("Unexpected public key returned") + } + +} + +func TestSignMap(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating EC key: %s", err) + } + + testMap, _ := createTestJSON("buildSignatures", " ") + js, err := NewJSONSignatureFromMap(testMap) + if err != nil { + t.Fatalf("Error creating JSON signature: %s", err) + } + err = js.Sign(key) + if err != nil { + t.Fatalf("Error signing JSON signature: %s", err) + } + + keys, err := js.Verify() + if err != nil { + t.Fatalf("Error verifying signature: %s", err) + } + if len(keys) != 1 { + t.Fatalf("Error wrong number of keys returned") + } + if keys[0].KeyID() != key.KeyID() { + 
t.Fatalf("Unexpected public key returned") + } +} + +func TestFormattedJson(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating EC key: %s", err) + } + + testMap, firstSection := createTestJSON("buildSignatures", " ") + indented, err := json.MarshalIndent(testMap, "", " ") + if err != nil { + t.Fatalf("Marshall error: %s", err) + } + + js, err := NewJSONSignature(indented) + if err != nil { + t.Fatalf("Error creating JSON signature: %s", err) + } + err = js.Sign(key) + if err != nil { + t.Fatalf("Error signing content: %s", err) + } + + b, err := js.PrettySignature("buildSignatures") + if err != nil { + t.Fatalf("Error signing map: %s", err) + } + + if bytes.Compare(b[:len(firstSection)], firstSection) != 0 { + t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)]) + } + + parsed, err := ParsePrettySignature(b, "buildSignatures") + if err != nil { + t.Fatalf("Error parsing formatted signature: %s", err) + } + + keys, err := parsed.Verify() + if err != nil { + t.Fatalf("Error verifying signature: %s", err) + } + if len(keys) != 1 { + t.Fatalf("Error wrong number of keys returned") + } + if keys[0].KeyID() != key.KeyID() { + t.Fatalf("Unexpected public key returned") + } + + var unmarshalled map[string]interface{} + err = json.Unmarshal(b, &unmarshalled) + if err != nil { + t.Fatalf("Could not unmarshall after parse: %s", err) + } + +} + +func TestFormattedFlatJson(t *testing.T) { + key, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating EC key: %s", err) + } + + testMap, firstSection := createTestJSON("buildSignatures", "") + unindented, err := json.Marshal(testMap) + if err != nil { + t.Fatalf("Marshall error: %s", err) + } + + js, err := NewJSONSignature(unindented) + if err != nil { + t.Fatalf("Error creating JSON signature: %s", err) + } + err = js.Sign(key) + if err != nil { + t.Fatalf("Error signing JSON signature: %s", err) + } + + b, err 
:= js.PrettySignature("buildSignatures") + if err != nil { + t.Fatalf("Error signing map: %s", err) + } + + if bytes.Compare(b[:len(firstSection)], firstSection) != 0 { + t.Fatalf("Wrong signed value\nExpected:\n%s\nActual:\n%s", firstSection, b[:len(firstSection)]) + } + + parsed, err := ParsePrettySignature(b, "buildSignatures") + if err != nil { + t.Fatalf("Error parsing formatted signature: %s", err) + } + + keys, err := parsed.Verify() + if err != nil { + t.Fatalf("Error verifying signature: %s", err) + } + if len(keys) != 1 { + t.Fatalf("Error wrong number of keys returned") + } + if keys[0].KeyID() != key.KeyID() { + t.Fatalf("Unexpected public key returned") + } +} + +func generateTrustChain(t *testing.T, key PrivateKey, ca *x509.Certificate) (PrivateKey, []*x509.Certificate) { + parent := ca + parentKey := key + chain := make([]*x509.Certificate, 6) + for i := 5; i > 0; i-- { + intermediatekey, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generate key: %s", err) + } + chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) + if err != nil { + t.Fatalf("Error generating intermdiate certificate: %s", err) + } + parent = chain[i] + parentKey = intermediatekey + } + trustKey, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generate key: %s", err) + } + chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) + if err != nil { + t.Fatalf("Error generate trust cert: %s", err) + } + + return trustKey, chain +} + +func TestChainVerify(t *testing.T) { + caKey, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating key: %s", err) + } + ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) + if err != nil { + t.Fatalf("Error generating ca: %s", err) + } + trustKey, chain := generateTrustChain(t, caKey, ca) + + testMap, _ := 
createTestJSON("verifySignatures", " ") + js, err := NewJSONSignatureFromMap(testMap) + if err != nil { + t.Fatalf("Error creating JSONSignature from map: %s", err) + } + + err = js.SignWithChain(trustKey, chain) + if err != nil { + t.Fatalf("Error signing with chain: %s", err) + } + + pool := x509.NewCertPool() + pool.AddCert(ca) + chains, err := js.VerifyChains(pool) + if err != nil { + t.Fatalf("Error verifying content: %s", err) + } + if len(chains) != 1 { + t.Fatalf("Unexpected chains length: %d", len(chains)) + } + if len(chains[0]) != 7 { + t.Fatalf("Unexpected chain length: %d", len(chains[0])) + } +} + +func TestInvalidChain(t *testing.T) { + caKey, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generating key: %s", err) + } + ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) + if err != nil { + t.Fatalf("Error generating ca: %s", err) + } + trustKey, chain := generateTrustChain(t, caKey, ca) + + testMap, _ := createTestJSON("verifySignatures", " ") + js, err := NewJSONSignatureFromMap(testMap) + if err != nil { + t.Fatalf("Error creating JSONSignature from map: %s", err) + } + + err = js.SignWithChain(trustKey, chain[:5]) + if err != nil { + t.Fatalf("Error signing with chain: %s", err) + } + + pool := x509.NewCertPool() + pool.AddCert(ca) + chains, err := js.VerifyChains(pool) + if err == nil { + t.Fatalf("Expected error verifying with bad chain") + } + if len(chains) != 0 { + t.Fatalf("Unexpected chains returned from invalid verify") + } +} + +func TestMergeSignatures(t *testing.T) { + pk1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key 1: %v", err) + } + + pk2, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("unexpected error generating private key 2: %v", err) + } + + payload := make([]byte, 1<<10) + if _, err = io.ReadFull(rand.Reader, payload); err != nil { + t.Fatalf("error generating payload: %v", err) + } + + 
payload, _ = json.Marshal(map[string]interface{}{"data": payload}) + + sig1, err := NewJSONSignature(payload) + if err != nil { + t.Fatalf("unexpected error creating signature 1: %v", err) + } + + if err := sig1.Sign(pk1); err != nil { + t.Fatalf("unexpected error signing with pk1: %v", err) + } + + sig2, err := NewJSONSignature(payload) + if err != nil { + t.Fatalf("unexpected error creating signature 2: %v", err) + } + + if err := sig2.Sign(pk2); err != nil { + t.Fatalf("unexpected error signing with pk2: %v", err) + } + + // Now, we actually merge into sig1 + if err := sig1.Merge(sig2); err != nil { + t.Fatalf("unexpected error merging: %v", err) + } + + // Verify the new signature package + pubkeys, err := sig1.Verify() + if err != nil { + t.Fatalf("unexpected error during verify: %v", err) + } + + // Make sure the pubkeys match the two private keys from before + privkeys := map[string]PrivateKey{ + pk1.KeyID(): pk1, + pk2.KeyID(): pk2, + } + + found := map[string]struct{}{} + + for _, pubkey := range pubkeys { + if _, ok := privkeys[pubkey.KeyID()]; !ok { + t.Fatalf("unexpected public key found during verification: %v", pubkey) + } + + found[pubkey.KeyID()] = struct{}{} + } + + // Make sure we've found all the private keys from verification + for keyid, _ := range privkeys { + if _, ok := found[keyid]; !ok { + t.Fatalf("public key %v not found during verification", keyid) + } + } + + // Create another signature, with a different payload, and ensure we get an error. 
+ sig3, err := NewJSONSignature([]byte("{}")) + if err != nil { + t.Fatalf("unexpected error making signature for sig3: %v", err) + } + + if err := sig1.Merge(sig3); err == nil { + t.Fatalf("error expected during invalid merge with different payload") + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key.go b/Godeps/_workspace/src/github.com/docker/libtrust/key.go new file mode 100644 index 000000000..73642db2a --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/key.go @@ -0,0 +1,253 @@ +package libtrust + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "encoding/json" + "encoding/pem" + "errors" + "fmt" + "io" +) + +// PublicKey is a generic interface for a Public Key. +type PublicKey interface { + // KeyType returns the key type for this key. For elliptic curve keys, + // this value should be "EC". For RSA keys, this value should be "RSA". + KeyType() string + // KeyID returns a distinct identifier which is unique to this Public Key. + // The format generated by this library is a base32 encoding of a 240 bit + // hash of the public key data divided into 12 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + KeyID() string + // Verify verifyies the signature of the data in the io.Reader using this + // Public Key. The alg parameter should identify the digital signature + // algorithm which was used to produce the signature and should be + // supported by this public key. Returns a nil error if the signature + // is valid. + Verify(data io.Reader, alg string, signature []byte) error + // CryptoPublicKey returns the internal object which can be used as a + // crypto.PublicKey for use with other standard library operations. The type + // is either *rsa.PublicKey or *ecdsa.PublicKey + CryptoPublicKey() crypto.PublicKey + // These public keys can be serialized to the standard JSON encoding for + // JSON Web Keys. 
See section 6 of the IETF draft RFC for JOSE JSON Web + // Algorithms. + MarshalJSON() ([]byte, error) + // These keys can also be serialized to the standard PEM encoding. + PEMBlock() (*pem.Block, error) + // The string representation of a key is its key type and ID. + String() string + AddExtendedField(string, interface{}) + GetExtendedField(string) interface{} +} + +// PrivateKey is a generic interface for a Private Key. +type PrivateKey interface { + // A PrivateKey contains all fields and methods of a PublicKey of the + // same type. The MarshalJSON method also outputs the private key as a + // JSON Web Key, and the PEMBlock method outputs the private key as a + // PEM block. + PublicKey + // PublicKey returns the PublicKey associated with this PrivateKey. + PublicKey() PublicKey + // Sign signs the data read from the io.Reader using a signature algorithm + // supported by the private key. If the specified hashing algorithm is + // supported by this key, that hash function is used to generate the + // signature otherwise the the default hashing algorithm for this key is + // used. Returns the signature and identifier of the algorithm used. + Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) + // CryptoPrivateKey returns the internal object which can be used as a + // crypto.PublicKey for use with other standard library operations. The + // type is either *rsa.PublicKey or *ecdsa.PublicKey + CryptoPrivateKey() crypto.PrivateKey +} + +// FromCryptoPublicKey returns a libtrust PublicKey representation of the given +// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given +// key is of an unsupported type. 
// FromCryptoPublicKey returns a libtrust PublicKey wrapping the given
// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given
// key is of an unsupported type.
func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) {
	switch cryptoPublicKey := cryptoPublicKey.(type) {
	case *ecdsa.PublicKey:
		return fromECPublicKey(cryptoPublicKey)
	case *rsa.PublicKey:
		return fromRSAPublicKey(cryptoPublicKey), nil
	default:
		return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey)
	}
}

// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given
// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given
// key is of an unsupported type.
func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) {
	switch cryptoPrivateKey := cryptoPrivateKey.(type) {
	case *ecdsa.PrivateKey:
		return fromECPrivateKey(cryptoPrivateKey)
	case *rsa.PrivateKey:
		return fromRSAPrivateKey(cryptoPrivateKey), nil
	default:
		return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey)
	}
}

// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust
// PublicKey or an error if there is a problem with the encoding.
func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) {
	pemBlock, _ := pem.Decode(data)
	if pemBlock == nil {
		return nil, errors.New("unable to find PEM encoded data")
	} else if pemBlock.Type != "PUBLIC KEY" {
		return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
	}

	return pubKeyFromPEMBlock(pemBlock)
}

// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of
// PEM blocks appended one after the other and returns a slice of PublicKey
// objects that it finds.
func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) {
	pubKeys := []PublicKey{}

	// pem.Decode returns the undecoded remainder; reassigning it to `data`
	// walks the bundle one block at a time until none remain.
	for {
		var pemBlock *pem.Block
		pemBlock, data = pem.Decode(data)
		if pemBlock == nil {
			break
		} else if pemBlock.Type != "PUBLIC KEY" {
			return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type)
		}

		pubKey, err := pubKeyFromPEMBlock(pemBlock)
		if err != nil {
			return nil, err
		}

		pubKeys = append(pubKeys, pubKey)
	}

	return pubKeys, nil
}

// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust
// PrivateKey or an error if there is a problem with the encoding. Supported
// PEM types are "RSA PRIVATE KEY" (PKCS#1) and "EC PRIVATE KEY" (SEC 1).
func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) {
	pemBlock, _ := pem.Decode(data)
	if pemBlock == nil {
		return nil, errors.New("unable to find PEM encoded data")
	}

	var key PrivateKey

	switch {
	case pemBlock.Type == "RSA PRIVATE KEY":
		rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes)
		if err != nil {
			return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err)
		}
		key = fromRSAPrivateKey(rsaPrivateKey)
	case pemBlock.Type == "EC PRIVATE KEY":
		ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes)
		if err != nil {
			return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err)
		}
		key, err = fromECPrivateKey(ecPrivateKey)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type)
	}

	// Carry any PEM headers over onto the public key as extended attributes
	// (e.g. "hosts"/"comment").
	addPEMHeadersToKey(pemBlock, key.PublicKey())

	return key, nil
}

// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic
// Public Key to be used with libtrust.
func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) {
	jwk := make(map[string]interface{})

	err := json.Unmarshal(data, &jwk)
	if err != nil {
		// NOTE(review): error strings here end with "\n" — go vet flags this;
		// fixing would change the emitted message, so only flagged.
		return nil, fmt.Errorf(
			"decoding JWK Public Key JSON data: %s\n", err,
		)
	}

	// Get the Key Type value.
	kty, err := stringFromMap(jwk, "kty")
	if err != nil {
		return nil, fmt.Errorf("JWK Public Key type: %s", err)
	}

	switch {
	case kty == "EC":
		// Call out to unmarshal EC public key.
		return ecPublicKeyFromMap(jwk)
	case kty == "RSA":
		// Call out to unmarshal RSA public key.
		return rsaPublicKeyFromMap(jwk)
	default:
		return nil, fmt.Errorf(
			"JWK Public Key type not supported: %q\n", kty,
		)
	}
}

// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set
// and returns a slice of Public Key objects.
func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) {
	rawKeys, err := loadJSONKeySetRaw(data)
	if err != nil {
		return nil, err
	}

	pubKeys := make([]PublicKey, 0, len(rawKeys))

	for _, rawKey := range rawKeys {
		pubKey, err := UnmarshalPublicKeyJWK(rawKey)
		if err != nil {
			return nil, err
		}
		pubKeys = append(pubKeys, pubKey)
	}

	return pubKeys, nil
}

// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic
// Private Key to be used with libtrust.
func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) {
	jwk := make(map[string]interface{})

	err := json.Unmarshal(data, &jwk)
	if err != nil {
		return nil, fmt.Errorf(
			"decoding JWK Private Key JSON data: %s\n", err,
		)
	}

	// Get the Key Type value.
	kty, err := stringFromMap(jwk, "kty")
	if err != nil {
		return nil, fmt.Errorf("JWK Private Key type: %s", err)
	}

	switch {
	case kty == "EC":
		// Call out to unmarshal EC private key.
		return ecPrivateKeyFromMap(jwk)
	case kty == "RSA":
		// Call out to unmarshal RSA private key.
		return rsaPrivateKeyFromMap(jwk)
	default:
		return nil, fmt.Errorf(
			"JWK Private Key type not supported: %q\n", kty,
		)
	}
}

// ===== patch file boundary: Godeps/.../docker/libtrust/key_files.go =====

package libtrust

import (
	"encoding/json"
	"encoding/pem"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"strings"
)

var (
	// ErrKeyFileDoesNotExist indicates that the private key file does not exist.
	ErrKeyFileDoesNotExist = errors.New("key file does not exist")
)

// readKeyFileBytes reads the named file, mapping a missing file to the
// sentinel ErrKeyFileDoesNotExist so callers can treat "no file yet" as a
// non-fatal condition.
func readKeyFileBytes(filename string) ([]byte, error) {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		if os.IsNotExist(err) {
			err = ErrKeyFileDoesNotExist
		} else {
			err = fmt.Errorf("unable to read key file %s: %s", filename, err)
		}

		return nil, err
	}

	return data, nil
}

/*
	Loading and Saving of Public and Private Keys in either PEM or JWK format.
*/

// LoadKeyFile opens the given filename and attempts to read a Private Key
// encoded in either PEM or JWK format (if .json or .jwk file extension).
func LoadKeyFile(filename string) (PrivateKey, error) {
	contents, err := readKeyFileBytes(filename)
	if err != nil {
		return nil, err
	}

	var key PrivateKey

	// Format is chosen purely by file extension: .json/.jwk => JWK, else PEM.
	if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") {
		key, err = UnmarshalPrivateKeyJWK(contents)
		if err != nil {
			return nil, fmt.Errorf("unable to decode private key JWK: %s", err)
		}
	} else {
		key, err = UnmarshalPrivateKeyPEM(contents)
		if err != nil {
			return nil, fmt.Errorf("unable to decode private key PEM: %s", err)
		}
	}

	return key, nil
}
+func LoadPublicKeyFile(filename string) (PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil { + return nil, err + } + + var key PublicKey + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + key, err = UnmarshalPublicKeyJWK(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key JWK: %s", err) + } + } else { + key, err = UnmarshalPublicKeyPEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to decode public key PEM: %s", err) + } + } + + return key, nil +} + +// SaveKey saves the given key to a file using the provided filename. +// This process will overwrite any existing file at the provided location. +func SaveKey(filename string, key PrivateKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + // Encode in PEM format. + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode private key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) + if err != nil { + return fmt.Errorf("unable to write private key file %s: %s", filename, err) + } + + return nil +} + +// SavePublicKey saves the given public key to the file. +func SavePublicKey(filename string, key PublicKey) error { + var encodedKey []byte + var err error + + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + // Encode in JSON Web Key format. + encodedKey, err = json.MarshalIndent(key, "", " ") + if err != nil { + return fmt.Errorf("unable to encode public key JWK: %s", err) + } + } else { + // Encode in PEM format. 
+ pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encode public key PEM: %s", err) + } + encodedKey = pem.EncodeToMemory(pemBlock) + } + + err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write public key file %s: %s", filename, err) + } + + return nil +} + +// Public Key Set files + +type jwkSet struct { + Keys []json.RawMessage `json:"keys"` +} + +// LoadKeySetFile loads a key set +func LoadKeySetFile(filename string) ([]PublicKey, error) { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return loadJSONKeySetFile(filename) + } + + // Must be a PEM format file + return loadPEMKeySetFile(filename) +} + +func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { + if len(data) == 0 { + // This is okay, just return an empty slice. + return []json.RawMessage{}, nil + } + + keySet := jwkSet{} + + err := json.Unmarshal(data, &keySet) + if err != nil { + return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) + } + + return keySet.Keys, nil +} + +func loadJSONKeySetFile(filename string) ([]PublicKey, error) { + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyJWKSet(contents) +} + +func loadPEMKeySetFile(filename string) ([]PublicKey, error) { + data, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return nil, err + } + + return UnmarshalPublicKeyPEMBundle(data) +} + +// AddKeySetFile adds a key to a key set +func AddKeySetFile(filename string, key PublicKey) error { + if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { + return addKeySetJSONFile(filename, key) + } + + // Must be a PEM format file + return addKeySetPEMFile(filename, key) +} + +func addKeySetJSONFile(filename string, key PublicKey) error { + encodedKey, err := json.Marshal(key) + if err != nil 
{ + return fmt.Errorf("unable to encode trusted client key: %s", err) + } + + contents, err := readKeyFileBytes(filename) + if err != nil && err != ErrKeyFileDoesNotExist { + return err + } + + rawEntries, err := loadJSONKeySetRaw(contents) + if err != nil { + return err + } + + rawEntries = append(rawEntries, json.RawMessage(encodedKey)) + entriesWrapper := jwkSet{Keys: rawEntries} + + encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ") + if err != nil { + return fmt.Errorf("unable to encode trusted client keys: %s", err) + } + + err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) + } + + return nil +} + +func addKeySetPEMFile(filename string, key PublicKey) error { + // Encode to PEM, open file for appending, write PEM. + file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) + if err != nil { + return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err) + } + defer file.Close() + + pemBlock, err := key.PEMBlock() + if err != nil { + return fmt.Errorf("unable to encoded trusted key: %s", err) + } + + _, err = file.Write(pem.EncodeToMemory(pemBlock)) + if err != nil { + return fmt.Errorf("unable to write trusted keys file: %s", err) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go new file mode 100644 index 000000000..57e691f2e --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/key_files_test.go @@ -0,0 +1,220 @@ +package libtrust + +import ( + "errors" + "io/ioutil" + "os" + "testing" +) + +func makeTempFile(t *testing.T, prefix string) (filename string) { + file, err := ioutil.TempFile("", prefix) + if err != nil { + t.Fatal(err) + } + + filename = file.Name() + file.Close() + + return +} + +func TestKeyFiles(t *testing.T) { + key, err 
:= GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + testKeyFiles(t, key) + + key, err = GenerateRSA2048PrivateKey() + if err != nil { + t.Fatal(err) + } + + testKeyFiles(t, key) +} + +func testKeyFiles(t *testing.T, key PrivateKey) { + var err error + + privateKeyFilename := makeTempFile(t, "private_key") + privateKeyFilenamePEM := privateKeyFilename + ".pem" + privateKeyFilenameJWK := privateKeyFilename + ".jwk" + + publicKeyFilename := makeTempFile(t, "public_key") + publicKeyFilenamePEM := publicKeyFilename + ".pem" + publicKeyFilenameJWK := publicKeyFilename + ".jwk" + + if err = SaveKey(privateKeyFilenamePEM, key); err != nil { + t.Fatal(err) + } + + if err = SaveKey(privateKeyFilenameJWK, key); err != nil { + t.Fatal(err) + } + + if err = SavePublicKey(publicKeyFilenamePEM, key.PublicKey()); err != nil { + t.Fatal(err) + } + + if err = SavePublicKey(publicKeyFilenameJWK, key.PublicKey()); err != nil { + t.Fatal(err) + } + + loadedPEMKey, err := LoadKeyFile(privateKeyFilenamePEM) + if err != nil { + t.Fatal(err) + } + + loadedJWKKey, err := LoadKeyFile(privateKeyFilenameJWK) + if err != nil { + t.Fatal(err) + } + + loadedPEMPublicKey, err := LoadPublicKeyFile(publicKeyFilenamePEM) + if err != nil { + t.Fatal(err) + } + + loadedJWKPublicKey, err := LoadPublicKeyFile(publicKeyFilenameJWK) + if err != nil { + t.Fatal(err) + } + + if key.KeyID() != loadedPEMKey.KeyID() { + t.Fatal(errors.New("key IDs do not match")) + } + + if key.KeyID() != loadedJWKKey.KeyID() { + t.Fatal(errors.New("key IDs do not match")) + } + + if key.KeyID() != loadedPEMPublicKey.KeyID() { + t.Fatal(errors.New("key IDs do not match")) + } + + if key.KeyID() != loadedJWKPublicKey.KeyID() { + t.Fatal(errors.New("key IDs do not match")) + } + + os.Remove(privateKeyFilename) + os.Remove(privateKeyFilenamePEM) + os.Remove(privateKeyFilenameJWK) + os.Remove(publicKeyFilename) + os.Remove(publicKeyFilenamePEM) + os.Remove(publicKeyFilenameJWK) +} + +func 
TestTrustedHostKeysFile(t *testing.T) { + trustedHostKeysFilename := makeTempFile(t, "trusted_host_keys") + trustedHostKeysFilenamePEM := trustedHostKeysFilename + ".pem" + trustedHostKeysFilenameJWK := trustedHostKeysFilename + ".json" + + testTrustedHostKeysFile(t, trustedHostKeysFilenamePEM) + testTrustedHostKeysFile(t, trustedHostKeysFilenameJWK) + + os.Remove(trustedHostKeysFilename) + os.Remove(trustedHostKeysFilenamePEM) + os.Remove(trustedHostKeysFilenameJWK) +} + +func testTrustedHostKeysFile(t *testing.T, trustedHostKeysFilename string) { + hostAddress1 := "docker.example.com:2376" + hostKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + hostKey1.AddExtendedField("hosts", []string{hostAddress1}) + err = AddKeySetFile(trustedHostKeysFilename, hostKey1.PublicKey()) + if err != nil { + t.Fatal(err) + } + + trustedHostKeysMapping, err := LoadKeySetFile(trustedHostKeysFilename) + if err != nil { + t.Fatal(err) + } + + for addr, hostKey := range trustedHostKeysMapping { + t.Logf("Host Address: %d\n", addr) + t.Logf("Host Key: %s\n\n", hostKey) + } + + hostAddress2 := "192.168.59.103:2376" + hostKey2, err := GenerateRSA2048PrivateKey() + if err != nil { + t.Fatal(err) + } + + hostKey2.AddExtendedField("hosts", hostAddress2) + err = AddKeySetFile(trustedHostKeysFilename, hostKey2.PublicKey()) + if err != nil { + t.Fatal(err) + } + + trustedHostKeysMapping, err = LoadKeySetFile(trustedHostKeysFilename) + if err != nil { + t.Fatal(err) + } + + for addr, hostKey := range trustedHostKeysMapping { + t.Logf("Host Address: %d\n", addr) + t.Logf("Host Key: %s\n\n", hostKey) + } + +} + +func TestTrustedClientKeysFile(t *testing.T) { + trustedClientKeysFilename := makeTempFile(t, "trusted_client_keys") + trustedClientKeysFilenamePEM := trustedClientKeysFilename + ".pem" + trustedClientKeysFilenameJWK := trustedClientKeysFilename + ".json" + + testTrustedClientKeysFile(t, trustedClientKeysFilenamePEM) + testTrustedClientKeysFile(t, 
trustedClientKeysFilenameJWK) + + os.Remove(trustedClientKeysFilename) + os.Remove(trustedClientKeysFilenamePEM) + os.Remove(trustedClientKeysFilenameJWK) +} + +func testTrustedClientKeysFile(t *testing.T, trustedClientKeysFilename string) { + clientKey1, err := GenerateECP256PrivateKey() + if err != nil { + t.Fatal(err) + } + + err = AddKeySetFile(trustedClientKeysFilename, clientKey1.PublicKey()) + if err != nil { + t.Fatal(err) + } + + trustedClientKeys, err := LoadKeySetFile(trustedClientKeysFilename) + if err != nil { + t.Fatal(err) + } + + for _, clientKey := range trustedClientKeys { + t.Logf("Client Key: %s\n", clientKey) + } + + clientKey2, err := GenerateRSA2048PrivateKey() + if err != nil { + t.Fatal(err) + } + + err = AddKeySetFile(trustedClientKeysFilename, clientKey2.PublicKey()) + if err != nil { + t.Fatal(err) + } + + trustedClientKeys, err = LoadKeySetFile(trustedClientKeysFilename) + if err != nil { + t.Fatal(err) + } + + for _, clientKey := range trustedClientKeys { + t.Logf("Client Key: %s\n", clientKey) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go new file mode 100644 index 000000000..9a98ae357 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/key_manager.go @@ -0,0 +1,175 @@ +package libtrust + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "sync" +) + +// ClientKeyManager manages client keys on the filesystem +type ClientKeyManager struct { + key PrivateKey + clientFile string + clientDir string + + clientLock sync.RWMutex + clients []PublicKey + + configLock sync.Mutex + configs []*tls.Config +} + +// NewClientKeyManager loads a new manager from a set of key files +// and managed by the given private key. 
+func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { + m := &ClientKeyManager{ + key: trustKey, + clientFile: clientFile, + clientDir: clientDir, + } + if err := m.loadKeys(); err != nil { + return nil, err + } + // TODO Start watching file and directory + + return m, nil +} + +func (c *ClientKeyManager) loadKeys() (err error) { + // Load authorized keys file + var clients []PublicKey + if c.clientFile != "" { + clients, err = LoadKeySetFile(c.clientFile) + if err != nil { + return fmt.Errorf("unable to load authorized keys: %s", err) + } + } + + // Add clients from authorized keys directory + files, err := ioutil.ReadDir(c.clientDir) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unable to open authorized keys directory: %s", err) + } + for _, f := range files { + if !f.IsDir() { + publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) + if err != nil { + return fmt.Errorf("unable to load authorized key file: %s", err) + } + clients = append(clients, publicKey) + } + } + + c.clientLock.Lock() + c.clients = clients + c.clientLock.Unlock() + + return nil +} + +// RegisterTLSConfig registers a tls configuration to manager +// such that any changes to the keys may be reflected in +// the tls client CA pool +func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { + c.clientLock.RLock() + certPool, err := GenerateCACertPool(c.key, c.clients) + if err != nil { + return fmt.Errorf("CA pool generation error: %s", err) + } + c.clientLock.RUnlock() + + tlsConfig.ClientCAs = certPool + + c.configLock.Lock() + c.configs = append(c.configs, tlsConfig) + c.configLock.Unlock() + + return nil +} + +// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for +// libtrust identity authentication for the domain specified +func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { + tlsConfig := 
newTLSConfig() + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + if err := clients.RegisterTLSConfig(tlsConfig); err != nil { + return nil, err + } + + // Generate cert + ips, domains, err := parseAddr(addr) + if err != nil { + return nil, err + } + // add domain that it expects clients to use + domains = append(domains, domain) + x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + return tlsConfig, nil +} + +// NewCertAuthTLSConfig creates a tls.Config for the server to use for +// certificate authentication +func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) + } + tlsConfig.Certificates = []tls.Certificate{cert} + + // Verify client certificates against a CA? 
+ if caPath != "" { + certPool := x509.NewCertPool() + file, err := ioutil.ReadFile(caPath) + if err != nil { + return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) + } + certPool.AppendCertsFromPEM(file) + + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + tlsConfig.ClientCAs = certPool + } + + return tlsConfig, nil +} + +func newTLSConfig() *tls.Config { + return &tls.Config{ + NextProtos: []string{"http/1.1"}, + // Avoid fallback on insecure SSL protocols + MinVersion: tls.VersionTLS10, + } +} + +// parseAddr parses an address into an array of IPs and domains +func parseAddr(addr string) ([]net.IP, []string, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, nil, err + } + var domains []string + var ips []net.IP + ip := net.ParseIP(host) + if ip != nil { + ips = []net.IP{ip} + } else { + domains = []string{host} + } + return ips, domains, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go new file mode 100644 index 000000000..f6c59cc42 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/key_test.go @@ -0,0 +1,80 @@ +package libtrust + +import ( + "testing" +) + +type generateFunc func() (PrivateKey, error) + +func runGenerateBench(b *testing.B, f generateFunc, name string) { + for i := 0; i < b.N; i++ { + _, err := f() + if err != nil { + b.Fatalf("Error generating %s: %s", name, err) + } + } +} + +func runFingerprintBench(b *testing.B, f generateFunc, name string) { + b.StopTimer() + // Don't count this relatively slow generation call. 
// runFingerprintBench benchmarks KeyID (fingerprint) derivation on a key
// produced once, outside the timed region, by the given generator.
func runFingerprintBench(b *testing.B, f generateFunc, name string) {
	b.StopTimer()
	// Don't count this relatively slow generation call.
	key, err := f()
	if err != nil {
		b.Fatalf("Error generating %s: %s", name, err)
	}
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		if key.KeyID() == "" {
			b.Fatalf("Error generating key ID for %s", name)
		}
	}
}

func BenchmarkECP256Generate(b *testing.B) {
	runGenerateBench(b, GenerateECP256PrivateKey, "P256")
}

func BenchmarkECP384Generate(b *testing.B) {
	runGenerateBench(b, GenerateECP384PrivateKey, "P384")
}

func BenchmarkECP521Generate(b *testing.B) {
	runGenerateBench(b, GenerateECP521PrivateKey, "P521")
}

func BenchmarkRSA2048Generate(b *testing.B) {
	runGenerateBench(b, GenerateRSA2048PrivateKey, "RSA2048")
}

func BenchmarkRSA3072Generate(b *testing.B) {
	runGenerateBench(b, GenerateRSA3072PrivateKey, "RSA3072")
}

func BenchmarkRSA4096Generate(b *testing.B) {
	runGenerateBench(b, GenerateRSA4096PrivateKey, "RSA4096")
}

func BenchmarkECP256Fingerprint(b *testing.B) {
	runFingerprintBench(b, GenerateECP256PrivateKey, "P256")
}

func BenchmarkECP384Fingerprint(b *testing.B) {
	runFingerprintBench(b, GenerateECP384PrivateKey, "P384")
}

func BenchmarkECP521Fingerprint(b *testing.B) {
	runFingerprintBench(b, GenerateECP521PrivateKey, "P521")
}

func BenchmarkRSA2048Fingerprint(b *testing.B) {
	runFingerprintBench(b, GenerateRSA2048PrivateKey, "RSA2048")
}

func BenchmarkRSA3072Fingerprint(b *testing.B) {
	runFingerprintBench(b, GenerateRSA3072PrivateKey, "RSA3072")
}

func BenchmarkRSA4096Fingerprint(b *testing.B) {
	runFingerprintBench(b, GenerateRSA4096PrivateKey, "RSA4096")
}
"io" + "math/big" +) + +/* + * RSA DSA PUBLIC KEY + */ + +// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. +type rsaPublicKey struct { + *rsa.PublicKey + extended map[string]interface{} +} + +func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { + return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} +} + +// KeyType returns the JWK key type for RSA keys, i.e., "RSA". +func (k *rsaPublicKey) KeyType() string { + return "RSA" +} + +// KeyID returns a distinct identifier which is unique to this Public Key. +func (k *rsaPublicKey) KeyID() string { + return keyIDFromCryptoKey(k) +} + +func (k *rsaPublicKey) String() string { + return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) +} + +// Verify verifyies the signature of the data in the io.Reader using this Public Key. +// The alg parameter should be the name of the JWA digital signature algorithm +// which was used to produce the signature and should be supported by this +// public key. Returns a nil error if the signature is valid. +func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { + // Verify the signature of the given date, return non-nil error if valid. + sigAlg, err := rsaSignatureAlgorithmByName(alg) + if err != nil { + return fmt.Errorf("unable to verify Signature: %s", err) + } + + hasher := sigAlg.HashID().New() + _, err = io.Copy(hasher, data) + if err != nil { + return fmt.Errorf("error reading data to sign: %s", err) + } + hash := hasher.Sum(nil) + + err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) + if err != nil { + return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) + } + + return nil +} + +// CryptoPublicKey returns the internal object which can be used as a +// crypto.PublicKey for use with other standard library operations. 
// CryptoPublicKey returns the internal object which can be used as a
// crypto.PublicKey for use with other standard library operations. For this
// RSA implementation the concrete type is always *rsa.PublicKey.
func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
	return k.PublicKey
}

// toMap returns the JWK representation of this key. Extended fields are
// copied first so the canonical "kty"/"kid"/"n"/"e" entries always win.
func (k *rsaPublicKey) toMap() map[string]interface{} {
	jwk := make(map[string]interface{})
	for k, v := range k.extended {
		jwk[k] = v
	}
	jwk["kty"] = k.KeyType()
	jwk["kid"] = k.KeyID()
	jwk["n"] = joseBase64UrlEncode(k.N.Bytes())
	jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E))

	return jwk
}

// MarshalJSON serializes this Public Key using the JWK JSON serialization format for
// RSA keys.
func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) {
	return json.Marshal(k.toMap())
}

// PEMBlock serializes this Public Key to DER-encoded PKIX format.
// Extended attributes (including the key ID) become PEM headers.
func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) {
	derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey)
	if err != nil {
		return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err)
	}
	k.extended["kid"] = k.KeyID() // For display purposes.
	return createPemBlock("PUBLIC KEY", derBytes, k.extended)
}

// AddExtendedField sets an arbitrary extended JWK attribute on this key.
func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) {
	k.extended[field] = value
}

// GetExtendedField returns the named extended attribute, or nil if unset.
func (k *rsaPublicKey) GetExtendedField(field string) interface{} {
	v, ok := k.extended[field]
	if !ok {
		return nil
	}
	return v
}
// rsaPublicKeyFromMap builds an *rsaPublicKey from a decoded JWK object whose
// "kty" has already been determined to be "RSA".
func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) {
	// JWK key type (kty) has already been determined to be "RSA".
	// Need to extract 'n', 'e', and 'kid' and check for
	// consistency.

	// Get the modulus parameter N.
	nB64Url, err := stringFromMap(jwk, "n")
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
	}

	n, err := parseRSAModulusParam(nB64Url)
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err)
	}

	// Get the public exponent E.
	eB64Url, err := stringFromMap(jwk, "e")
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
	}

	e, err := parseRSAPublicExponentParam(eB64Url)
	if err != nil {
		return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err)
	}

	key := &rsaPublicKey{
		PublicKey: &rsa.PublicKey{N: n, E: e},
	}

	// Key ID is optional, but if it exists, it should match the key.
	_, ok := jwk["kid"]
	if ok {
		kid, err := stringFromMap(jwk, "kid")
		if err != nil {
			return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err)
		}
		if kid != key.KeyID() {
			return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid)
		}
	}

	// Reject any leaked private material in a "public" key document.
	if _, ok := jwk["d"]; ok {
		return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent")
	}

	// Remaining map entries (kty/kid included) are kept as extended fields.
	key.extended = jwk

	return key, nil
}

/*
 * RSA PRIVATE KEY
 */

// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms.
type rsaPrivateKey struct {
	rsaPublicKey
	*rsa.PrivateKey
}

// fromRSAPrivateKey wraps a standard-library *rsa.PrivateKey, embedding its
// public half as an rsaPublicKey.
func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey {
	return &rsaPrivateKey{
		*fromRSAPublicKey(&cryptoPrivateKey.PublicKey),
		cryptoPrivateKey,
	}
}

// PublicKey returns the Public Key data associated with this Private Key.
func (k *rsaPrivateKey) PublicKey() PublicKey {
	return &k.rsaPublicKey
}

func (k *rsaPrivateKey) String() string {
	return fmt.Sprintf("RSA Private Key <%s>", k.KeyID())
}
// Sign signs the data read from the io.Reader using a signature algorithm supported
// by the RSA private key. If the specified hashing algorithm is supported by
// this key, that hash function is used to generate the signature otherwise
// the default hashing algorithm for this key is used. Returns the signature
// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384",
// "RS512".
func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) {
	// Generate a signature of the data using the internal alg.
	sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID)
	hasher := sigAlg.HashID().New()

	_, err = io.Copy(hasher, data)
	if err != nil {
		return nil, "", fmt.Errorf("error reading data to sign: %s", err)
	}
	hash := hasher.Sum(nil)

	signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash)
	if err != nil {
		return nil, "", fmt.Errorf("error producing signature: %s", err)
	}

	alg = sigAlg.HeaderParam()

	return
}

// CryptoPrivateKey returns the internal object which can be used as a
// crypto.PrivateKey for use with other standard library operations. For this
// RSA implementation the concrete type is always *rsa.PrivateKey.
func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey {
	return k.PrivateKey
}
// toMap returns the JWK representation of this private key, including the
// public fields plus the private CRT parameters.
func (k *rsaPrivateKey) toMap() map[string]interface{} {
	k.Precompute() // Make sure the precomputed values are stored.
	jwk := k.rsaPublicKey.toMap()

	jwk["d"] = joseBase64UrlEncode(k.D.Bytes())
	jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes())
	jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes())
	jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes())
	jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes())
	jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes())

	// Multi-prime keys carry the extra factors in the "oth" member
	// (RFC 7518 section 6.3.2.7).
	otherPrimes := k.Primes[2:]

	if len(otherPrimes) > 0 {
		otherPrimesInfo := make([]interface{}, len(otherPrimes))
		for i, r := range otherPrimes {
			otherPrimeInfo := make(map[string]string, 3)
			otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes())
			crtVal := k.Precomputed.CRTValues[i]
			otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes())
			otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes())
			otherPrimesInfo[i] = otherPrimeInfo
		}
		jwk["oth"] = otherPrimesInfo
	}

	return jwk
}

// MarshalJSON serializes this Private Key using the JWK JSON serialization format for
// RSA keys.
func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) {
	return json.Marshal(k.toMap())
}

// PEMBlock serializes this Private Key to a DER-encoded PKCS#1
// "RSA PRIVATE KEY" block (MarshalPKCS1PrivateKey is PKCS#1, not PKIX).
func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) {
	derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey)
	// NOTE(review): the public-key PEMBlock stores this header as "kid";
	// "keyID" here is inconsistent, but changing it would alter emitted PEM
	// headers — confirm before unifying.
	k.extended["keyID"] = k.KeyID() // For display purposes.
	return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended)
}
+ privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) + } + firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + var oth interface{} + if _, ok := jwk["oth"]; ok { + oth = jwk["oth"] + delete(jwk, "oth") + } + + // JWK key type (kty) has already been determined to be "RSA". + // Need to extract the public key information, then extract the private + // key values. + publicKey, err := rsaPublicKeyFromMap(jwk) + if err != nil { + return nil, err + } + + privateKey := &rsa.PrivateKey{ + PublicKey: *publicKey.PublicKey, + D: privateExponent, + Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor}, + Precomputed: rsa.PrecomputedValues{ + Dp: firstFactorCRT, + Dq: secondFactorCRT, + Qinv: crtCoeff, + }, + } + + if oth != nil { + // Should be an array of more JSON objects. 
+ otherPrimesInfo, ok := oth.([]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") + } + numOtherPrimeFactors := len(otherPrimesInfo) + if numOtherPrimeFactors == 0 { + return nil, errors.New("JWK RSA Privake Key: Invalid other primes info: must be absent or non-empty") + } + otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) + productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) + crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) + + for i, val := range otherPrimesInfo { + otherPrimeinfo, ok := val.(map[string]interface{}) + if !ok { + return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") + } + + otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) + } + otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) + } + otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") + if err != nil { + return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) + } + + crtValue := crtValues[i] + crtValue.Exp = otherFactorCRT + crtValue.Coeff = otherCrtCoeff + crtValue.R = productOfPrimes + otherPrimeFactors[i] = otherPrimeFactor + productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) + } + + privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) + privateKey.Precomputed.CRTValues = crtValues + } + + key := &rsaPrivateKey{ + rsaPublicKey: *publicKey, + PrivateKey: privateKey, + } + + return key, nil +} + +/* + * Key Generation Functions. 
+ */ + +func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { + k = new(rsaPrivateKey) + k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + + k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey + k.extended = make(map[string]interface{}) + + return +} + +// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. +func GenerateRSA2048PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. +func GenerateRSA3072PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(3072) + if err != nil { + return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) + } + + return k, nil +} + +// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. +func GenerateRSA4096PrivateKey() (PrivateKey, error) { + k, err := generateRSAPrivateKey(4096) + if err != nil { + return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) + } + + return k, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go new file mode 100644 index 000000000..5ec7707aa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/rsa_key_test.go @@ -0,0 +1,157 @@ +package libtrust + +import ( + "bytes" + "encoding/json" + "log" + "testing" +) + +var rsaKeys []PrivateKey + +func init() { + var err error + rsaKeys, err = generateRSATestKeys() + if err != nil { + log.Fatal(err) + } +} + +func generateRSATestKeys() (keys []PrivateKey, err error) { + log.Println("Generating RSA 2048-bit Test Key") + rsa2048Key, err := GenerateRSA2048PrivateKey() + if err != nil { + return + } + + log.Println("Generating RSA 3072-bit Test Key") + rsa3072Key, err := GenerateRSA3072PrivateKey() + if err != nil { + 
return + } + + log.Println("Generating RSA 4096-bit Test Key") + rsa4096Key, err := GenerateRSA4096PrivateKey() + if err != nil { + return + } + + log.Println("Done generating RSA Test Keys!") + keys = []PrivateKey{rsa2048Key, rsa3072Key, rsa4096Key} + + return +} + +func TestRSAKeys(t *testing.T) { + for _, rsaKey := range rsaKeys { + if rsaKey.KeyType() != "RSA" { + t.Fatalf("key type must be %q, instead got %q", "RSA", rsaKey.KeyType()) + } + } +} + +func TestRSASignVerify(t *testing.T) { + message := "Hello, World!" + data := bytes.NewReader([]byte(message)) + + sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} + + for i, rsaKey := range rsaKeys { + sigAlg := sigAlgs[i] + + t.Logf("%s signature of %q with kid: %s\n", sigAlg.HeaderParam(), message, rsaKey.KeyID()) + + data.Seek(0, 0) // Reset the byte reader + + // Sign + sig, alg, err := rsaKey.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + + // Verify + err = rsaKey.Verify(data, alg, sig) + if err != nil { + t.Fatal(err) + } + } +} + +func TestMarshalUnmarshalRSAKeys(t *testing.T) { + data := bytes.NewReader([]byte("This is a test. I repeat: this is only a test.")) + sigAlgs := []*signatureAlgorithm{rs256, rs384, rs512} + + for i, rsaKey := range rsaKeys { + sigAlg := sigAlgs[i] + privateJWKJSON, err := json.MarshalIndent(rsaKey, "", " ") + if err != nil { + t.Fatal(err) + } + + publicJWKJSON, err := json.MarshalIndent(rsaKey.PublicKey(), "", " ") + if err != nil { + t.Fatal(err) + } + + t.Logf("JWK Private Key: %s", string(privateJWKJSON)) + t.Logf("JWK Public Key: %s", string(publicJWKJSON)) + + privKey2, err := UnmarshalPrivateKeyJWK(privateJWKJSON) + if err != nil { + t.Fatal(err) + } + + pubKey2, err := UnmarshalPublicKeyJWK(publicJWKJSON) + if err != nil { + t.Fatal(err) + } + + // Ensure we can sign/verify a message with the unmarshalled keys. 
+ data.Seek(0, 0) // Reset the byte reader + signature, alg, err := privKey2.Sign(data, sigAlg.HashID()) + if err != nil { + t.Fatal(err) + } + + data.Seek(0, 0) // Reset the byte reader + err = pubKey2.Verify(data, alg, signature) + if err != nil { + t.Fatal(err) + } + + // It's a good idea to validate the Private Key to make sure our + // (un)marshal process didn't corrupt the extra parameters. + k := privKey2.(*rsaPrivateKey) + err = k.PrivateKey.Validate() + if err != nil { + t.Fatal(err) + } + } +} + +func TestFromCryptoRSAKeys(t *testing.T) { + for _, rsaKey := range rsaKeys { + cryptoPrivateKey := rsaKey.CryptoPrivateKey() + cryptoPublicKey := rsaKey.CryptoPublicKey() + + pubKey, err := FromCryptoPublicKey(cryptoPublicKey) + if err != nil { + t.Fatal(err) + } + + if pubKey.KeyID() != rsaKey.KeyID() { + t.Fatal("public key key ID mismatch") + } + + privKey, err := FromCryptoPrivateKey(cryptoPrivateKey) + if err != nil { + t.Fatal(err) + } + + if privKey.KeyID() != rsaKey.KeyID() { + t.Fatal("public key key ID mismatch") + } + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go b/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go new file mode 100644 index 000000000..89debf6b6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/testutil/certificates.go @@ -0,0 +1,94 @@ +package testutil + +import ( + "crypto" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "time" +) + +// GenerateTrustCA generates a new certificate authority for testing. 
+func GenerateTrustCA(pub crypto.PublicKey, priv crypto.PrivateKey) (*x509.Certificate, error) { + cert := &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: "CA Root", + }, + NotBefore: time.Now().Add(-time.Second), + NotAfter: time.Now().Add(time.Hour), + IsCA: true, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + BasicConstraintsValid: true, + } + + certDER, err := x509.CreateCertificate(rand.Reader, cert, cert, pub, priv) + if err != nil { + return nil, err + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, err + } + + return cert, nil +} + +// GenerateIntermediate generates an intermediate certificate for testing using +// the parent certificate (likely a CA) and the provided keys. +func GenerateIntermediate(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) { + cert := &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: "Intermediate", + }, + NotBefore: time.Now().Add(-time.Second), + NotAfter: time.Now().Add(time.Hour), + IsCA: true, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + BasicConstraintsValid: true, + } + + certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey) + if err != nil { + return nil, err + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, err + } + + return cert, nil +} + +// GenerateTrustCert generates a new trust certificate for testing. Unlike the +// intermediate certificates, this certificate should be used for signature +// only, not creating certificates. 
+func GenerateTrustCert(key crypto.PublicKey, parentKey crypto.PrivateKey, parent *x509.Certificate) (*x509.Certificate, error) { + cert := &x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{ + CommonName: "Trust Cert", + }, + NotBefore: time.Now().Add(-time.Second), + NotAfter: time.Now().Add(time.Hour), + IsCA: true, + KeyUsage: x509.KeyUsageDigitalSignature, + BasicConstraintsValid: true, + } + + certDER, err := x509.CreateCertificate(rand.Reader, cert, parent, key, parentKey) + if err != nil { + return nil, err + } + + cert, err = x509.ParseCertificate(certDER) + if err != nil { + return nil, err + } + + return cert, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md new file mode 100644 index 000000000..24124db21 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/README.md @@ -0,0 +1,50 @@ +## Libtrust TLS Config Demo + +This program generates key pairs and trust files for a TLS client and server. + +To generate the keys, run: + +``` +$ go run genkeys.go +``` + +The generated files are: + +``` +$ ls -l client_data/ server_data/ +client_data/: +total 24 +-rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json +-rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json +-rw-r--r-- 1 jlhawn staff 275 Aug 8 16:21 trusted_hosts.json + +server_data/: +total 24 +-rw-r--r-- 1 jlhawn staff 348 Aug 8 16:21 trusted_clients.json +-rw------- 1 jlhawn staff 281 Aug 8 16:21 private_key.json +-rw-r--r-- 1 jlhawn staff 225 Aug 8 16:21 public_key.json +``` + +The private key and public key for the client and server are stored in `private_key.json` and `public_key.json`, respectively, and in their respective directories. They are represented as JSON Web Keys: JSON objects which represent either an ECDSA or RSA private key. 
The host keys trusted by the client are stored in `trusted_hosts.json` and contain a mapping of an internet address, `:`, to a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted server. The client keys trusted by the server are stored in `trusted_clients.json` and contain an array of JSON objects which contain a comment field which can be used describe the key and a JSON Web Key which is a JSON object representing either an ECDSA or RSA public key of the trusted client. + +To start the server, run: + +``` +$ go run server.go +``` + +This starts an HTTPS server which listens on `localhost:8888`. The server configures itself with a certificate which is valid for both `localhost` and `127.0.0.1` and uses the key from `server_data/private_key.json`. It accepts connections from clients which present a certificate for a key that it is configured to trust from the `trusted_clients.json` file and returns a simple 'hello' message. + +To make a request using the client, run: + +``` +$ go run client.go +``` + +This command creates an HTTPS client which makes a GET request to `https://localhost:8888`. The client configures itself with a certificate using the key from `client_data/private_key.json`. It only connects to a server which presents a certificate signed by the key specified for the `localhost:8888` address from `client_data/trusted_hosts.json` and made to be used for the `localhost` hostname. If the connection succeeds, it prints the response from the server. + +The file `gencert.go` can be used to generate PEM encoded version of the client key and certificate. If you save them to `key.pem` and `cert.pem` respectively, you can use them with `curl` to test out the server (if it is still running). 
+ +``` +curl --cert cert.pem --key key.pem -k https://localhost:8888 +``` diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go new file mode 100644 index 000000000..0a699a0ee --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/client.go @@ -0,0 +1,89 @@ +package main + +import ( + "crypto/tls" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + + "github.com/docker/libtrust" +) + +var ( + serverAddress = "localhost:8888" + privateKeyFilename = "client_data/private_key.pem" + trustedHostsFilename = "client_data/trusted_hosts.pem" +) + +func main() { + // Load Client Key. + clientKey, err := libtrust.LoadKeyFile(privateKeyFilename) + if err != nil { + log.Fatal(err) + } + + // Generate Client Certificate. + selfSignedClientCert, err := libtrust.GenerateSelfSignedClientCert(clientKey) + if err != nil { + log.Fatal(err) + } + + // Load trusted host keys. + hostKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) + if err != nil { + log.Fatal(err) + } + + // Ensure the host we want to connect to is trusted! + host, _, err := net.SplitHostPort(serverAddress) + if err != nil { + log.Fatal(err) + } + serverKeys, err := libtrust.FilterByHosts(hostKeys, host, false) + if err != nil { + log.Fatalf("%q is not a known and trusted host", host) + } + + // Generate a CA pool with the trusted host's key. + caPool, err := libtrust.GenerateCACertPool(clientKey, serverKeys) + if err != nil { + log.Fatal(err) + } + + // Create HTTP Client. 
+ client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + Certificates: []tls.Certificate{ + tls.Certificate{ + Certificate: [][]byte{selfSignedClientCert.Raw}, + PrivateKey: clientKey.CryptoPrivateKey(), + Leaf: selfSignedClientCert, + }, + }, + RootCAs: caPool, + }, + }, + } + + var makeRequest = func(url string) { + resp, err := client.Get(url) + if err != nil { + log.Fatal(err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + log.Fatal(err) + } + + log.Println(resp.Status) + log.Println(string(body)) + } + + // Make the request to the trusted server! + makeRequest(fmt.Sprintf("https://%s", serverAddress)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go new file mode 100644 index 000000000..c65f3b6b4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/gencert.go @@ -0,0 +1,62 @@ +package main + +import ( + "encoding/pem" + "fmt" + "log" + "net" + + "github.com/docker/libtrust" +) + +var ( + serverAddress = "localhost:8888" + clientPrivateKeyFilename = "client_data/private_key.pem" + trustedHostsFilename = "client_data/trusted_hosts.pem" +) + +func main() { + key, err := libtrust.LoadKeyFile(clientPrivateKeyFilename) + if err != nil { + log.Fatal(err) + } + + keyPEMBlock, err := key.PEMBlock() + if err != nil { + log.Fatal(err) + } + + encodedPrivKey := pem.EncodeToMemory(keyPEMBlock) + fmt.Printf("Client Key:\n\n%s\n", string(encodedPrivKey)) + + cert, err := libtrust.GenerateSelfSignedClientCert(key) + if err != nil { + log.Fatal(err) + } + + encodedCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}) + fmt.Printf("Client Cert:\n\n%s\n", string(encodedCert)) + + trustedServerKeys, err := libtrust.LoadKeySetFile(trustedHostsFilename) + if err != nil { + log.Fatal(err) + } + + hostname, _, err := net.SplitHostPort(serverAddress) + if 
err != nil { + log.Fatal(err) + } + + trustedServerKeys, err = libtrust.FilterByHosts(trustedServerKeys, hostname, false) + if err != nil { + log.Fatal(err) + } + + caCert, err := libtrust.GenerateCACert(key, trustedServerKeys[0]) + if err != nil { + log.Fatal(err) + } + + encodedCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: caCert.Raw}) + fmt.Printf("CA Cert:\n\n%s\n", string(encodedCert)) +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go new file mode 100644 index 000000000..9dc8842ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/genkeys.go @@ -0,0 +1,61 @@ +package main + +import ( + "log" + + "github.com/docker/libtrust" +) + +func main() { + // Generate client key. + clientKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + log.Fatal(err) + } + + // Add a comment for the client key. + clientKey.AddExtendedField("comment", "TLS Demo Client") + + // Save the client key, public and private versions. + err = libtrust.SaveKey("client_data/private_key.pem", clientKey) + if err != nil { + log.Fatal(err) + } + + err = libtrust.SavePublicKey("client_data/public_key.pem", clientKey.PublicKey()) + if err != nil { + log.Fatal(err) + } + + // Generate server key. + serverKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + log.Fatal(err) + } + + // Set the list of addresses to use for the server. + serverKey.AddExtendedField("hosts", []string{"localhost", "docker.example.com"}) + + // Save the server key, public and private versions. + err = libtrust.SaveKey("server_data/private_key.pem", serverKey) + if err != nil { + log.Fatal(err) + } + + err = libtrust.SavePublicKey("server_data/public_key.pem", serverKey.PublicKey()) + if err != nil { + log.Fatal(err) + } + + // Generate Authorized Keys file for server. 
+ err = libtrust.AddKeySetFile("server_data/trusted_clients.pem", clientKey.PublicKey()) + if err != nil { + log.Fatal(err) + } + + // Generate Known Host Keys file for client. + err = libtrust.AddKeySetFile("client_data/trusted_hosts.pem", serverKey.PublicKey()) + if err != nil { + log.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go new file mode 100644 index 000000000..d3cb2ea91 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/tlsdemo/server.go @@ -0,0 +1,80 @@ +package main + +import ( + "crypto/tls" + "fmt" + "html" + "log" + "net" + "net/http" + + "github.com/docker/libtrust" +) + +var ( + serverAddress = "localhost:8888" + privateKeyFilename = "server_data/private_key.pem" + authorizedClientsFilename = "server_data/trusted_clients.pem" +) + +func requestHandler(w http.ResponseWriter, r *http.Request) { + clientCert := r.TLS.PeerCertificates[0] + keyID := clientCert.Subject.CommonName + log.Printf("Request from keyID: %s\n", keyID) + fmt.Fprintf(w, "Hello, client! I'm a server! And you are %T: %s.\n", clientCert.PublicKey, html.EscapeString(keyID)) +} + +func main() { + // Load server key. + serverKey, err := libtrust.LoadKeyFile(privateKeyFilename) + if err != nil { + log.Fatal(err) + } + + // Generate server certificate. + selfSignedServerCert, err := libtrust.GenerateSelfSignedServerCert( + serverKey, []string{"localhost"}, []net.IP{net.ParseIP("127.0.0.1")}, + ) + if err != nil { + log.Fatal(err) + } + + // Load authorized client keys. + authorizedClients, err := libtrust.LoadKeySetFile(authorizedClientsFilename) + if err != nil { + log.Fatal(err) + } + + // Create CA pool using trusted client keys. + caPool, err := libtrust.GenerateCACertPool(serverKey, authorizedClients) + if err != nil { + log.Fatal(err) + } + + // Create TLS config, requiring client certificates. 
+ tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{ + tls.Certificate{ + Certificate: [][]byte{selfSignedServerCert.Raw}, + PrivateKey: serverKey.CryptoPrivateKey(), + Leaf: selfSignedServerCert, + }, + }, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: caPool, + } + + // Create HTTP server with simple request handler. + server := &http.Server{ + Addr: serverAddress, + Handler: http.HandlerFunc(requestHandler), + } + + // Listen and server HTTPS using the libtrust TLS config. + listener, err := net.Listen("tcp", server.Addr) + if err != nil { + log.Fatal(err) + } + tlsListener := tls.NewListener(listener, tlsConfig) + server.Serve(tlsListener) +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go new file mode 100644 index 000000000..72b0fc366 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/graph.go @@ -0,0 +1,50 @@ +package trustgraph + +import "github.com/docker/libtrust" + +// TrustGraph represents a graph of authorization mapping +// public keys to nodes and grants between nodes. +type TrustGraph interface { + // Verifies that the given public key is allowed to perform + // the given action on the given node according to the trust + // graph. + Verify(libtrust.PublicKey, string, uint16) (bool, error) + + // GetGrants returns an array of all grant chains which are used to + // allow the requested permission. + GetGrants(libtrust.PublicKey, string, uint16) ([][]*Grant, error) +} + +// Grant represents a transfer of permission from one part of the +// trust graph to another. This is the only way to delegate +// permission between two different sub trees in the graph. +type Grant struct { + // Subject is the namespace being granted + Subject string + + // Permissions is a bit map of permissions + Permission uint16 + + // Grantee represents the node being granted + // a permission scope. 
The grantee can be + // either a namespace item or a key id where namespace + // items will always start with a '/'. + Grantee string + + // statement represents the statement used to create + // this object. + statement *Statement +} + +// Permissions +// Read node 0x01 (can read node, no sub nodes) +// Write node 0x02 (can write to node object, cannot create subnodes) +// Read subtree 0x04 (delegates read to each sub node) +// Write subtree 0x08 (delegates write to each sub node, included create on the subject) +// +// Permission shortcuts +// ReadItem = 0x01 +// WriteItem = 0x03 +// ReadAccess = 0x07 +// WriteAccess = 0x0F +// Delegate = 0x0F diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go new file mode 100644 index 000000000..247bfa7aa --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph.go @@ -0,0 +1,133 @@ +package trustgraph + +import ( + "strings" + + "github.com/docker/libtrust" +) + +type grantNode struct { + grants []*Grant + children map[string]*grantNode +} + +type memoryGraph struct { + roots map[string]*grantNode +} + +func newGrantNode() *grantNode { + return &grantNode{ + grants: []*Grant{}, + children: map[string]*grantNode{}, + } +} + +// NewMemoryGraph returns a new in memory trust graph created from +// a static list of grants. This graph is immutable after creation +// and any alterations should create a new instance. 
+func NewMemoryGraph(grants []*Grant) TrustGraph { + roots := map[string]*grantNode{} + for _, grant := range grants { + parts := strings.Split(grant.Grantee, "/") + nodes := roots + var node *grantNode + var nodeOk bool + for _, part := range parts { + node, nodeOk = nodes[part] + if !nodeOk { + node = newGrantNode() + nodes[part] = node + } + if part != "" { + node.grants = append(node.grants, grant) + } + nodes = node.children + } + } + return &memoryGraph{roots} +} + +func (g *memoryGraph) getGrants(name string) []*Grant { + nameParts := strings.Split(name, "/") + nodes := g.roots + var node *grantNode + var nodeOk bool + for _, part := range nameParts { + node, nodeOk = nodes[part] + if !nodeOk { + return nil + } + nodes = node.children + } + return node.grants +} + +func isSubName(name, sub string) bool { + if strings.HasPrefix(name, sub) { + if len(name) == len(sub) || name[len(sub)] == '/' { + return true + } + } + return false +} + +type walkFunc func(*Grant, []*Grant) bool + +func foundWalkFunc(*Grant, []*Grant) bool { + return true +} + +func (g *memoryGraph) walkGrants(start, target string, permission uint16, f walkFunc, chain []*Grant, visited map[*Grant]bool, collect bool) bool { + if visited == nil { + visited = map[*Grant]bool{} + } + grants := g.getGrants(start) + subGrants := make([]*Grant, 0, len(grants)) + for _, grant := range grants { + if visited[grant] { + continue + } + visited[grant] = true + if grant.Permission&permission == permission { + if isSubName(target, grant.Subject) { + if f(grant, chain) { + return true + } + } else { + subGrants = append(subGrants, grant) + } + } + } + for _, grant := range subGrants { + var chainCopy []*Grant + if collect { + chainCopy = make([]*Grant, len(chain)+1) + copy(chainCopy, chain) + chainCopy[len(chainCopy)-1] = grant + } else { + chainCopy = nil + } + + if g.walkGrants(grant.Subject, target, permission, f, chainCopy, visited, collect) { + return true + } + } + return false +} + +func (g 
*memoryGraph) Verify(key libtrust.PublicKey, node string, permission uint16) (bool, error) { + return g.walkGrants(key.KeyID(), node, permission, foundWalkFunc, nil, nil, false), nil +} + +func (g *memoryGraph) GetGrants(key libtrust.PublicKey, node string, permission uint16) ([][]*Grant, error) { + grants := [][]*Grant{} + collect := func(grant *Grant, chain []*Grant) bool { + grantChain := make([]*Grant, len(chain)+1) + copy(grantChain, chain) + grantChain[len(grantChain)-1] = grant + grants = append(grants, grantChain) + return false + } + g.walkGrants(key.KeyID(), node, permission, collect, nil, nil, true) + return grants, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go new file mode 100644 index 000000000..49fd0f3b5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/memory_graph_test.go @@ -0,0 +1,174 @@ +package trustgraph + +import ( + "fmt" + "testing" + + "github.com/docker/libtrust" +) + +func createTestKeysAndGrants(count int) ([]*Grant, []libtrust.PrivateKey) { + grants := make([]*Grant, count) + keys := make([]libtrust.PrivateKey, count) + for i := 0; i < count; i++ { + pk, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + panic(err) + } + grant := &Grant{ + Subject: fmt.Sprintf("/user-%d", i+1), + Permission: 0x0f, + Grantee: pk.KeyID(), + } + keys[i] = pk + grants[i] = grant + } + return grants, keys +} + +func testVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { + if ok, err := g.Verify(k, target, permission); err != nil { + t.Fatalf("Unexpected error during verification: %s", err) + } else if !ok { + t.Errorf("key failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) + } +} + +func testNotVerified(t *testing.T, g TrustGraph, k libtrust.PublicKey, keyName, target string, permission uint16) { + 
if ok, err := g.Verify(k, target, permission); err != nil { + t.Fatalf("Unexpected error during verification: %s", err) + } else if ok { + t.Errorf("key should have failed verification\n\tKey: %s(%s)\n\tNamespace: %s", keyName, k.KeyID(), target) + } +} + +func TestVerify(t *testing.T) { + grants, keys := createTestKeysAndGrants(4) + extraGrants := make([]*Grant, 3) + extraGrants[0] = &Grant{ + Subject: "/user-3", + Permission: 0x0f, + Grantee: "/user-2", + } + extraGrants[1] = &Grant{ + Subject: "/user-3/sub-project", + Permission: 0x0f, + Grantee: "/user-4", + } + extraGrants[2] = &Grant{ + Subject: "/user-4", + Permission: 0x07, + Grantee: "/user-1", + } + grants = append(grants, extraGrants...) + + g := NewMemoryGraph(grants) + + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1/some-project/sub-value", 0x0f) + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x07) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2/", 0x0f) + testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3/sub-value", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-value", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project/app", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f) + + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3/sub-value", 0x0f) + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-4", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1/", 0x0f) + testNotVerified(t, g, keys[2].PublicKey(), "user-key-3", 
"/user-2", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-4", 0x0f) + testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3", 0x0f) +} + +func TestCircularWalk(t *testing.T) { + grants, keys := createTestKeysAndGrants(3) + user1Grant := &Grant{ + Subject: "/user-2", + Permission: 0x0f, + Grantee: "/user-1", + } + user2Grant := &Grant{ + Subject: "/user-1", + Permission: 0x0f, + Grantee: "/user-2", + } + grants = append(grants, user1Grant, user2Grant) + + g := NewMemoryGraph(grants) + + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-2", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-1", 0x0f) + testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f) + + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-3", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) +} + +func assertGrantSame(t *testing.T, actual, expected *Grant) { + if actual != expected { + t.Fatalf("Unexpected grant retrieved\n\tExpected: %v\n\tActual: %v", expected, actual) + } +} + +func TestGetGrants(t *testing.T) { + grants, keys := createTestKeysAndGrants(5) + extraGrants := make([]*Grant, 4) + extraGrants[0] = &Grant{ + Subject: "/user-3/friend-project", + Permission: 0x0f, + Grantee: "/user-2/friends", + } + extraGrants[1] = &Grant{ + Subject: "/user-3/sub-project", + Permission: 0x0f, + Grantee: "/user-4", + } + extraGrants[2] = &Grant{ + Subject: "/user-2/friends", + Permission: 0x0f, + Grantee: "/user-5/fun-project", + } + extraGrants[3] = &Grant{ + Subject: "/user-5/fun-project", + Permission: 0x0f, + Grantee: "/user-1", + } + grants = append(grants, extraGrants...) 
+ + g := NewMemoryGraph(grants) + + grantChains, err := g.GetGrants(keys[3], "/user-3/sub-project/specific-app", 0x0f) + if err != nil { + t.Fatalf("Error getting grants: %s", err) + } + if len(grantChains) != 1 { + t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains)) + } + if len(grantChains[0]) != 2 { + t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0])) + } + assertGrantSame(t, grantChains[0][0], grants[3]) + assertGrantSame(t, grantChains[0][1], extraGrants[1]) + + grantChains, err = g.GetGrants(keys[0], "/user-3/friend-project/fun-app", 0x0f) + if err != nil { + t.Fatalf("Error getting grants: %s", err) + } + if len(grantChains) != 1 { + t.Fatalf("Expected number of grant chains returned, expected %d, received %d", 1, len(grantChains)) + } + if len(grantChains[0]) != 4 { + t.Fatalf("Unexpected number of grants retrieved\n\tExpected: %d\n\tActual: %d", 2, len(grantChains[0])) + } + assertGrantSame(t, grantChains[0][0], grants[0]) + assertGrantSame(t, grantChains[0][1], extraGrants[3]) + assertGrantSame(t, grantChains[0][2], extraGrants[2]) + assertGrantSame(t, grantChains[0][3], extraGrants[0]) +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go new file mode 100644 index 000000000..7a74b553c --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement.go @@ -0,0 +1,227 @@ +package trustgraph + +import ( + "crypto/x509" + "encoding/json" + "io" + "io/ioutil" + "sort" + "strings" + "time" + + "github.com/docker/libtrust" +) + +type jsonGrant struct { + Subject string `json:"subject"` + Permission uint16 `json:"permission"` + Grantee string `json:"grantee"` +} + +type jsonRevocation struct { + Subject string `json:"subject"` + Revocation uint16 `json:"revocation"` + Grantee string `json:"grantee"` +} + +type jsonStatement 
struct { + Revocations []*jsonRevocation `json:"revocations"` + Grants []*jsonGrant `json:"grants"` + Expiration time.Time `json:"expiration"` + IssuedAt time.Time `json:"issuedAt"` +} + +func (g *jsonGrant) Grant(statement *Statement) *Grant { + return &Grant{ + Subject: g.Subject, + Permission: g.Permission, + Grantee: g.Grantee, + statement: statement, + } +} + +// Statement represents a set of grants made from a verifiable +// authority. A statement has an expiration associated with it +// set by the authority. +type Statement struct { + jsonStatement + + signature *libtrust.JSONSignature +} + +// IsExpired returns whether the statement has expired +func (s *Statement) IsExpired() bool { + return s.Expiration.Before(time.Now().Add(-10 * time.Second)) +} + +// Bytes returns an indented json representation of the statement +// in a byte array. This value can be written to a file or stream +// without alteration. +func (s *Statement) Bytes() ([]byte, error) { + return s.signature.PrettySignature("signatures") +} + +// LoadStatement loads and verifies a statement from an input stream. +func LoadStatement(r io.Reader, authority *x509.CertPool) (*Statement, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + js, err := libtrust.ParsePrettySignature(b, "signatures") + if err != nil { + return nil, err + } + payload, err := js.Payload() + if err != nil { + return nil, err + } + var statement Statement + err = json.Unmarshal(payload, &statement.jsonStatement) + if err != nil { + return nil, err + } + + if authority == nil { + _, err = js.Verify() + if err != nil { + return nil, err + } + } else { + _, err = js.VerifyChains(authority) + if err != nil { + return nil, err + } + } + statement.signature = js + + return &statement, nil +} + +// CreateStatements creates and signs a statement from a stream of grants +// and revocations in a JSON array. 
+func CreateStatement(grants, revocations io.Reader, expiration time.Duration, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) { + var statement Statement + err := json.NewDecoder(grants).Decode(&statement.jsonStatement.Grants) + if err != nil { + return nil, err + } + err = json.NewDecoder(revocations).Decode(&statement.jsonStatement.Revocations) + if err != nil { + return nil, err + } + statement.jsonStatement.Expiration = time.Now().UTC().Add(expiration) + statement.jsonStatement.IssuedAt = time.Now().UTC() + + b, err := json.MarshalIndent(&statement.jsonStatement, "", " ") + if err != nil { + return nil, err + } + + statement.signature, err = libtrust.NewJSONSignature(b) + if err != nil { + return nil, err + } + err = statement.signature.SignWithChain(key, chain) + if err != nil { + return nil, err + } + + return &statement, nil +} + +type statementList []*Statement + +func (s statementList) Len() int { + return len(s) +} + +func (s statementList) Less(i, j int) bool { + return s[i].IssuedAt.Before(s[j].IssuedAt) +} + +func (s statementList) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// CollapseStatements returns a single list of the valid statements as well as the +// time when the next grant will expire. 
+func CollapseStatements(statements []*Statement, useExpired bool) ([]*Grant, time.Time, error) { + sorted := make(statementList, 0, len(statements)) + for _, statement := range statements { + if useExpired || !statement.IsExpired() { + sorted = append(sorted, statement) + } + } + sort.Sort(sorted) + + var minExpired time.Time + var grantCount int + roots := map[string]*grantNode{} + for i, statement := range sorted { + if statement.Expiration.Before(minExpired) || i == 0 { + minExpired = statement.Expiration + } + for _, grant := range statement.Grants { + parts := strings.Split(grant.Grantee, "/") + nodes := roots + g := grant.Grant(statement) + grantCount = grantCount + 1 + + for _, part := range parts { + node, nodeOk := nodes[part] + if !nodeOk { + node = newGrantNode() + nodes[part] = node + } + node.grants = append(node.grants, g) + nodes = node.children + } + } + + for _, revocation := range statement.Revocations { + parts := strings.Split(revocation.Grantee, "/") + nodes := roots + + var node *grantNode + var nodeOk bool + for _, part := range parts { + node, nodeOk = nodes[part] + if !nodeOk { + break + } + nodes = node.children + } + if node != nil { + for _, grant := range node.grants { + if isSubName(grant.Subject, revocation.Subject) { + grant.Permission = grant.Permission &^ revocation.Revocation + } + } + } + } + } + + retGrants := make([]*Grant, 0, grantCount) + for _, rootNodes := range roots { + retGrants = append(retGrants, rootNodes.grants...) + } + + return retGrants, minExpired, nil +} + +// FilterStatements filters the statements to statements including the given grants. 
+func FilterStatements(grants []*Grant) ([]*Statement, error) { + statements := map[*Statement]bool{} + for _, grant := range grants { + if grant.statement != nil { + statements[grant.statement] = true + } + } + retStatements := make([]*Statement, len(statements)) + var i int + for statement := range statements { + retStatements[i] = statement + i++ + } + return retStatements, nil +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go new file mode 100644 index 000000000..e50946865 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/trustgraph/statement_test.go @@ -0,0 +1,417 @@ +package trustgraph + +import ( + "bytes" + "crypto/x509" + "encoding/json" + "testing" + "time" + + "github.com/docker/libtrust" + "github.com/docker/libtrust/testutil" +) + +const testStatementExpiration = time.Hour * 5 + +func generateStatement(grants []*Grant, key libtrust.PrivateKey, chain []*x509.Certificate) (*Statement, error) { + var statement Statement + + statement.Grants = make([]*jsonGrant, len(grants)) + for i, grant := range grants { + statement.Grants[i] = &jsonGrant{ + Subject: grant.Subject, + Permission: grant.Permission, + Grantee: grant.Grantee, + } + } + statement.IssuedAt = time.Now() + statement.Expiration = time.Now().Add(testStatementExpiration) + statement.Revocations = make([]*jsonRevocation, 0) + + marshalled, err := json.MarshalIndent(statement.jsonStatement, "", " ") + if err != nil { + return nil, err + } + + sig, err := libtrust.NewJSONSignature(marshalled) + if err != nil { + return nil, err + } + err = sig.SignWithChain(key, chain) + if err != nil { + return nil, err + } + statement.signature = sig + + return &statement, nil +} + +func generateTrustChain(t *testing.T, chainLen int) (libtrust.PrivateKey, *x509.CertPool, []*x509.Certificate) { + caKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + 
t.Fatalf("Error generating key: %s", err) + } + ca, err := testutil.GenerateTrustCA(caKey.CryptoPublicKey(), caKey.CryptoPrivateKey()) + if err != nil { + t.Fatalf("Error generating ca: %s", err) + } + + parent := ca + parentKey := caKey + chain := make([]*x509.Certificate, chainLen) + for i := chainLen - 1; i > 0; i-- { + intermediatekey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generate key: %s", err) + } + chain[i], err = testutil.GenerateIntermediate(intermediatekey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) + if err != nil { + t.Fatalf("Error generating intermdiate certificate: %s", err) + } + parent = chain[i] + parentKey = intermediatekey + } + trustKey, err := libtrust.GenerateECP256PrivateKey() + if err != nil { + t.Fatalf("Error generate key: %s", err) + } + chain[0], err = testutil.GenerateTrustCert(trustKey.CryptoPublicKey(), parentKey.CryptoPrivateKey(), parent) + if err != nil { + t.Fatalf("Error generate trust cert: %s", err) + } + + caPool := x509.NewCertPool() + caPool.AddCert(ca) + + return trustKey, caPool, chain +} + +func TestLoadStatement(t *testing.T) { + grantCount := 4 + grants, _ := createTestKeysAndGrants(grantCount) + + trustKey, caPool, chain := generateTrustChain(t, 6) + + statement, err := generateStatement(grants, trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + + statementBytes, err := statement.Bytes() + if err != nil { + t.Fatalf("Error getting statement bytes: %s", err) + } + + s2, err := LoadStatement(bytes.NewReader(statementBytes), caPool) + if err != nil { + t.Fatalf("Error loading statement: %s", err) + } + if len(s2.Grants) != grantCount { + t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants)) + } + + pool := x509.NewCertPool() + _, err = LoadStatement(bytes.NewReader(statementBytes), pool) + if err == nil { + t.Fatalf("No error thrown verifying without an authority") + } else if _, ok := 
err.(x509.UnknownAuthorityError); !ok { + t.Fatalf("Unexpected error verifying without authority: %s", err) + } + + s2, err = LoadStatement(bytes.NewReader(statementBytes), nil) + if err != nil { + t.Fatalf("Error loading statement: %s", err) + } + if len(s2.Grants) != grantCount { + t.Fatalf("Unexpected grant length\n\tExpected: %d\n\tActual: %d", grantCount, len(s2.Grants)) + } + + badData := make([]byte, len(statementBytes)) + copy(badData, statementBytes) + badData[0] = '[' + _, err = LoadStatement(bytes.NewReader(badData), nil) + if err == nil { + t.Fatalf("No error thrown parsing bad json") + } + + alteredData := make([]byte, len(statementBytes)) + copy(alteredData, statementBytes) + alteredData[30] = '0' + _, err = LoadStatement(bytes.NewReader(alteredData), nil) + if err == nil { + t.Fatalf("No error thrown from bad data") + } +} + +func TestCollapseGrants(t *testing.T) { + grantCount := 8 + grants, keys := createTestKeysAndGrants(grantCount) + linkGrants := make([]*Grant, 4) + linkGrants[0] = &Grant{ + Subject: "/user-3", + Permission: 0x0f, + Grantee: "/user-2", + } + linkGrants[1] = &Grant{ + Subject: "/user-3/sub-project", + Permission: 0x0f, + Grantee: "/user-4", + } + linkGrants[2] = &Grant{ + Subject: "/user-6", + Permission: 0x0f, + Grantee: "/user-7", + } + linkGrants[3] = &Grant{ + Subject: "/user-6/sub-project/specific-app", + Permission: 0x0f, + Grantee: "/user-5", + } + trustKey, pool, chain := generateTrustChain(t, 3) + + statements := make([]*Statement, 3) + var err error + statements[0], err = generateStatement(grants[0:4], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[1], err = generateStatement(grants[4:], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[2], err = generateStatement(linkGrants, trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + + statementsCopy := make([]*Statement, 
len(statements)) + for i, statement := range statements { + b, err := statement.Bytes() + if err != nil { + t.Fatalf("Error getting statement bytes: %s", err) + } + verifiedStatement, err := LoadStatement(bytes.NewReader(b), pool) + if err != nil { + t.Fatalf("Error loading statement: %s", err) + } + // Force sort by reversing order + statementsCopy[len(statementsCopy)-i-1] = verifiedStatement + } + statements = statementsCopy + + collapsedGrants, expiration, err := CollapseStatements(statements, false) + if len(collapsedGrants) != 12 { + t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) + } + if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { + t.Fatalf("Unexpected expiration time: %s", expiration.String()) + } + g := NewMemoryGraph(collapsedGrants) + + testVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) + testVerified(t, g, keys[2].PublicKey(), "user-key-3", "/user-3", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-4", 0x0f) + testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-5", 0x0f) + testVerified(t, g, keys[5].PublicKey(), "user-key-6", "/user-6", 0x0f) + testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-7", 0x0f) + testVerified(t, g, keys[7].PublicKey(), "user-key-8", "/user-8", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3", 0x0f) + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-3/sub-project/specific-app", 0x0f) + testVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-3/sub-project", 0x0f) + testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6", 0x0f) + testVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f) + testVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project/specific-app", 0x0f) + + testNotVerified(t, g, keys[3].PublicKey(), 
"user-key-4", "/user-3", 0x0f) + testNotVerified(t, g, keys[3].PublicKey(), "user-key-4", "/user-6/sub-project", 0x0f) + testNotVerified(t, g, keys[4].PublicKey(), "user-key-5", "/user-6/sub-project", 0x0f) + + // Add revocation grant + statements = append(statements, &Statement{ + jsonStatement{ + IssuedAt: time.Now(), + Expiration: time.Now().Add(testStatementExpiration), + Grants: []*jsonGrant{}, + Revocations: []*jsonRevocation{ + &jsonRevocation{ + Subject: "/user-1", + Revocation: 0x0f, + Grantee: keys[0].KeyID(), + }, + &jsonRevocation{ + Subject: "/user-2", + Revocation: 0x08, + Grantee: keys[1].KeyID(), + }, + &jsonRevocation{ + Subject: "/user-6", + Revocation: 0x0f, + Grantee: "/user-7", + }, + &jsonRevocation{ + Subject: "/user-9", + Revocation: 0x0f, + Grantee: "/user-10", + }, + }, + }, + nil, + }) + + collapsedGrants, expiration, err = CollapseStatements(statements, false) + if len(collapsedGrants) != 12 { + t.Fatalf("Unexpected number of grants\n\tExpected: %d\n\tActual: %d", 12, len(collapsedGrants)) + } + if expiration.After(time.Now().Add(time.Hour*5)) || expiration.Before(time.Now()) { + t.Fatalf("Unexpected expiration time: %s", expiration.String()) + } + g = NewMemoryGraph(collapsedGrants) + + testNotVerified(t, g, keys[0].PublicKey(), "user-key-1", "/user-1", 0x0f) + testNotVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x0f) + testNotVerified(t, g, keys[6].PublicKey(), "user-key-7", "/user-6/sub-project/specific-app", 0x0f) + + testVerified(t, g, keys[1].PublicKey(), "user-key-2", "/user-2", 0x07) +} + +func TestFilterStatements(t *testing.T) { + grantCount := 8 + grants, keys := createTestKeysAndGrants(grantCount) + linkGrants := make([]*Grant, 3) + linkGrants[0] = &Grant{ + Subject: "/user-3", + Permission: 0x0f, + Grantee: "/user-2", + } + linkGrants[1] = &Grant{ + Subject: "/user-5", + Permission: 0x0f, + Grantee: "/user-4", + } + linkGrants[2] = &Grant{ + Subject: "/user-7", + Permission: 0x0f, + Grantee: "/user-6", + } + 
+ trustKey, _, chain := generateTrustChain(t, 3) + + statements := make([]*Statement, 5) + var err error + statements[0], err = generateStatement(grants[0:2], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[1], err = generateStatement(grants[2:4], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[2], err = generateStatement(grants[4:6], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[3], err = generateStatement(grants[6:], trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + statements[4], err = generateStatement(linkGrants, trustKey, chain) + if err != nil { + t.Fatalf("Error generating statement: %s", err) + } + collapsed, _, err := CollapseStatements(statements, false) + if err != nil { + t.Fatalf("Error collapsing grants: %s", err) + } + + // Filter 1, all 5 statements + filter1, err := FilterStatements(collapsed) + if err != nil { + t.Fatalf("Error filtering statements: %s", err) + } + if len(filter1) != 5 { + t.Fatalf("Wrong number of statements, expected %d, received %d", 5, len(filter1)) + } + + // Filter 2, one statement + filter2, err := FilterStatements([]*Grant{collapsed[0]}) + if err != nil { + t.Fatalf("Error filtering statements: %s", err) + } + if len(filter2) != 1 { + t.Fatalf("Wrong number of statements, expected %d, received %d", 1, len(filter2)) + } + + // Filter 3, 2 statements, from graph lookup + g := NewMemoryGraph(collapsed) + lookupGrants, err := g.GetGrants(keys[1], "/user-3", 0x0f) + if err != nil { + t.Fatalf("Error looking up grants: %s", err) + } + if len(lookupGrants) != 1 { + t.Fatalf("Wrong numberof grant chains returned from lookup, expected %d, received %d", 1, len(lookupGrants)) + } + if len(lookupGrants[0]) != 2 { + t.Fatalf("Wrong number of grants looked up, expected %d, received %d", 2, len(lookupGrants)) + } + filter3, err := 
FilterStatements(lookupGrants[0]) + if err != nil { + t.Fatalf("Error filtering statements: %s", err) + } + if len(filter3) != 2 { + t.Fatalf("Wrong number of statements, expected %d, received %d", 2, len(filter3)) + } + +} + +func TestCreateStatement(t *testing.T) { + grantJSON := bytes.NewReader([]byte(`[ + { + "subject": "/user-2", + "permission": 15, + "grantee": "/user-1" + }, + { + "subject": "/user-7", + "permission": 1, + "grantee": "/user-9" + }, + { + "subject": "/user-3", + "permission": 15, + "grantee": "/user-2" + } +]`)) + revocationJSON := bytes.NewReader([]byte(`[ + { + "subject": "user-8", + "revocation": 12, + "grantee": "user-9" + } +]`)) + + trustKey, pool, chain := generateTrustChain(t, 3) + + statement, err := CreateStatement(grantJSON, revocationJSON, testStatementExpiration, trustKey, chain) + if err != nil { + t.Fatalf("Error creating statement: %s", err) + } + + b, err := statement.Bytes() + if err != nil { + t.Fatalf("Error retrieving bytes: %s", err) + } + + verified, err := LoadStatement(bytes.NewReader(b), pool) + if err != nil { + t.Fatalf("Error loading statement: %s", err) + } + + if len(verified.Grants) != 3 { + t.Errorf("Unexpected number of grants, expected %d, received %d", 3, len(verified.Grants)) + } + + if len(verified.Revocations) != 1 { + t.Errorf("Unexpected number of revocations, expected %d, received %d", 1, len(verified.Revocations)) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/util.go b/Godeps/_workspace/src/github.com/docker/libtrust/util.go new file mode 100644 index 000000000..d88176cc3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/util.go @@ -0,0 +1,363 @@ +package libtrust + +import ( + "bytes" + "crypto" + "crypto/elliptic" + "crypto/tls" + "crypto/x509" + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +// LoadOrCreateTrustKey will load a 
PrivateKey from the specified path +func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { + if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { + return nil, err + } + + trustKey, err := LoadKeyFile(trustKeyPath) + if err == ErrKeyFileDoesNotExist { + trustKey, err = GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("error generating key: %s", err) + } + + if err := SaveKey(trustKeyPath, trustKey); err != nil { + return nil, fmt.Errorf("error saving key file: %s", err) + } + + dir, file := filepath.Split(trustKeyPath) + if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { + return nil, fmt.Errorf("error saving public key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("error loading key file: %s", err) + } + return trustKey, nil +} + +// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity +// based authentication from the specified dockerUrl, the rootConfigPath and +// the server name to which it is connecting. +// If trustUnknownHosts is true it will automatically add the host to the +// known-hosts.json in rootConfigPath. 
+func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { + tlsConfig := newTLSConfig() + + trustKeyPath := filepath.Join(rootConfigPath, "key.json") + knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") + + u, err := url.Parse(dockerUrl) + if err != nil { + return nil, fmt.Errorf("unable to parse machine url") + } + + if u.Scheme == "unix" { + return nil, nil + } + + addr := u.Host + proto := "tcp" + + trustKey, err := LoadOrCreateTrustKey(trustKeyPath) + if err != nil { + return nil, fmt.Errorf("unable to load trust key: %s", err) + } + + knownHosts, err := LoadKeySetFile(knownHostsPath) + if err != nil { + return nil, fmt.Errorf("could not load trusted hosts file: %s", err) + } + + allowedHosts, err := FilterByHosts(knownHosts, addr, false) + if err != nil { + return nil, fmt.Errorf("error filtering hosts: %s", err) + } + + certPool, err := GenerateCACertPool(trustKey, allowedHosts) + if err != nil { + return nil, fmt.Errorf("Could not create CA pool: %s", err) + } + + tlsConfig.ServerName = serverName + tlsConfig.RootCAs = certPool + + x509Cert, err := GenerateSelfSignedClientCert(trustKey) + if err != nil { + return nil, fmt.Errorf("certificate generation error: %s", err) + } + + tlsConfig.Certificates = []tls.Certificate{{ + Certificate: [][]byte{x509Cert.Raw}, + PrivateKey: trustKey.CryptoPrivateKey(), + Leaf: x509Cert, + }} + + tlsConfig.InsecureSkipVerify = true + + testConn, err := tls.Dial(proto, addr, tlsConfig) + if err != nil { + return nil, fmt.Errorf("tls Handshake error: %s", err) + } + + opts := x509.VerifyOptions{ + Roots: tlsConfig.RootCAs, + CurrentTime: time.Now(), + DNSName: tlsConfig.ServerName, + Intermediates: x509.NewCertPool(), + } + + certs := testConn.ConnectionState().PeerCertificates + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + + if _, err := certs[0].Verify(opts); err != nil { 
+ if _, ok := err.(x509.UnknownAuthorityError); ok { + if trustUnknownHosts { + pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) + if err != nil { + return nil, fmt.Errorf("error extracting public key from cert: %s", err) + } + + pubKey.AddExtendedField("hosts", []string{addr}) + + if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { + return nil, fmt.Errorf("error adding machine to known hosts: %s", err) + } + } else { + return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) + } + } + } + + testConn.Close() + tlsConfig.InsecureSkipVerify = false + + return tlsConfig, nil +} + +// joseBase64UrlEncode encodes the given data using the standard base64 url +// encoding format but with all trailing '=' characters ommitted in accordance +// with the jose specification. +// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + s = strings.Replace(s, "\n", "", -1) + s = strings.Replace(s, " ", "", -1) + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func keyIDEncode(b []byte) string { + s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") + var buf bytes.Buffer + var i int + for i = 0; i < len(s)/4-1; i++ { + start := i * 4 + end := start + 4 + buf.WriteString(s[start:end] + ":") + } + buf.WriteString(s[i*4:]) + return buf.String() +} + +func keyIDFromCryptoKey(pubKey PublicKey) string { + // Generate and return a 'libtrust' fingerprint of the public key. + // For an RSA key this should be: + // SHA256(DER encoded ASN1) + // Then truncated to 240 bits and encoded into 12 base32 groups like so: + // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP + derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) + if err != nil { + return "" + } + hasher := crypto.SHA256.New() + hasher.Write(derBytes) + return keyIDEncode(hasher.Sum(nil)[:30]) +} + +func stringFromMap(m map[string]interface{}, key string) (string, error) { + val, ok := m[key] + if !ok { + return "", fmt.Errorf("%q value not specified", key) + } + + str, ok := val.(string) + if !ok { + return "", fmt.Errorf("%q value must be a string", key) + } + delete(m, key) + + return str, nil +} + +func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { + curveByteLen := (curve.Params().BitSize + 7) >> 3 + + cBytes, err := joseBase64UrlDecode(cB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + cByteLength := len(cBytes) + if cByteLength != curveByteLen { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) + } + 
return new(big.Int).SetBytes(cBytes), nil +} + +func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { + dBytes, err := joseBase64UrlDecode(dB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + // The length of this octet string MUST be ceiling(log-base-2(n)/8) + // octets (where n is the order of the curve). This is because the private + // key d must be in the interval [1, n-1] so the bitlength of d should be + // no larger than the bitlength of n-1. The easiest way to find the octet + // length is to take bitlength(n-1), add 7 to force a carry, and shift this + // bit sequence right by 3, which is essentially dividing by 8 and adding + // 1 if there is any remainder. Thus, the private key value d should be + // output to (bitlength(n-1)+7)>>3 octets. + n := curve.Params().N + octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 + dByteLength := len(dBytes) + + if dByteLength != octetLength { + return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) + } + + return new(big.Int).SetBytes(dBytes), nil +} + +func parseRSAModulusParam(nB64Url string) (*big.Int, error) { + nBytes, err := joseBase64UrlDecode(nB64Url) + if err != nil { + return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(nBytes), nil +} + +func serializeRSAPublicExponentParam(e int) []byte { + // We MUST use the minimum number of octets to represent E. + // E is supposed to be 65537 for performance and security reasons + // and is what golang's rsa package generates, but it might be + // different if imported from some other generator. 
+ buf := make([]byte, 4) + binary.BigEndian.PutUint32(buf, uint32(e)) + var i int + for i = 0; i < 8; i++ { + if buf[i] != 0 { + break + } + } + return buf[i:] +} + +func parseRSAPublicExponentParam(eB64Url string) (int, error) { + eBytes, err := joseBase64UrlDecode(eB64Url) + if err != nil { + return 0, fmt.Errorf("invalid base64 URL encoding: %s", err) + } + // Only the minimum number of bytes were used to represent E, but + // binary.BigEndian.Uint32 expects at least 4 bytes, so we need + // to add zero padding if necassary. + byteLen := len(eBytes) + buf := make([]byte, 4-byteLen, 4) + eBytes = append(buf, eBytes...) + + return int(binary.BigEndian.Uint32(eBytes)), nil +} + +func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) { + b64Url, err := stringFromMap(m, key) + if err != nil { + return nil, err + } + + paramBytes, err := joseBase64UrlDecode(b64Url) + if err != nil { + return nil, fmt.Errorf("invaled base64 URL encoding: %s", err) + } + + return new(big.Int).SetBytes(paramBytes), nil +} + +func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) { + pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}} + for k, v := range headers { + switch val := v.(type) { + case string: + pemBlock.Headers[k] = val + case []string: + if k == "hosts" { + pemBlock.Headers[k] = strings.Join(val, ",") + } else { + // Return error, non-encodable type + } + default: + // Return error, non-encodable type + } + } + + return pemBlock, nil +} + +func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) { + cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) + if err != nil { + return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err) + } + + pubKey, err := FromCryptoPublicKey(cryptoPublicKey) + if err != nil { + return nil, err + } + + addPEMHeadersToKey(pemBlock, pubKey) + + return pubKey, nil +} + +func addPEMHeadersToKey(pemBlock 
*pem.Block, pubKey PublicKey) { + for key, value := range pemBlock.Headers { + var safeVal interface{} + if key == "hosts" { + safeVal = strings.Split(value, ",") + } else { + safeVal = value + } + pubKey.AddExtendedField(key, safeVal) + } +} diff --git a/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go b/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go new file mode 100644 index 000000000..83b7cfb18 --- /dev/null +++ b/Godeps/_workspace/src/github.com/docker/libtrust/util_test.go @@ -0,0 +1,45 @@ +package libtrust + +import ( + "encoding/pem" + "reflect" + "testing" +) + +func TestAddPEMHeadersToKey(t *testing.T) { + pk := &rsaPublicKey{nil, map[string]interface{}{}} + blk := &pem.Block{Headers: map[string]string{"hosts": "localhost,127.0.0.1"}} + addPEMHeadersToKey(blk, pk) + + val := pk.GetExtendedField("hosts") + hosts, ok := val.([]string) + if !ok { + t.Fatalf("hosts type(%v), expected []string", reflect.TypeOf(val)) + } + expected := []string{"localhost", "127.0.0.1"} + if !reflect.DeepEqual(hosts, expected) { + t.Errorf("hosts(%v), expected %v", hosts, expected) + } +} + +func TestBase64URL(t *testing.T) { + clean := "eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJwMnMiOiIyV0NUY0paMVJ2ZF9DSnVKcmlwUTF3IiwicDJjIjo0MDk2LCJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiY3R5IjoiandrK2pzb24ifQ" + + tests := []string{ + clean, // clean roundtrip + "eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJwMnMiOiIyV0NUY0paMVJ2\nZF9DSnVKcmlwUTF3IiwicDJjIjo0MDk2LCJlbmMiOiJBMTI4Q0JDLUhTMjU2\nIiwiY3R5IjoiandrK2pzb24ifQ", // with newlines + "eyJhbGciOiJQQkVTMi1IUzI1NitBMTI4S1ciLCJwMnMiOiIyV0NUY0paMVJ2 \n ZF9DSnVKcmlwUTF3IiwicDJjIjo0MDk2LCJlbmMiOiJBMTI4Q0JDLUhTMjU2 \n IiwiY3R5IjoiandrK2pzb24ifQ", // with newlines and spaces + } + + for i, test := range tests { + b, err := joseBase64UrlDecode(test) + if err != nil { + t.Fatalf("on test %d: %s", i, err) + } + got := joseBase64UrlEncode(b) + + if got != clean { + t.Errorf("expected %q, got %q", clean, got) + } + } +} diff --git 
a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore new file mode 100644 index 000000000..ba8e0cb3a --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.gitignore @@ -0,0 +1,8 @@ +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +Icon? +ehthumbs.db +Thumbs.db diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml new file mode 100644 index 000000000..2f4e3c2f0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/.travis.yml @@ -0,0 +1,10 @@ +sudo: false +language: go +go: + - 1.2 + - 1.3 + - 1.4 + - tip + +before_script: + - mysql -e 'create database gotest;' diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS new file mode 100644 index 000000000..6fc4c6f7b --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/AUTHORS @@ -0,0 +1,44 @@ +# This is the official list of Go-MySQL-Driver authors for copyright purposes. + +# If you are submitting a patch, please add your name or the name of the +# organization which holds the copyright to this list in alphabetical order. + +# Names should be added to this file as +# Name +# The email address is not required for organizations. +# Please keep the list sorted. + + +# Individual Persons + +Aaron Hopkins +Arne Hormann +Carlos Nieto +Chris Moos +DisposaBoy +Frederick Mayle +Gustavo Kristic +Hanno Braun +Henri Yandell +INADA Naoki +James Harr +Jian Zhen +Joshua Prunier +Julien Schmidt +Kamil Dziedzic +Leonardo YongUk Kim +Lucas Liu +Luke Scott +Michael Woolnough +Nicola Peduzzi +Runrioter Wung +Soroush Pour +Stan Putrya +Xiaobing Jiang +Xiuming Chen + +# Organizations + +Barracuda Networks, Inc. +Google Inc. +Stripe Inc. 
diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md new file mode 100644 index 000000000..161ad0fcc --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -0,0 +1,92 @@ +## HEAD + +Changes: + + - Go 1.1 is no longer supported + - Use decimals field from MySQL to format time types (#249) + - Buffer optimizations (#269) + - TLS ServerName defaults to the host (#283) + +Bugfixes: + + - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) + - Fixed handling of queries without columns and rows (#255) + - Fixed a panic when SetKeepAlive() failed (#298) + +New Features: + - Support for returning table alias on Columns() (#289) + - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318) + + +## Version 1.2 (2014-06-03) + +Changes: + + - We switched back to a "rolling release". `go get` installs the current master branch again + - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver + - Exported errors to allow easy checking from application code + - Enabled TCP Keepalives on TCP connections + - Optimized INFILE handling (better buffer size calculation, lazy init, ...) + - The DSN parser also checks for a missing separating slash + - Faster binary date / datetime to string formatting + - Also exported the MySQLWarning type + - mysqlConn.Close returns the first error encountered instead of ignoring all errors + - writePacket() automatically writes the packet size to the header + - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets + +New Features: + + - `RegisterDial` allows the usage of a custom dial function to establish the network connection + - Setting the connection collation is possible with the `collation` DSN parameter. 
This parameter should be preferred over the `charset` parameter + - Logging of critical errors is configurable with `SetLogger` + - Google CloudSQL support + +Bugfixes: + + - Allow more than 32 parameters in prepared statements + - Various old_password fixes + - Fixed TestConcurrent test to pass Go's race detection + - Fixed appendLengthEncodedInteger for large numbers + - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) + + +## Version 1.1 (2013-11-02) + +Changes: + + - Go-MySQL-Driver now requires Go 1.1 + - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore + - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors + - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` + - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'. + - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries + - Optimized the buffer for reading + - stmt.Query now caches column metadata + - New Logo + - Changed the copyright header to include all contributors + - Improved the LOAD INFILE documentation + - The driver struct is now exported to make the driver directly accessible + - Refactored the driver tests + - Added more benchmarks and moved all to a separate file + - Other small refactoring + +New Features: + + - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure + - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs + - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. 
Custom TLS configs can be registered and used + +Bugfixes: + + - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification + - Convert to DB timezone when inserting `time.Time` + - Splitted packets (more than 16MB) are now merged correctly + - Fixed false positive `io.EOF` errors when the data was fully read + - Avoid panics on reuse of closed connections + - Fixed empty string producing false nil values + - Fixed sign byte for positive TIME fields + + +## Version 1.0 (2013-05-14) + +Initial Release diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md new file mode 100644 index 000000000..f87c19824 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/CONTRIBUTING.md @@ -0,0 +1,40 @@ +# Contributing Guidelines + +## Reporting Issues + +Before creating a new Issue, please check first if a similar Issue [already exists](https://github.com/go-sql-driver/mysql/issues?state=open) or was [recently closed](https://github.com/go-sql-driver/mysql/issues?direction=desc&page=1&sort=updated&state=closed). + +Please provide the following minimum information: +* Your Go-MySQL-Driver version (or git SHA) +* Your Go version (run `go version` in your console) +* A detailed issue description +* Error Log if present +* If possible, a short example + + +## Contributing Code + +By contributing to this project, you share your code under the Mozilla Public License 2, as specified in the LICENSE file. +Don't forget to add yourself to the AUTHORS file. + +### Pull Requests Checklist + +Please check the following points before submitting your pull request: +- [x] Code compiles correctly +- [x] Created tests, if possible +- [x] All tests pass +- [x] Extended the README / documentation, if necessary +- [x] Added yourself to the AUTHORS file + +### Code Review + +Everyone is invited to review and comment on pull requests. 
+If it looks fine to you, comment with "LGTM" (Looks good to me). + +If changes are required, notice the reviewers with "PTAL" (Please take another look) after committing the fixes. + +Before merging the Pull Request, at least one [team member](https://github.com/go-sql-driver?tab=members) must have commented with "LGTM". + +## Development Ideas + +If you are looking for ideas for code contributions, please check our [Development Ideas](https://github.com/go-sql-driver/mysql/wiki/Development-Ideas) Wiki page. diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE new file mode 100644 index 000000000..14e2f777f --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/README.md b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/README.md new file mode 100644 index 000000000..72b921eaf --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/README.md @@ -0,0 +1,386 @@ +# Go-MySQL-Driver + +A MySQL-Driver for Go's [database/sql](http://golang.org/pkg/database/sql) package + +![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin") + +**Latest stable Release:** [Version 1.2 (June 03, 2014)](https://github.com/go-sql-driver/mysql/releases) + +[![Build Status](https://travis-ci.org/go-sql-driver/mysql.png?branch=master)](https://travis-ci.org/go-sql-driver/mysql) + +--------------------------------------- + * [Features](#features) + * [Requirements](#requirements) + * [Installation](#installation) + * [Usage](#usage) + * [DSN (Data Source Name)](#dsn-data-source-name) + * [Password](#password) + * [Protocol](#protocol) + * [Address](#address) + * [Parameters](#parameters) + * [Examples](#examples) + * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support) + * [time.Time support](#timetime-support) + * [Unicode support](#unicode-support) + * [Testing / Development](#testing--development) + * [License](#license) + +--------------------------------------- + +## Features + * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance") + * Native Go implementation. 
No C-bindings, just pure Go + * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](http://godoc.org/github.com/go-sql-driver/mysql#DialFunc) + * Automatic handling of broken connections + * Automatic Connection Pooling *(by database/sql package)* + * Supports queries larger than 16MB + * Full [`sql.RawBytes`](http://golang.org/pkg/database/sql/#RawBytes) support. + * Intelligent `LONG DATA` handling in prepared statements + * Secure `LOAD DATA LOCAL INFILE` support with file Whitelisting and `io.Reader` support + * Optional `time.Time` parsing + * Optional placeholder interpolation + +## Requirements + * Go 1.2 or higher + * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) + +--------------------------------------- + +## Installation +Simply install the package to your [$GOPATH](http://code.google.com/p/go-wiki/wiki/GOPATH "GOPATH") with the [go tool](http://golang.org/cmd/go/ "go command") from shell: +```bash +$ go get github.com/go-sql-driver/mysql +``` +Make sure [Git is installed](http://git-scm.com/downloads) on your machine and in your system's `PATH`. + +## Usage +_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](http://golang.org/pkg/database/sql) API then. + +Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`: +```go +import "database/sql" +import _ "github.com/go-sql-driver/mysql" + +db, err := sql.Open("mysql", "user:password@/dbname") +``` + +[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples"). + + +### DSN (Data Source Name) + +The Data Source Name has a common format, like e.g. 
[PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets): +``` +[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN] +``` + +A DSN in its fullest form: +``` +username:password@protocol(address)/dbname?param=value +``` + +Except for the databasename, all values are optional. So the minimal DSN is: +``` +/dbname +``` + +If you do not want to preselect a database, leave `dbname` empty: +``` +/ +``` +This has the same effect as an empty DSN string: +``` + +``` + +#### Password +Passwords can consist of any character. Escaping is **not** necessary. + +#### Protocol +See [net.Dial](http://golang.org/pkg/net/#Dial) for more information which networks are available. +In general you should use an Unix domain socket if available and TCP otherwise for best performance. + +#### Address +For TCP and UDP networks, addresses have the form `host:port`. +If `host` is a literal IPv6 address, it must be enclosed in square brackets. +The functions [net.JoinHostPort](http://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](http://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form. + +For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`. + +#### Parameters +*Parameters are case-sensitive!* + +Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`. + +##### `allowAllFiles` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`allowAllFiles=true` disables the file Whitelist for `LOAD DATA LOCAL INFILE` and allows *all* files. 
+ +[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html) + +##### `allowCleartextPasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network. + +##### `allowOldPasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` +`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords). + +##### `charset` + +``` +Type: string +Valid Values: +Default: none +``` + +Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`). + +Usage of the `charset` parameter is discouraged because it issues additional queries to the server. +Unless you need the fallback behavior, please use `collation` instead. + +##### `collation` + +``` +Type: string +Valid Values: +Default: utf8_general_ci +``` + +Sets the collation used for client-server interaction on connection. 
In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail. + +A list of valid charsets for a server is retrievable with `SHOW COLLATION`. + +##### `clientFoundRows` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed. + +##### `columnsWithAlias` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example: + +``` +SELECT u.id FROM users as u +``` + +will return `u.id` instead of just `id` if `columnsWithAlias=true`. + +##### `interpolateParams` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`. + +*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are blacklisted as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!* + +##### `loc` + +``` +Type: string +Valid Values: +Default: UTC +``` + +Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](http://golang.org/pkg/time/#LoadLocation) for details. + +Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). 
For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter. + +Please keep in mind, that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`. + + +##### `parseTime` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string` + + +##### `strict` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`strict=true` enables the strict mode in which MySQL warnings are treated as errors. + +By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. See the [examples](#examples) for an DSN example. + + +##### `timeout` + +``` +Type: decimal number +Default: OS default +``` + +*Driver* side connection timeout. The value must be a string of decimal numbers, each with optional fraction and a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout). + + +##### `tls` + +``` +Type: bool / string +Valid Values: true, false, skip-verify, +Default: false +``` + +`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). 
+ + +##### System Variables + +All other parameters are interpreted as system variables: + * `autocommit`: `"SET autocommit="` + * [`time_zone`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `"SET time_zone="` + * [`tx_isolation`](https://dev.mysql.com/doc/refman/5.5/en/server-system-variables.html#sysvar_tx_isolation): `"SET tx_isolation="` + * `param`: `"SET ="` + +*The values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!* + +#### Examples +``` +user@unix(/path/to/socket)/dbname +``` + +``` +root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local +``` + +``` +user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true +``` + +Use the [strict mode](#strict) but ignore notes: +``` +user:password@/dbname?strict=true&sql_notes=false +``` + +TCP via IPv6: +``` +user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci +``` + +TCP on a remote host, e.g. Amazon RDS: +``` +id:password@tcp(your-amazonaws-uri.com:3306)/dbname +``` + +Google Cloud SQL on App Engine: +``` +user@cloudsql(project-id:instance-name)/dbname +``` + +TCP using default port (3306) on localhost: +``` +user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped +``` + +Use the default protocol (tcp) and host (localhost:3306): +``` +user:password@/dbname +``` + +No Database preselected: +``` +user:password@/ +``` + +### `LOAD DATA LOCAL INFILE` support +For this feature you need direct access to the package. Therefore you must change the import path (no `_`): +```go +import "github.com/go-sql-driver/mysql" +``` + +Files must be whitelisted by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the Whitelist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)). 
+ +To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. + +See the [godoc of Go-MySQL-Driver](http://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details. + + +### `time.Time` support +The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your programm. + +However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical opposite in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](http://golang.org/pkg/time/#Location) with the `loc` DSN parameter. + +**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes). + +Alternatively you can use the [`NullTime`](http://godoc.org/github.com/go-sql-driver/mysql#NullTime) type as the scan destination, which works with both `time.Time` and `string` / `[]byte`. + + +### Unicode support +Since version 1.1 Go-MySQL-Driver automatically uses the collation `utf8_general_ci` by default. + +Other collations / charsets can be set using the [`collation`](#collation) DSN parameter. + +Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default. 
+ +See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support. + + +## Testing / Development +To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details. + +Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated. +If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls). + +See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/CONTRIBUTING.md) for details. + +--------------------------------------- + +## License +Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) + +Mozilla summarizes the license scope as follows: +> MPL: The copyleft applies to any files containing MPLed code. + + +That means: + * You can **use** the **unchanged** source code both in private and commercially + * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0) + * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged** + +Please read the [MPL 2.0 FAQ](http://www.mozilla.org/MPL/2.0/FAQ.html) if you have further questions regarding the license. 
+ +You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) + +![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") + diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go new file mode 100644 index 000000000..565614eef --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/appengine.go @@ -0,0 +1,19 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build appengine + +package mysql + +import ( + "appengine/cloudsql" +) + +func init() { + RegisterDial("cloudsql", cloudsql.Dial) +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go new file mode 100644 index 000000000..fb8a2f5f3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/benchmark_test.go @@ -0,0 +1,246 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "math" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +type TB testing.B + +func (tb *TB) check(err error) { + if err != nil { + tb.Fatal(err) + } +} + +func (tb *TB) checkDB(db *sql.DB, err error) *sql.DB { + tb.check(err) + return db +} + +func (tb *TB) checkRows(rows *sql.Rows, err error) *sql.Rows { + tb.check(err) + return rows +} + +func (tb *TB) checkStmt(stmt *sql.Stmt, err error) *sql.Stmt { + tb.check(err) + return stmt +} + +func initDB(b *testing.B, queries ...string) *sql.DB { + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + for _, query := range queries { + if _, err := db.Exec(query); err != nil { + if w, ok := err.(MySQLWarnings); ok { + b.Logf("Warning on %q: %v", query, w) + } else { + b.Fatalf("Error on %q: %v", query, err) + } + } + } + return db +} + +const concurrencyLevel = 10 + +func BenchmarkQuery(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := initDB(b, + "DROP TABLE IF EXISTS foo", + "CREATE TABLE foo (id INT PRIMARY KEY, val CHAR(50))", + `INSERT INTO foo VALUES (1, "one")`, + `INSERT INTO foo VALUES (2, "two")`, + ) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("SELECT val FROM foo WHERE id=?")) + defer stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + var got string + tb.check(stmt.QueryRow(1).Scan(&got)) + if got != "one" { + b.Errorf("query = %q; want one", got) + wg.Done() + return + } + } + }() + } +} + +func BenchmarkExec(b *testing.B) { + tb := (*TB)(b) + b.StopTimer() + b.ReportAllocs() + db := tb.checkDB(sql.Open("mysql", dsn)) + db.SetMaxIdleConns(concurrencyLevel) + defer db.Close() + + stmt := tb.checkStmt(db.Prepare("DO 1")) + defer 
stmt.Close() + + remain := int64(b.N) + var wg sync.WaitGroup + wg.Add(concurrencyLevel) + defer wg.Wait() + b.StartTimer() + + for i := 0; i < concurrencyLevel; i++ { + go func() { + for { + if atomic.AddInt64(&remain, -1) < 0 { + wg.Done() + return + } + + if _, err := stmt.Exec(); err != nil { + b.Fatal(err.Error()) + } + } + }() + } +} + +// data, but no db writes +var roundtripSample []byte + +func initRoundtripBenchmarks() ([]byte, int, int) { + if roundtripSample == nil { + roundtripSample = []byte(strings.Repeat("0123456789abcdef", 1024*1024)) + } + return roundtripSample, 16, len(roundtripSample) +} + +func BenchmarkRoundtripTxt(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + sampleString := string(sample) + b.ReportAllocs() + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + b.StartTimer() + var result string + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sampleString[0:length] + rows := tb.checkRows(db.Query(`SELECT "` + test + `"`)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if result != test { + rows.Close() + b.Errorf("mismatch") + } + rows.Close() + } +} + +func BenchmarkRoundtripBin(b *testing.B) { + b.StopTimer() + sample, min, max := initRoundtripBenchmarks() + b.ReportAllocs() + tb := (*TB)(b) + db := tb.checkDB(sql.Open("mysql", dsn)) + defer db.Close() + stmt := tb.checkStmt(db.Prepare("SELECT ?")) + defer stmt.Close() + b.StartTimer() + var result sql.RawBytes + for i := 0; i < b.N; i++ { + length := min + i + if length > max { + length = max + } + test := sample[0:length] + rows := tb.checkRows(stmt.Query(test)) + if !rows.Next() { + rows.Close() + b.Fatalf("crashed") + } + err := rows.Scan(&result) + if err != nil { + rows.Close() + b.Fatalf("crashed") + } + if !bytes.Equal(result, test) { + rows.Close() + 
b.Errorf("mismatch") + } + rows.Close() + } +} + +func BenchmarkInterpolation(b *testing.B) { + mc := &mysqlConn{ + cfg: &config{ + interpolateParams: true, + loc: time.UTC, + }, + maxPacketAllowed: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + buf: newBuffer(nil), + } + + args := []driver.Value{ + int64(42424242), + float64(math.Pi), + false, + time.Unix(1423411542, 807015000), + []byte("bytes containing special chars ' \" \a \x00"), + "string containing special chars ' \" \a \x00", + } + q := "SELECT ?, ?, ?, ?, ?, ?" + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := mc.interpolateParams(q, args) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go new file mode 100644 index 000000000..509ce89e4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/buffer.go @@ -0,0 +1,136 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import "io" + +const defaultBufSize = 4096 + +// A buffer which is used for both reading and writing. +// This is possible since communication on each connection is synchronous. +// In other words, we can't write and read simultaneously on the same connection. +// The buffer is similar to bufio.Reader / Writer but zero-copy-ish +// Also highly optimized for this particular use case. 
+type buffer struct { + buf []byte + rd io.Reader + idx int + length int +} + +func newBuffer(rd io.Reader) buffer { + var b [defaultBufSize]byte + return buffer{ + buf: b[:], + rd: rd, + } +} + +// fill reads into the buffer until at least _need_ bytes are in it +func (b *buffer) fill(need int) error { + n := b.length + + // move existing data to the beginning + if n > 0 && b.idx > 0 { + copy(b.buf[0:n], b.buf[b.idx:]) + } + + // grow buffer if necessary + // TODO: let the buffer shrink again at some point + // Maybe keep the org buf slice and swap back? + if need > len(b.buf) { + // Round up to the next multiple of the default size + newBuf := make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) + copy(newBuf, b.buf) + b.buf = newBuf + } + + b.idx = 0 + + for { + nn, err := b.rd.Read(b.buf[n:]) + n += nn + + switch err { + case nil: + if n < need { + continue + } + b.length = n + return nil + + case io.EOF: + if n >= need { + b.length = n + return nil + } + return io.ErrUnexpectedEOF + + default: + return err + } + } +} + +// returns next N bytes from buffer. +// The returned slice is only guaranteed to be valid until the next read +func (b *buffer) readNext(need int) ([]byte, error) { + if b.length < need { + // refill + if err := b.fill(need); err != nil { + return nil, err + } + } + + offset := b.idx + b.idx += need + b.length -= need + return b.buf[offset:b.idx], nil +} + +// returns a buffer with the requested size. +// If possible, a slice from the existing buffer is returned. +// Otherwise a bigger buffer is made. +// Only one buffer (total) can be used at a time. 
+func (b *buffer) takeBuffer(length int) []byte { + if b.length > 0 { + return nil + } + + // test (cheap) general case first + if length <= defaultBufSize || length <= cap(b.buf) { + return b.buf[:length] + } + + if length < maxPacketSize { + b.buf = make([]byte, length) + return b.buf + } + return make([]byte, length) +} + +// shortcut which can be used if the requested buffer is guaranteed to be +// smaller than defaultBufSize +// Only one buffer (total) can be used at a time. +func (b *buffer) takeSmallBuffer(length int) []byte { + if b.length == 0 { + return b.buf[:length] + } + return nil +} + +// takeCompleteBuffer returns the complete existing buffer. +// This can be used if the necessary buffer size is unknown. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeCompleteBuffer() []byte { + if b.length == 0 { + return b.buf + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go new file mode 100644 index 000000000..6c1d613d5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/collations.go @@ -0,0 +1,250 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +const defaultCollation byte = 33 // utf8_general_ci + +// A list of available collations mapped to the internal ID. 
+// To update this map use the following MySQL query: +// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS +var collations = map[string]byte{ + "big5_chinese_ci": 1, + "latin2_czech_cs": 2, + "dec8_swedish_ci": 3, + "cp850_general_ci": 4, + "latin1_german1_ci": 5, + "hp8_english_ci": 6, + "koi8r_general_ci": 7, + "latin1_swedish_ci": 8, + "latin2_general_ci": 9, + "swe7_swedish_ci": 10, + "ascii_general_ci": 11, + "ujis_japanese_ci": 12, + "sjis_japanese_ci": 13, + "cp1251_bulgarian_ci": 14, + "latin1_danish_ci": 15, + "hebrew_general_ci": 16, + "tis620_thai_ci": 18, + "euckr_korean_ci": 19, + "latin7_estonian_cs": 20, + "latin2_hungarian_ci": 21, + "koi8u_general_ci": 22, + "cp1251_ukrainian_ci": 23, + "gb2312_chinese_ci": 24, + "greek_general_ci": 25, + "cp1250_general_ci": 26, + "latin2_croatian_ci": 27, + "gbk_chinese_ci": 28, + "cp1257_lithuanian_ci": 29, + "latin5_turkish_ci": 30, + "latin1_german2_ci": 31, + "armscii8_general_ci": 32, + "utf8_general_ci": 33, + "cp1250_czech_cs": 34, + "ucs2_general_ci": 35, + "cp866_general_ci": 36, + "keybcs2_general_ci": 37, + "macce_general_ci": 38, + "macroman_general_ci": 39, + "cp852_general_ci": 40, + "latin7_general_ci": 41, + "latin7_general_cs": 42, + "macce_bin": 43, + "cp1250_croatian_ci": 44, + "utf8mb4_general_ci": 45, + "utf8mb4_bin": 46, + "latin1_bin": 47, + "latin1_general_ci": 48, + "latin1_general_cs": 49, + "cp1251_bin": 50, + "cp1251_general_ci": 51, + "cp1251_general_cs": 52, + "macroman_bin": 53, + "utf16_general_ci": 54, + "utf16_bin": 55, + "utf16le_general_ci": 56, + "cp1256_general_ci": 57, + "cp1257_bin": 58, + "cp1257_general_ci": 59, + "utf32_general_ci": 60, + "utf32_bin": 61, + "utf16le_bin": 62, + "binary": 63, + "armscii8_bin": 64, + "ascii_bin": 65, + "cp1250_bin": 66, + "cp1256_bin": 67, + "cp866_bin": 68, + "dec8_bin": 69, + "greek_bin": 70, + "hebrew_bin": 71, + "hp8_bin": 72, + "keybcs2_bin": 73, + "koi8r_bin": 74, + "koi8u_bin": 75, + "latin2_bin": 77, + "latin5_bin": 78, 
+ "latin7_bin": 79, + "cp850_bin": 80, + "cp852_bin": 81, + "swe7_bin": 82, + "utf8_bin": 83, + "big5_bin": 84, + "euckr_bin": 85, + "gb2312_bin": 86, + "gbk_bin": 87, + "sjis_bin": 88, + "tis620_bin": 89, + "ucs2_bin": 90, + "ujis_bin": 91, + "geostd8_general_ci": 92, + "geostd8_bin": 93, + "latin1_spanish_ci": 94, + "cp932_japanese_ci": 95, + "cp932_bin": 96, + "eucjpms_japanese_ci": 97, + "eucjpms_bin": 98, + "cp1250_polish_ci": 99, + "utf16_unicode_ci": 101, + "utf16_icelandic_ci": 102, + "utf16_latvian_ci": 103, + "utf16_romanian_ci": 104, + "utf16_slovenian_ci": 105, + "utf16_polish_ci": 106, + "utf16_estonian_ci": 107, + "utf16_spanish_ci": 108, + "utf16_swedish_ci": 109, + "utf16_turkish_ci": 110, + "utf16_czech_ci": 111, + "utf16_danish_ci": 112, + "utf16_lithuanian_ci": 113, + "utf16_slovak_ci": 114, + "utf16_spanish2_ci": 115, + "utf16_roman_ci": 116, + "utf16_persian_ci": 117, + "utf16_esperanto_ci": 118, + "utf16_hungarian_ci": 119, + "utf16_sinhala_ci": 120, + "utf16_german2_ci": 121, + "utf16_croatian_ci": 122, + "utf16_unicode_520_ci": 123, + "utf16_vietnamese_ci": 124, + "ucs2_unicode_ci": 128, + "ucs2_icelandic_ci": 129, + "ucs2_latvian_ci": 130, + "ucs2_romanian_ci": 131, + "ucs2_slovenian_ci": 132, + "ucs2_polish_ci": 133, + "ucs2_estonian_ci": 134, + "ucs2_spanish_ci": 135, + "ucs2_swedish_ci": 136, + "ucs2_turkish_ci": 137, + "ucs2_czech_ci": 138, + "ucs2_danish_ci": 139, + "ucs2_lithuanian_ci": 140, + "ucs2_slovak_ci": 141, + "ucs2_spanish2_ci": 142, + "ucs2_roman_ci": 143, + "ucs2_persian_ci": 144, + "ucs2_esperanto_ci": 145, + "ucs2_hungarian_ci": 146, + "ucs2_sinhala_ci": 147, + "ucs2_german2_ci": 148, + "ucs2_croatian_ci": 149, + "ucs2_unicode_520_ci": 150, + "ucs2_vietnamese_ci": 151, + "ucs2_general_mysql500_ci": 159, + "utf32_unicode_ci": 160, + "utf32_icelandic_ci": 161, + "utf32_latvian_ci": 162, + "utf32_romanian_ci": 163, + "utf32_slovenian_ci": 164, + "utf32_polish_ci": 165, + "utf32_estonian_ci": 166, + "utf32_spanish_ci": 167, + 
"utf32_swedish_ci": 168, + "utf32_turkish_ci": 169, + "utf32_czech_ci": 170, + "utf32_danish_ci": 171, + "utf32_lithuanian_ci": 172, + "utf32_slovak_ci": 173, + "utf32_spanish2_ci": 174, + "utf32_roman_ci": 175, + "utf32_persian_ci": 176, + "utf32_esperanto_ci": 177, + "utf32_hungarian_ci": 178, + "utf32_sinhala_ci": 179, + "utf32_german2_ci": 180, + "utf32_croatian_ci": 181, + "utf32_unicode_520_ci": 182, + "utf32_vietnamese_ci": 183, + "utf8_unicode_ci": 192, + "utf8_icelandic_ci": 193, + "utf8_latvian_ci": 194, + "utf8_romanian_ci": 195, + "utf8_slovenian_ci": 196, + "utf8_polish_ci": 197, + "utf8_estonian_ci": 198, + "utf8_spanish_ci": 199, + "utf8_swedish_ci": 200, + "utf8_turkish_ci": 201, + "utf8_czech_ci": 202, + "utf8_danish_ci": 203, + "utf8_lithuanian_ci": 204, + "utf8_slovak_ci": 205, + "utf8_spanish2_ci": 206, + "utf8_roman_ci": 207, + "utf8_persian_ci": 208, + "utf8_esperanto_ci": 209, + "utf8_hungarian_ci": 210, + "utf8_sinhala_ci": 211, + "utf8_german2_ci": 212, + "utf8_croatian_ci": 213, + "utf8_unicode_520_ci": 214, + "utf8_vietnamese_ci": 215, + "utf8_general_mysql500_ci": 223, + "utf8mb4_unicode_ci": 224, + "utf8mb4_icelandic_ci": 225, + "utf8mb4_latvian_ci": 226, + "utf8mb4_romanian_ci": 227, + "utf8mb4_slovenian_ci": 228, + "utf8mb4_polish_ci": 229, + "utf8mb4_estonian_ci": 230, + "utf8mb4_spanish_ci": 231, + "utf8mb4_swedish_ci": 232, + "utf8mb4_turkish_ci": 233, + "utf8mb4_czech_ci": 234, + "utf8mb4_danish_ci": 235, + "utf8mb4_lithuanian_ci": 236, + "utf8mb4_slovak_ci": 237, + "utf8mb4_spanish2_ci": 238, + "utf8mb4_roman_ci": 239, + "utf8mb4_persian_ci": 240, + "utf8mb4_esperanto_ci": 241, + "utf8mb4_hungarian_ci": 242, + "utf8mb4_sinhala_ci": 243, + "utf8mb4_german2_ci": 244, + "utf8mb4_croatian_ci": 245, + "utf8mb4_unicode_520_ci": 246, + "utf8mb4_vietnamese_ci": 247, +} + +// A blacklist of collations which is unsafe to interpolate parameters. +// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. 
+var unsafeCollations = map[byte]bool{ + 1: true, // big5_chinese_ci + 13: true, // sjis_japanese_ci + 28: true, // gbk_chinese_ci + 84: true, // big5_bin + 86: true, // gb2312_bin + 87: true, // gbk_bin + 88: true, // sjis_bin + 95: true, // cp932_japanese_ci + 96: true, // cp932_bin +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go new file mode 100644 index 000000000..caaae013f --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/connection.go @@ -0,0 +1,403 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/tls" + "database/sql/driver" + "errors" + "net" + "strconv" + "strings" + "time" +) + +type mysqlConn struct { + buf buffer + netConn net.Conn + affectedRows uint64 + insertId uint64 + cfg *config + maxPacketAllowed int + maxWriteSize int + flags clientFlag + status statusFlag + sequence uint8 + parseTime bool + strict bool +} + +type config struct { + user string + passwd string + net string + addr string + dbname string + params map[string]string + loc *time.Location + tls *tls.Config + timeout time.Duration + collation uint8 + allowAllFiles bool + allowOldPasswords bool + allowCleartextPasswords bool + clientFoundRows bool + columnsWithAlias bool + interpolateParams bool +} + +// Handles parameters set in DSN after the connection is established +func (mc *mysqlConn) handleParams() (err error) { + for param, val := range mc.cfg.params { + switch param { + // Charset + case "charset": + charsets := strings.Split(val, ",") + for i := range charsets { + // ignore errors here - a charset may not exist + err 
= mc.exec("SET NAMES " + charsets[i]) + if err == nil { + break + } + } + if err != nil { + return + } + + // time.Time parsing + case "parseTime": + var isBool bool + mc.parseTime, isBool = readBool(val) + if !isBool { + return errors.New("Invalid Bool value: " + val) + } + + // Strict mode + case "strict": + var isBool bool + mc.strict, isBool = readBool(val) + if !isBool { + return errors.New("Invalid Bool value: " + val) + } + + // Compression + case "compress": + err = errors.New("Compression not implemented yet") + return + + // System Vars + default: + err = mc.exec("SET " + param + "=" + val + "") + if err != nil { + return + } + } + } + + return +} + +func (mc *mysqlConn) Begin() (driver.Tx, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + err := mc.exec("START TRANSACTION") + if err == nil { + return &mysqlTx{mc}, err + } + + return nil, err +} + +func (mc *mysqlConn) Close() (err error) { + // Makes Close idempotent + if mc.netConn != nil { + err = mc.writeCommandPacket(comQuit) + if err == nil { + err = mc.netConn.Close() + } else { + mc.netConn.Close() + } + mc.netConn = nil + } + + mc.cfg = nil + mc.buf.rd = nil + + return +} + +func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := mc.writeCommandPacketStr(comStmtPrepare, query) + if err != nil { + return nil, err + } + + stmt := &mysqlStmt{ + mc: mc, + } + + // Read Result + columnCount, err := stmt.readPrepareResultPacket() + if err == nil { + if stmt.paramCount > 0 { + if err = mc.readUntilEOF(); err != nil { + return nil, err + } + } + + if columnCount > 0 { + err = mc.readUntilEOF() + } + } + + return stmt, err +} + +func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) { + buf := mc.buf.takeCompleteBuffer() + if buf == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return "", driver.ErrBadConn + } + buf = buf[:0] + argPos := 0 + + for i := 0; i < len(query); i++ { + q := strings.IndexByte(query[i:], '?') + if q == -1 { + buf = append(buf, query[i:]...) + break + } + buf = append(buf, query[i:i+q]...) + i += q + + arg := args[argPos] + argPos++ + + if arg == nil { + buf = append(buf, "NULL"...) + continue + } + + switch v := arg.(type) { + case int64: + buf = strconv.AppendInt(buf, v, 10) + case float64: + buf = strconv.AppendFloat(buf, v, 'g', -1, 64) + case bool: + if v { + buf = append(buf, '1') + } else { + buf = append(buf, '0') + } + case time.Time: + if v.IsZero() { + buf = append(buf, "'0000-00-00'"...) + } else { + v := v.In(mc.cfg.loc) + v = v.Add(time.Nanosecond * 500) // To round under microsecond + year := v.Year() + year100 := year / 100 + year1 := year % 100 + month := v.Month() + day := v.Day() + hour := v.Hour() + minute := v.Minute() + second := v.Second() + micro := v.Nanosecond() / 1000 + + buf = append(buf, []byte{ + '\'', + digits10[year100], digits01[year100], + digits10[year1], digits01[year1], + '-', + digits10[month], digits01[month], + '-', + digits10[day], digits01[day], + ' ', + digits10[hour], digits01[hour], + ':', + digits10[minute], digits01[minute], + ':', + digits10[second], digits01[second], + }...) + + if micro != 0 { + micro10000 := micro / 10000 + micro100 := micro / 100 % 100 + micro1 := micro % 100 + buf = append(buf, []byte{ + '.', + digits10[micro10000], digits01[micro10000], + digits10[micro100], digits01[micro100], + digits10[micro1], digits01[micro1], + }...) + } + buf = append(buf, '\'') + } + case []byte: + if v == nil { + buf = append(buf, "NULL"...) 
+ } else { + buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeBytesBackslash(buf, v) + } else { + buf = escapeBytesQuotes(buf, v) + } + buf = append(buf, '\'') + } + case string: + buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeStringBackslash(buf, v) + } else { + buf = escapeStringQuotes(buf, v) + } + buf = append(buf, '\'') + default: + return "", driver.ErrSkip + } + + if len(buf)+4 > mc.maxPacketAllowed { + return "", driver.ErrSkip + } + } + if argPos != len(args) { + return "", driver.ErrSkip + } + return string(buf), nil +} + +func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.interpolateParams { + return nil, driver.ErrSkip + } + // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + args = nil + } + mc.affectedRows = 0 + mc.insertId = 0 + + err := mc.exec(query) + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, err + } + return nil, err +} + +// Internal function to execute commands +func (mc *mysqlConn) exec(query string) error { + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err != nil { + return err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil && resLen > 0 { + if err = mc.readUntilEOF(); err != nil { + return err + } + + err = mc.readUntilEOF() + } + + return err +} + +func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { + if mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.interpolateParams { + return nil, driver.ErrSkip + } + 
// try client-side prepare to reduce roundtrip + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + args = nil + } + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err == nil { + // Read Result + var resLen int + resLen, err = mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen == 0 { + // no columns, no more data + return emptyRows{}, nil + } + // Columns + rows.columns, err = mc.readColumns(resLen) + return rows, err + } + } + return nil, err +} + +// Gets the value of the given MySQL System Variable +// The returned byte slice is only valid until the next read +func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { + // Send command + if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { + return nil, err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen > 0 { + // Columns + if err := mc.readUntilEOF(); err != nil { + return nil, err + } + } + + dest := make([]driver.Value, resLen) + if err = rows.readRow(dest); err == nil { + return dest[0].([]byte), mc.readUntilEOF() + } + } + return nil, err +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go new file mode 100644 index 000000000..dddc12908 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/const.go @@ -0,0 +1,162 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +const ( + minProtocolVersion byte = 10 + maxPacketSize = 1<<24 - 1 + timeFormat = "2006-01-02 15:04:05.999999" +) + +// MySQL constants documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +const ( + iOK byte = 0x00 + iLocalInFile byte = 0xfb + iEOF byte = 0xfe + iERR byte = 0xff +) + +// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags +type clientFlag uint32 + +const ( + clientLongPassword clientFlag = 1 << iota + clientFoundRows + clientLongFlag + clientConnectWithDB + clientNoSchema + clientCompress + clientODBC + clientLocalFiles + clientIgnoreSpace + clientProtocol41 + clientInteractive + clientSSL + clientIgnoreSIGPIPE + clientTransactions + clientReserved + clientSecureConn + clientMultiStatements + clientMultiResults + clientPSMultiResults + clientPluginAuth + clientConnectAttrs + clientPluginAuthLenEncClientData + clientCanHandleExpiredPasswords + clientSessionTrack + clientDeprecateEOF +) + +const ( + comQuit byte = iota + 1 + comInitDB + comQuery + comFieldList + comCreateDB + comDropDB + comRefresh + comShutdown + comStatistics + comProcessInfo + comConnect + comProcessKill + comDebug + comPing + comTime + comDelayedInsert + comChangeUser + comBinlogDump + comTableDump + comConnectOut + comRegisterSlave + comStmtPrepare + comStmtExecute + comStmtSendLongData + comStmtClose + comStmtReset + comSetOption + comStmtFetch +) + +// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType +const ( + fieldTypeDecimal byte = iota + fieldTypeTiny + fieldTypeShort + fieldTypeLong + fieldTypeFloat + fieldTypeDouble + fieldTypeNULL + fieldTypeTimestamp + fieldTypeLongLong + fieldTypeInt24 + fieldTypeDate + fieldTypeTime + fieldTypeDateTime + fieldTypeYear + fieldTypeNewDate + fieldTypeVarChar + fieldTypeBit +) +const ( + fieldTypeNewDecimal byte = iota + 0xf6 + fieldTypeEnum + fieldTypeSet + fieldTypeTinyBLOB + fieldTypeMediumBLOB + 
fieldTypeLongBLOB + fieldTypeBLOB + fieldTypeVarString + fieldTypeString + fieldTypeGeometry +) + +type fieldFlag uint16 + +const ( + flagNotNULL fieldFlag = 1 << iota + flagPriKey + flagUniqueKey + flagMultipleKey + flagBLOB + flagUnsigned + flagZeroFill + flagBinary + flagEnum + flagAutoIncrement + flagTimestamp + flagSet + flagUnknown1 + flagUnknown2 + flagUnknown3 + flagUnknown4 +) + +// http://dev.mysql.com/doc/internals/en/status-flags.html +type statusFlag uint16 + +const ( + statusInTrans statusFlag = 1 << iota + statusInAutocommit + statusReserved // Not in documentation + statusMoreResultsExists + statusNoGoodIndexUsed + statusNoIndexUsed + statusCursorExists + statusLastRowSent + statusDbDropped + statusNoBackslashEscapes + statusMetadataChanged + statusQueryWasSlow + statusPsOutParams + statusInTransReadonly + statusSessionStateChanged +) diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go new file mode 100644 index 000000000..d310624ad --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver.go @@ -0,0 +1,149 @@ +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// The driver should be used via the database/sql package: +// +// import "database/sql" +// import _ "github.com/go-sql-driver/mysql" +// +// db, err := sql.Open("mysql", "user:password@/dbname") +// +// See https://github.com/go-sql-driver/mysql#usage for details +package mysql + +import ( + "database/sql" + "database/sql/driver" + "net" +) + +// This struct is exported to make the driver directly accessible. +// In general the driver is used via the database/sql package. 
+type MySQLDriver struct{} + +// DialFunc is a function which can be used to establish the network connection. +// Custom dial functions must be registered with RegisterDial +type DialFunc func(addr string) (net.Conn, error) + +var dials map[string]DialFunc + +// RegisterDial registers a custom dial function. It can then be used by the +// network address mynet(addr), where mynet is the registered new network. +// addr is passed as a parameter to the dial function. +func RegisterDial(net string, dial DialFunc) { + if dials == nil { + dials = make(map[string]DialFunc) + } + dials[net] = dial +} + +// Open new Connection. +// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how +// the DSN string is formated +func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { + var err error + + // New mysqlConn + mc := &mysqlConn{ + maxPacketAllowed: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + } + mc.cfg, err = parseDSN(dsn) + if err != nil { + return nil, err + } + + // Connect to Server + if dial, ok := dials[mc.cfg.net]; ok { + mc.netConn, err = dial(mc.cfg.addr) + } else { + nd := net.Dialer{Timeout: mc.cfg.timeout} + mc.netConn, err = nd.Dial(mc.cfg.net, mc.cfg.addr) + } + if err != nil { + return nil, err + } + + // Enable TCP Keepalives on TCP connections + if tc, ok := mc.netConn.(*net.TCPConn); ok { + if err := tc.SetKeepAlive(true); err != nil { + // Don't send COM_QUIT before handshake. 
+ mc.netConn.Close() + mc.netConn = nil + return nil, err + } + } + + mc.buf = newBuffer(mc.netConn) + + // Reading Handshake Initialization Packet + cipher, err := mc.readInitPacket() + if err != nil { + mc.Close() + return nil, err + } + + // Send Client Authentication Packet + if err = mc.writeAuthPacket(cipher); err != nil { + mc.Close() + return nil, err + } + + // Read Result Packet + err = mc.readResultOK() + if err != nil { + // Retry with old authentication method, if allowed + if mc.cfg != nil && mc.cfg.allowOldPasswords && err == ErrOldPassword { + if err = mc.writeOldAuthPacket(cipher); err != nil { + mc.Close() + return nil, err + } + if err = mc.readResultOK(); err != nil { + mc.Close() + return nil, err + } + } else if mc.cfg != nil && mc.cfg.allowCleartextPasswords && err == ErrCleartextPassword { + if err = mc.writeClearAuthPacket(); err != nil { + mc.Close() + return nil, err + } + if err = mc.readResultOK(); err != nil { + mc.Close() + return nil, err + } + } else { + mc.Close() + return nil, err + } + + } + + // Get max allowed packet size + maxap, err := mc.getSystemVar("max_allowed_packet") + if err != nil { + mc.Close() + return nil, err + } + mc.maxPacketAllowed = stringToInt(maxap) - 1 + if mc.maxPacketAllowed < maxPacketSize { + mc.maxWriteSize = mc.maxPacketAllowed + } + + // Handle DSN Params + err = mc.handleParams() + if err != nil { + mc.Close() + return nil, err + } + + return mc, nil +} + +func init() { + sql.Register("mysql", &MySQLDriver{}) +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go new file mode 100644 index 000000000..f9da416ec --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/driver_test.go @@ -0,0 +1,1681 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. 
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/tls" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "net" + "net/url" + "os" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +var ( + user string + pass string + prot string + addr string + dbname string + dsn string + netAddr string + available bool +) + +var ( + tDate = time.Date(2012, 6, 14, 0, 0, 0, 0, time.UTC) + sDate = "2012-06-14" + tDateTime = time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC) + sDateTime = "2011-11-20 21:27:37" + tDate0 = time.Time{} + sDate0 = "0000-00-00" + sDateTime0 = "0000-00-00 00:00:00" +) + +// See https://github.com/go-sql-driver/mysql/wiki/Testing +func init() { + // get environment variables + env := func(key, defaultValue string) string { + if value := os.Getenv(key); value != "" { + return value + } + return defaultValue + } + user = env("MYSQL_TEST_USER", "root") + pass = env("MYSQL_TEST_PASS", "") + prot = env("MYSQL_TEST_PROT", "tcp") + addr = env("MYSQL_TEST_ADDR", "localhost:3306") + dbname = env("MYSQL_TEST_DBNAME", "gotest") + netAddr = fmt.Sprintf("%s(%s)", prot, addr) + dsn = fmt.Sprintf("%s:%s@%s/%s?timeout=30s&strict=true", user, pass, netAddr, dbname) + c, err := net.Dial(prot, addr) + if err == nil { + available = true + c.Close() + } +} + +type DBTest struct { + *testing.T + db *sql.DB +} + +func runTests(t *testing.T, dsn string, tests ...func(dbt *DBTest)) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + db, err := sql.Open("mysql", dsn) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + defer db.Close() + + db.Exec("DROP TABLE IF EXISTS test") + + dsn2 := dsn + "&interpolateParams=true" + var db2 *sql.DB + if _, err := parseDSN(dsn2); err != errInvalidDSNUnsafeCollation { + 
db2, err = sql.Open("mysql", dsn2) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + defer db2.Close() + } + + dbt := &DBTest{t, db} + dbt2 := &DBTest{t, db2} + for _, test := range tests { + test(dbt) + dbt.db.Exec("DROP TABLE IF EXISTS test") + if db2 != nil { + test(dbt2) + dbt2.db.Exec("DROP TABLE IF EXISTS test") + } + } +} + +func (dbt *DBTest) fail(method, query string, err error) { + if len(query) > 300 { + query = "[query too large to print]" + } + dbt.Fatalf("Error on %s %s: %s", method, query, err.Error()) +} + +func (dbt *DBTest) mustExec(query string, args ...interface{}) (res sql.Result) { + res, err := dbt.db.Exec(query, args...) + if err != nil { + dbt.fail("Exec", query, err) + } + return res +} + +func (dbt *DBTest) mustQuery(query string, args ...interface{}) (rows *sql.Rows) { + rows, err := dbt.db.Query(query, args...) + if err != nil { + dbt.fail("Query", query, err) + } + return rows +} + +func TestEmptyQuery(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // just a comment, no query + rows := dbt.mustQuery("--") + // will hang before #255 + if rows.Next() { + dbt.Errorf("Next on rows must be false") + } + }) +} + +func TestCRUD(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // Create Table + dbt.mustExec("CREATE TABLE test (value BOOL)") + + // Test for unexpected data + var out bool + rows := dbt.mustQuery("SELECT * FROM test") + if rows.Next() { + dbt.Error("unexpected data in empty table") + } + + // Create Data + res := dbt.mustExec("INSERT INTO test VALUES (1)") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("Expected 1 affected row, got %d", count) + } + + id, err := res.LastInsertId() + if err != nil { + dbt.Fatalf("res.LastInsertId() returned error: %s", err.Error()) + } + if id != 0 { + dbt.Fatalf("Expected InsertID 0, got %d", id) + } + + // Read + rows = dbt.mustQuery("SELECT value 
FROM test") + if rows.Next() { + rows.Scan(&out) + if true != out { + dbt.Errorf("true != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Update + res = dbt.mustExec("UPDATE test SET value = ? WHERE value = ?", false, true) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("Expected 1 affected row, got %d", count) + } + + // Check Update + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if false != out { + dbt.Errorf("false != %t", out) + } + + if rows.Next() { + dbt.Error("unexpected data") + } + } else { + dbt.Error("no data") + } + + // Delete + res = dbt.mustExec("DELETE FROM test WHERE value = ?", false) + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 1 { + dbt.Fatalf("Expected 1 affected row, got %d", count) + } + + // Check for unexpected rows + res = dbt.mustExec("DELETE FROM test") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 0 { + dbt.Fatalf("Expected 0 affected row, got %d", count) + } + }) +} + +func TestInt(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [5]string{"TINYINT", "SMALLINT", "MEDIUMINT", "INT", "BIGINT"} + in := int64(42) + var out int64 + var rows *sql.Rows + + // SIGNED + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %d != %d", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // UNSIGNED ZEROFILL + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + 
v + " ZEROFILL)") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s ZEROFILL: %d != %d", v, in, out) + } + } else { + dbt.Errorf("%s ZEROFILL: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestFloat(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [2]string{"FLOAT", "DOUBLE"} + in := float32(42.23) + var out float32 + var rows *sql.Rows + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ")") + dbt.mustExec("INSERT INTO test VALUES (?)", in) + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %g != %g", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + dbt.mustExec("DROP TABLE IF EXISTS test") + } + }) +} + +func TestString(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + types := [6]string{"CHAR(255)", "VARCHAR(255)", "TINYTEXT", "TEXT", "MEDIUMTEXT", "LONGTEXT"} + in := "κόσμε üöäßñóùéàâÿœ'îë Árvíztűrő いろはにほへとちりぬるを イロハニホヘト דג סקרן чащах น่าฟังเอย" + var out string + var rows *sql.Rows + + for _, v := range types { + dbt.mustExec("CREATE TABLE test (value " + v + ") CHARACTER SET utf8") + + dbt.mustExec("INSERT INTO test VALUES (?)", in) + + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Errorf("%s: %s != %s", v, in, out) + } + } else { + dbt.Errorf("%s: no data", v) + } + + dbt.mustExec("DROP TABLE IF EXISTS test") + } + + // BLOB + dbt.mustExec("CREATE TABLE test (id int, value BLOB) CHARACTER SET utf8") + + id := 2 + in = "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. 
" + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet. " + + "Lorem ipsum dolor sit amet, consetetur sadipscing elitr, " + + "sed diam nonumy eirmod tempor invidunt ut labore et dolore magna aliquyam erat, " + + "sed diam voluptua. At vero eos et accusam et justo duo dolores et ea rebum. " + + "Stet clita kasd gubergren, no sea takimata sanctus est Lorem ipsum dolor sit amet." + dbt.mustExec("INSERT INTO test VALUES (?, ?)", id, in) + + err := dbt.db.QueryRow("SELECT value FROM test WHERE id = ?", id).Scan(&out) + if err != nil { + dbt.Fatalf("Error on BLOB-Query: %s", err.Error()) + } else if out != in { + dbt.Errorf("BLOB: %s != %s", in, out) + } + }) +} + +type timeTests struct { + dbtype string + tlayout string + tests []timeTest +} + +type timeTest struct { + s string // leading "!": do not use t as value in queries + t time.Time +} + +type timeMode byte + +func (t timeMode) String() string { + switch t { + case binaryString: + return "binary:string" + case binaryTime: + return "binary:time.Time" + case textString: + return "text:string" + } + panic("unsupported timeMode") +} + +func (t timeMode) Binary() bool { + switch t { + case binaryString, binaryTime: + return true + } + return false +} + +const ( + binaryString timeMode = iota + binaryTime + textString +) + +func (t timeTest) genQuery(dbtype string, mode timeMode) string { + var inner string + if mode.Binary() { + inner = "?" 
+ } else { + inner = `"%s"` + } + return `SELECT cast(` + inner + ` as ` + dbtype + `)` +} + +func (t timeTest) run(dbt *DBTest, dbtype, tlayout string, mode timeMode) { + var rows *sql.Rows + query := t.genQuery(dbtype, mode) + switch mode { + case binaryString: + rows = dbt.mustQuery(query, t.s) + case binaryTime: + rows = dbt.mustQuery(query, t.t) + case textString: + query = fmt.Sprintf(query, t.s) + rows = dbt.mustQuery(query) + default: + panic("unsupported mode") + } + defer rows.Close() + var err error + if !rows.Next() { + err = rows.Err() + if err == nil { + err = fmt.Errorf("no data") + } + dbt.Errorf("%s [%s]: %s", dbtype, mode, err) + return + } + var dst interface{} + err = rows.Scan(&dst) + if err != nil { + dbt.Errorf("%s [%s]: %s", dbtype, mode, err) + return + } + switch val := dst.(type) { + case []uint8: + str := string(val) + if str == t.s { + return + } + if mode.Binary() && dbtype == "DATETIME" && len(str) == 26 && str[:19] == t.s { + // a fix mainly for TravisCI: + // accept full microsecond resolution in result for DATETIME columns + // where the binary protocol was used + return + } + dbt.Errorf("%s [%s] to string: expected %q, got %q", + dbtype, mode, + t.s, str, + ) + case time.Time: + if val == t.t { + return + } + dbt.Errorf("%s [%s] to string: expected %q, got %q", + dbtype, mode, + t.s, val.Format(tlayout), + ) + default: + fmt.Printf("%#v\n", []interface{}{dbtype, tlayout, mode, t.s, t.t}) + dbt.Errorf("%s [%s]: unhandled type %T (is '%v')", + dbtype, mode, + val, val, + ) + } +} + +func TestDateTime(t *testing.T) { + afterTime := func(t time.Time, d string) time.Time { + dur, err := time.ParseDuration(d) + if err != nil { + panic(err) + } + return t.Add(dur) + } + // NOTE: MySQL rounds DATETIME(x) up - but that's not included in the tests + format := "2006-01-02 15:04:05.999999" + t0 := time.Time{} + tstr0 := "0000-00-00 00:00:00.000000" + testcases := []timeTests{ + {"DATE", format[:10], []timeTest{ + {t: time.Date(2011, 11, 20, 
0, 0, 0, 0, time.UTC)}, + {t: t0, s: tstr0[:10]}, + }}, + {"DATETIME", format[:19], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, + {t: t0, s: tstr0[:19]}, + }}, + {"DATETIME(0)", format[:21], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 0, time.UTC)}, + {t: t0, s: tstr0[:19]}, + }}, + {"DATETIME(1)", format[:21], []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 100000000, time.UTC)}, + {t: t0, s: tstr0[:21]}, + }}, + {"DATETIME(6)", format, []timeTest{ + {t: time.Date(2011, 11, 20, 21, 27, 37, 123456000, time.UTC)}, + {t: t0, s: tstr0}, + }}, + {"TIME", format[11:19], []timeTest{ + {t: afterTime(t0, "12345s")}, + {s: "!-12:34:56"}, + {s: "!-838:59:59"}, + {s: "!838:59:59"}, + {t: t0, s: tstr0[11:19]}, + }}, + {"TIME(0)", format[11:19], []timeTest{ + {t: afterTime(t0, "12345s")}, + {s: "!-12:34:56"}, + {s: "!-838:59:59"}, + {s: "!838:59:59"}, + {t: t0, s: tstr0[11:19]}, + }}, + {"TIME(1)", format[11:21], []timeTest{ + {t: afterTime(t0, "12345600ms")}, + {s: "!-12:34:56.7"}, + {s: "!-838:59:58.9"}, + {s: "!838:59:58.9"}, + {t: t0, s: tstr0[11:21]}, + }}, + {"TIME(6)", format[11:], []timeTest{ + {t: afterTime(t0, "1234567890123000ns")}, + {s: "!-12:34:56.789012"}, + {s: "!-838:59:58.999999"}, + {s: "!838:59:58.999999"}, + {t: t0, s: tstr0[11:]}, + }}, + } + dsns := []string{ + dsn + "&parseTime=true", + dsn + "&parseTime=false", + } + for _, testdsn := range dsns { + runTests(t, testdsn, func(dbt *DBTest) { + microsecsSupported := false + zeroDateSupported := false + var rows *sql.Rows + var err error + rows, err = dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`) + if err == nil { + rows.Scan(µsecsSupported) + rows.Close() + } + rows, err = dbt.db.Query(`SELECT cast("0000-00-00" as DATE) = "0000-00-00"`) + if err == nil { + rows.Scan(&zeroDateSupported) + rows.Close() + } + for _, setups := range testcases { + if t := setups.dbtype; !microsecsSupported && t[len(t)-1:] == ")" { + // skip fractional second tests 
if unsupported by server + continue + } + for _, setup := range setups.tests { + allowBinTime := true + if setup.s == "" { + // fill time string whereever Go can reliable produce it + setup.s = setup.t.Format(setups.tlayout) + } else if setup.s[0] == '!' { + // skip tests using setup.t as source in queries + allowBinTime = false + // fix setup.s - remove the "!" + setup.s = setup.s[1:] + } + if !zeroDateSupported && setup.s == tstr0[:len(setup.s)] { + // skip disallowed 0000-00-00 date + continue + } + setup.run(dbt, setups.dbtype, setups.tlayout, textString) + setup.run(dbt, setups.dbtype, setups.tlayout, binaryString) + if allowBinTime { + setup.run(dbt, setups.dbtype, setups.tlayout, binaryTime) + } + } + } + }) + } +} + +func TestTimestampMicros(t *testing.T) { + format := "2006-01-02 15:04:05.999999" + f0 := format[:19] + f1 := format[:21] + f6 := format[:26] + runTests(t, dsn, func(dbt *DBTest) { + // check if microseconds are supported. + // Do not use timestamp(x) for that check - before 5.5.6, x would mean display width + // and not precision. 
+ // Se last paragraph at http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html + microsecsSupported := false + if rows, err := dbt.db.Query(`SELECT cast("00:00:00.1" as TIME(1)) = "00:00:00.1"`); err == nil { + rows.Scan(µsecsSupported) + rows.Close() + } + if !microsecsSupported { + // skip test + return + } + _, err := dbt.db.Exec(` + CREATE TABLE test ( + value0 TIMESTAMP NOT NULL DEFAULT '` + f0 + `', + value1 TIMESTAMP(1) NOT NULL DEFAULT '` + f1 + `', + value6 TIMESTAMP(6) NOT NULL DEFAULT '` + f6 + `' + )`, + ) + if err != nil { + dbt.Error(err) + } + defer dbt.mustExec("DROP TABLE IF EXISTS test") + dbt.mustExec("INSERT INTO test SET value0=?, value1=?, value6=?", f0, f1, f6) + var res0, res1, res6 string + rows := dbt.mustQuery("SELECT * FROM test") + if !rows.Next() { + dbt.Errorf("test contained no selectable values") + } + err = rows.Scan(&res0, &res1, &res6) + if err != nil { + dbt.Error(err) + } + if res0 != f0 { + dbt.Errorf("expected %q, got %q", f0, res0) + } + if res1 != f1 { + dbt.Errorf("expected %q, got %q", f1, res1) + } + if res6 != f6 { + dbt.Errorf("expected %q, got %q", f6, res6) + } + }) +} + +func TestNULL(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + nullStmt, err := dbt.db.Prepare("SELECT NULL") + if err != nil { + dbt.Fatal(err) + } + defer nullStmt.Close() + + nonNullStmt, err := dbt.db.Prepare("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + defer nonNullStmt.Close() + + // NullBool + var nb sql.NullBool + // Invalid + if err = nullStmt.QueryRow().Scan(&nb); err != nil { + dbt.Fatal(err) + } + if nb.Valid { + dbt.Error("Valid NullBool which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&nb); err != nil { + dbt.Fatal(err) + } + if !nb.Valid { + dbt.Error("Invalid NullBool which should be valid") + } else if nb.Bool != true { + dbt.Errorf("Unexpected NullBool value: %t (should be true)", nb.Bool) + } + + // NullFloat64 + var nf sql.NullFloat64 + // Invalid + if err = 
nullStmt.QueryRow().Scan(&nf); err != nil { + dbt.Fatal(err) + } + if nf.Valid { + dbt.Error("Valid NullFloat64 which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&nf); err != nil { + dbt.Fatal(err) + } + if !nf.Valid { + dbt.Error("Invalid NullFloat64 which should be valid") + } else if nf.Float64 != float64(1) { + dbt.Errorf("Unexpected NullFloat64 value: %f (should be 1.0)", nf.Float64) + } + + // NullInt64 + var ni sql.NullInt64 + // Invalid + if err = nullStmt.QueryRow().Scan(&ni); err != nil { + dbt.Fatal(err) + } + if ni.Valid { + dbt.Error("Valid NullInt64 which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&ni); err != nil { + dbt.Fatal(err) + } + if !ni.Valid { + dbt.Error("Invalid NullInt64 which should be valid") + } else if ni.Int64 != int64(1) { + dbt.Errorf("Unexpected NullInt64 value: %d (should be 1)", ni.Int64) + } + + // NullString + var ns sql.NullString + // Invalid + if err = nullStmt.QueryRow().Scan(&ns); err != nil { + dbt.Fatal(err) + } + if ns.Valid { + dbt.Error("Valid NullString which should be invalid") + } + // Valid + if err = nonNullStmt.QueryRow().Scan(&ns); err != nil { + dbt.Fatal(err) + } + if !ns.Valid { + dbt.Error("Invalid NullString which should be valid") + } else if ns.String != `1` { + dbt.Error("Unexpected NullString value:" + ns.String + " (should be `1`)") + } + + // nil-bytes + var b []byte + // Read nil + if err = nullStmt.QueryRow().Scan(&b); err != nil { + dbt.Fatal(err) + } + if b != nil { + dbt.Error("Non-nil []byte wich should be nil") + } + // Read non-nil + if err = nonNullStmt.QueryRow().Scan(&b); err != nil { + dbt.Fatal(err) + } + if b == nil { + dbt.Error("Nil []byte wich should be non-nil") + } + // Insert nil + b = nil + success := false + if err = dbt.db.QueryRow("SELECT ? 
IS NULL", b).Scan(&success); err != nil { + dbt.Fatal(err) + } + if !success { + dbt.Error("Inserting []byte(nil) as NULL failed") + } + // Check input==output with input==nil + b = nil + if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { + dbt.Fatal(err) + } + if b != nil { + dbt.Error("Non-nil echo from nil input") + } + // Check input==output with input!=nil + b = []byte("") + if err = dbt.db.QueryRow("SELECT ?", b).Scan(&b); err != nil { + dbt.Fatal(err) + } + if b == nil { + dbt.Error("nil echo from non-nil input") + } + + // Insert NULL + dbt.mustExec("CREATE TABLE test (dummmy1 int, value int, dummy2 int)") + + dbt.mustExec("INSERT INTO test VALUES (?, ?, ?)", 1, nil, 2) + + var out interface{} + rows := dbt.mustQuery("SELECT * FROM test") + if rows.Next() { + rows.Scan(&out) + if out != nil { + dbt.Errorf("%v != nil", out) + } + } else { + dbt.Error("no data") + } + }) +} + +func TestUint64(t *testing.T) { + const ( + u0 = uint64(0) + uall = ^u0 + uhigh = uall >> 1 + utop = ^uhigh + s0 = int64(0) + sall = ^s0 + shigh = int64(uhigh) + stop = ^shigh + ) + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare(`SELECT ?, ?, ? 
,?, ?, ?, ?, ?`) + if err != nil { + dbt.Fatal(err) + } + defer stmt.Close() + row := stmt.QueryRow( + u0, uhigh, utop, uall, + s0, shigh, stop, sall, + ) + + var ua, ub, uc, ud uint64 + var sa, sb, sc, sd int64 + + err = row.Scan(&ua, &ub, &uc, &ud, &sa, &sb, &sc, &sd) + if err != nil { + dbt.Fatal(err) + } + switch { + case ua != u0, + ub != uhigh, + uc != utop, + ud != uall, + sa != s0, + sb != shigh, + sc != stop, + sd != sall: + dbt.Fatal("Unexpected result value") + } + }) +} + +func TestLongData(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + var maxAllowedPacketSize int + err := dbt.db.QueryRow("select @@max_allowed_packet").Scan(&maxAllowedPacketSize) + if err != nil { + dbt.Fatal(err) + } + maxAllowedPacketSize-- + + // don't get too ambitious + if maxAllowedPacketSize > 1<<25 { + maxAllowedPacketSize = 1 << 25 + } + + dbt.mustExec("CREATE TABLE test (value LONGBLOB)") + + in := strings.Repeat(`a`, maxAllowedPacketSize+1) + var out string + var rows *sql.Rows + + // Long text data + const nonDataQueryLen = 28 // length query w/o value + inS := in[:maxAllowedPacketSize-nonDataQueryLen] + dbt.mustExec("INSERT INTO test VALUES('" + inS + "')") + rows = dbt.mustQuery("SELECT value FROM test") + if rows.Next() { + rows.Scan(&out) + if inS != out { + dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(inS), len(out)) + } + if rows.Next() { + dbt.Error("LONGBLOB: unexpexted row") + } + } else { + dbt.Fatalf("LONGBLOB: no data") + } + + // Empty table + dbt.mustExec("TRUNCATE TABLE test") + + // Long binary data + dbt.mustExec("INSERT INTO test VALUES(?)", in) + rows = dbt.mustQuery("SELECT value FROM test WHERE 1=?", 1) + if rows.Next() { + rows.Scan(&out) + if in != out { + dbt.Fatalf("LONGBLOB: length in: %d, length out: %d", len(in), len(out)) + } + if rows.Next() { + dbt.Error("LONGBLOB: unexpexted row") + } + } else { + if err = rows.Err(); err != nil { + dbt.Fatalf("LONGBLOB: no data (err: %s)", err.Error()) + } else { + dbt.Fatal("LONGBLOB: 
no data (err: )") + } + } + }) +} + +func TestLoadData(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + verifyLoadDataResult := func() { + rows, err := dbt.db.Query("SELECT * FROM test") + if err != nil { + dbt.Fatal(err.Error()) + } + + i := 0 + values := [4]string{ + "a string", + "a string containing a \t", + "a string containing a \n", + "a string containing both \t\n", + } + + var id int + var value string + + for rows.Next() { + i++ + err = rows.Scan(&id, &value) + if err != nil { + dbt.Fatal(err.Error()) + } + if i != id { + dbt.Fatalf("%d != %d", i, id) + } + if values[i-1] != value { + dbt.Fatalf("%q != %q", values[i-1], value) + } + } + err = rows.Err() + if err != nil { + dbt.Fatal(err.Error()) + } + + if i != 4 { + dbt.Fatalf("Rows count mismatch. Got %d, want 4", i) + } + } + file, err := ioutil.TempFile("", "gotest") + defer os.Remove(file.Name()) + if err != nil { + dbt.Fatal(err) + } + file.WriteString("1\ta string\n2\ta string containing a \\t\n3\ta string containing a \\n\n4\ta string containing both \\t\\n\n") + file.Close() + + dbt.db.Exec("DROP TABLE IF EXISTS test") + dbt.mustExec("CREATE TABLE test (id INT NOT NULL PRIMARY KEY, value TEXT NOT NULL) CHARACTER SET utf8") + + // Local File + RegisterLocalFile(file.Name()) + dbt.mustExec(fmt.Sprintf("LOAD DATA LOCAL INFILE %q INTO TABLE test", file.Name())) + verifyLoadDataResult() + // negative test + _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'doesnotexist' INTO TABLE test") + if err == nil { + dbt.Fatal("Load non-existent file didn't fail") + } else if err.Error() != "Local File 'doesnotexist' is not registered. 
Use the DSN parameter 'allowAllFiles=true' to allow all files" { + dbt.Fatal(err.Error()) + } + + // Empty table + dbt.mustExec("TRUNCATE TABLE test") + + // Reader + RegisterReaderHandler("test", func() io.Reader { + file, err = os.Open(file.Name()) + if err != nil { + dbt.Fatal(err) + } + return file + }) + dbt.mustExec("LOAD DATA LOCAL INFILE 'Reader::test' INTO TABLE test") + verifyLoadDataResult() + // negative test + _, err = dbt.db.Exec("LOAD DATA LOCAL INFILE 'Reader::doesnotexist' INTO TABLE test") + if err == nil { + dbt.Fatal("Load non-existent Reader didn't fail") + } else if err.Error() != "Reader 'doesnotexist' is not registered" { + dbt.Fatal(err.Error()) + } + }) +} + +func TestFoundRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") + dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") + + res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 affected rows, got %d", count) + } + res = dbt.mustExec("UPDATE test SET data = 1 WHERE id = 1") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 affected rows, got %d", count) + } + }) + runTests(t, dsn+"&clientFoundRows=true", func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (id INT NOT NULL ,data INT NOT NULL)") + dbt.mustExec("INSERT INTO test (id, data) VALUES (0, 0),(0, 0),(1, 0),(1, 0),(1, 1)") + + res := dbt.mustExec("UPDATE test SET data = 1 WHERE id = 0") + count, err := res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 2 { + dbt.Fatalf("Expected 2 matched rows, got %d", count) + } + res = dbt.mustExec("UPDATE test SET data = 
1 WHERE id = 1") + count, err = res.RowsAffected() + if err != nil { + dbt.Fatalf("res.RowsAffected() returned error: %s", err.Error()) + } + if count != 3 { + dbt.Fatalf("Expected 3 matched rows, got %d", count) + } + }) +} + +func TestStrict(t *testing.T) { + // ALLOW_INVALID_DATES to get rid of stricter modes - we want to test for warnings, not errors + relaxedDsn := dsn + "&sql_mode=ALLOW_INVALID_DATES" + // make sure the MySQL version is recent enough with a separate connection + // before running the test + conn, err := MySQLDriver{}.Open(relaxedDsn) + if conn != nil { + conn.Close() + } + if me, ok := err.(*MySQLError); ok && me.Number == 1231 { + // Error 1231: Variable 'sql_mode' can't be set to the value of 'ALLOW_INVALID_DATES' + // => skip test, MySQL server version is too old + return + } + runTests(t, relaxedDsn, func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (a TINYINT NOT NULL, b CHAR(4))") + + var queries = [...]struct { + in string + codes []string + }{ + {"DROP TABLE IF EXISTS no_such_table", []string{"1051"}}, + {"INSERT INTO test VALUES(10,'mysql'),(NULL,'test'),(300,'Open Source')", []string{"1265", "1048", "1264", "1265"}}, + } + var err error + + var checkWarnings = func(err error, mode string, idx int) { + if err == nil { + dbt.Errorf("Expected STRICT error on query [%s] %s", mode, queries[idx].in) + } + + if warnings, ok := err.(MySQLWarnings); ok { + var codes = make([]string, len(warnings)) + for i := range warnings { + codes[i] = warnings[i].Code + } + if len(codes) != len(queries[idx].codes) { + dbt.Errorf("Unexpected STRICT error count on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + } + + for i := range warnings { + if codes[i] != queries[idx].codes[i] { + dbt.Errorf("Unexpected STRICT error codes on query [%s] %s: Wanted %v, Got %v", mode, queries[idx].in, queries[idx].codes, codes) + return + } + } + + } else { + dbt.Errorf("Unexpected error on query [%s] %s: %s", mode, 
queries[idx].in, err.Error()) + } + } + + // text protocol + for i := range queries { + _, err = dbt.db.Exec(queries[i].in) + checkWarnings(err, "text", i) + } + + var stmt *sql.Stmt + + // binary protocol + for i := range queries { + stmt, err = dbt.db.Prepare(queries[i].in) + if err != nil { + dbt.Errorf("Error on preparing query %s: %s", queries[i].in, err.Error()) + } + + _, err = stmt.Exec() + checkWarnings(err, "binary", i) + + err = stmt.Close() + if err != nil { + dbt.Errorf("Error on closing stmt for query %s: %s", queries[i].in, err.Error()) + } + } + }) +} + +func TestTLS(t *testing.T) { + tlsTest := func(dbt *DBTest) { + if err := dbt.db.Ping(); err != nil { + if err == ErrNoTLS { + dbt.Skip("Server does not support TLS") + } else { + dbt.Fatalf("Error on Ping: %s", err.Error()) + } + } + + rows := dbt.mustQuery("SHOW STATUS LIKE 'Ssl_cipher'") + + var variable, value *sql.RawBytes + for rows.Next() { + if err := rows.Scan(&variable, &value); err != nil { + dbt.Fatal(err.Error()) + } + + if value == nil { + dbt.Fatal("No Cipher") + } + } + } + + runTests(t, dsn+"&tls=skip-verify", tlsTest) + + // Verify that registering / using a custom cfg works + RegisterTLSConfig("custom-skip-verify", &tls.Config{ + InsecureSkipVerify: true, + }) + runTests(t, dsn+"&tls=custom-skip-verify", tlsTest) +} + +func TestReuseClosedConnection(t *testing.T) { + // this test does not use sql.database, it uses the driver directly + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + md := &MySQLDriver{} + conn, err := md.Open(dsn) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + stmt, err := conn.Prepare("DO 1") + if err != nil { + t.Fatalf("Error preparing statement: %s", err.Error()) + } + _, err = stmt.Exec(nil) + if err != nil { + t.Fatalf("Error executing statement: %s", err.Error()) + } + err = conn.Close() + if err != nil { + t.Fatalf("Error closing connection: %s", err.Error()) + } + + defer func() { + if err := 
recover(); err != nil { + t.Errorf("Panic after reusing a closed connection: %v", err) + } + }() + _, err = stmt.Exec(nil) + if err != nil && err != driver.ErrBadConn { + t.Errorf("Unexpected error '%s', expected '%s'", + err.Error(), driver.ErrBadConn.Error()) + } +} + +func TestCharset(t *testing.T) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + mustSetCharset := func(charsetParam, expected string) { + runTests(t, dsn+"&"+charsetParam, func(dbt *DBTest) { + rows := dbt.mustQuery("SELECT @@character_set_connection") + defer rows.Close() + + if !rows.Next() { + dbt.Fatalf("Error getting connection charset: %s", rows.Err()) + } + + var got string + rows.Scan(&got) + + if got != expected { + dbt.Fatalf("Expected connection charset %s but got %s", expected, got) + } + }) + } + + // non utf8 test + mustSetCharset("charset=ascii", "ascii") + + // when the first charset is invalid, use the second + mustSetCharset("charset=none,utf8", "utf8") + + // when the first charset is valid, use it + mustSetCharset("charset=ascii,utf8", "ascii") + mustSetCharset("charset=utf8,ascii", "utf8") +} + +func TestFailingCharset(t *testing.T) { + runTests(t, dsn+"&charset=none", func(dbt *DBTest) { + // run query to really establish connection... 
+ _, err := dbt.db.Exec("SELECT 1") + if err == nil { + dbt.db.Close() + t.Fatalf("Connection must not succeed without a valid charset") + } + }) +} + +func TestCollation(t *testing.T) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + defaultCollation := "utf8_general_ci" + testCollations := []string{ + "", // do not set + defaultCollation, // driver default + "latin1_general_ci", + "binary", + "utf8_unicode_ci", + "cp1257_bin", + } + + for _, collation := range testCollations { + var expected, tdsn string + if collation != "" { + tdsn = dsn + "&collation=" + collation + expected = collation + } else { + tdsn = dsn + expected = defaultCollation + } + + runTests(t, tdsn, func(dbt *DBTest) { + var got string + if err := dbt.db.QueryRow("SELECT @@collation_connection").Scan(&got); err != nil { + dbt.Fatal(err) + } + + if got != expected { + dbt.Fatalf("Expected connection collation %s but got %s", expected, got) + } + }) + } +} + +func TestColumnsWithAlias(t *testing.T) { + runTests(t, dsn+"&columnsWithAlias=true", func(dbt *DBTest) { + rows := dbt.mustQuery("SELECT 1 AS A") + defer rows.Close() + cols, _ := rows.Columns() + if len(cols) != 1 { + t.Fatalf("expected 1 column, got %d", len(cols)) + } + if cols[0] != "A" { + t.Fatalf("expected column name \"A\", got \"%s\"", cols[0]) + } + rows.Close() + + rows = dbt.mustQuery("SELECT * FROM (SELECT 1 AS one) AS A") + cols, _ = rows.Columns() + if len(cols) != 1 { + t.Fatalf("expected 1 column, got %d", len(cols)) + } + if cols[0] != "A.one" { + t.Fatalf("expected column name \"A.one\", got \"%s\"", cols[0]) + } + }) +} + +func TestRawBytesResultExceedsBuffer(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + // defaultBufSize from buffer.go + expected := strings.Repeat("abc", defaultBufSize) + + rows := dbt.mustQuery("SELECT '" + expected + "'") + defer rows.Close() + if !rows.Next() { + dbt.Error("expected result, got none") + } + var result sql.RawBytes + rows.Scan(&result) + if 
expected != string(result) { + dbt.Error("result did not match expected value") + } + }) +} + +func TestTimezoneConversion(t *testing.T) { + zones := []string{"UTC", "US/Central", "US/Pacific", "Local"} + + // Regression test for timezone handling + tzTest := func(dbt *DBTest) { + + // Create table + dbt.mustExec("CREATE TABLE test (ts TIMESTAMP)") + + // Insert local time into database (should be converted) + usCentral, _ := time.LoadLocation("US/Central") + reftime := time.Date(2014, 05, 30, 18, 03, 17, 0, time.UTC).In(usCentral) + dbt.mustExec("INSERT INTO test VALUE (?)", reftime) + + // Retrieve time from DB + rows := dbt.mustQuery("SELECT ts FROM test") + if !rows.Next() { + dbt.Fatal("Didn't get any rows out") + } + + var dbTime time.Time + err := rows.Scan(&dbTime) + if err != nil { + dbt.Fatal("Err", err) + } + + // Check that dates match + if reftime.Unix() != dbTime.Unix() { + dbt.Errorf("Times don't match.\n") + dbt.Errorf(" Now(%v)=%v\n", usCentral, reftime) + dbt.Errorf(" Now(UTC)=%v\n", dbTime) + } + } + + for _, tz := range zones { + runTests(t, dsn+"&parseTime=true&loc="+url.QueryEscape(tz), tzTest) + } +} + +// Special cases + +func TestRowsClose(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + rows, err := dbt.db.Query("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + err = rows.Close() + if err != nil { + dbt.Fatal(err) + } + + if rows.Next() { + dbt.Fatal("Unexpected row after rows.Close()") + } + + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + }) +} + +// dangling statements +// http://code.google.com/p/go/issues/detail?id=3865 +func TestCloseStmtBeforeRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1") + if err != nil { + dbt.Fatal(err) + } + + rows, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows.Close() + + err = stmt.Close() + if err != nil { + dbt.Fatal(err) + } + + if !rows.Next() { + dbt.Fatal("Getting row failed") + } else 
{ + err = rows.Err() + if err != nil { + dbt.Fatal(err) + } + + var out bool + err = rows.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + }) +} + +// It is valid to have multiple Rows for the same Stmt +// http://code.google.com/p/go/issues/detail?id=3734 +func TestStmtMultiRows(t *testing.T) { + runTests(t, dsn, func(dbt *DBTest) { + stmt, err := dbt.db.Prepare("SELECT 1 UNION SELECT 0") + if err != nil { + dbt.Fatal(err) + } + + rows1, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows1.Close() + + rows2, err := stmt.Query() + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows2.Close() + + var out bool + + // 1 + if !rows1.Next() { + dbt.Fatal("1st rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + if !rows2.Next() { + dbt.Fatal("1st rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != true { + dbt.Errorf("true != %t", out) + } + } + + // 2 + if !rows1.Next() { + dbt.Fatal("2nd rows1.Next failed") + } else { + err = rows1.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows1.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows1.Next() { + dbt.Fatal("Unexpected row on rows1") + } + err = rows1.Close() + if err != nil { + dbt.Fatal(err) + } + } + + if !rows2.Next() { + dbt.Fatal("2nd rows2.Next failed") + } else { + err = rows2.Err() + if err != nil { + dbt.Fatal(err) + } + + err = rows2.Scan(&out) + if err != nil { + dbt.Fatalf("Error on rows.Scan(): %s", 
err.Error()) + } + if out != false { + dbt.Errorf("false != %t", out) + } + + if rows2.Next() { + dbt.Fatal("Unexpected row on rows2") + } + err = rows2.Close() + if err != nil { + dbt.Fatal(err) + } + } + }) +} + +// Regression test for +// * more than 32 NULL parameters (issue 209) +// * more parameters than fit into the buffer (issue 201) +func TestPreparedManyCols(t *testing.T) { + const numParams = defaultBufSize + runTests(t, dsn, func(dbt *DBTest) { + query := "SELECT ?" + strings.Repeat(",?", numParams-1) + stmt, err := dbt.db.Prepare(query) + if err != nil { + dbt.Fatal(err) + } + defer stmt.Close() + // create more parameters than fit into the buffer + // which will take nil-values + params := make([]interface{}, numParams) + rows, err := stmt.Query(params...) + if err != nil { + stmt.Close() + dbt.Fatal(err) + } + defer rows.Close() + }) +} + +func TestConcurrent(t *testing.T) { + if enabled, _ := readBool(os.Getenv("MYSQL_TEST_CONCURRENT")); !enabled { + t.Skip("MYSQL_TEST_CONCURRENT env var not set") + } + + runTests(t, dsn, func(dbt *DBTest) { + var max int + err := dbt.db.QueryRow("SELECT @@max_connections").Scan(&max) + if err != nil { + dbt.Fatalf("%s", err.Error()) + } + dbt.Logf("Testing up to %d concurrent connections \r\n", max) + + var remaining, succeeded int32 = int32(max), 0 + + var wg sync.WaitGroup + wg.Add(max) + + var fatalError string + var once sync.Once + fatalf := func(s string, vals ...interface{}) { + once.Do(func() { + fatalError = fmt.Sprintf(s, vals...) 
+ }) + } + + for i := 0; i < max; i++ { + go func(id int) { + defer wg.Done() + + tx, err := dbt.db.Begin() + atomic.AddInt32(&remaining, -1) + + if err != nil { + if err.Error() != "Error 1040: Too many connections" { + fatalf("Error on Conn %d: %s", id, err.Error()) + } + return + } + + // keep the connection busy until all connections are open + for remaining > 0 { + if _, err = tx.Exec("DO 1"); err != nil { + fatalf("Error on Conn %d: %s", id, err.Error()) + return + } + } + + if err = tx.Commit(); err != nil { + fatalf("Error on Conn %d: %s", id, err.Error()) + return + } + + // everything went fine with this connection + atomic.AddInt32(&succeeded, 1) + }(i) + } + + // wait until all conections are open + wg.Wait() + + if fatalError != "" { + dbt.Fatal(fatalError) + } + + dbt.Logf("Reached %d concurrent connections\r\n", succeeded) + }) +} + +// Tests custom dial functions +func TestCustomDial(t *testing.T) { + if !available { + t.Skipf("MySQL-Server not running on %s", netAddr) + } + + // our custom dial function which justs wraps net.Dial here + RegisterDial("mydial", func(addr string) (net.Conn, error) { + return net.Dial(prot, addr) + }) + + db, err := sql.Open("mysql", fmt.Sprintf("%s:%s@mydial(%s)/%s?timeout=30s&strict=true", user, pass, addr, dbname)) + if err != nil { + t.Fatalf("Error connecting: %s", err.Error()) + } + defer db.Close() + + if _, err = db.Exec("DO 1"); err != nil { + t.Fatalf("Connection failed: %s", err.Error()) + } +} + +func TestSqlInjection(t *testing.T) { + createTest := func(arg string) func(dbt *DBTest) { + return func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (v INTEGER)") + dbt.mustExec("INSERT INTO test VALUES (?)", 1) + + var v int + // NULL can't be equal to anything, the idea here is to inject query so it returns row + // This test verifies that escapeQuotes and escapeBackslash are working properly + err := dbt.db.QueryRow("SELECT v FROM test WHERE NULL = ?", arg).Scan(&v) + if err == sql.ErrNoRows { + return // 
success, sql injection failed + } else if err == nil { + dbt.Errorf("Sql injection successful with arg: %s", arg) + } else { + dbt.Errorf("Error running query with arg: %s; err: %s", arg, err.Error()) + } + } + } + + dsns := []string{ + dsn, + dsn + "&sql_mode=NO_BACKSLASH_ESCAPES", + } + for _, testdsn := range dsns { + runTests(t, testdsn, createTest("1 OR 1=1")) + runTests(t, testdsn, createTest("' OR '1'='1")) + } +} + +// Test if inserted data is correctly retrieved after being escaped +func TestInsertRetrieveEscapedData(t *testing.T) { + testData := func(dbt *DBTest) { + dbt.mustExec("CREATE TABLE test (v VARCHAR(255))") + + // All sequences that are escaped by escapeQuotes and escapeBackslash + v := "foo \x00\n\r\x1a\"'\\" + dbt.mustExec("INSERT INTO test VALUES (?)", v) + + var out string + err := dbt.db.QueryRow("SELECT v FROM test").Scan(&out) + if err != nil { + dbt.Fatalf("%s", err.Error()) + } + + if out != v { + dbt.Errorf("%q != %q", out, v) + } + } + + dsns := []string{ + dsn, + dsn + "&sql_mode=NO_BACKSLASH_ESCAPES", + } + for _, testdsn := range dsns { + runTests(t, testdsn, testData) + } +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go new file mode 100644 index 000000000..44cf30db6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors.go @@ -0,0 +1,131 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "errors" + "fmt" + "io" + "log" + "os" +) + +// Various errors the driver might return. Can change between driver versions. 
+var ( + ErrInvalidConn = errors.New("Invalid Connection") + ErrMalformPkt = errors.New("Malformed Packet") + ErrNoTLS = errors.New("TLS encryption requested but server does not support TLS") + ErrOldPassword = errors.New("This user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") + ErrCleartextPassword = errors.New("This user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN.") + ErrUnknownPlugin = errors.New("The authentication plugin is not supported.") + ErrOldProtocol = errors.New("MySQL-Server does not support required Protocol 41+") + ErrPktSync = errors.New("Commands out of sync. You can't run this command now") + ErrPktSyncMul = errors.New("Commands out of sync. Did you run multiple statements at once?") + ErrPktTooLarge = errors.New("Packet for query is too large. You can change this value on the server by adjusting the 'max_allowed_packet' variable.") + ErrBusyBuffer = errors.New("Busy buffer") +) + +var errLog Logger = log.New(os.Stderr, "[MySQL] ", log.Ldate|log.Ltime|log.Lshortfile) + +// Logger is used to log critical error messages. +type Logger interface { + Print(v ...interface{}) +} + +// SetLogger is used to set the logger for critical errors. +// The initial logger is os.Stderr. 
+func SetLogger(logger Logger) error { + if logger == nil { + return errors.New("logger is nil") + } + errLog = logger + return nil +} + +// MySQLError is an error type which represents a single MySQL error +type MySQLError struct { + Number uint16 + Message string +} + +func (me *MySQLError) Error() string { + return fmt.Sprintf("Error %d: %s", me.Number, me.Message) +} + +// MySQLWarnings is an error type which represents a group of one or more MySQL +// warnings +type MySQLWarnings []MySQLWarning + +func (mws MySQLWarnings) Error() string { + var msg string + for i, warning := range mws { + if i > 0 { + msg += "\r\n" + } + msg += fmt.Sprintf( + "%s %s: %s", + warning.Level, + warning.Code, + warning.Message, + ) + } + return msg +} + +// MySQLWarning is an error type which represents a single MySQL warning. +// Warnings are returned in groups only. See MySQLWarnings +type MySQLWarning struct { + Level string + Code string + Message string +} + +func (mc *mysqlConn) getWarnings() (err error) { + rows, err := mc.Query("SHOW WARNINGS", nil) + if err != nil { + return + } + + var warnings = MySQLWarnings{} + var values = make([]driver.Value, 3) + + for { + err = rows.Next(values) + switch err { + case nil: + warning := MySQLWarning{} + + if raw, ok := values[0].([]byte); ok { + warning.Level = string(raw) + } else { + warning.Level = fmt.Sprintf("%s", values[0]) + } + if raw, ok := values[1].([]byte); ok { + warning.Code = string(raw) + } else { + warning.Code = fmt.Sprintf("%s", values[1]) + } + if raw, ok := values[2].([]byte); ok { + warning.Message = string(raw) + } else { + warning.Message = fmt.Sprintf("%s", values[0]) + } + + warnings = append(warnings, warning) + + case io.EOF: + return warnings + + default: + rows.Close() + return + } + } +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go new file mode 100644 index 000000000..96f9126d6 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/errors_test.go @@ -0,0 +1,42 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "log" + "testing" +) + +func TestErrorsSetLogger(t *testing.T) { + previous := errLog + defer func() { + errLog = previous + }() + + // set up logger + const expected = "prefix: test\n" + buffer := bytes.NewBuffer(make([]byte, 0, 64)) + logger := log.New(buffer, "prefix: ", 0) + + // print + SetLogger(logger) + errLog.Print("test") + + // check result + if actual := buffer.String(); actual != expected { + t.Errorf("expected %q, got %q", expected, actual) + } +} + +func TestErrorsStrictIgnoreNotes(t *testing.T) { + runTests(t, dsn+"&sql_notes=false", func(dbt *DBTest) { + dbt.mustExec("DROP TABLE IF EXISTS does_not_exist") + }) +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go new file mode 100644 index 000000000..a2dedb3c0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/infile.go @@ -0,0 +1,164 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "fmt" + "io" + "os" + "strings" +) + +var ( + fileRegister map[string]bool + readerRegister map[string]func() io.Reader +) + +// RegisterLocalFile adds the given file to the file whitelist, +// so that it can be used by "LOAD DATA LOCAL INFILE ". +// Alternatively you can allow the use of all local files with +// the DSN parameter 'allowAllFiles=true' +// +// filePath := "/home/gopher/data.csv" +// mysql.RegisterLocalFile(filePath) +// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterLocalFile(filePath string) { + // lazy map init + if fileRegister == nil { + fileRegister = make(map[string]bool) + } + + fileRegister[strings.Trim(filePath, `"`)] = true +} + +// DeregisterLocalFile removes the given filepath from the whitelist. +func DeregisterLocalFile(filePath string) { + delete(fileRegister, strings.Trim(filePath, `"`)) +} + +// RegisterReaderHandler registers a handler function which is used +// to receive a io.Reader. +// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::". +// If the handler returns a io.ReadCloser Close() is called when the +// request is finished. +// +// mysql.RegisterReaderHandler("data", func() io.Reader { +// var csvReader io.Reader // Some Reader that returns CSV data +// ... // Open Reader here +// return csvReader +// }) +// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterReaderHandler(name string, handler func() io.Reader) { + // lazy map init + if readerRegister == nil { + readerRegister = make(map[string]func() io.Reader) + } + + readerRegister[name] = handler +} + +// DeregisterReaderHandler removes the ReaderHandler function with +// the given name from the registry. 
+func DeregisterReaderHandler(name string) { + delete(readerRegister, name) +} + +func deferredClose(err *error, closer io.Closer) { + closeErr := closer.Close() + if *err == nil { + *err = closeErr + } +} + +func (mc *mysqlConn) handleInFileRequest(name string) (err error) { + var rdr io.Reader + var data []byte + + if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader + // The server might return an an absolute path. See issue #355. + name = name[idx+8:] + + if handler, inMap := readerRegister[name]; inMap { + rdr = handler() + if rdr != nil { + data = make([]byte, 4+mc.maxWriteSize) + + if cl, ok := rdr.(io.Closer); ok { + defer deferredClose(&err, cl) + } + } else { + err = fmt.Errorf("Reader '%s' is ", name) + } + } else { + err = fmt.Errorf("Reader '%s' is not registered", name) + } + } else { // File + name = strings.Trim(name, `"`) + if mc.cfg.allowAllFiles || fileRegister[name] { + var file *os.File + var fi os.FileInfo + + if file, err = os.Open(name); err == nil { + defer deferredClose(&err, file) + + // get file size + if fi, err = file.Stat(); err == nil { + rdr = file + if fileSize := int(fi.Size()); fileSize <= mc.maxWriteSize { + data = make([]byte, 4+fileSize) + } else if fileSize <= mc.maxPacketAllowed { + data = make([]byte, 4+mc.maxWriteSize) + } else { + err = fmt.Errorf("Local File '%s' too large: Size: %d, Max: %d", name, fileSize, mc.maxPacketAllowed) + } + } + } + } else { + err = fmt.Errorf("Local File '%s' is not registered. 
Use the DSN parameter 'allowAllFiles=true' to allow all files", name) + } + } + + // send content packets + if err == nil { + var n int + for err == nil { + n, err = rdr.Read(data[4:]) + if n > 0 { + if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { + return ioErr + } + } + } + if err == io.EOF { + err = nil + } + } + + // send empty packet (termination) + if data == nil { + data = make([]byte, 4) + } + if ioErr := mc.writePacket(data[:4]); ioErr != nil { + return ioErr + } + + // read OK packet + if err == nil { + return mc.readResultOK() + } else { + mc.readPacket() + } + return err +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go new file mode 100644 index 000000000..14395bf9a --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/packets.go @@ -0,0 +1,1179 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "fmt" + "io" + "math" + "time" +) + +// Packets documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +// Read packet to buffer 'data' +func (mc *mysqlConn) readPacket() ([]byte, error) { + var payload []byte + for { + // Read packet header + data, err := mc.buf.readNext(4) + if err != nil { + errLog.Print(err) + mc.Close() + return nil, driver.ErrBadConn + } + + // Packet Length [24 bit] + pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) + + if pktLen < 1 { + errLog.Print(ErrMalformPkt) + mc.Close() + return nil, driver.ErrBadConn + } + + // Check Packet Sync [8 bit] + if data[3] != mc.sequence { + if data[3] > mc.sequence { + return nil, ErrPktSyncMul + } else { + return nil, ErrPktSync + } + } + mc.sequence++ + + // Read packet body [pktLen bytes] + data, err = mc.buf.readNext(pktLen) + if err != nil { + errLog.Print(err) + mc.Close() + return nil, driver.ErrBadConn + } + + isLastPacket := (pktLen < maxPacketSize) + + // Zero allocations for non-splitting packets + if isLastPacket && payload == nil { + return data, nil + } + + payload = append(payload, data...) 
+ + if isLastPacket { + return payload, nil + } + } +} + +// Write packet buffer 'data' +func (mc *mysqlConn) writePacket(data []byte) error { + pktLen := len(data) - 4 + + if pktLen > mc.maxPacketAllowed { + return ErrPktTooLarge + } + + for { + var size int + if pktLen >= maxPacketSize { + data[0] = 0xff + data[1] = 0xff + data[2] = 0xff + size = maxPacketSize + } else { + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + size = pktLen + } + data[3] = mc.sequence + + // Write packet + n, err := mc.netConn.Write(data[:4+size]) + if err == nil && n == 4+size { + mc.sequence++ + if size != maxPacketSize { + return nil + } + pktLen -= size + data = data[size:] + continue + } + + // Handle error + if err == nil { // n != len(data) + errLog.Print(ErrMalformPkt) + } else { + errLog.Print(err) + } + return driver.ErrBadConn + } +} + +/****************************************************************************** +* Initialisation Process * +******************************************************************************/ + +// Handshake Initialization Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake +func (mc *mysqlConn) readInitPacket() ([]byte, error) { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + if data[0] == iERR { + return nil, mc.handleErrorPacket(data) + } + + // protocol version [1 byte] + if data[0] < minProtocolVersion { + return nil, fmt.Errorf( + "Unsupported MySQL Protocol Version %d. 
Protocol Version %d or higher is required", + data[0], + minProtocolVersion, + ) + } + + // server version [null terminated string] + // connection id [4 bytes] + pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 + + // first part of the password cipher [8 bytes] + cipher := data[pos : pos+8] + + // (filler) always 0x00 [1 byte] + pos += 8 + 1 + + // capability flags (lower 2 bytes) [2 bytes] + mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + if mc.flags&clientProtocol41 == 0 { + return nil, ErrOldProtocol + } + if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { + return nil, ErrNoTLS + } + pos += 2 + + if len(data) > pos { + // character set [1 byte] + // status flags [2 bytes] + // capability flags (upper 2 bytes) [2 bytes] + // length of auth-plugin-data [1 byte] + // reserved (all [00]) [10 bytes] + pos += 1 + 2 + 2 + 1 + 10 + + // second part of the password cipher [mininum 13 bytes], + // where len=MAX(13, length of auth-plugin-data - 8) + // + // The web documentation is ambiguous about the length. However, + // according to mysql-5.7/sql/auth/sql_authentication.cc line 538, + // the 13th byte is "\0 byte, terminating the second part of + // a scramble". So the second part of the password cipher is + // a NULL terminated string that's at least 13 bytes with the + // last byte being NULL. + // + // The official Python library uses the fixed length 12 + // which seems to work but technically could have a hidden bug. + cipher = append(cipher, data[pos:pos+12]...) 
+ + // TODO: Verify string termination + // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) + // \NUL otherwise + // + //if data[len(data)-1] == 0 { + // return + //} + //return ErrMalformPkt + + // make a memory safe copy of the cipher slice + var b [20]byte + copy(b[:], cipher) + return b[:], nil + } + + // make a memory safe copy of the cipher slice + var b [8]byte + copy(b[:], cipher) + return b[:], nil +} + +// Client Authentication Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse +func (mc *mysqlConn) writeAuthPacket(cipher []byte) error { + // Adjust client flags based on server support + clientFlags := clientProtocol41 | + clientSecureConn | + clientLongPassword | + clientTransactions | + clientLocalFiles | + clientPluginAuth | + mc.flags&clientLongFlag + + if mc.cfg.clientFoundRows { + clientFlags |= clientFoundRows + } + + // To enable TLS / SSL + if mc.cfg.tls != nil { + clientFlags |= clientSSL + } + + // User Password + scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.passwd)) + + pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.user) + 1 + 1 + len(scrambleBuff) + 21 + 1 + + // To specify a db name + if n := len(mc.cfg.dbname); n > 0 { + clientFlags |= clientConnectWithDB + pktLen += n + 1 + } + + // Calculate packet length and get buffer with that size + data := mc.buf.takeSmallBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // ClientFlags [32 bit] + data[4] = byte(clientFlags) + data[5] = byte(clientFlags >> 8) + data[6] = byte(clientFlags >> 16) + data[7] = byte(clientFlags >> 24) + + // MaxPacketSize [32 bit] (none) + data[8] = 0x00 + data[9] = 0x00 + data[10] = 0x00 + data[11] = 0x00 + + // Charset [1 byte] + data[12] = mc.cfg.collation + + // SSL Connection Request Packet + // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest + if mc.cfg.tls != nil { + // Send TLS / SSL request packet + if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { + return err + } + + // Switch to TLS + tlsConn := tls.Client(mc.netConn, mc.cfg.tls) + if err := tlsConn.Handshake(); err != nil { + return err + } + mc.netConn = tlsConn + mc.buf.rd = tlsConn + } + + // Filler [23 bytes] (all 0x00) + pos := 13 + 23 + + // User [null terminated string] + if len(mc.cfg.user) > 0 { + pos += copy(data[pos:], mc.cfg.user) + } + data[pos] = 0x00 + pos++ + + // ScrambleBuffer [length encoded integer] + data[pos] = byte(len(scrambleBuff)) + pos += 1 + copy(data[pos+1:], scrambleBuff) + + // Databasename [null terminated string] + if len(mc.cfg.dbname) > 0 { + pos += copy(data[pos:], mc.cfg.dbname) + data[pos] = 0x00 + pos++ + } + + // Assume native client during response + pos += copy(data[pos:], "mysql_native_password") + data[pos] = 0x00 + + // Send Auth packet + return mc.writePacket(data) +} + +// Client old authentication packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error { + // User password + scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.passwd)) + + // Calculate the packet length and add a tailing 0 + pktLen := len(scrambleBuff) + 1 + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the 
buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add the scrambled password [null terminated string] + copy(data[4:], scrambleBuff) + data[4+pktLen-1] = 0x00 + + return mc.writePacket(data) +} + +// Client clear text authentication packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeClearAuthPacket() error { + // Calculate the packet length and add a tailing 0 + pktLen := len(mc.cfg.passwd) + 1 + data := mc.buf.takeSmallBuffer(4 + pktLen) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add the clear password [null terminated string] + copy(data[4:], mc.cfg.passwd) + data[4+pktLen-1] = 0x00 + + return mc.writePacket(data) +} + +/****************************************************************************** +* Command Packets * +******************************************************************************/ + +func (mc *mysqlConn) writeCommandPacket(command byte) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { + // Reset Packet Sequence + mc.sequence = 0 + + pktLen := 1 + len(arg) + data := mc.buf.takeBuffer(pktLen + 4) + if data == nil { + // can not take the buffer. 
Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Add arg + copy(data[5:], arg) + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { + // Reset Packet Sequence + mc.sequence = 0 + + data := mc.buf.takeSmallBuffer(4 + 1 + 4) + if data == nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // Add command byte + data[4] = command + + // Add arg [32 bit] + data[5] = byte(arg) + data[6] = byte(arg >> 8) + data[7] = byte(arg >> 16) + data[8] = byte(arg >> 24) + + // Send CMD packet + return mc.writePacket(data) +} + +/****************************************************************************** +* Result Packets * +******************************************************************************/ + +// Returns error if Packet is not an 'Result OK'-Packet +func (mc *mysqlConn) readResultOK() error { + data, err := mc.readPacket() + if err == nil { + // packet indicator + switch data[0] { + + case iOK: + return mc.handleOkPacket(data) + + case iEOF: + if len(data) > 1 { + plugin := string(data[1:bytes.IndexByte(data, 0x00)]) + if plugin == "mysql_old_password" { + // using old_passwords + return ErrOldPassword + } else if plugin == "mysql_clear_password" { + // using clear text password + return ErrCleartextPassword + } else { + return ErrUnknownPlugin + } + } else { + return ErrOldPassword + } + + default: // Error otherwise + return mc.handleErrorPacket(data) + } + } + return err +} + +// Result Set Header Packet +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset +func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { + data, err := mc.readPacket() + if err == nil { + switch data[0] { + + case iOK: + return 0, mc.handleOkPacket(data) + + 
case iERR: + return 0, mc.handleErrorPacket(data) + + case iLocalInFile: + return 0, mc.handleInFileRequest(string(data[1:])) + } + + // column count + num, _, n := readLengthEncodedInteger(data) + if n-len(data) == 0 { + return int(num), nil + } + + return 0, ErrMalformPkt + } + return 0, err +} + +// Error Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet +func (mc *mysqlConn) handleErrorPacket(data []byte) error { + if data[0] != iERR { + return ErrMalformPkt + } + + // 0xff [1 byte] + + // Error Number [16 bit uint] + errno := binary.LittleEndian.Uint16(data[1:3]) + + pos := 3 + + // SQL State [optional: # + 5bytes string] + if data[3] == 0x23 { + //sqlstate := string(data[4 : 4+5]) + pos = 9 + } + + // Error Message [string] + return &MySQLError{ + Number: errno, + Message: string(data[pos:]), + } +} + +// Ok Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet +func (mc *mysqlConn) handleOkPacket(data []byte) error { + var n, m int + + // 0x00 [1 byte] + + // Affected rows [Length Coded Binary] + mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) + + // Insert id [Length Coded Binary] + mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) + + // server_status [2 bytes] + mc.status = statusFlag(data[1+n+m]) | statusFlag(data[1+n+m+1])<<8 + + // warning count [2 bytes] + if !mc.strict { + return nil + } else { + pos := 1 + n + m + 2 + if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 { + return mc.getWarnings() + } + return nil + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 +func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { + columns := make([]mysqlField, count) + + for i := 0; ; i++ { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + // EOF Packet + if data[0] == iEOF && (len(data) == 
5 || len(data) == 1) { + if i == count { + return columns, nil + } + return nil, fmt.Errorf("ColumnsCount mismatch n:%d len:%d", count, len(columns)) + } + + // Catalog + pos, err := skipLengthEncodedString(data) + if err != nil { + return nil, err + } + + // Database [len coded string] + n, err := skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Table [len coded string] + if mc.cfg.columnsWithAlias { + tableName, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + columns[i].tableName = string(tableName) + } else { + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + } + + // Original table [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Name [len coded string] + name, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + columns[i].name = string(name) + pos += n + + // Original name [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + + // Filler [uint8] + // Charset [charset, collation uint8] + // Length [uint32] + pos += n + 1 + 2 + 4 + + // Field type [uint8] + columns[i].fieldType = data[pos] + pos++ + + // Flags [uint16] + columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + pos += 2 + + // Decimals [uint8] + columns[i].decimals = data[pos] + //pos++ + + // Default value [len coded binary] + //if pos < len(data) { + // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) + //} + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow +func (rows *textRows) readRow(dest []driver.Value) error { + mc := rows.mc + + data, err := mc.readPacket() + if err != nil { + return err + } + + // EOF Packet + if data[0] 
== iEOF && len(data) == 5 { + rows.mc = nil + return io.EOF + } + if data[0] == iERR { + rows.mc = nil + return mc.handleErrorPacket(data) + } + + // RowSet Packet + var n int + var isNull bool + pos := 0 + + for i := range dest { + // Read bytes and convert to string + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + if !mc.parseTime { + continue + } else { + switch rows.columns[i].fieldType { + case fieldTypeTimestamp, fieldTypeDateTime, + fieldTypeDate, fieldTypeNewDate: + dest[i], err = parseDateTime( + string(dest[i].([]byte)), + mc.cfg.loc, + ) + if err == nil { + continue + } + default: + continue + } + } + + } else { + dest[i] = nil + continue + } + } + return err // err != nil + } + + return nil +} + +// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read +func (mc *mysqlConn) readUntilEOF() error { + for { + data, err := mc.readPacket() + + // No Err and no EOF Packet + if err == nil && data[0] != iEOF { + continue + } + return err // Err or EOF + } +} + +/****************************************************************************** +* Prepared Statements * +******************************************************************************/ + +// Prepare Result Packets +// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html +func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { + data, err := stmt.mc.readPacket() + if err == nil { + // packet indicator [1 byte] + if data[0] != iOK { + return 0, stmt.mc.handleErrorPacket(data) + } + + // statement id [4 bytes] + stmt.id = binary.LittleEndian.Uint32(data[1:5]) + + // Column count [16 bit uint] + columnCount := binary.LittleEndian.Uint16(data[5:7]) + + // Param count [16 bit uint] + stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) + + // Reserved [8 bit] + + // Warning count [16 bit uint] + if !stmt.mc.strict { + return columnCount, nil + } else { + // Check for warnings count > 0, 
only available in MySQL > 4.1 + if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 { + return columnCount, stmt.mc.getWarnings() + } + return columnCount, nil + } + } + return 0, err +} + +// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html +func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { + maxLen := stmt.mc.maxPacketAllowed - 1 + pktLen := maxLen + + // After the header (bytes 0-3) follows before the data: + // 1 byte command + // 4 bytes stmtID + // 2 bytes paramID + const dataOffset = 1 + 4 + 2 + + // Can not use the write buffer since + // a) the buffer is too small + // b) it is in use + data := make([]byte, 4+1+4+2+len(arg)) + + copy(data[4+dataOffset:], arg) + + for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset { + if dataOffset+argLen < maxLen { + pktLen = dataOffset + argLen + } + + stmt.mc.sequence = 0 + // Add command byte [1 byte] + data[4] = comStmtSendLongData + + // Add stmtID [32 bit] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // Add paramID [16 bit] + data[9] = byte(paramID) + data[10] = byte(paramID >> 8) + + // Send CMD packet + err := stmt.mc.writePacket(data[:4+pktLen]) + if err == nil { + data = data[pktLen-dataOffset:] + continue + } + return err + + } + + // Reset Packet Sequence + stmt.mc.sequence = 0 + return nil +} + +// Execute Prepared Statement +// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html +func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { + if len(args) != stmt.paramCount { + return fmt.Errorf( + "Arguments count mismatch (Got: %d Has: %d)", + len(args), + stmt.paramCount, + ) + } + + const minPktLen = 4 + 1 + 4 + 1 + 4 + mc := stmt.mc + + // Reset packet-sequence + mc.sequence = 0 + + var data []byte + + if len(args) == 0 { + data = mc.buf.takeBuffer(minPktLen) + } else { + data = mc.buf.takeCompleteBuffer() + } + if data == nil { + // 
can not take the buffer. Something must be wrong with the connection + errLog.Print(ErrBusyBuffer) + return driver.ErrBadConn + } + + // command [1 byte] + data[4] = comStmtExecute + + // statement_id [4 bytes] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] + data[9] = 0x00 + + // iteration_count (uint32(1)) [4 bytes] + data[10] = 0x01 + data[11] = 0x00 + data[12] = 0x00 + data[13] = 0x00 + + if len(args) > 0 { + pos := minPktLen + + var nullMask []byte + if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) { + // buffer has to be extended but we don't know by how much so + // we depend on append after all data with known sizes fit. + // We stop at that because we deal with a lot of columns here + // which makes the required allocation size hard to guess. + tmp := make([]byte, pos+maskLen+typesLen) + copy(tmp[:pos], data[:pos]) + data = tmp + nullMask = data[pos : pos+maskLen] + pos += maskLen + } else { + nullMask = data[pos : pos+maskLen] + for i := 0; i < maskLen; i++ { + nullMask[i] = 0 + } + pos += maskLen + } + + // newParameterBoundFlag 1 [1 byte] + data[pos] = 0x01 + pos++ + + // type of each parameter [len(args)*2 bytes] + paramTypes := data[pos:] + pos += len(args) * 2 + + // value of each parameter [n bytes] + paramValues := data[pos:pos] + valuesCap := cap(paramValues) + + for i, arg := range args { + // build NULL-bitmap + if arg == nil { + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i+1] = 0x00 + continue + } + + // cache types and values + switch v := arg.(type) { + case int64: + paramTypes[i+i] = fieldTypeLongLong + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + uint64(v), + ) + } else { + paramValues = 
append(paramValues, + uint64ToBytes(uint64(v))..., + ) + } + + case float64: + paramTypes[i+i] = fieldTypeDouble + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + math.Float64bits(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(math.Float64bits(v))..., + ) + } + + case bool: + paramTypes[i+i] = fieldTypeTiny + paramTypes[i+i+1] = 0x00 + + if v { + paramValues = append(paramValues, 0x01) + } else { + paramValues = append(paramValues, 0x00) + } + + case []byte: + // Common case (non-nil value) first + if v != nil { + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, v); err != nil { + return err + } + } + continue + } + + // Handle []byte(nil) as a NULL value + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = fieldTypeNULL + paramTypes[i+i+1] = 0x00 + + case string: + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { + return err + } + } + + case time.Time: + paramTypes[i+i] = fieldTypeString + paramTypes[i+i+1] = 0x00 + + var val []byte + if v.IsZero() { + val = []byte("0000-00-00") + } else { + val = []byte(v.In(mc.cfg.loc).Format(timeFormat)) + } + + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(val)), + ) + paramValues = append(paramValues, val...) 
+ + default: + return fmt.Errorf("Can't convert type: %T", arg) + } + } + + // Check if param values exceeded the available buffer + // In that case we must build the data packet with the new values buffer + if valuesCap != cap(paramValues) { + data = append(data[:pos], paramValues...) + mc.buf.buf = data + } + + pos += len(paramValues) + data = data[:pos] + } + + return mc.writePacket(data) +} + +// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html +func (rows *binaryRows) readRow(dest []driver.Value) error { + data, err := rows.mc.readPacket() + if err != nil { + return err + } + + // packet indicator [1 byte] + if data[0] != iOK { + rows.mc = nil + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + return io.EOF + } + + // Error otherwise + return rows.mc.handleErrorPacket(data) + } + + // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] + pos := 1 + (len(dest)+7+2)>>3 + nullMask := data[1:pos] + + for i := range dest { + // Field is NULL + // (byte >> bit-pos) % 2 == 1 + if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { + dest[i] = nil + continue + } + + // Convert to byte-coded string + switch rows.columns[i].fieldType { + case fieldTypeNULL: + dest[i] = nil + continue + + // Numeric Types + case fieldTypeTiny: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(data[pos]) + } else { + dest[i] = int64(int8(data[pos])) + } + pos++ + continue + + case fieldTypeShort, fieldTypeYear: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) + } else { + dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) + } + pos += 2 + continue + + case fieldTypeInt24, fieldTypeLong: + if rows.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) + } else { + dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) + } + pos += 4 + continue + + case fieldTypeLongLong: + if 
rows.columns[i].flags&flagUnsigned != 0 { + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + if val > math.MaxInt64 { + dest[i] = uint64ToString(val) + } else { + dest[i] = int64(val) + } + } else { + dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) + } + pos += 8 + continue + + case fieldTypeFloat: + dest[i] = float64(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))) + pos += 4 + continue + + case fieldTypeDouble: + dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) + pos += 8 + continue + + // Length coded Binary Strings + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry: + var isNull bool + var n int + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + continue + } else { + dest[i] = nil + continue + } + } + return err + + case + fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD + fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal] + fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] + + num, isNull, n := readLengthEncodedInteger(data[pos:]) + pos += n + + switch { + case isNull: + dest[i] = nil + continue + case rows.columns[i].fieldType == fieldTypeTime: + // database/sql does not support an equivalent to TIME, return a string + var dstlen uint8 + switch decimals := rows.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 8 + case 1, 2, 3, 4, 5, 6: + dstlen = 8 + 1 + decimals + default: + return fmt.Errorf( + "MySQL protocol error, illegal decimals value %d", + rows.columns[i].decimals, + ) + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true) + case rows.mc.parseTime: + dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.loc) + default: + var dstlen uint8 + if 
rows.columns[i].fieldType == fieldTypeDate { + dstlen = 10 + } else { + switch decimals := rows.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 19 + case 1, 2, 3, 4, 5, 6: + dstlen = 19 + 1 + decimals + default: + return fmt.Errorf( + "MySQL protocol error, illegal decimals value %d", + rows.columns[i].decimals, + ) + } + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, false) + } + + if err == nil { + pos += int(num) + continue + } else { + return err + } + + // Please report if this happens! + default: + return fmt.Errorf("Unknown FieldType %d", rows.columns[i].fieldType) + } + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go new file mode 100644 index 000000000..c6438d034 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/result.go @@ -0,0 +1,22 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlResult struct { + affectedRows int64 + insertId int64 +} + +func (res *mysqlResult) LastInsertId() (int64, error) { + return res.insertId, nil +} + +func (res *mysqlResult) RowsAffected() (int64, error) { + return res.affectedRows, nil +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go new file mode 100644 index 000000000..ba606e146 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/rows.go @@ -0,0 +1,106 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. 
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "io" +) + +type mysqlField struct { + tableName string + name string + flags fieldFlag + fieldType byte + decimals byte +} + +type mysqlRows struct { + mc *mysqlConn + columns []mysqlField +} + +type binaryRows struct { + mysqlRows +} + +type textRows struct { + mysqlRows +} + +type emptyRows struct{} + +func (rows *mysqlRows) Columns() []string { + columns := make([]string, len(rows.columns)) + if rows.mc.cfg.columnsWithAlias { + for i := range columns { + if tableName := rows.columns[i].tableName; len(tableName) > 0 { + columns[i] = tableName + "." + rows.columns[i].name + } else { + columns[i] = rows.columns[i].name + } + } + } else { + for i := range columns { + columns[i] = rows.columns[i].name + } + } + return columns +} + +func (rows *mysqlRows) Close() error { + mc := rows.mc + if mc == nil { + return nil + } + if mc.netConn == nil { + return ErrInvalidConn + } + + // Remove unread packets from stream + err := mc.readUntilEOF() + rows.mc = nil + return err +} + +func (rows *binaryRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if mc.netConn == nil { + return ErrInvalidConn + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows *textRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if mc.netConn == nil { + return ErrInvalidConn + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows emptyRows) Columns() []string { + return nil +} + +func (rows emptyRows) Close() error { + return nil +} + +func (rows emptyRows) Next(dest []driver.Value) error { + return io.EOF +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go 
b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go new file mode 100644 index 000000000..6e869b340 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/statement.go @@ -0,0 +1,150 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "fmt" + "reflect" + "strconv" +) + +type mysqlStmt struct { + mc *mysqlConn + id uint32 + paramCount int + columns []mysqlField // cached from the first query +} + +func (stmt *mysqlStmt) Close() error { + if stmt.mc == nil || stmt.mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return driver.ErrBadConn + } + + err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) + stmt.mc = nil + return err +} + +func (stmt *mysqlStmt) NumInput() int { + return stmt.paramCount +} + +func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter { + return converter{} +} + +func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { + if stmt.mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, err + } + + mc := stmt.mc + + mc.affectedRows = 0 + mc.insertId = 0 + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + if resLen > 0 { + // Columns + err = mc.readUntilEOF() + if err != nil { + return nil, err + } + + // Rows + err = mc.readUntilEOF() + } + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, nil + } + } + + return nil, err +} + +func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { + if 
stmt.mc.netConn == nil { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, err + } + + mc := stmt.mc + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return nil, err + } + + rows := new(binaryRows) + rows.mc = mc + + if resLen > 0 { + // Columns + // If not cached, read them and cache them + if stmt.columns == nil { + rows.columns, err = mc.readColumns(resLen) + stmt.columns = rows.columns + } else { + rows.columns = stmt.columns + err = mc.readUntilEOF() + } + } + + return rows, err +} + +type converter struct{} + +func (c converter) ConvertValue(v interface{}) (driver.Value, error) { + if driver.IsValue(v) { + return v, nil + } + + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Ptr: + // indirect pointers + if rv.IsNil() { + return nil, nil + } + return c.ConvertValue(rv.Elem().Interface()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(rv.Uint()), nil + case reflect.Uint64: + u64 := rv.Uint() + if u64 >= 1<<63 { + return strconv.FormatUint(u64, 10), nil + } + return int64(u64), nil + case reflect.Float32, reflect.Float64: + return rv.Float(), nil + } + return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go new file mode 100644 index 000000000..33c749b35 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/transaction.go @@ -0,0 +1,31 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. 
If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +type mysqlTx struct { + mc *mysqlConn +} + +func (tx *mysqlTx) Commit() (err error) { + if tx.mc == nil || tx.mc.netConn == nil { + return ErrInvalidConn + } + err = tx.mc.exec("COMMIT") + tx.mc = nil + return +} + +func (tx *mysqlTx) Rollback() (err error) { + if tx.mc == nil || tx.mc.netConn == nil { + return ErrInvalidConn + } + err = tx.mc.exec("ROLLBACK") + tx.mc = nil + return +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go new file mode 100644 index 000000000..6a26ad129 --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils.go @@ -0,0 +1,973 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "crypto/sha1" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "net/url" + "strings" + "time" +) + +var ( + tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs + + errInvalidDSNUnescaped = errors.New("Invalid DSN: Did you forget to escape a param value?") + errInvalidDSNAddr = errors.New("Invalid DSN: Network Address not terminated (missing closing brace)") + errInvalidDSNNoSlash = errors.New("Invalid DSN: Missing the slash separating the database name") + errInvalidDSNUnsafeCollation = errors.New("Invalid DSN: interpolateParams can be used with ascii, latin1, utf8 and utf8mb4 charset") +) + +func init() { + tlsConfigRegister = make(map[string]*tls.Config) +} + +// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. +// Use the key as a value in the DSN where tls=value. +// +// rootCertPool := x509.NewCertPool() +// pem, err := ioutil.ReadFile("/path/ca-cert.pem") +// if err != nil { +// log.Fatal(err) +// } +// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { +// log.Fatal("Failed to append PEM.") +// } +// clientCert := make([]tls.Certificate, 0, 1) +// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") +// if err != nil { +// log.Fatal(err) +// } +// clientCert = append(clientCert, certs) +// mysql.RegisterTLSConfig("custom", &tls.Config{ +// RootCAs: rootCertPool, +// Certificates: clientCert, +// }) +// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") +// +func RegisterTLSConfig(key string, config *tls.Config) error { + if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" { + return fmt.Errorf("Key '%s' is reserved", key) + } + + tlsConfigRegister[key] = config + return nil +} + +// DeregisterTLSConfig removes the tls.Config associated with key. 
+func DeregisterTLSConfig(key string) { + delete(tlsConfigRegister, key) +} + +// parseDSN parses the DSN string to a config +func parseDSN(dsn string) (cfg *config, err error) { + // New config with some default values + cfg = &config{ + loc: time.UTC, + collation: defaultCollation, + } + + // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN] + // Find the last '/' (since the password or the net addr might contain a '/') + foundSlash := false + for i := len(dsn) - 1; i >= 0; i-- { + if dsn[i] == '/' { + foundSlash = true + var j, k int + + // left part is empty if i <= 0 + if i > 0 { + // [username[:password]@][protocol[(address)]] + // Find the last '@' in dsn[:i] + for j = i; j >= 0; j-- { + if dsn[j] == '@' { + // username[:password] + // Find the first ':' in dsn[:j] + for k = 0; k < j; k++ { + if dsn[k] == ':' { + cfg.passwd = dsn[k+1 : j] + break + } + } + cfg.user = dsn[:k] + + break + } + } + + // [protocol[(address)]] + // Find the first '(' in dsn[j+1:i] + for k = j + 1; k < i; k++ { + if dsn[k] == '(' { + // dsn[i-1] must be == ')' if an address is specified + if dsn[i-1] != ')' { + if strings.ContainsRune(dsn[k+1:i], ')') { + return nil, errInvalidDSNUnescaped + } + return nil, errInvalidDSNAddr + } + cfg.addr = dsn[k+1 : i-1] + break + } + } + cfg.net = dsn[j+1 : k] + } + + // dbname[?param1=value1&...¶mN=valueN] + // Find the first '?' in dsn[i+1:] + for j = i + 1; j < len(dsn); j++ { + if dsn[j] == '?' 
{ + if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { + return + } + break + } + } + cfg.dbname = dsn[i+1 : j] + + break + } + } + + if !foundSlash && len(dsn) > 0 { + return nil, errInvalidDSNNoSlash + } + + if cfg.interpolateParams && unsafeCollations[cfg.collation] { + return nil, errInvalidDSNUnsafeCollation + } + + // Set default network if empty + if cfg.net == "" { + cfg.net = "tcp" + } + + // Set default address if empty + if cfg.addr == "" { + switch cfg.net { + case "tcp": + cfg.addr = "127.0.0.1:3306" + case "unix": + cfg.addr = "/tmp/mysql.sock" + default: + return nil, errors.New("Default addr for network '" + cfg.net + "' unknown") + } + + } + + return +} + +// parseDSNParams parses the DSN "query string" +// Values must be url.QueryEscape'ed +func parseDSNParams(cfg *config, params string) (err error) { + for _, v := range strings.Split(params, "&") { + param := strings.SplitN(v, "=", 2) + if len(param) != 2 { + continue + } + + // cfg params + switch value := param[1]; param[0] { + + // Enable client side placeholder substitution + case "interpolateParams": + var isBool bool + cfg.interpolateParams, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Disable INFILE whitelist / enable all files + case "allowAllFiles": + var isBool bool + cfg.allowAllFiles, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Use cleartext authentication mode (MySQL 5.5.10+) + case "allowCleartextPasswords": + var isBool bool + cfg.allowCleartextPasswords, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Use old authentication mode (pre MySQL 4.1) + case "allowOldPasswords": + var isBool bool + cfg.allowOldPasswords, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Switch "rowsAffected" mode + case "clientFoundRows": + var isBool bool + 
cfg.clientFoundRows, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Collation + case "collation": + collation, ok := collations[value] + if !ok { + // Note possibility for false negatives: + // could be triggered although the collation is valid if the + // collations map does not contain entries the server supports. + err = errors.New("unknown collation") + return + } + cfg.collation = collation + break + + case "columnsWithAlias": + var isBool bool + cfg.columnsWithAlias, isBool = readBool(value) + if !isBool { + return fmt.Errorf("Invalid Bool value: %s", value) + } + + // Time Location + case "loc": + if value, err = url.QueryUnescape(value); err != nil { + return + } + cfg.loc, err = time.LoadLocation(value) + if err != nil { + return + } + + // Dial Timeout + case "timeout": + cfg.timeout, err = time.ParseDuration(value) + if err != nil { + return + } + + // TLS-Encryption + case "tls": + boolValue, isBool := readBool(value) + if isBool { + if boolValue { + cfg.tls = &tls.Config{} + } + } else { + if strings.ToLower(value) == "skip-verify" { + cfg.tls = &tls.Config{InsecureSkipVerify: true} + } else if tlsConfig, ok := tlsConfigRegister[value]; ok { + if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify { + host, _, err := net.SplitHostPort(cfg.addr) + if err == nil { + tlsConfig.ServerName = host + } + } + + cfg.tls = tlsConfig + } else { + return fmt.Errorf("Invalid value / unknown config name: %s", value) + } + } + + default: + // lazy init + if cfg.params == nil { + cfg.params = make(map[string]string) + } + + if cfg.params[param[0]], err = url.QueryUnescape(value); err != nil { + return + } + } + } + + return +} + +// Returns the bool value of the input. 
+// The 2nd return value indicates if the input was a valid bool value +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} + +/****************************************************************************** +* Authentication * +******************************************************************************/ + +// Encrypt password using 4.1+ method +func scramblePassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + // stage1Hash = SHA1(password) + crypt := sha1.New() + crypt.Write(password) + stage1 := crypt.Sum(nil) + + // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) + // inner Hash + crypt.Reset() + crypt.Write(stage1) + hash := crypt.Sum(nil) + + // outer Hash + crypt.Reset() + crypt.Write(scramble) + crypt.Write(hash) + scramble = crypt.Sum(nil) + + // token = scrambleHash XOR stage1Hash + for i := range scramble { + scramble[i] ^= stage1[i] + } + return scramble +} + +// Encrypt password using pre 4.1 (old password) method +// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c +type myRnd struct { + seed1, seed2 uint32 +} + +const myRndMaxVal = 0x3FFFFFFF + +// Pseudo random number generator +func newMyRnd(seed1, seed2 uint32) *myRnd { + return &myRnd{ + seed1: seed1 % myRndMaxVal, + seed2: seed2 % myRndMaxVal, + } +} + +// Tested to be equivalent to MariaDB's floating point variant +// http://play.golang.org/p/QHvhd4qved +// http://play.golang.org/p/RG0q4ElWDx +func (r *myRnd) NextByte() byte { + r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal + r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal + + return byte(uint64(r.seed1) * 31 / myRndMaxVal) +} + +// Generate binary hash from byte string using insecure pre 4.1 method +func pwHash(password []byte) (result [2]uint32) { + var add uint32 = 7 + var tmp uint32 + + result[0] = 
1345345333 + result[1] = 0x12345671 + + for _, c := range password { + // skip spaces and tabs in password + if c == ' ' || c == '\t' { + continue + } + + tmp = uint32(c) + result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) + result[1] += (result[1] << 8) ^ result[0] + add += tmp + } + + // Remove sign bit (1<<31)-1) + result[0] &= 0x7FFFFFFF + result[1] &= 0x7FFFFFFF + + return +} + +// Encrypt password using insecure pre 4.1 method +func scrambleOldPassword(scramble, password []byte) []byte { + if len(password) == 0 { + return nil + } + + scramble = scramble[:8] + + hashPw := pwHash(password) + hashSc := pwHash(scramble) + + r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) + + var out [8]byte + for i := range out { + out[i] = r.NextByte() + 64 + } + + mask := r.NextByte() + for i := range out { + out[i] ^= mask + } + + return out[:] +} + +/****************************************************************************** +* Time related utils * +******************************************************************************/ + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... +// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +// The value type must be time.Time or string / []byte (formatted time-string), +// otherwise Scan fails. 
+func (nt *NullTime) Scan(value interface{}) (err error) { + if value == nil { + nt.Time, nt.Valid = time.Time{}, false + return + } + + switch v := value.(type) { + case time.Time: + nt.Time, nt.Valid = v, true + return + case []byte: + nt.Time, err = parseDateTime(string(v), time.UTC) + nt.Valid = (err == nil) + return + case string: + nt.Time, err = parseDateTime(v, time.UTC) + nt.Valid = (err == nil) + return + } + + nt.Valid = false + return fmt.Errorf("Can't convert %T to time.Time", value) +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} + +func parseDateTime(str string, loc *time.Location) (t time.Time, err error) { + base := "0000-00-00 00:00:00.0000000" + switch len(str) { + case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" + if str == base[:len(str)] { + return + } + t, err = time.Parse(timeFormat[:len(str)], str) + default: + err = fmt.Errorf("Invalid Time-String: %s", str) + return + } + + // Adjust location + if err == nil && loc != time.UTC { + y, mo, d := t.Date() + h, mi, s := t.Clock() + t, err = time.Date(y, mo, d, h, mi, s, t.Nanosecond(), loc), nil + } + + return +} + +func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { + switch num { + case 0: + return time.Time{}, nil + case 4: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + 0, 0, 0, 0, + loc, + ), nil + case 7: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + 0, + loc, + ), nil + case 11: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + 
int(data[6]), // seconds + int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds + loc, + ), nil + } + return nil, fmt.Errorf("Invalid DATETIME-packet length %d", num) +} + +// zeroDateTime is used in formatBinaryDateTime to avoid an allocation +// if the DATE or DATETIME has the zero value. +// It must never be changed. +// The current behavior depends on database/sql copying the result. +var zeroDateTime = []byte("0000-00-00 00:00:00.000000") + +const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" + +func formatBinaryDateTime(src []byte, length uint8, justTime bool) (driver.Value, error) { + // length expects the deterministic length of the zero value, + // negative time and 100+ hours are automatically added if needed + if len(src) == 0 { + if justTime { + return zeroDateTime[11 : 11+length], nil + } + return zeroDateTime[:length], nil + } + var dst []byte // return value + var pt, p1, p2, p3 byte // current digit pair + var zOffs byte // offset of value in zeroDateTime + if justTime { + switch length { + case + 8, // time (can be up to 10 when negative and 100+ hours) + 10, 11, 12, 13, 14, 15: // time with fractional seconds + default: + return nil, fmt.Errorf("illegal TIME length %d", length) + } + switch len(src) { + case 8, 12: + default: + return nil, fmt.Errorf("Invalid TIME-packet length %d", len(src)) + } + // +2 to enable negative time and 100+ hours + dst = make([]byte, 0, length+2) + if src[0] == 1 { + dst = append(dst, '-') + } + if src[1] != 0 { + hour := uint16(src[1])*24 + uint16(src[5]) + pt = byte(hour / 100) + p1 = byte(hour - 100*uint16(pt)) + dst = append(dst, digits01[pt]) + } else { + p1 = src[5] + } + zOffs = 11 + src = src[6:] + } else { + switch length { + case 10, 19, 21, 22, 23, 24, 25, 26: + default: + t := "DATE" + if length > 10 { + t += 
"TIME" + } + return nil, fmt.Errorf("illegal %s length %d", t, length) + } + switch len(src) { + case 4, 7, 11: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s-packet length %d", t, len(src)) + } + dst = make([]byte, 0, length) + // start with the date + year := binary.LittleEndian.Uint16(src[:2]) + pt = byte(year / 100) + p1 = byte(year - 100*uint16(pt)) + p2, p3 = src[2], src[3] + dst = append(dst, + digits10[pt], digits01[pt], + digits10[p1], digits01[p1], '-', + digits10[p2], digits01[p2], '-', + digits10[p3], digits01[p3], + ) + if length == 10 { + return dst, nil + } + if len(src) == 4 { + return append(dst, zeroDateTime[10:length]...), nil + } + dst = append(dst, ' ') + p1 = src[4] // hour + src = src[5:] + } + // p1 is 2-digit hour, src is after hour + p2, p3 = src[0], src[1] + dst = append(dst, + digits10[p1], digits01[p1], ':', + digits10[p2], digits01[p2], ':', + digits10[p3], digits01[p3], + ) + if length <= byte(len(dst)) { + return dst, nil + } + src = src[2:] + if len(src) == 0 { + return append(dst, zeroDateTime[19:zOffs+length]...), nil + } + microsecs := binary.LittleEndian.Uint32(src[:4]) + p1 = byte(microsecs / 10000) + microsecs -= 10000 * uint32(p1) + p2 = byte(microsecs / 100) + microsecs -= 100 * uint32(p2) + p3 = byte(microsecs) + switch decimals := zOffs + length - 20; decimals { + default: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], digits01[p3], + ), nil + case 1: + return append(dst, '.', + digits10[p1], + ), nil + case 2: + return append(dst, '.', + digits10[p1], digits01[p1], + ), nil + case 3: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], + ), nil + case 4: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + ), nil + case 5: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], + ), nil + } +} + 
+/****************************************************************************** +* Convert from and to bytes * +******************************************************************************/ + +func uint64ToBytes(n uint64) []byte { + return []byte{ + byte(n), + byte(n >> 8), + byte(n >> 16), + byte(n >> 24), + byte(n >> 32), + byte(n >> 40), + byte(n >> 48), + byte(n >> 56), + } +} + +func uint64ToString(n uint64) []byte { + var a [20]byte + i := 20 + + // U+0030 = 0 + // ... + // U+0039 = 9 + + var q uint64 + for n >= 10 { + i-- + q = n / 10 + a[i] = uint8(n-q*10) + 0x30 + n = q + } + + i-- + a[i] = uint8(n) + 0x30 + + return a[i:] +} + +// treats string value as unsigned integer representation +func stringToInt(b []byte) int { + val := 0 + for i := range b { + val *= 10 + val += int(b[i] - 0x30) + } + return val +} + +// returns the string read as a bytes slice, wheter the value is NULL, +// the number of bytes read and an error, in case the string is longer than +// the input slice +func readLengthEncodedString(b []byte) ([]byte, bool, int, error) { + // Get length + num, isNull, n := readLengthEncodedInteger(b) + if num < 1 { + return b[n:n], isNull, n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return b[n-int(num) : n], false, n, nil + } + return nil, false, n, io.EOF +} + +// returns the number of bytes skipped and an error, in case the string is +// longer than the input slice +func skipLengthEncodedString(b []byte) (int, error) { + // Get length + num, _, n := readLengthEncodedInteger(b) + if num < 1 { + return n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return n, nil + } + return n, io.EOF +} + +// returns the number read, whether the value is NULL and the number of bytes read +func readLengthEncodedInteger(b []byte) (uint64, bool, int) { + // See issue #349 + if len(b) == 0 { + return 0, true, 1 + } + switch b[0] { + + // 251: NULL + case 0xfb: + return 0, true, 1 + + // 252: value of following 
2 + case 0xfc: + return uint64(b[1]) | uint64(b[2])<<8, false, 3 + + // 253: value of following 3 + case 0xfd: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 + + // 254: value of following 8 + case 0xfe: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | + uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | + uint64(b[7])<<48 | uint64(b[8])<<56, + false, 9 + } + + // 0-250: value of first byte + return uint64(b[0]), false, 1 +} + +// encodes a uint64 value and appends it to the given bytes slice +func appendLengthEncodedInteger(b []byte, n uint64) []byte { + switch { + case n <= 250: + return append(b, byte(n)) + + case n <= 0xffff: + return append(b, 0xfc, byte(n), byte(n>>8)) + + case n <= 0xffffff: + return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) + } + return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), + byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) +} + +// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize. +// If cap(buf) is not enough, reallocate new buffer. +func reserveBuffer(buf []byte, appendSize int) []byte { + newSize := len(buf) + appendSize + if cap(buf) < newSize { + // Grow buffer exponentially + newBuf := make([]byte, len(buf)*2+appendSize) + copy(newBuf, buf) + buf = newBuf + } + return buf[:newSize] +} + +// escapeBytesBackslash escapes []byte with backslashes (\) +// This escapes the contents of a string (provided as []byte) by adding backslashes before special +// characters, and turning others into specific escape sequences, such as +// turning newlines into \n and null bytes into \0. 
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 +func escapeBytesBackslash(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos += 1 + } + } + + return buf[:pos] +} + +// escapeStringBackslash is similar to escapeBytesBackslash but for string. +func escapeStringBackslash(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos += 1 + } + } + + return buf[:pos] +} + +// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. +// This escapes the contents of a string by doubling up any apostrophes that +// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in +// effect on the server. 
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 +func escapeBytesQuotes(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringQuotes is similar to escapeBytesQuotes but for string. +func escapeStringQuotes(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} diff --git a/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go new file mode 100644 index 000000000..79fbdd1eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/go-sql-driver/mysql/utils_test.go @@ -0,0 +1,346 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "crypto/tls" + "encoding/binary" + "fmt" + "testing" + "time" +) + +var testDSNs = []struct { + in string + out string + loc *time.Location +}{ + {"username:password@protocol(address)/dbname?param=value", "&{user:username passwd:password net:protocol addr:address dbname:dbname params:map[param:value] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"username:password@protocol(address)/dbname?param=value&columnsWithAlias=true", "&{user:username passwd:password net:protocol addr:address dbname:dbname params:map[param:value] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:true interpolateParams:false}", time.UTC}, + {"user@unix(/path/to/socket)/dbname?charset=utf8", "&{user:user passwd: net:unix addr:/path/to/socket dbname:dbname params:map[charset:utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"user:password@tcp(localhost:5555)/dbname?charset=utf8&tls=true", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"user:password@tcp(localhost:5555)/dbname?charset=utf8mb4,utf8&tls=skip-verify", "&{user:user passwd:password net:tcp addr:localhost:5555 dbname:dbname params:map[charset:utf8mb4,utf8] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + 
{"user:password@/dbname?loc=UTC&timeout=30s&allowAllFiles=1&clientFoundRows=true&allowOldPasswords=TRUE&collation=utf8mb4_unicode_ci", "&{user:user passwd:password net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p tls: timeout:30000000000 collation:224 allowAllFiles:true allowOldPasswords:true allowCleartextPasswords:false clientFoundRows:true columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"user:p@ss(word)@tcp([de:ad:be:ef::ca:fe]:80)/dbname?loc=Local", "&{user:user passwd:p@ss(word) net:tcp addr:[de:ad:be:ef::ca:fe]:80 dbname:dbname params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.Local}, + {"/dbname", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname:dbname params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"@/", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"/", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"", "&{user: passwd: net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"user:p@/ssword@/", "&{user:user passwd:p@/ssword net:tcp addr:127.0.0.1:3306 dbname: params:map[] loc:%p tls: timeout:0 collation:33 
allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, + {"unix/?arg=%2Fsome%2Fpath.ext", "&{user: passwd: net:unix addr:/tmp/mysql.sock dbname: params:map[arg:/some/path.ext] loc:%p tls: timeout:0 collation:33 allowAllFiles:false allowOldPasswords:false allowCleartextPasswords:false clientFoundRows:false columnsWithAlias:false interpolateParams:false}", time.UTC}, +} + +func TestDSNParser(t *testing.T) { + var cfg *config + var err error + var res string + + for i, tst := range testDSNs { + cfg, err = parseDSN(tst.in) + if err != nil { + t.Error(err.Error()) + } + + // pointer not static + cfg.tls = nil + + res = fmt.Sprintf("%+v", cfg) + if res != fmt.Sprintf(tst.out, tst.loc) { + t.Errorf("%d. parseDSN(%q) => %q, want %q", i, tst.in, res, fmt.Sprintf(tst.out, tst.loc)) + } + } +} + +func TestDSNParserInvalid(t *testing.T) { + var invalidDSNs = []string{ + "@net(addr/", // no closing brace + "@tcp(/", // no closing brace + "tcp(/", // no closing brace + "(/", // no closing brace + "net(addr)//", // unescaped + "user:pass@tcp(1.2.3.4:3306)", // no trailing slash + //"/dbname?arg=/some/unescaped/path", + } + + for i, tst := range invalidDSNs { + if _, err := parseDSN(tst); err == nil { + t.Errorf("invalid DSN #%d. (%s) didn't error!", i, tst) + } + } +} + +func TestDSNWithCustomTLS(t *testing.T) { + baseDSN := "user:password@tcp(localhost:5555)/dbname?tls=" + tlsCfg := tls.Config{} + + RegisterTLSConfig("utils_test", &tlsCfg) + + // Custom TLS is missing + tst := baseDSN + "invalid_tls" + cfg, err := parseDSN(tst) + if err == nil { + t.Errorf("Invalid custom TLS in DSN (%s) but did not error. 
Got config: %#v", tst, cfg) + } + + tst = baseDSN + "utils_test" + + // Custom TLS with a server name + name := "foohost" + tlsCfg.ServerName = name + cfg, err = parseDSN(tst) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("Did not get the correct TLS ServerName (%s) parsing DSN (%s).", name, tst) + } + + // Custom TLS without a server name + name = "localhost" + tlsCfg.ServerName = "" + cfg, err = parseDSN(tst) + + if err != nil { + t.Error(err.Error()) + } else if cfg.tls.ServerName != name { + t.Errorf("Did not get the correct ServerName (%s) parsing DSN (%s).", name, tst) + } + + DeregisterTLSConfig("utils_test") +} + +func TestDSNUnsafeCollation(t *testing.T) { + _, err := parseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=true") + if err != errInvalidDSNUnsafeCollation { + t.Error("Expected %v, Got %v", errInvalidDSNUnsafeCollation, err) + } + + _, err = parseDSN("/dbname?collation=gbk_chinese_ci&interpolateParams=false") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=gbk_chinese_ci") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=ascii_bin&interpolateParams=true") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=latin1_german1_ci&interpolateParams=true") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=utf8_general_ci&interpolateParams=true") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } + + _, err = parseDSN("/dbname?collation=utf8mb4_general_ci&interpolateParams=true") + if err != nil { + t.Error("Expected %v, Got %v", nil, err) + } +} + +func BenchmarkParseDSN(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + for _, tst := range testDSNs { + if _, err := parseDSN(tst.in); err != nil { + b.Error(err.Error()) + } + } + } +} + +func 
TestScanNullTime(t *testing.T) { + var scanTests = []struct { + in interface{} + error bool + valid bool + time time.Time + }{ + {tDate, false, true, tDate}, + {sDate, false, true, tDate}, + {[]byte(sDate), false, true, tDate}, + {tDateTime, false, true, tDateTime}, + {sDateTime, false, true, tDateTime}, + {[]byte(sDateTime), false, true, tDateTime}, + {tDate0, false, true, tDate0}, + {sDate0, false, true, tDate0}, + {[]byte(sDate0), false, true, tDate0}, + {sDateTime0, false, true, tDate0}, + {[]byte(sDateTime0), false, true, tDate0}, + {"", true, false, tDate0}, + {"1234", true, false, tDate0}, + {0, true, false, tDate0}, + } + + var nt = NullTime{} + var err error + + for _, tst := range scanTests { + err = nt.Scan(tst.in) + if (err != nil) != tst.error { + t.Errorf("%v: expected error status %t, got %t", tst.in, tst.error, (err != nil)) + } + if nt.Valid != tst.valid { + t.Errorf("%v: expected valid status %t, got %t", tst.in, tst.valid, nt.Valid) + } + if nt.Time != tst.time { + t.Errorf("%v: expected time %v, got %v", tst.in, tst.time, nt.Time) + } + } +} + +func TestLengthEncodedInteger(t *testing.T) { + var integerTests = []struct { + num uint64 + encoded []byte + }{ + {0x0000000000000000, []byte{0x00}}, + {0x0000000000000012, []byte{0x12}}, + {0x00000000000000fa, []byte{0xfa}}, + {0x0000000000000100, []byte{0xfc, 0x00, 0x01}}, + {0x0000000000001234, []byte{0xfc, 0x34, 0x12}}, + {0x000000000000ffff, []byte{0xfc, 0xff, 0xff}}, + {0x0000000000010000, []byte{0xfd, 0x00, 0x00, 0x01}}, + {0x0000000000123456, []byte{0xfd, 0x56, 0x34, 0x12}}, + {0x0000000000ffffff, []byte{0xfd, 0xff, 0xff, 0xff}}, + {0x0000000001000000, []byte{0xfe, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00}}, + {0x123456789abcdef0, []byte{0xfe, 0xf0, 0xde, 0xbc, 0x9a, 0x78, 0x56, 0x34, 0x12}}, + {0xffffffffffffffff, []byte{0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}, + } + + for _, tst := range integerTests { + num, isNull, numLen := readLengthEncodedInteger(tst.encoded) + if 
isNull { + t.Errorf("%x: expected %d, got NULL", tst.encoded, tst.num) + } + if num != tst.num { + t.Errorf("%x: expected %d, got %d", tst.encoded, tst.num, num) + } + if numLen != len(tst.encoded) { + t.Errorf("%x: expected size %d, got %d", tst.encoded, len(tst.encoded), numLen) + } + encoded := appendLengthEncodedInteger(nil, num) + if !bytes.Equal(encoded, tst.encoded) { + t.Errorf("%v: expected %x, got %x", num, tst.encoded, encoded) + } + } +} + +func TestOldPass(t *testing.T) { + scramble := []byte{9, 8, 7, 6, 5, 4, 3, 2} + vectors := []struct { + pass string + out string + }{ + {" pass", "47575c5a435b4251"}, + {"pass ", "47575c5a435b4251"}, + {"123\t456", "575c47505b5b5559"}, + {"C0mpl!ca ted#PASS123", "5d5d554849584a45"}, + } + for _, tuple := range vectors { + ours := scrambleOldPassword(scramble, []byte(tuple.pass)) + if tuple.out != fmt.Sprintf("%x", ours) { + t.Errorf("Failed old password %q", tuple.pass) + } + } +} + +func TestFormatBinaryDateTime(t *testing.T) { + rawDate := [11]byte{} + binary.LittleEndian.PutUint16(rawDate[:2], 1978) // years + rawDate[2] = 12 // months + rawDate[3] = 30 // days + rawDate[4] = 15 // hours + rawDate[5] = 46 // minutes + rawDate[6] = 23 // seconds + binary.LittleEndian.PutUint32(rawDate[7:], 987654) // microseconds + expect := func(expected string, inlen, outlen uint8) { + actual, _ := formatBinaryDateTime(rawDate[:inlen], outlen, false) + bytes, ok := actual.([]byte) + if !ok { + t.Errorf("formatBinaryDateTime must return []byte, was %T", actual) + } + if string(bytes) != expected { + t.Errorf( + "expected %q, got %q for length in %d, out %d", + bytes, actual, inlen, outlen, + ) + } + } + expect("0000-00-00", 0, 10) + expect("0000-00-00 00:00:00", 0, 19) + expect("1978-12-30", 4, 10) + expect("1978-12-30 15:46:23", 7, 19) + expect("1978-12-30 15:46:23.987654", 11, 26) +} + +func TestEscapeBackslash(t *testing.T) { + expect := func(expected, value string) { + actual := string(escapeBytesBackslash([]byte{}, 
[]byte(value))) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + + actual = string(escapeStringBackslash([]byte{}, value)) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + } + + expect("foo\\0bar", "foo\x00bar") + expect("foo\\nbar", "foo\nbar") + expect("foo\\rbar", "foo\rbar") + expect("foo\\Zbar", "foo\x1abar") + expect("foo\\\"bar", "foo\"bar") + expect("foo\\\\bar", "foo\\bar") + expect("foo\\'bar", "foo'bar") +} + +func TestEscapeQuotes(t *testing.T) { + expect := func(expected, value string) { + actual := string(escapeBytesQuotes([]byte{}, []byte(value))) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + + actual = string(escapeStringQuotes([]byte{}, value)) + if actual != expected { + t.Errorf( + "expected %s, got %s", + expected, actual, + ) + } + } + + expect("foo\x00bar", "foo\x00bar") // not affected + expect("foo\nbar", "foo\nbar") // not affected + expect("foo\rbar", "foo\rbar") // not affected + expect("foo\x1abar", "foo\x1abar") // not affected + expect("foo''bar", "foo'bar") // affected + expect("foo\"bar", "foo\"bar") // not affected +} diff --git a/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml new file mode 100644 index 000000000..d87d46576 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/Godeps/_workspace/src/github.com/gorilla/context/LICENSE b/Godeps/_workspace/src/github.com/gorilla/context/LICENSE new file mode 100644 index 000000000..0e5fb8728 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/context/README.md b/Godeps/_workspace/src/github.com/gorilla/context/README.md new file mode 100644 index 000000000..c60a31b05 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/README.md @@ -0,0 +1,7 @@ +context +======= +[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) + +gorilla/context is a general purpose registry for global request variables. 
+ +Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/Godeps/_workspace/src/github.com/gorilla/context/context.go b/Godeps/_workspace/src/github.com/gorilla/context/context.go new file mode 100644 index 000000000..81cb128b1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/context.go @@ -0,0 +1,143 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context + +import ( + "net/http" + "sync" + "time" +) + +var ( + mutex sync.RWMutex + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) +) + +// Set stores a value for a given key in a given request. +func Set(r *http.Request, key, val interface{}) { + mutex.Lock() + if data[r] == nil { + data[r] = make(map[interface{}]interface{}) + datat[r] = time.Now().Unix() + } + data[r][key] = val + mutex.Unlock() +} + +// Get returns a value stored for a given key in a given request. +func Get(r *http.Request, key interface{}) interface{} { + mutex.RLock() + if ctx := data[r]; ctx != nil { + value := ctx[key] + mutex.RUnlock() + return value + } + mutex.RUnlock() + return nil +} + +// GetOk returns stored value and presence state like multi-value return of map access. +func GetOk(r *http.Request, key interface{}) (interface{}, bool) { + mutex.RLock() + if _, ok := data[r]; ok { + value, ok := data[r][key] + mutex.RUnlock() + return value, ok + } + mutex.RUnlock() + return nil, false +} + +// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. 
+func GetAll(r *http.Request) map[interface{}]interface{} { + mutex.RLock() + if context, ok := data[r]; ok { + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result + } + mutex.RUnlock() + return nil +} + +// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if +// the request was registered. +func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { + mutex.RLock() + context, ok := data[r] + result := make(map[interface{}]interface{}, len(context)) + for k, v := range context { + result[k] = v + } + mutex.RUnlock() + return result, ok +} + +// Delete removes a value stored for a given key in a given request. +func Delete(r *http.Request, key interface{}) { + mutex.Lock() + if data[r] != nil { + delete(data[r], key) + } + mutex.Unlock() +} + +// Clear removes all values stored for a given request. +// +// This is usually called by a handler wrapper to clean up request +// variables at the end of a request lifetime. See ClearHandler(). +func Clear(r *http.Request) { + mutex.Lock() + clear(r) + mutex.Unlock() +} + +// clear is Clear without the lock. +func clear(r *http.Request) { + delete(data, r) + delete(datat, r) +} + +// Purge removes request data stored for longer than maxAge, in seconds. +// It returns the amount of requests removed. +// +// If maxAge <= 0, all request data is removed. +// +// This is only used for sanity check: in case context cleaning was not +// properly set some request data can be kept forever, consuming an increasing +// amount of memory. In case this is detected, Purge() must be called +// periodically until the problem is fixed. 
+func Purge(maxAge int) int { + mutex.Lock() + count := 0 + if maxAge <= 0 { + count = len(data) + data = make(map[*http.Request]map[interface{}]interface{}) + datat = make(map[*http.Request]int64) + } else { + min := time.Now().Unix() - int64(maxAge) + for r := range data { + if datat[r] < min { + clear(r) + count++ + } + } + } + mutex.Unlock() + return count +} + +// ClearHandler wraps an http.Handler and clears request values at the end +// of a request lifetime. +func ClearHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer Clear(r) + h.ServeHTTP(w, r) + }) +} diff --git a/Godeps/_workspace/src/github.com/gorilla/context/context_test.go b/Godeps/_workspace/src/github.com/gorilla/context/context_test.go new file mode 100644 index 000000000..9814c501e --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/context_test.go @@ -0,0 +1,161 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package context + +import ( + "net/http" + "testing" +) + +type keyType int + +const ( + key1 keyType = iota + key2 +) + +func TestContext(t *testing.T) { + assertEqual := func(val interface{}, exp interface{}) { + if val != exp { + t.Errorf("Expected %v, got %v.", exp, val) + } + } + + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + + // Get() + assertEqual(Get(r, key1), nil) + + // Set() + Set(r, key1, "1") + assertEqual(Get(r, key1), "1") + assertEqual(len(data[r]), 1) + + Set(r, key2, "2") + assertEqual(Get(r, key2), "2") + assertEqual(len(data[r]), 2) + + //GetOk + value, ok := GetOk(r, key1) + assertEqual(value, "1") + assertEqual(ok, true) + + value, ok = GetOk(r, "not exists") + assertEqual(value, nil) + assertEqual(ok, false) + + Set(r, "nil value", nil) + value, ok = GetOk(r, "nil value") + assertEqual(value, nil) + assertEqual(ok, true) + + // GetAll() + values := GetAll(r) + assertEqual(len(values), 3) + + // GetAll() for empty request + values = GetAll(emptyR) + if values != nil { + t.Error("GetAll didn't return nil value for invalid request") + } + + // GetAllOk() + values, ok = GetAllOk(r) + assertEqual(len(values), 3) + assertEqual(ok, true) + + // GetAllOk() for empty request + values, ok = GetAllOk(emptyR) + assertEqual(value, nil) + assertEqual(ok, false) + + // Delete() + Delete(r, key1) + assertEqual(Get(r, key1), nil) + assertEqual(len(data[r]), 2) + + Delete(r, key2) + assertEqual(Get(r, key2), nil) + assertEqual(len(data[r]), 1) + + // Clear() + Clear(r) + assertEqual(len(data), 0) +} + +func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Get(r, key) + } + done <- struct{}{} + +} + +func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { + <-wait + for i := 0; i < iterations; i++ { + Set(r, key, value) + } + done <- 
struct{}{} + +} + +func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { + + b.StopTimer() + r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + done := make(chan struct{}) + b.StartTimer() + + for i := 0; i < b.N; i++ { + wait := make(chan struct{}) + + for i := 0; i < numReaders; i++ { + go parallelReader(r, "test", iterations, wait, done) + } + + for i := 0; i < numWriters; i++ { + go parallelWriter(r, "test", "123", iterations, wait, done) + } + + close(wait) + + for i := 0; i < numReaders+numWriters; i++ { + <-done + } + + } + +} + +func BenchmarkMutexSameReadWrite1(b *testing.B) { + benchmarkMutex(b, 1, 1, 32) +} +func BenchmarkMutexSameReadWrite2(b *testing.B) { + benchmarkMutex(b, 2, 2, 32) +} +func BenchmarkMutexSameReadWrite4(b *testing.B) { + benchmarkMutex(b, 4, 4, 32) +} +func BenchmarkMutex1(b *testing.B) { + benchmarkMutex(b, 2, 8, 32) +} +func BenchmarkMutex2(b *testing.B) { + benchmarkMutex(b, 16, 4, 64) +} +func BenchmarkMutex3(b *testing.B) { + benchmarkMutex(b, 1, 2, 128) +} +func BenchmarkMutex4(b *testing.B) { + benchmarkMutex(b, 128, 32, 256) +} +func BenchmarkMutex5(b *testing.B) { + benchmarkMutex(b, 1024, 2048, 64) +} +func BenchmarkMutex6(b *testing.B) { + benchmarkMutex(b, 2048, 1024, 512) +} diff --git a/Godeps/_workspace/src/github.com/gorilla/context/doc.go b/Godeps/_workspace/src/github.com/gorilla/context/doc.go new file mode 100644 index 000000000..73c740031 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/context/doc.go @@ -0,0 +1,82 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package context stores values shared during a request lifetime. + +For example, a router can set variables extracted from the URL and later +application handlers can access those values, or it can be used to store +sessions values to be saved at the end of a request. 
There are several +others common uses. + +The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: + + http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 + +Here's the basic usage: first define the keys that you will need. The key +type is interface{} so a key can be of any type that supports equality. +Here we define a key using a custom int type to avoid name collisions: + + package foo + + import ( + "github.com/gorilla/context" + ) + + type key int + + const MyKey key = 0 + +Then set a variable. Variables are bound to an http.Request object, so you +need a request instance to set a value: + + context.Set(r, MyKey, "bar") + +The application can later access the variable using the same key you provided: + + func MyHandler(w http.ResponseWriter, r *http.Request) { + // val is "bar". + val := context.Get(r, foo.MyKey) + + // returns ("bar", true) + val, ok := context.GetOk(r, foo.MyKey) + // ... + } + +And that's all about the basic usage. We discuss some other ideas below. + +Any type can be stored in the context. To enforce a given type, make the key +private and wrap Get() and Set() to accept and return values of a specific +type: + + type key int + + const mykey key = 0 + + // GetMyKey returns a value for this package from the request values. + func GetMyKey(r *http.Request) SomeType { + if rv := context.Get(r, mykey); rv != nil { + return rv.(SomeType) + } + return nil + } + + // SetMyKey sets a value for this package in the request values. + func SetMyKey(r *http.Request, val SomeType) { + context.Set(r, mykey, val) + } + +Variables must be cleared at the end of a request, to remove all values +that were stored. This can be done in an http.Handler, after a request was +served. Just call Clear() passing the request: + + context.Clear(r) + +...or use ClearHandler(), which conveniently wraps an http.Handler to clear +variables at the end of a request lifetime. 
+ +The Routers from the packages gorilla/mux and gorilla/pat call Clear() +so if you are using either of them you don't need to clear the context manually. +*/ +package context diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml new file mode 100644 index 000000000..d87d46576 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.0 + - 1.1 + - 1.2 + - tip diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE b/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE new file mode 100644 index 000000000..0e5fb8728 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/README.md b/Godeps/_workspace/src/github.com/gorilla/mux/README.md new file mode 100644 index 000000000..e60301b03 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/README.md @@ -0,0 +1,7 @@ +mux +=== +[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) + +gorilla/mux is a powerful URL router and dispatcher. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go new file mode 100644 index 000000000..c5f97b2b2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go @@ -0,0 +1,21 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mux + +import ( + "net/http" + "testing" +) + +func BenchmarkMux(b *testing.B) { + router := new(Router) + handler := func(w http.ResponseWriter, r *http.Request) {} + router.HandleFunc("/v1/{v1}", handler) + + request, _ := http.NewRequest("GET", "/v1/anything", nil) + for i := 0; i < b.N; i++ { + router.ServeHTTP(nil, request) + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/doc.go b/Godeps/_workspace/src/github.com/gorilla/mux/doc.go new file mode 100644 index 000000000..b2deed34c --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/doc.go @@ -0,0 +1,199 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts and paths can have variables with an optional regular + expression. + * Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. 
+ +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. For example: + + r := mux.NewRouter() + r.HandleFunc("/products/{key}", ProductHandler) + r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +The names are used to create a map of route variables which can be retrieved +calling mux.Vars(): + + vars := mux.Vars(request) + category := vars["category"] + +And this is all you need to know about the basic usage. More advanced options +are explained below. + +Routes can also be restricted to a domain or subdomain. Just define a host +pattern to be matched. They can also have variables: + + r := mux.NewRouter() + // Only matches if domain is "www.domain.com". + r.Host("www.domain.com") + // Matches a dynamic subdomain. + r.Host("{subdomain:[a-z]+}.domain.com") + +There are several other matchers that can be added. 
To match path prefixes: + + r.PathPrefix("/products/") + +...or HTTP methods: + + r.Methods("GET", "POST") + +...or URL schemes: + + r.Schemes("https") + +...or header values: + + r.Headers("X-Requested-With", "XMLHttpRequest") + +...or query values: + + r.Queries("key", "value") + +...or to use a custom matcher function: + + r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 + }) + +...and finally, it is possible to combine several matchers in a single route: + + r.HandleFunc("/products", ProductsHandler). + Host("www.domain.com"). + Methods("GET"). + Schemes("http") + +Setting the same matching conditions again and again can be boring, so we have +a way to group several routes that share the same requirements. +We call it "subrouting". + +For example, let's say we have several URLs that should only match when the +host is "www.domain.com". Create a route for that host and get a "subrouter" +from it: + + r := mux.NewRouter() + s := r.Host("www.domain.com").Subrouter() + +Then register routes in the subrouter: + + s.HandleFunc("/products/", ProductsHandler) + s.HandleFunc("/products/{key}", ProductHandler) + s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) + +The three URL paths we registered above will only be tested if the domain is +"www.domain.com", because the subrouter is tested first. This is not +only convenient, but also optimizes request matching. You can create +subrouters combining any attribute matchers accepted by a route. + +Subrouters can be used to create domain or path "namespaces": you define +subrouters in a central place and then parts of the app can register its +paths relatively to a given subrouter. + +There's one more thing about subroutes. 
When a subrouter has a path prefix, +the inner routes use it as base for their paths: + + r := mux.NewRouter() + s := r.PathPrefix("/products").Subrouter() + // "/products/" + s.HandleFunc("/", ProductsHandler) + // "/products/{key}/" + s.HandleFunc("/{key}/", ProductHandler) + // "/products/{key}/details" + s.HandleFunc("/{key}/details", ProductDetailsHandler) + +Now let's see how to build registered URLs. + +Routes can be named. All routes that define a name can have their URLs built, +or "reversed". We define a name calling Name() on a route. For example: + + r := mux.NewRouter() + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") + +To build a URL, get the route and call the URL() method, passing a sequence of +key/value pairs for the route variables. For the previous route, we would do: + + url, err := r.Get("article").URL("category", "technology", "id", "42") + +...and the result will be a url.URL with the following path: + + "/articles/technology/42" + +This also works for host variables: + + r := mux.NewRouter() + r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // url.String() will be "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +All variables defined in the route are required, and their values must +conform to the corresponding patterns. These requirements guarantee that a +generated URL will always match a registered route -- the only exception is +for explicitly defined "build-only" routes which never match. + +There's also a way to build only the URL host or path for a route: +use the methods URLHost() or URLPath() instead. 
For the previous route, +we would do: + + // "http://news.domain.com/" + host, err := r.Get("article").URLHost("subdomain", "news") + + // "/articles/technology/42" + path, err := r.Get("article").URLPath("category", "technology", "id", "42") + +And if you use subrouters, host and path defined separately can be built +as well: + + r := mux.NewRouter() + s := r.Host("{subdomain}.domain.com").Subrouter() + s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +*/ +package mux diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/mux.go b/Godeps/_workspace/src/github.com/gorilla/mux/mux.go new file mode 100644 index 000000000..5b5f8e7db --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/mux.go @@ -0,0 +1,353 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "fmt" + "net/http" + "path" + + "github.com/gorilla/context" +) + +// NewRouter returns a new router instance. +func NewRouter() *Router { + return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} +} + +// Router registers routes to be matched and dispatches a handler. +// +// It implements the http.Handler interface, so it can be registered to serve +// requests: +// +// var router = mux.NewRouter() +// +// func main() { +// http.Handle("/", router) +// } +// +// Or, for Google App Engine, register it in a init() function: +// +// func init() { +// http.Handle("/", router) +// } +// +// This will send all incoming requests to the router. +type Router struct { + // Configurable Handler to be used when no route matches. + NotFoundHandler http.Handler + // Parent route, if this is a subrouter. 
+ parent parentRoute + // Routes to be matched, in order. + routes []*Route + // Routes by name for URL building. + namedRoutes map[string]*Route + // See Router.StrictSlash(). This defines the flag for new routes. + strictSlash bool + // If true, do not clear the request context after handling the request + KeepContext bool +} + +// Match matches registered routes against the request. +func (r *Router) Match(req *http.Request, match *RouteMatch) bool { + for _, route := range r.routes { + if route.Match(req, match) { + return true + } + } + return false +} + +// ServeHTTP dispatches the handler registered in the matched route. +// +// When there is a match, the route variables can be retrieved calling +// mux.Vars(request). +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + // Clean path to canonical form and redirect. + if p := cleanPath(req.URL.Path); p != req.URL.Path { + + // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } + var match RouteMatch + var handler http.Handler + if r.Match(req, &match) { + handler = match.Handler + setVars(req, match.Vars) + setCurrentRoute(req, match.Route) + } + if handler == nil { + handler = r.NotFoundHandler + if handler == nil { + handler = http.NotFoundHandler() + } + } + if !r.KeepContext { + defer context.Clear(req) + } + handler.ServeHTTP(w, req) +} + +// Get returns a route registered with the given name. +func (r *Router) Get(name string) *Route { + return r.getNamedRoutes()[name] +} + +// GetRoute returns a route registered with the given name. This method +// was renamed to Get() and remains here for backwards compatibility. 
+func (r *Router) GetRoute(name string) *Route { + return r.getNamedRoutes()[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. +func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// getNamedRoutes returns the map where named routes are registered. +func (r *Router) getNamedRoutes() map[string]*Route { + if r.namedRoutes == nil { + if r.parent != nil { + r.namedRoutes = r.parent.getNamedRoutes() + } else { + r.namedRoutes = make(map[string]*Route) + } + } + return r.namedRoutes +} + +// getRegexpGroup returns regexp definitions from the parent route, if any. +func (r *Router) getRegexpGroup() *routeRegexpGroup { + if r.parent != nil { + return r.parent.getRegexpGroup() + } + return nil +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. 
+func (r *Router) NewRoute() *Route { + route := &Route{parent: r, strictSlash: r.strictSlash} + r.routes = append(r.routes, route) + return route +} + +// Handle registers a new route with a matcher for the URL path. +// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). +func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. +// See Route.Headers(). +func (r *Router) Headers(pairs ...string) *Route { + return r.NewRoute().Headers(pairs...) +} + +// Host registers a new route with a matcher for the URL host. +// See Route.Host(). +func (r *Router) Host(tpl string) *Route { + return r.NewRoute().Host(tpl) +} + +// MatcherFunc registers a new route with a custom matcher function. +// See Route.MatcherFunc(). +func (r *Router) MatcherFunc(f MatcherFunc) *Route { + return r.NewRoute().MatcherFunc(f) +} + +// Methods registers a new route with a matcher for HTTP methods. +// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). +func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). +func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) 
+} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. +type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := context.Get(r, varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. +func CurrentRoute(r *http.Request) *Route { + if rv := context.Get(r, routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func setVars(r *http.Request, val interface{}) { + context.Set(r, varsKey, val) +} + +func setCurrentRoute(r *http.Request, val interface{}) { + context.Set(r, routeKey, val) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. 
+func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +// mapFromPairs converts variadic string parameters to a string map. +func mapFromPairs(pairs ...string) (map[string]string, error) { + length := len(pairs) + if length%2 != 0 { + return nil, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. +func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMap returns true if the given key/value pairs exist in a given map. +func matchMap(toCheck map[string]string, toMatch map[string][]string, + canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v == value { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go new file mode 100644 index 000000000..e455bce8f --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go @@ -0,0 +1,943 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mux + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gorilla/context" +) + +type routeTest struct { + title string // title of the test + route *Route // the route being tested + request *http.Request // a request to test the route + vars map[string]string // the expected vars of the match + host string // the expected host of the match + path string // the expected path of the match + shouldMatch bool // whether the request is expected to match the route at all + shouldRedirect bool // whether the request should result in a redirect +} + +func TestHost(t *testing.T) { + // newRequestHost a new request with a method, url, and host header + newRequestHost := func(method, url, host string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + req.Host = host + return req + } + + tests := []routeTest{ + { + title: "Host route match", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route, wrong host in request URL", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with port, match", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: true, + }, + { + title: "Host route with port, wrong port in request URL", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: false, + }, + { + title: "Host route, match with host in request header", + route: new(Route).Host("aaa.bbb.ccc"), + 
request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route, wrong host in request header", + route: new(Route).Host("aaa.bbb.ccc"), + request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), + vars: map[string]string{}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, + { + title: "Host route with port, wrong host in request header", + route: new(Route).Host("aaa.bbb.ccc:1234"), + request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), + vars: map[string]string{}, + host: "aaa.bbb.ccc:1234", + path: "", + shouldMatch: false, + }, + { + title: "Host route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with pattern, wrong host in request URL", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + { + title: "Host route with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: "aaa.bbb.ccc", + path: "", + shouldMatch: true, + }, + { + title: "Host route with multiple patterns, wrong host in request URL", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, + host: 
"aaa.bbb.ccc", + path: "", + shouldMatch: false, + }, + } + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPath(t *testing.T) { + tests := []routeTest{ + { + title: "Path route, match", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route, match with trailing slash in request and path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + }, + { + title: "Path route, do not match with trailing slash in path", + route: new(Route).Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "Path route, do not match with trailing slash in request", + route: new(Route).Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: false, + }, + { + title: "Path route, wrong path in request in request URL", + route: new(Route).Path("/111/222/333"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with pattern, match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with pattern, URL in request does not match", + route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Path route with multiple patterns, 
match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Path route with multiple patterns, URL in request does not match", + route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, + host: "", + path: "/111/222/333", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestPathPrefix(t *testing.T) { + tests := []routeTest{ + { + title: "PathPrefix route, match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + }, + { + title: "PathPrefix route, match substring", + route: new(Route).PathPrefix("/1"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{}, + host: "", + path: "/1", + shouldMatch: true, + }, + { + title: "PathPrefix route, URL prefix in request does not match", + route: new(Route).PathPrefix("/111"), + request: newRequest("GET", "http://localhost/1/2/3"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: false, + }, + { + title: "PathPrefix route with pattern, match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with pattern, URL prefix in request does not match", + route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "222"}, + host: "", + path: "/111/222", + 
shouldMatch: false, + }, + { + title: "PathPrefix route with multiple patterns, match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/222/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: true, + }, + { + title: "PathPrefix route with multiple patterns, URL prefix in request does not match", + route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), + request: newRequest("GET", "http://localhost/111/aaa/333"), + vars: map[string]string{"v1": "111", "v2": "222"}, + host: "", + path: "/111/222", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestHostPath(t *testing.T) { + tests := []routeTest{ + { + title: "Host and Path route, match", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Host and Path route, wrong host in request URL", + route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Host and Path route with pattern, match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Host and Path route with pattern, URL in request does not match", + route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "bbb", "v2": "222"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: false, + }, + { + title: "Host 
and Path route with multiple patterns, match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: true, + }, + { + title: "Host and Path route with multiple patterns, URL in request does not match", + route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), + request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), + vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, + host: "aaa.bbb.ccc", + path: "/111/222/333", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestHeaders(t *testing.T) { + // newRequestHeaders creates a new request with a method, url, and headers + newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + for k, v := range headers { + req.Header.Add(k, v) + } + return req + } + + tests := []routeTest{ + { + title: "Headers route, match", + route: new(Route).Headers("foo", "bar", "baz", "ding"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Headers route, bad header values", + route: new(Route).Headers("foo", "bar", "baz", "ding"), + request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } + +} + +func TestMethods(t *testing.T) { + tests 
:= []routeTest{ + { + title: "Methods route, match GET", + route: new(Route).Methods("GET", "POST"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Methods route, match POST", + route: new(Route).Methods("GET", "POST"), + request: newRequest("POST", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Methods route, bad method", + route: new(Route).Methods("GET", "POST"), + request: newRequest("PUT", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestQueries(t *testing.T) { + tests := []routeTest{ + { + title: "Queries route, match", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, match with a query string out of order", + route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route, bad query", + route: new(Route).Queries("foo", "bar", "baz", "ding"), + request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + { + title: "Queries route with pattern, match", + route: new(Route).Queries("foo", "{v1}"), + 
request: newRequest("GET", "http://localhost?foo=bar"), + vars: map[string]string{"v1": "bar"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with multiple patterns, match", + route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), + request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=10"), + vars: map[string]string{"v1": "10"}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Queries route with regexp pattern, regexp does not match", + route: new(Route).Queries("foo", "{v1:[0-9]+}"), + request: newRequest("GET", "http://localhost?foo=a"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestSchemes(t *testing.T) { + tests := []routeTest{ + // Schemes + { + title: "Schemes route, match https", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "https://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Schemes route, match ftp", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "ftp://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "Schemes route, bad scheme", + route: new(Route).Schemes("https", "ftp"), + request: newRequest("GET", "http://localhost"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + for _, test := range tests { + testRoute(t, test) + } +} + +func TestMatcherFunc(t *testing.T) { + m := func(r *http.Request, m *RouteMatch) bool { + if r.URL.Host == "aaa.bbb.ccc" { + return true + } + return false + } + + tests := []routeTest{ + { + 
title: "MatchFunc route, match", + route: new(Route).MatcherFunc(m), + request: newRequest("GET", "http://aaa.bbb.ccc"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: true, + }, + { + title: "MatchFunc route, non-match", + route: new(Route).MatcherFunc(m), + request: newRequest("GET", "http://aaa.222.ccc"), + vars: map[string]string{}, + host: "", + path: "", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestSubRouter(t *testing.T) { + subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() + subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() + + tests := []routeTest{ + { + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://aaa.google.com/bbb"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + shouldMatch: true, + }, + { + route: subrouter1.Path("/{v2:[a-z]+}"), + request: newRequest("GET", "http://111.google.com/111"), + vars: map[string]string{"v1": "aaa", "v2": "bbb"}, + host: "aaa.google.com", + path: "/bbb", + shouldMatch: false, + }, + { + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + shouldMatch: true, + }, + { + route: subrouter2.Path("/baz/{v2}"), + request: newRequest("GET", "http://localhost/foo/bar"), + vars: map[string]string{"v1": "bar", "v2": "ding"}, + host: "", + path: "/foo/bar/baz/ding", + shouldMatch: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +func TestNamedRoutes(t *testing.T) { + r1 := NewRouter() + r1.NewRoute().Name("a") + r1.NewRoute().Name("b") + r1.NewRoute().Name("c") + + r2 := r1.NewRoute().Subrouter() + r2.NewRoute().Name("d") + r2.NewRoute().Name("e") + r2.NewRoute().Name("f") + + r3 := r2.NewRoute().Subrouter() + r3.NewRoute().Name("g") + r3.NewRoute().Name("h") + 
r3.NewRoute().Name("i") + + if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { + t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) + } else if r1.Get("i") == nil { + t.Errorf("Subroute name not registered") + } +} + +func TestStrictSlash(t *testing.T) { + r := NewRouter() + r.StrictSlash(true) + + tests := []routeTest{ + { + title: "Redirect path without slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path with slash", + route: r.NewRoute().Path("/111/"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111/", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Redirect path with slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111/"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Do not redirect path without slash", + route: r.NewRoute().Path("/111"), + request: newRequest("GET", "http://localhost/111"), + vars: map[string]string{}, + host: "", + path: "/111", + shouldMatch: true, + shouldRedirect: false, + }, + { + title: "Propagate StrictSlash to subrouters", + route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), + request: newRequest("GET", "http://localhost/static/images"), + vars: map[string]string{}, + host: "", + path: "/static/images/", + shouldMatch: true, + shouldRedirect: true, + }, + { + title: "Ignore StrictSlash for path prefix", + route: r.NewRoute().PathPrefix("/static/"), + request: newRequest("GET", "http://localhost/static/logo.png"), + vars: map[string]string{}, + host: "", + path: "/static/", + shouldMatch: true, + shouldRedirect: false, + }, + } + + for _, test := range tests { + testRoute(t, test) + } +} + +// 
---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +func getRouteTemplate(route *Route) string { + host, path := "none", "none" + if route.regexp != nil { + if route.regexp.host != nil { + host = route.regexp.host.template + } + if route.regexp.path != nil { + path = route.regexp.path.template + } + } + return fmt.Sprintf("Host: %v, Path: %v", host, path) +} + +func testRoute(t *testing.T, test routeTest) { + request := test.request + route := test.route + vars := test.vars + shouldMatch := test.shouldMatch + host := test.host + path := test.path + url := test.host + test.path + shouldRedirect := test.shouldRedirect + + var match RouteMatch + ok := route.Match(request, &match) + if ok != shouldMatch { + msg := "Should match" + if !shouldMatch { + msg = "Should not match" + } + t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) + return + } + if shouldMatch { + if test.vars != nil && !stringMapEqual(test.vars, match.Vars) { + t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) + return + } + if host != "" { + u, _ := test.route.URLHost(mapToPairs(match.Vars)...) + if host != u.Host { + t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) + return + } + } + if path != "" { + u, _ := route.URLPath(mapToPairs(match.Vars)...) + if path != u.Path { + t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) + return + } + } + if url != "" { + u, _ := route.URL(mapToPairs(match.Vars)...) 
+ if url != u.Host+u.Path { + t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route)) + return + } + } + if shouldRedirect && match.Handler == nil { + t.Errorf("(%v) Did not redirect", test.title) + return + } + if !shouldRedirect && match.Handler != nil { + t.Errorf("(%v) Unexpected redirect", test.title) + return + } + } +} + +// Tests that the context is cleared or not cleared properly depending on +// the configuration of the router +func TestKeepContext(t *testing.T) { + func1 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + res := new(http.ResponseWriter) + r.ServeHTTP(*res, req) + + if _, ok := context.GetOk(req, "t"); ok { + t.Error("Context should have been cleared at end of request") + } + + r.KeepContext = true + + req, _ = http.NewRequest("GET", "http://localhost/", nil) + context.Set(req, "t", 1) + + r.ServeHTTP(*res, req) + if _, ok := context.GetOk(req, "t"); !ok { + t.Error("Context should NOT have been cleared at end of request") + } + +} + +type TestA301ResponseWriter struct { + hh http.Header + status int +} + +func (ho TestA301ResponseWriter) Header() http.Header { + return http.Header(ho.hh) +} + +func (ho TestA301ResponseWriter) Write(b []byte) (int, error) { + return 0, nil +} + +func (ho TestA301ResponseWriter) WriteHeader(code int) { + ho.status = code +} + +func Test301Redirect(t *testing.T) { + m := make(http.Header) + + func1 := func(w http.ResponseWriter, r *http.Request) {} + func2 := func(w http.ResponseWriter, r *http.Request) {} + + r := NewRouter() + r.HandleFunc("/api/", func2).Name("func2") + r.HandleFunc("/", func1).Name("func1") + + req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) + + res := TestA301ResponseWriter{ + hh: m, + status: 0, + } + r.ServeHTTP(&res, req) + + if 
"http://localhost/api/?abc=def" != res.hh["Location"][0] { + t.Errorf("Should have complete URL with query string") + } +} + +// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW +func TestSubrouterHeader(t *testing.T) { + expected := "func1 response" + func1 := func(w http.ResponseWriter, r *http.Request) { + fmt.Fprint(w, expected) + } + func2 := func(http.ResponseWriter, *http.Request) {} + + r := NewRouter() + s := r.Headers("SomeSpecialHeader", "").Subrouter() + s.HandleFunc("/", func1).Name("func1") + r.HandleFunc("/", func2).Name("func2") + + req, _ := http.NewRequest("GET", "http://localhost/", nil) + req.Header.Add("SomeSpecialHeader", "foo") + match := new(RouteMatch) + matched := r.Match(req, match) + if !matched { + t.Errorf("Should match request") + } + if match.Route.GetName() != "func1" { + t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) + } + resp := NewRecorder() + match.Handler.ServeHTTP(resp, req) + if resp.Body.String() != expected { + t.Errorf("Expecting %q", expected) + } +} + +// mapToPairs converts a string map to a slice of string pairs +func mapToPairs(m map[string]string) []string { + var i int + p := make([]string, len(m)*2) + for k, v := range m { + p[i] = k + p[i+1] = v + i += 2 + } + return p +} + +// stringMapEqual checks the equality of two string maps +func stringMapEqual(m1, m2 map[string]string) bool { + nil1 := m1 == nil + nil2 := m2 == nil + if nil1 != nil2 || len(m1) != len(m2) { + return false + } + for k, v := range m1 { + if v != m2[k] { + return false + } + } + return true +} + +// newRequest is a helper function to create a new request with a method and url +func newRequest(method, url string) *http.Request { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + return req +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go new file mode 100644 index 000000000..1f7c190c0 --- 
/dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go @@ -0,0 +1,714 @@ +// Old tests ported to Go1. This is a mess. Want to drop it one day. + +// Copyright 2011 Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "net/http" + "testing" +) + +// ---------------------------------------------------------------------------- +// ResponseRecorder +// ---------------------------------------------------------------------------- +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// ResponseRecorder is an implementation of http.ResponseWriter that +// records its mutations for later inspection in tests. +type ResponseRecorder struct { + Code int // the HTTP response code from WriteHeader + HeaderMap http.Header // the HTTP response headers + Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to + Flushed bool +} + +// NewRecorder returns an initialized ResponseRecorder. +func NewRecorder() *ResponseRecorder { + return &ResponseRecorder{ + HeaderMap: make(http.Header), + Body: new(bytes.Buffer), + } +} + +// DefaultRemoteAddr is the default remote address to return in RemoteAddr if +// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. +const DefaultRemoteAddr = "1.2.3.4" + +// Header returns the response headers. +func (rw *ResponseRecorder) Header() http.Header { + return rw.HeaderMap +} + +// Write always succeeds and writes to rw.Body, if not nil. +func (rw *ResponseRecorder) Write(buf []byte) (int, error) { + if rw.Body != nil { + rw.Body.Write(buf) + } + if rw.Code == 0 { + rw.Code = http.StatusOK + } + return len(buf), nil +} + +// WriteHeader sets rw.Code. +func (rw *ResponseRecorder) WriteHeader(code int) { + rw.Code = code +} + +// Flush sets rw.Flushed to true. 
+func (rw *ResponseRecorder) Flush() { + rw.Flushed = true +} + +// ---------------------------------------------------------------------------- + +func TestRouteMatchers(t *testing.T) { + var scheme, host, path, query, method string + var headers map[string]string + var resultVars map[bool]map[string]string + + router := NewRouter() + router.NewRoute().Host("{var1}.google.com"). + Path("/{var2:[a-z]+}/{var3:[0-9]+}"). + Queries("foo", "bar"). + Methods("GET"). + Schemes("https"). + Headers("x-requested-with", "XMLHttpRequest") + router.NewRoute().Host("www.{var4}.com"). + PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). + Queries("baz", "ding"). + Methods("POST"). + Schemes("http"). + Headers("Content-Type", "application/json") + + reset := func() { + // Everything match. + scheme = "https" + host = "www.google.com" + path = "/product/42" + query = "?foo=bar" + method = "GET" + headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} + resultVars = map[bool]map[string]string{ + true: {"var1": "www", "var2": "product", "var3": "42"}, + false: {}, + } + } + + reset2 := func() { + // Everything match. + scheme = "http" + host = "www.google.com" + path = "/foo/product/42/path/that/is/ignored" + query = "?baz=ding" + method = "POST" + headers = map[string]string{"Content-Type": "application/json"} + resultVars = map[bool]map[string]string{ + true: {"var4": "google", "var5": "product", "var6": "42"}, + false: {}, + } + } + + match := func(shouldMatch bool) { + url := scheme + "://" + host + path + query + request, _ := http.NewRequest(method, url, nil) + for key, value := range headers { + request.Header.Add(key, value) + } + + var routeMatch RouteMatch + matched := router.Match(request, &routeMatch) + if matched != shouldMatch { + // Need better messages. 
:) + if matched { + t.Errorf("Should match.") + } else { + t.Errorf("Should not match.") + } + } + + if matched { + currentRoute := routeMatch.Route + if currentRoute == nil { + t.Errorf("Expected a current route.") + } + vars := routeMatch.Vars + expectedVars := resultVars[shouldMatch] + if len(vars) != len(expectedVars) { + t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) + } + for name, value := range vars { + if expectedVars[name] != value { + t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) + } + } + } + } + + // 1st route -------------------------------------------------------------- + + // Everything match. + reset() + match(true) + + // Scheme doesn't match. + reset() + scheme = "http" + match(false) + + // Host doesn't match. + reset() + host = "www.mygoogle.com" + match(false) + + // Path doesn't match. + reset() + path = "/product/notdigits" + match(false) + + // Query doesn't match. + reset() + query = "?foo=baz" + match(false) + + // Method doesn't match. + reset() + method = "POST" + match(false) + + // Header doesn't match. + reset() + headers = map[string]string{} + match(false) + + // Everything match, again. + reset() + match(true) + + // 2nd route -------------------------------------------------------------- + + // Everything match. + reset2() + match(true) + + // Scheme doesn't match. + reset2() + scheme = "https" + match(false) + + // Host doesn't match. + reset2() + host = "sub.google.com" + match(false) + + // Path doesn't match. + reset2() + path = "/bar/product/42" + match(false) + + // Query doesn't match. + reset2() + query = "?foo=baz" + match(false) + + // Method doesn't match. + reset2() + method = "GET" + match(false) + + // Header doesn't match. + reset2() + headers = map[string]string{} + match(false) + + // Everything match, again. 
+ reset2() + match(true) +} + +type headerMatcherTest struct { + matcher headerMatcher + headers map[string]string + result bool +} + +var headerMatcherTests = []headerMatcherTest{ + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": ""}), + headers: map[string]string{"X-Requested-With": "anything"}, + result: true, + }, + { + matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), + headers: map[string]string{}, + result: false, + }, +} + +type hostMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var hostMatcherTests = []hostMatcherTest{ + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://abc.def.ghi/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), + url: "http://a.b.c/", + vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, + result: false, + }, +} + +type methodMatcherTest struct { + matcher methodMatcher + method string + result bool +} + +var methodMatcherTests = []methodMatcherTest{ + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "GET", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "POST", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "PUT", + result: true, + }, + { + matcher: methodMatcher([]string{"GET", "POST", "PUT"}), + method: "DELETE", + result: false, + }, +} + +type pathMatcherTest struct { + matcher *Route + url string + vars map[string]string + result bool +} + +var pathMatcherTests = []pathMatcherTest{ + { + matcher: 
NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/123/456/789", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: true, + }, + { + matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), + url: "http://localhost:8080/1/2/3", + vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, + result: false, + }, +} + +type schemeMatcherTest struct { + matcher schemeMatcher + url string + result bool +} + +var schemeMatcherTests = []schemeMatcherTest{ + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "http://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"http", "https"}), + url: "https://localhost:8080/", + result: true, + }, + { + matcher: schemeMatcher([]string{"https"}), + url: "http://localhost:8080/", + result: false, + }, + { + matcher: schemeMatcher([]string{"http"}), + url: "https://localhost:8080/", + result: false, + }, +} + +type urlBuildingTest struct { + route *Route + vars []string + url string +} + +var urlBuildingTests = []urlBuildingTest{ + { + route: new(Route).Host("foo.domain.com"), + vars: []string{}, + url: "http://foo.domain.com", + }, + { + route: new(Route).Host("{subdomain}.domain.com"), + vars: []string{"subdomain", "bar"}, + url: "http://bar.domain.com", + }, + { + route: new(Route).Host("foo.domain.com").Path("/articles"), + vars: []string{}, + url: "http://foo.domain.com/articles", + }, + { + route: new(Route).Path("/articles"), + vars: []string{}, + url: "/articles", + }, + { + route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"category", "technology", "id", "42"}, + url: "/articles/technology/42", + }, + { + route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), + vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, + url: 
"http://foo.domain.com/articles/technology/42", + }, +} + +func TestHeaderMatcher(t *testing.T) { + for _, v := range headerMatcherTests { + request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) + for key, value := range v.headers { + request.Header.Add(key, value) + } + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, request.Header) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, request.Header) + } + } + } +} + +func TestHostMatcher(t *testing.T) { + for _, v := range hostMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestMethodMatcher(t *testing.T) { + for _, v := range methodMatcherTests { + request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.method) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.method) + } + } + } +} + +func TestPathMatcher(t *testing.T) { + for _, v := range pathMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := 
v.matcher.Match(request, &routeMatch) + vars := routeMatch.Vars + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + if result { + if len(vars) != len(v.vars) { + t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) + } + for name, value := range vars { + if v.vars[name] != value { + t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) + } + } + } else { + if len(vars) != 0 { + t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) + } + } + } +} + +func TestSchemeMatcher(t *testing.T) { + for _, v := range schemeMatcherTests { + request, _ := http.NewRequest("GET", v.url, nil) + var routeMatch RouteMatch + result := v.matcher.Match(request, &routeMatch) + if result != v.result { + if v.result { + t.Errorf("%#v: should match %v.", v.matcher, v.url) + } else { + t.Errorf("%#v: should not match %v.", v.matcher, v.url) + } + } + } +} + +func TestUrlBuilding(t *testing.T) { + + for _, v := range urlBuildingTests { + u, _ := v.route.URL(v.vars...) 
+ url := u.String() + if url != v.url { + t.Errorf("expected %v, got %v", v.url, url) + /* + reversePath := "" + reverseHost := "" + if v.route.pathTemplate != nil { + reversePath = v.route.pathTemplate.Reverse + } + if v.route.hostTemplate != nil { + reverseHost = v.route.hostTemplate.Reverse + } + + t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) + */ + } + } + + ArticleHandler := func(w http.ResponseWriter, r *http.Request) { + } + + router := NewRouter() + router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") + + url, _ := router.Get("article").URL("category", "technology", "id", "42") + expected := "/articles/technology/42" + if url.String() != expected { + t.Errorf("Expected %v, got %v", expected, url.String()) + } +} + +func TestMatchedRouteName(t *testing.T) { + routeName := "stock" + router := NewRouter() + route := router.NewRoute().Path("/products/").Name(routeName) + + url := "http://www.domain.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + retName := rv.Route.GetName() + if retName != routeName { + t.Errorf("Expected %q, got %q.", routeName, retName) + } +} + +func TestSubRouting(t *testing.T) { + // Example from docs. + router := NewRouter() + subrouter := router.NewRoute().Host("www.domain.com").Subrouter() + route := subrouter.NewRoute().Path("/products/").Name("products") + + url := "http://www.domain.com/products/" + request, _ := http.NewRequest("GET", url, nil) + var rv RouteMatch + ok := router.Match(request, &rv) + + if !ok || rv.Route != route { + t.Errorf("Expected same route, got %+v.", rv.Route) + } + + u, _ := router.Get("products").URL() + builtUrl := u.String() + // Yay, subroute aware of the domain when building! 
+ if builtUrl != url { + t.Errorf("Expected %q, got %q.", url, builtUrl) + } +} + +func TestVariableNames(t *testing.T) { + route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") + if route.err == nil { + t.Errorf("Expected error for duplicated variable names") + } +} + +func TestRedirectSlash(t *testing.T) { + var route *Route + var routeMatch RouteMatch + r := NewRouter() + + r.StrictSlash(false) + route = r.NewRoute() + if route.strictSlash != false { + t.Errorf("Expected false redirectSlash.") + } + + r.StrictSlash(true) + route = r.NewRoute() + if route.strictSlash != true { + t.Errorf("Expected true redirectSlash.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}/") + request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars := routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp := NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { + t.Errorf("Expected redirect header.") + } + + route = new(Route) + route.strictSlash = true + route.Path("/{arg1}/{arg2:[0-9]+}") + request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) + routeMatch = RouteMatch{} + _ = route.Match(request, &routeMatch) + vars = routeMatch.Vars + if vars["arg1"] != "foo" { + t.Errorf("Expected foo.") + } + if vars["arg2"] != "123" { + t.Errorf("Expected 123.") + } + rsp = NewRecorder() + routeMatch.Handler.ServeHTTP(rsp, request) + if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { + t.Errorf("Expected redirect header.") + } +} + +// Test for the new regexp library, still not available in stable Go. 
+func TestNewRegexp(t *testing.T) { + var p *routeRegexp + var matches []string + + tests := map[string]map[string][]string{ + "/{foo:a{2}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": nil, + "/aaaa": nil, + }, + "/{foo:a{2,}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": {"aaaa"}, + }, + "/{foo:a{2,3}}": { + "/a": nil, + "/aa": {"aa"}, + "/aaa": {"aaa"}, + "/aaaa": nil, + }, + "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abcd": nil, + "/abc/ab": {"abc", "ab"}, + "/abc/abc": nil, + "/abcd/ab": nil, + }, + `/{foo:\w{3,}}/{bar:\d{2,}}`: { + "/a": nil, + "/ab": nil, + "/abc": nil, + "/abc/1": nil, + "/abc/12": {"abc", "12"}, + "/abcd/12": {"abcd", "12"}, + "/abcd/123": {"abcd", "123"}, + }, + } + + for pattern, paths := range tests { + p, _ = newRouteRegexp(pattern, false, false, false, false) + for path, result := range paths { + matches = p.regexp.FindStringSubmatch(path) + if result == nil { + if matches != nil { + t.Errorf("%v should not match %v.", pattern, path) + } + } else { + if len(matches) != len(result)+1 { + t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) + } else { + for k, v := range result { + if matches[k+1] != v { + t.Errorf("Expected %v, got %v.", v, matches[k+1]) + } + } + } + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go b/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go new file mode 100644 index 000000000..a6305483d --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go @@ -0,0 +1,276 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// newRouteRegexp parses a route template and returns a routeRegexp, +// used to match a host, a path or a query string. 
+// +// It will extract named variables, assemble a regexp to be matched, create +// a "reverse" template to build URLs and compile regexps to validate variable +// values used in URL building. +// +// Previously we accepted only Python-like identifiers for variable +// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that +// name and pattern can't be empty, and names can't contain a colon. +func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { + // Check if it is well-formed. + idxs, errBraces := braceIndices(tpl) + if errBraces != nil { + return nil, errBraces + } + // Backup the original. + template := tpl + // Now let's parse it. + defaultPattern := "[^/]+" + if matchQuery { + defaultPattern = "[^?&]+" + matchPrefix = true + } else if matchHost { + defaultPattern = "[^.]+" + matchPrefix = false + } + // Only match strict slash if not matching + if matchPrefix || matchHost || matchQuery { + strictSlash = false + } + // Set a flag for strictSlash. + endSlash := false + if strictSlash && strings.HasSuffix(tpl, "/") { + tpl = tpl[:len(tpl)-1] + endSlash = true + } + varsN := make([]string, len(idxs)/2) + varsR := make([]*regexp.Regexp, len(idxs)/2) + pattern := bytes.NewBufferString("") + if !matchQuery { + pattern.WriteByte('^') + } + reverse := bytes.NewBufferString("") + var end int + var err error + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := tpl[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) + name := parts[0] + patt := defaultPattern + if len(parts) == 2 { + patt = parts[1] + } + // Name or pattern can't be empty. + if name == "" || patt == "" { + return nil, fmt.Errorf("mux: missing name or pattern in %q", + tpl[idxs[i]:end]) + } + // Build the regexp pattern. + fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) + // Build the reverse template. 
+ fmt.Fprintf(reverse, "%s%%s", raw) + // Append variable name and compiled pattern. + varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + if err != nil { + return nil, err + } + } + // Add the remaining. + raw := tpl[end:] + pattern.WriteString(regexp.QuoteMeta(raw)) + if strictSlash { + pattern.WriteString("[/]?") + } + if !matchPrefix { + pattern.WriteByte('$') + } + reverse.WriteString(raw) + if endSlash { + reverse.WriteByte('/') + } + // Compile full regexp. + reg, errCompile := regexp.Compile(pattern.String()) + if errCompile != nil { + return nil, errCompile + } + // Done! + return &routeRegexp{ + template: template, + matchHost: matchHost, + matchQuery: matchQuery, + strictSlash: strictSlash, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + }, nil +} + +// routeRegexp stores a regexp to match a host or path and information to +// collect and validate route variables. +type routeRegexp struct { + // The unmodified template. + template string + // True for host match, false for path or query string match. + matchHost bool + // True for query string match, false for path and host match. + matchQuery bool + // The strictSlash value defined on the route, but disabled if PathPrefix was used. + strictSlash bool + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp +} + +// Match matches the regexp against the URL host or path. +func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if !r.matchHost { + if r.matchQuery { + return r.regexp.MatchString(req.URL.RawQuery) + } else { + return r.regexp.MatchString(req.URL.Path) + } + } + return r.regexp.MatchString(getHost(req)) +} + +// url builds a URL part using the given values. +func (r *routeRegexp) url(pairs ...string) (string, error) { + values, err := mapFromPairs(pairs...) 
+ if err != nil { + return "", err + } + urlValues := make([]interface{}, len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. +func braceIndices(s string) ([]int, error) { + var level, idx int + idxs := make([]int, 0) + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. 
+ if v.host != nil { + hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) + if hostVars != nil { + for k, v := range v.host.varsN { + m.Vars[v] = hostVars[k+1] + } + } + } + // Store path variables. + if v.path != nil { + pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) + if pathVars != nil { + for k, v := range v.path.varsN { + m.Vars[v] = pathVars[k+1] + } + // Check if we should redirect. + if v.path.strictSlash { + p1 := strings.HasSuffix(req.URL.Path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), 301) + } + } + } + } + // Store query string variables. + rawQuery := req.URL.RawQuery + for _, q := range v.queries { + queryVars := q.regexp.FindStringSubmatch(rawQuery) + if queryVars != nil { + for k, v := range q.varsN { + m.Vars[v] = queryVars[k+1] + } + } + } +} + +// getHost tries its best to return the request host. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + host := r.Host + // Slice off any port information. + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + return host + +} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/route.go b/Godeps/_workspace/src/github.com/gorilla/mux/route.go new file mode 100644 index 000000000..c310e66bc --- /dev/null +++ b/Godeps/_workspace/src/github.com/gorilla/mux/route.go @@ -0,0 +1,524 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Parent where the route was registered (a Router). + parent parentRoute + // Request handler for the route. 
+ handler http.Handler + // List of matchers. + matchers []matcher + // Manager for the variables from host and path. + regexp *routeRegexpGroup + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + // If true, this route never matches: it is only used to build URLs. + buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + return false + } + } + // Yay, we have a match. Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + // Set variables. + if r.regexp != nil { + r.regexp.setMatch(req, match, r) + } + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. +func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. 
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. +func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs. +// If the name was registered already it will be overwritten. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.getNamedRoutes()[name] = r + } + return r +} + +// GetName returns the name for the route, if any. +func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. 
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { + if r.err != nil { + return r.err + } + r.regexp = r.getRegexpGroup() + if !matchHost && !matchQuery { + if len(tpl) == 0 || tpl[0] != '/' { + return fmt.Errorf("mux: path must start with a slash, got %q", tpl) + } + if r.regexp.path != nil { + tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl + } + } + rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) + if err != nil { + return err + } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } + if matchHost { + if r.regexp.path != nil { + if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { + return err + } + } + r.regexp.host = rr + } else { + if r.regexp.host != nil { + if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { + return err + } + } + if matchQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } + } + r.addMatcher(rr) + return nil +} + +// Headers -------------------------------------------------------------------- + +// headerMatcher matches the request against header values. +type headerMatcher map[string]string + +func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMap(m, r.Header, true) +} + +// Headers adds a matcher for request header values. +// It accepts a sequence of key/value pairs to be matched. For example: +// +// r := mux.NewRouter() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both request header values match. +// +// It the value is an empty string, it will match any value if the key is set. +func (r *Route) Headers(pairs ...string) *Route { + if r.err == nil { + var headers map[string]string + headers, r.err = mapFromPairs(pairs...) 
+ return r.addMatcher(headerMatcher(headers)) + } + return r +} + +// Host ----------------------------------------------------------------------- + +// Host adds a matcher for the URL host. +// It accepts a template with zero or more URL variables enclosed by {}. +// Variables can define an optional regexp pattern to me matched: +// +// - {name} matches anything until the next dot. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Host("www.domain.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Host(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, true, false, false) + return r +} + +// MatcherFunc ---------------------------------------------------------------- + +// MatcherFunc is the function signature used by custom matchers. +type MatcherFunc func(*http.Request, *RouteMatch) bool + +func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { + return m(r, match) +} + +// MatcherFunc adds a custom function to be used as request matcher. +func (r *Route) MatcherFunc(f MatcherFunc) *Route { + return r.addMatcher(f) +} + +// Methods -------------------------------------------------------------------- + +// methodMatcher matches the request against HTTP methods. +type methodMatcher []string + +func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.Method) +} + +// Methods adds a matcher for HTTP methods. +// It accepts a sequence of one or more methods to be matched, e.g.: +// "GET", "POST", "PUT". 
+func (r *Route) Methods(methods ...string) *Route { + for k, v := range methods { + methods[k] = strings.ToUpper(v) + } + return r.addMatcher(methodMatcher(methods)) +} + +// Path ----------------------------------------------------------------------- + +// Path adds a matcher for the URL path. +// It accepts a template with zero or more URL variables enclosed by {}. The +// template must start with a "/". +// Variables can define an optional regexp pattern to me matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Path(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, false, false) + return r +} + +// PathPrefix ----------------------------------------------------------------- + +// PathPrefix adds a matcher for the URL path prefix. This matches if the given +// template is a prefix of the full URL path. See Route.Path() for details on +// the tpl argument. +// +// Note that it does not treat slashes specially ("/foobar/" will be matched by +// the prefix "/foo") so you may want to use a trailing slash here. +// +// Also note that the setting of Router.StrictSlash() has no effect on routes +// with a PathPrefix matcher. +func (r *Route) PathPrefix(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, false, true, false) + return r +} + +// Query ---------------------------------------------------------------------- + +// Queries adds a matcher for URL query values. +// It accepts a sequence of key/value pairs. Values may define variables. 
+// For example: +// +// r := mux.NewRouter() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// +// The above route will only match if the URL contains the defined queries +// values, e.g.: ?foo=bar&id=42. +// +// If the value is an empty string, it will match any value if the key is set. +// +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +func (r *Route) Queries(pairs ...string) *Route { + length := len(pairs) + if length%2 != 0 { + r.err = fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + return nil + } + for i := 0; i < length; i += 2 { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, true, true); r.err != nil { + return r + } + } + + return r +} + +// Schemes -------------------------------------------------------------------- + +// schemeMatcher matches the request against URL schemes. +type schemeMatcher []string + +func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.URL.Scheme) +} + +// Schemes adds a matcher for URL schemes. +// It accepts a sequence of schemes to be matched, e.g.: "http", "https". +func (r *Route) Schemes(schemes ...string) *Route { + for k, v := range schemes { + schemes[k] = strings.ToLower(v) + } + return r.addMatcher(schemeMatcher(schemes)) +} + +// Subrouter ------------------------------------------------------------------ + +// Subrouter creates a subrouter for the route. +// +// It will test the inner routes only if the parent route matched. For example: +// +// r := mux.NewRouter() +// s := r.Host("www.domain.com").Subrouter() +// s.HandleFunc("/products/", ProductsHandler) +// s.HandleFunc("/products/{key}", ProductHandler) +// s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +// +// Here, the routes registered in the subrouter won't be tested if the host +// doesn't match. 
+func (r *Route) Subrouter() *Router { + router := &Router{parent: r, strictSlash: r.strictSlash} + r.addMatcher(router) + return router +} + +// ---------------------------------------------------------------------------- +// URL building +// ---------------------------------------------------------------------------- + +// URL builds a URL for the route. +// +// It accepts a sequence of key/value pairs for the route variables. For +// example, given this route: +// +// r := mux.NewRouter() +// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// ...a URL for it can be built using: +// +// url, err := r.Get("article").URL("category", "technology", "id", "42") +// +// ...which will return an url.URL with the following path: +// +// "/articles/technology/42" +// +// This also works for host variables: +// +// r := mux.NewRouter() +// r.Host("{subdomain}.domain.com"). +// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). +// Name("article") +// +// // url.String() will be "http://news.domain.com/articles/technology/42" +// url, err := r.Get("article").URL("subdomain", "news", +// "category", "technology", +// "id", "42") +// +// All variables defined in the route are required, and their values must +// conform to the corresponding patterns. +func (r *Route) URL(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil { + return nil, errors.New("mux: route doesn't have a host or path") + } + var scheme, host, path string + var err error + if r.regexp.host != nil { + // Set a default scheme. + scheme = "http" + if host, err = r.regexp.host.url(pairs...); err != nil { + return nil, err + } + } + if r.regexp.path != nil { + if path, err = r.regexp.path.url(pairs...); err != nil { + return nil, err + } + } + return &url.URL{ + Scheme: scheme, + Host: host, + Path: path, + }, nil +} + +// URLHost builds the host part of the URL for a route. See Route.URL(). 
+// +// The route must have a host defined. +func (r *Route) URLHost(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.host == nil { + return nil, errors.New("mux: route doesn't have a host") + } + host, err := r.regexp.host.url(pairs...) + if err != nil { + return nil, err + } + return &url.URL{ + Scheme: "http", + Host: host, + }, nil +} + +// URLPath builds the path part of the URL for a route. See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp == nil || r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + path, err := r.regexp.path.url(pairs...) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// ---------------------------------------------------------------------------- +// parentRoute +// ---------------------------------------------------------------------------- + +// parentRoute allows routes to know about parent host and path definitions. +type parentRoute interface { + getNamedRoutes() map[string]*Route + getRegexpGroup() *routeRegexpGroup +} + +// getNamedRoutes returns the map where named routes are registered. +func (r *Route) getNamedRoutes() map[string]*Route { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + return r.parent.getNamedRoutes() +} + +// getRegexpGroup returns regexp definitions from this route. +func (r *Route) getRegexpGroup() *routeRegexpGroup { + if r.regexp == nil { + if r.parent == nil { + // During tests router is not always set. + r.parent = NewRouter() + } + regexp := r.parent.getRegexpGroup() + if regexp == nil { + r.regexp = new(routeRegexpGroup) + } else { + // Copy. 
+ r.regexp = &routeRegexpGroup{ + host: regexp.host, + path: regexp.path, + queries: regexp.queries, + } + } + } + return r.regexp +} diff --git a/Godeps/_workspace/src/github.com/mqu/openldap/LICENCE.txt b/Godeps/_workspace/src/github.com/mqu/openldap/LICENCE.txt new file mode 100644 index 000000000..6d6a608da --- /dev/null +++ b/Godeps/_workspace/src/github.com/mqu/openldap/LICENCE.txt @@ -0,0 +1,24 @@ +Copyright (C) 2012 - Marc Quinton. + +Use of this source code is governed by the MIT Licence : + http://opensource.org/licenses/mit-license.php + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/Godeps/_workspace/src/github.com/mqu/openldap/README.md b/Godeps/_workspace/src/github.com/mqu/openldap/README.md new file mode 100644 index 000000000..b32af5eaa --- /dev/null +++ b/Godeps/_workspace/src/github.com/mqu/openldap/README.md @@ -0,0 +1,82 @@ +OpenLDAP +==== + +this is Openldap binding in GO language. I don't work any more with golang, so, please fork this project. 
+ + +Installation : +----- + +Installation is easy and very quick, as you can see : + + # install openldap library and devel packages + sudo apt-get install libldap libldap2-dev # debian/ubuntu. + sudo urpmi openldap-devel # fedora, RH, ... + + # install go + go get github.com/mqu/openldap + + # verify you've got it : + (cd $GOPATH ; go list ./...) | grep openldap + +Usage +---- + +- Look a this [exemple](https://github.com/mqu/openldap/blob/master/_examples/test-openldap.go). +- a more complex example making [LDAP search](https://github.com/mqu/openldap/blob/master/_examples/ldapsearch.go) that mimics ldapsearch command, printing out result on console. + +Doc: +--- +- run _go doc openldap_, +- will come soon, complete documentation in this [Wiki](https://github.com/mqu/openldap/wiki). +- look at [_examples/](https://github.com/mqu/openldap/blob/master/_examples/)*.go to see how to use this library. + +Todo : +---- + + - thread-safe test, + - complete LDAP:GetOption() and LDAP:SetOption() method : now, they work only for integer values, + - avoid using deprecated function (see LDAP_DEPRECATED flag and "// DEPRECATED" comments in *.go sources), + - write some tests, + - verify memory leaks (Valgrind), + - support LDIF format (in, out), + - add support for external commands (ldapadd, ldapdelete) + - create an LDAP CLI (command line interface), like lftp, with commands like shell, + - a nice GUI with GTK, + - proxy, server, + - what else ? + + +Links : +---- + + - goc : http://code.google.com/p/go-wiki/wiki/cgo (how to bind native libraries to GO) + - Openldap library (and server) : http://www.openldap.org/ + - Pure Go [LDAP](https://github.com/mmitton/ldap) library, with [ASN1](https://github.com/mmitton/asn1-ber) support. + +Licence : +---- + +Copyright (C) 2012 - Marc Quinton. 
+ +Use of this source code is governed by the MIT Licence : + http://opensource.org/licenses/mit-license.php + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/mqu/openldap/add-modify-delete.go b/Godeps/_workspace/src/github.com/mqu/openldap/add-modify-delete.go new file mode 100644 index 000000000..78675343d --- /dev/null +++ b/Godeps/_workspace/src/github.com/mqu/openldap/add-modify-delete.go @@ -0,0 +1,230 @@ +/* + * + * Copyright (C) 2012 - Marc Quinton. 
+ * + * Use of this source code is governed by the MIT Licence : + * http://opensource.org/licenses/mit-license.php + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +package openldap + +/* #include +#include +#define LDAP_DEPRECATED 1 +#include + +// goc can not use union on structs ; so create a new type with same attributes and size +// fixme : support binary mods (mod_bvalues) +typedef struct ldapmod_str { + int mod_op; + char *mod_type; + char **mod_vals; +} LDAPModStr; + + +int _ldap_add(LDAP *ld, char* dn, LDAPModStr **attrs){ + + //API: int ldap_add_ext_s(LDAP *ld, char *dn, LDAPMod **attrs, LDAPControl *sctrls, LDAPControl *cctrls ); + // nota : cast (LDAPMod **) is possible because structs have same size + return ldap_add_ext_s(ld, dn, (LDAPMod **)attrs, NULL, NULL); +} + +int _ldap_modify(LDAP *ld, char* dn, LDAPModStr **mods ){ + + // nota : cast (LDAPMod **) is possible because structs have same size + return ldap_modify_ext_s( ld, dn, (LDAPMod **)mods, NULL, NULL); +} + +int _ldap_rename (LDAP *ld, char *dn, char *newrdn, char *newSuperior, int deleteoldrdn){ + //API: int ldap_rename_s( ld, dn, newrdn, newparent, deleteoldrdn, sctrls[], cctrls[]) + + return ldap_rename_s(ld, dn, newrdn, newSuperior, deleteoldrdn, NULL, NULL); +} + +void _ldap_mods_free (LDAPModStr **mods, int freemods){ + //API: void ldap_mods_free(LDAPMod **mods, int freemods); + return ldap_mods_free((LDAPMod **)mods, freemods); +} + + +*/ +// #cgo CFLAGS: -DLDAP_DEPRECATED=1 +// #cgo linux CFLAGS: -DLINUX=1 +// #cgo LDFLAGS: -lldap -llber +import "C" + +import ( + "errors" + "unsafe" + "fmt" +) + + +func (self *Ldap) doModify(dn string, attrs map[string][]string, changeType int, full_add bool) (int){ + + _dn := C.CString(dn) + defer C.free(unsafe.Pointer(_dn)) + + mods := make([]*C.LDAPModStr, len(attrs)+1) + // mods[len] = nil by default + + count:= 0 + for key, values := range attrs { + + // transform []string to C.char** null terminated array (attributes argument) + _values := make([]*C.char, len(values)+1) // default set to nil (NULL in C) + + for i, value := range values { + _values[i] = C.CString(value) + defer 
C.free(unsafe.Pointer(_values[i])) + } + + var mod C.LDAPModStr + + mod.mod_op = C.int(changeType) + mod.mod_type = C.CString(key) + mod.mod_vals = &_values[0] + + defer C.free(unsafe.Pointer(mod.mod_type)) + + mods[count] = &mod + + count++ + } + + var rv int + + if full_add { + // API: int ldap_add (LDAP *ld, LDAP_CONST char *dn, LDAPMod **mods ) + rv = int(C._ldap_add(self.conn, _dn, &mods[0])) + } else{ + // API: int ldap_modify (LDAP *ld, LDAP_CONST char *dn, LDAPMod **mods ) + rv = int(C._ldap_modify(self.conn, _dn, &mods[0])) + } + + // FIXME: need to call ldap_mods_free(&mods) some where. + // C._ldap_mods_free(&mods[0], 1) // not OK. + return rv +} + +func (self *Ldap) Modify(dn string, attrs map[string][]string) (error){ + + changeType := C.LDAP_MOD_REPLACE + full_add := false + rv := self.doModify(dn, attrs, changeType, full_add) + + if rv != LDAP_OPT_SUCCESS { + return errors.New(fmt.Sprintf("LDAP::Modify() error : %d (%s)", rv, ErrorToString(rv))) + } + return nil +} + +func (self *Ldap) ModifyDel(dn string, attrs map[string][]string) (error){ + + changeType := C.LDAP_MOD_DELETE + full_add := false + rv := self.doModify(dn, attrs, changeType, full_add) + + if rv != LDAP_OPT_SUCCESS { + return errors.New(fmt.Sprintf("LDAP::ModifyDel() error : %d (%s)", rv, ErrorToString(rv))) + } + return nil +} + +func (self *Ldap) ModifyAdd(dn string, attrs map[string][]string) (error){ + + changeType := C.LDAP_MOD_ADD + full_add := false + rv := self.doModify(dn, attrs, changeType, full_add) + if rv != LDAP_OPT_SUCCESS { + return errors.New(fmt.Sprintf("LDAP::ModifyAdd() error : %d (%s)", rv, ErrorToString(rv))) + } + return nil +} + +func (self *Ldap) Add(dn string, attrs map[string][]string) (error){ + + changeType := C.LDAP_MOD_ADD + full_add := true + rv := self.doModify(dn, attrs, changeType, full_add) + if rv != LDAP_OPT_SUCCESS { + return errors.New(fmt.Sprintf("LDAP::Add() error : %d (%s)", rv, ErrorToString(rv))) + } + return nil +} + +func (self *Ldap) 
Delete(dn string) (error){ + + _dn := C.CString(dn) + defer C.free(unsafe.Pointer(_dn)) + + // API: int ldap_delete (LDAP *ld, LDAP_CONST char *dn) + rv := C.ldap_delete_s(self.conn, _dn) + + if rv != LDAP_OPT_SUCCESS { + return errors.New(fmt.Sprintf("LDAP::Delete() error : %d (%s)", rv, ErrorToString(int(rv)))) + } + + return nil +} + +// Rename() to rename LDAP entries. +// +// These routines are used to perform a LDAP rename operation. The function changes the leaf compo- +// nent of an entry's distinguished name and optionally moves the entry to a new parent container. +// The ldap_rename_s performs a rename operation synchronously. The method takes dn, which points to +// the distinguished name of the entry whose attribute is being compared, newparent,the distinguished +// name of the entry's new parent. If this parameter is NULL, only the RDN is changed. The root DN is +// specified by passing a zero length string, "". deleteoldrdn specifies whether the old RDN should +// be retained or deleted. Zero indicates that the old RDN should be retained. If you choose this +// option, the attribute will contain both names (the old and the new). Non-zero indicates that the +// old RDN should be deleted. serverctrls points to an array of LDAPControl structures that list the +// client controls to use with this extended operation. Use NULL to specify no client controls. +// clientctrls points to an array of LDAPControl structures that list the client controls to use with +// the search. +// FIXME: support NULL and "" values for newSuperior parameter. 
+// +func (self *Ldap) Rename(dn string, newrdn string, newSuperior string, deleteOld bool) (error){ + + _dn := C.CString(dn) + defer C.free(unsafe.Pointer(_dn)) + + _newrdn := C.CString(newrdn) + defer C.free(unsafe.Pointer(_newrdn)) + + _newSuperior := C.CString(newSuperior) + defer C.free(unsafe.Pointer(_newSuperior)) + + var _delete C.int = 0 + if deleteOld { + _delete = 1 + } + + // API: int ldap_rename (LDAP *ld, char *newrdn, char *newSuperior, int deleteoldrdn) + rv := C._ldap_rename(self.conn, _dn, _newrdn, _newSuperior, _delete) + + if rv != LDAP_OPT_SUCCESS { + return errors.New(fmt.Sprintf("LDAP::Rename() error : %d (%s)", rv, ErrorToString(int(rv)))) + } + + return nil +} diff --git a/Godeps/_workspace/src/github.com/mqu/openldap/defines.go b/Godeps/_workspace/src/github.com/mqu/openldap/defines.go new file mode 100644 index 000000000..576f96a11 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mqu/openldap/defines.go @@ -0,0 +1,350 @@ +/* + * + * Copyright (C) 2012 - Marc Quinton. + * + * Use of this source code is governed by the MIT Licence : + * http://opensource.org/licenses/mit-license.php + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +package openldap + +const ( + // first version for this GO API binding + OPENLDAP_API_BINDING_VERSION = "0.2" +) + +const ( + LDAP_VERSION1 = 1 + LDAP_VERSION2 = 2 + LDAP_VERSION3 = 3 +) + +const ( + LDAP_VERSION_MIN = LDAP_VERSION2 + LDAP_VERSION = LDAP_VERSION2 + LDAP_VERSION_MAX = LDAP_VERSION3 +) + +const ( + LDAP_API_VERSION = 3001 + LDAP_VENDOR_NAME = "OpenLDAP" +) + +const ( + LDAP_PORT = 389 + LDAPS_PORT = 636 +) + +const ( + LDAP_OPT_SUCCESS = 0 + LDAP_OPT_ERROR = -1 +) + +// search scopes +const ( + LDAP_SCOPE_BASE = 0x0000 + LDAP_SCOPE_ONELEVEL = 0x0001 + LDAP_SCOPE_SUBTREE = 0x0002 + LDAP_SCOPE_SUBORDINATE = 0x0003 // OpenLDAP extension + LDAP_SCOPE_DEFAULT = -1 // OpenLDAP extension +) + +const ( + LDAP_SCOPE_BASEOBJECT = LDAP_SCOPE_BASE + LDAP_SCOPE_ONE = LDAP_SCOPE_ONELEVEL + LDAP_SCOPE_SUB = LDAP_SCOPE_SUBTREE + LDAP_SCOPE_CHILDREN = LDAP_SCOPE_SUBORDINATE +) + +const ( + LDAP_RES_ANY = -1 + LDAP_RES_UNSOLICITED = 0 +) + +//const ( +//LDAP_API_FEATURE_THREAD_SAFE = 1 +//LDAP_API_FEATURE_SESSION_THREAD_SAFE = 1 +//LDAP_API_FEATURE_OPERATION_THREAD_SAFE = 1 +//) + +const ( + LDAP_SUCCESS = 0x00 + LDAP_OPERATIONS_ERROR = 0x01 + LDAP_PROTOCOL_ERROR = 0x02 + LDAP_TIMELIMIT_EXCEEDED = 0x03 + LDAP_SIZELIMIT_EXCEEDED = 0x04 + LDAP_COMPARE_FALSE = 0x05 + LDAP_COMPARE_TRUE = 0x06 + LDAP_AUTH_METHOD_NOT_SUPPORTED = 0x07 + LDAP_STRONG_AUTH_REQUIRED = 0x08 + // Not used in LDAPv3 + LDAP_PARTIAL_RESULTS = 0x09 + + // Next 5 new in LDAPv3 + LDAP_REFERRAL = 0x0a + LDAP_ADMINLIMIT_EXCEEDED = 0x0b + LDAP_UNAVAILABLE_CRITICAL_EXTENSION = 0x0c + LDAP_CONFIDENTIALITY_REQUIRED = 0x0d + LDAP_SASL_BIND_INPROGRESS = 0x0e + + LDAP_NO_SUCH_ATTRIBUTE = 0x10 + LDAP_UNDEFINED_TYPE = 0x11 + LDAP_INAPPROPRIATE_MATCHING = 0x12 + 
LDAP_CONSTRAINT_VIOLATION = 0x13 + LDAP_TYPE_OR_VALUE_EXISTS = 0x14 + LDAP_INVALID_SYNTAX = 0x15 + + LDAP_NO_SUCH_OBJECT = 0x20 /* 32 */ + LDAP_ALIAS_PROBLEM = 0x21 + LDAP_INVALID_DN_SYNTAX = 0x22 + // Next two not used in LDAPv3 + LDAP_IS_LEAF = 0x23 + LDAP_ALIAS_DEREF_PROBLEM = 0x24 + + LDAP_INAPPROPRIATE_AUTH = 0x30 /* 48 */ + LDAP_INVALID_CREDENTIALS = 0x31 /* 49 */ + LDAP_INSUFFICIENT_ACCESS = 0x32 + LDAP_BUSY = 0x33 + LDAP_UNAVAILABLE = 0x34 + LDAP_UNWILLING_TO_PERFORM = 0x35 + LDAP_LOOP_DETECT = 0x36 + + LDAP_SORT_CONTROL_MISSING = 0x3C /* 60 */ + LDAP_INDEX_RANGE_ERROR = 0x3D /* 61 */ + + LDAP_NAMING_VIOLATION = 0x40 + LDAP_OBJECT_CLASS_VIOLATION = 0x41 + LDAP_NOT_ALLOWED_ON_NONLEAF = 0x42 + LDAP_NOT_ALLOWED_ON_RDN = 0x43 + LDAP_ALREADY_EXISTS = 0x44 /* 68 */ + LDAP_NO_OBJECT_CLASS_MODS = 0x45 + LDAP_RESULTS_TOO_LARGE = 0x46 + // Next two for LDAPv3 + LDAP_AFFECTS_MULTIPLE_DSAS = 0x47 + LDAP_OTHER = 0x50 + + // Used by some APIs + LDAP_SERVER_DOWN = 0x51 + LDAP_LOCAL_ERROR = 0x52 + LDAP_ENCODING_ERROR = 0x53 + LDAP_DECODING_ERROR = 0x54 + LDAP_TIMEOUT = 0x55 + LDAP_AUTH_UNKNOWN = 0x56 + LDAP_FILTER_ERROR = 0x57 /* 87 */ + LDAP_USER_CANCELLED = 0x58 + LDAP_PARAM_ERROR = 0x59 + LDAP_NO_MEMORY = 0x5a + + // Preliminary LDAPv3 codes + LDAP_CONNECT_ERROR = 0x5b + LDAP_NOT_SUPPORTED = 0x5c + LDAP_CONTROL_NOT_FOUND = 0x5d + LDAP_NO_RESULTS_RETURNED = 0x5e + LDAP_MORE_RESULTS_TO_RETURN = 0x5f + LDAP_CLIENT_LOOP = 0x60 + LDAP_REFERRAL_LIMIT_EXCEEDED = 0x61 +) + +const ( + LDAP_DEREF_NEVER = 0 + LDAP_DEREF_SEARCHING = 1 + LDAP_DEREF_FINDING = 2 + LDAP_DEREF_ALWAYS = 3 +) + +const ( + LDAP_NO_LIMIT = 0 +) + +const ( + LDAP_MSG_ONE = 0 + LDAP_MSG_ALL = 1 + LDAP_MSG_RECEIVED = 2 +) + +// LDAP_OPTions +// 0x0000 - 0x0fff reserved for api options +// 0x1000 - 0x3fff reserved for api extended options +// 0x4000 - 0x7fff reserved for private and experimental options + +const ( + LDAP_OPT_API_INFO = 0x0000 + LDAP_OPT_DESC = 0x0001 // historic + LDAP_OPT_DEREF = 0x0002 + 
LDAP_OPT_SIZELIMIT = 0x0003 + LDAP_OPT_TIMELIMIT = 0x0004 + // 0x05 - 0x07 not defined + + LDAP_OPT_REFERRALS = 0x0008 + LDAP_OPT_RESTART = 0x0009 + // 0x0a - 0x10 not defined + + LDAP_OPT_PROTOCOL_VERSION = 0x0011 + LDAP_OPT_SERVER_CONTROLS = 0x0012 + LDAP_OPT_CLIENT_CONTROLS = 0x0013 + // 0x14 not defined + + LDAP_OPT_API_FEATURE_INFO = 0x0015 + // 0x16 - 0x2f not defined + + LDAP_OPT_HOST_NAME = 0x0030 + LDAP_OPT_RESULT_CODE = 0x0031 + LDAP_OPT_ERROR_NUMBER = LDAP_OPT_RESULT_CODE + LDAP_OPT_DIAGNOSTIC_MESSAGE = 0x0032 + LDAP_OPT_ERROR_STRING = LDAP_OPT_DIAGNOSTIC_MESSAGE + LDAP_OPT_MATCHED_DN = 0x0033 + // 0x0034 - 0x3fff not defined + + // 0x0091 used by Microsoft for LDAP_OPT_AUTO_RECONNECT + + LDAP_OPT_SSPI_FLAGS = 0x0092 + // 0x0093 used by Microsoft for LDAP_OPT_SSL_INFO + + // 0x0094 used by Microsoft for LDAP_OPT_REF_DEREF_CONN_PER_MSG + + LDAP_OPT_SIGN = 0x0095 + LDAP_OPT_ENCRYPT = 0x0096 + LDAP_OPT_SASL_METHOD = 0x0097 + // 0x0098 used by Microsoft for LDAP_OPT_AREC_EXCLUSIVE + + LDAP_OPT_SECURITY_CONTEXT = 0x0099 + +// 0x009A used by Microsoft for LDAP_OPT_ROOTDSE_CACHE + +// 0x009B - 0x3fff not defined + +) + +// API Extensions + +const LDAP_OPT_API_EXTENSION_BASE = 0x4000 // API extensions + +// private and experimental options + +// OpenLDAP specific options + +const ( + LDAP_OPT_DEBUG_LEVEL = 0x5001 // debug level + LDAP_OPT_TIMEOUT = 0x5002 // default timeout + LDAP_OPT_REFHOPLIMIT = 0x5003 // ref hop limit + LDAP_OPT_NETWORK_TIMEOUT = 0x5005 // socket level timeout + LDAP_OPT_URI = 0x5006 + LDAP_OPT_REFERRAL_URLS = 0x5007 // Referral URLs + LDAP_OPT_SOCKBUF = 0x5008 // sockbuf + LDAP_OPT_DEFBASE = 0x5009 // searchbase + LDAP_OPT_CONNECT_ASYNC = 0x5010 // create connections asynchronously + LDAP_OPT_CONNECT_CB = 0x5011 // connection callbacks + LDAP_OPT_SESSION_REFCNT = 0x5012 // session reference count +) + +// OpenLDAP TLS options + +const ( + LDAP_OPT_X_TLS = 0x6000 + LDAP_OPT_X_TLS_CTX = 0x6001 // OpenSSL CTX* + LDAP_OPT_X_TLS_CACERTFILE = 
0x6002 + LDAP_OPT_X_TLS_CACERTDIR = 0x6003 + LDAP_OPT_X_TLS_CERTFILE = 0x6004 + LDAP_OPT_X_TLS_KEYFILE = 0x6005 + LDAP_OPT_X_TLS_REQUIRE_CERT = 0x6006 + LDAP_OPT_X_TLS_PROTOCOL_MIN = 0x6007 + LDAP_OPT_X_TLS_CIPHER_SUITE = 0x6008 + LDAP_OPT_X_TLS_RANDOM_FILE = 0x6009 + LDAP_OPT_X_TLS_SSL_CTX = 0x600a // OpenSSL SSL* + LDAP_OPT_X_TLS_CRLCHECK = 0x600b + LDAP_OPT_X_TLS_CONNECT_CB = 0x600c + LDAP_OPT_X_TLS_CONNECT_ARG = 0x600d + LDAP_OPT_X_TLS_DHFILE = 0x600e + LDAP_OPT_X_TLS_NEWCTX = 0x600f + LDAP_OPT_X_TLS_CRLFILE = 0x6010 // GNUtls only + LDAP_OPT_X_TLS_PACKAGE = 0x6011 +) + +const ( + LDAP_OPT_X_TLS_NEVER = 0 + LDAP_OPT_X_TLS_HARD = 1 + LDAP_OPT_X_TLS_DEMAND = 2 + LDAP_OPT_X_TLS_ALLOW = 3 + LDAP_OPT_X_TLS_TRY = 4 +) + +const ( + LDAP_OPT_X_TLS_CRL_NONE = 0 + LDAP_OPT_X_TLS_CRL_PEER = 1 + LDAP_OPT_X_TLS_CRL_ALL = 2 +) + +// for LDAP_OPT_X_TLS_PROTOCOL_MIN + +//!!! const ( +//!!! LDAP_OPT_X_TLS_PROTOCOL(maj,min) = (((maj) << 8) + (min)) +//!!! LDAP_OPT_X_TLS_PROTOCOL_SSL2 = (2 << 8) +//!!! LDAP_OPT_X_TLS_PROTOCOL_SSL3 = (3 << 8) +//!!! LDAP_OPT_X_TLS_PROTOCOL_TLS1_0 = ((3 << 8) + 1) +//!!! LDAP_OPT_X_TLS_PROTOCOL_TLS1_1 = ((3 << 8) + 2) +//!!! LDAP_OPT_X_TLS_PROTOCOL_TLS1_2 = ((3 << 8) + 3) +//!!! 
) + +// OpenLDAP SASL options + +const ( + LDAP_OPT_X_SASL_MECH = 0x6100 + LDAP_OPT_X_SASL_REALM = 0x6101 + LDAP_OPT_X_SASL_AUTHCID = 0x6102 + LDAP_OPT_X_SASL_AUTHZID = 0x6103 + LDAP_OPT_X_SASL_SSF = 0x6104 // read-only + LDAP_OPT_X_SASL_SSF_EXTERNAL = 0x6105 // write-only + LDAP_OPT_X_SASL_SECPROPS = 0x6106 // write-only + LDAP_OPT_X_SASL_SSF_MIN = 0x6107 + LDAP_OPT_X_SASL_SSF_MAX = 0x6108 + LDAP_OPT_X_SASL_MAXBUFSIZE = 0x6109 + LDAP_OPT_X_SASL_MECHLIST = 0x610a // read-only + LDAP_OPT_X_SASL_NOCANON = 0x610b + LDAP_OPT_X_SASL_USERNAME = 0x610c // read-only + LDAP_OPT_X_SASL_GSS_CREDS = 0x610d +) + +// OpenLDAP GSSAPI options + +const ( + LDAP_OPT_X_GSSAPI_DO_NOT_FREE_CONTEXT = 0x6200 + LDAP_OPT_X_GSSAPI_ALLOW_REMOTE_PRINCIPAL = 0x6201 +) + +// +// OpenLDAP per connection tcp-keepalive settings +// (Linux only, ignored where unsupported) +const ( + LDAP_OPT_X_KEEPALIVE_IDLE = 0x6300 + LDAP_OPT_X_KEEPALIVE_PROBES = 0x6301 + LDAP_OPT_X_KEEPALIVE_INTERVAL = 0x6302 +) + +/* authentication methods available */ +const ( + LDAP_AUTH_NONE = 0x00 // no authentication + LDAP_AUTH_SIMPLE = 0x80 // context specific + primitive + LDAP_AUTH_SASL = 0xa3 // context specific + constructed + LDAP_AUTH_KRBV4 = 0xff // means do both of the following + LDAP_AUTH_KRBV41 = 0x81 // context specific + primitive + LDAP_AUTH_KRBV42 = 0x82 // context specific + primitive +) diff --git a/Godeps/_workspace/src/github.com/mqu/openldap/openldap.go b/Godeps/_workspace/src/github.com/mqu/openldap/openldap.go new file mode 100644 index 000000000..8a45b2787 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mqu/openldap/openldap.go @@ -0,0 +1,454 @@ +/* + * Openldap (2.4.30) binding in GO + * + * + * link to ldap or ldap_r (for thread-safe binding) + * + * + * Copyright (C) 2012 - Marc Quinton. 
+ * + * Use of this source code is governed by the MIT Licence : + * http://opensource.org/licenses/mit-license.php + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +package openldap + +/* + +#define LDAP_DEPRECATED 1 +#include +#include + +static inline char* to_charptr(const void* s) { return (char*)s; } +static inline LDAPControl** to_ldapctrlptr(const void* s) { + return (LDAPControl**) s; +} +*/ +// #cgo CFLAGS: -DLDAP_DEPRECATED=1 +// #cgo linux CFLAGS: -DLINUX=1 +// #cgo LDFLAGS: -lldap -llber +import "C" + +import ( + "errors" + "fmt" + "unsafe" + "strings" + "strconv" +) + +/* Intialize() open an LDAP connexion ; supported url formats : + * + * ldap://host:389/ + * ldaps://secure-host:636/ + * + * return values : + * - on success : LDAP object, nil + * - on error : nil and error with error description. 
+ */ +func Initialize(url string) (*Ldap, error) { + _url := C.CString(url) + defer C.free(unsafe.Pointer(_url)) + + var ldap *C.LDAP + + // API: int ldap_initialize (LDAP **ldp, LDAP_CONST char *url ) + rv := C.ldap_initialize(&ldap, _url) + + if rv != 0 { + err := errors.New(fmt.Sprintf("LDAP::Initialize() error (%d) : %s", rv, ErrorToString(int(rv)))) + return nil, err + } + + return &Ldap{ldap}, nil +} + +/* + * StartTLS() is used for regular LDAP (not + * LDAPS) connections to establish encryption + * after the session is running. + * + * return value : + * - nil on success, + * - error with error description on error. + */ +func (self *Ldap) StartTLS() error { + var rv int + + // API: int ldap_start_tls_s(LDAP *ld, LDAPControl **serverctrls, LDAPControl **clientctrls); + rv = int(C.ldap_start_tls_s(self.conn, + C.to_ldapctrlptr(unsafe.Pointer(nil)), + C.to_ldapctrlptr(unsafe.Pointer(nil)))) + + if rv == LDAP_OPT_SUCCESS { + return nil + } + + return errors.New(fmt.Sprintf("LDAP::StartTLS() error (%d) : %s", rv, + ErrorToString(rv))) +} + +/* + * Bind() is used for LDAP authentifications + * + * if who is empty this is an anonymous bind + * else this is an authentificated bind + * + * return value : + * - nil on succes, + * - error with error description on error. 
+ * + */ +func (self *Ldap) Bind(who, cred string) error { + var rv int + + authmethod := C.int(LDAP_AUTH_SIMPLE) + + // DEPRECATED + // API: int ldap_bind_s (LDAP *ld, LDAP_CONST char *who, LDAP_CONST char *cred, int authmethod ); + if who == "" { + _who := C.to_charptr(unsafe.Pointer(nil)) + _cred := C.to_charptr(unsafe.Pointer(nil)) + + rv = int(C.ldap_bind_s(self.conn, _who, _cred, authmethod)) + } else { + _who := C.CString(who) + _cred := C.CString(cred) + defer C.free(unsafe.Pointer(_who)) + rv = int(C.ldap_bind_s(self.conn, _who, _cred, authmethod)) + } + + if rv == LDAP_OPT_SUCCESS { + return nil + } + + self.conn = nil + return errors.New(fmt.Sprintf("LDAP::Bind() error (%d) : %s", rv, ErrorToString(rv))) +} + +/* + * close LDAP connexion + * + * return value : + * - nil on succes, + * - error with error description on error. + * + */ +func (self *Ldap) Close() error { + + // DEPRECATED + // API: int ldap_unbind(LDAP *ld) + rv := C.ldap_unbind(self.conn) + + if rv == LDAP_OPT_SUCCESS { + return nil + } + + self.conn = nil + return errors.New(fmt.Sprintf("LDAP::Close() error (%d) : %s", int(rv), ErrorToString(int(rv)))) + +} +/* + * Unbind() close LDAP connexion + * + * an alias to Ldap::Close() + * + */ +func (self *Ldap) Unbind() error { + return self.Close() +} + +/* Search() is used to search LDAP server + - base is where search is starting + - scope allows local or deep search. 
Supported values : + - LDAP_SCOPE_BASE + - LDAP_SCOPE_ONELEVEL + - LDAP_SCOPE_SUBTREE + - filter is an LDAP search expression, + - attributes is an array of string telling with LDAP attribute to get from this request +*/ +func (self *Ldap) Search(base string, scope int, filter string, attributes []string) (*LdapMessage, error) { + + var attrsonly int = 0 // false: returns all, true, returns only attributes without values + + _base := C.CString(base) + defer C.free(unsafe.Pointer(_base)) + + _filter := C.CString(filter) + defer C.free(unsafe.Pointer(_filter)) + + // transform []string to C.char** null terminated array (attributes argument) + _attributes := make([]*C.char, len(attributes)+1) // default set to nil (NULL in C) + + for i, arg := range attributes { + _attributes[i] = C.CString(arg) + defer C.free(unsafe.Pointer(_attributes[i])) + } + + var msg *C.LDAPMessage + + // DEPRECATED + // API: int ldap_search_s (LDAP *ld, char *base, int scope, char *filter, char **attrs, int attrsonly, LdapMessage * ldap_res) + rv := int(C.ldap_search_s(self.conn, _base, C.int(scope), _filter, &_attributes[0], C.int(attrsonly), &msg)) + + if rv == LDAP_OPT_SUCCESS { + _msg := new(LdapMessage) + _msg.ldap = self + _msg.errno = rv + _msg.msg = msg + return _msg, nil + } + + return nil, errors.New(fmt.Sprintf("LDAP::Search() error : %d (%s)", rv, ErrorToString(rv))) +} + +// ------------------------------------- Ldap* method (object oriented) ------------------------------------------------------------------- + +// Create a new LdapAttribute entry with name and values. 
+func LdapAttributeNew(name string, values []string)(*LdapAttribute){ + a := new(LdapAttribute) + a.values = values + a.name = name + return a +} + +// Append() adds an LdapAttribute to self LdapEntry +func (self *LdapEntry) Append(a LdapAttribute){ + self.values = append(self.values, a) +} + +// String() is used for fmt.Println(self) +// +func (self *LdapAttribute) String() string{ + return self.ToText() +} + +// ToText() returns a text string representation of LdapAttribute +// avoiding displaying binary data. +// +func (self *LdapAttribute) ToText() string{ + + var list []string + + for _, a := range self.Values() { + if (!_isPrint(a)) { + list = append(list, fmt.Sprintf("binary-data[%d]", len(a))) + } else { + list = append(list, a) + } + } + if len(list) > 1 { + return fmt.Sprintf("%s: (%d)[%s]", self.name, len(list), strings.Join(list, ", ")) + } + return fmt.Sprintf("%s: [%s]", self.name, strings.Join(list, ", ")) +} + +// Name() return attribute name +func (self *LdapAttribute) Name() string{ + return self.name +} + +// Values() returns array values for self LdapAttribute +// +func (self *LdapAttribute) Values() []string{ + return self.values +} + +// _isPrint() returns true if str is printable +// +// @private method +func _isPrint(str string) bool{ + for _, c := range str{ + + if !strconv.IsPrint(rune(c)) { + return false + } + } + + return true +} + +// IsPrint() returns true is self LdapAttribute is printable. 
+func (self *LdapAttribute) IsPrint() bool{ + for _, a := range self.Values() { + if (!_isPrint(a)) { + return false + } + } + return true +} + +// Dn() returns DN (Distinguish Name) for self LdapEntry +func (self *LdapEntry) Dn() string{ + return self.dn +} + +// Attributes() returns an array of LdapAttribute +func (self *LdapEntry) Attributes() []LdapAttribute{ + return self.values +} + +// Print() allow printing self LdapEntry with fmt.Println() +func (self *LdapEntry) String() string { + return self.ToText() +} + +// GetValuesByName() get a list of values for self LdapEntry, using "name" attribute +func (self *LdapEntry) GetValuesByName(attrib string) []string{ + + for _, a := range self.values{ + if a.Name() == attrib { + return a.values + } + } + + return []string{} +} +// GetOneValueByName() ; a quick way to get a single attribute value +func (self *LdapEntry) GetOneValueByName(attrib string) (string, error){ + + for _, a := range self.values{ + if a.Name() == attrib { + return a.values[0], nil + } + } + + return "", errors.New(fmt.Sprintf("LdapEntry::GetOneValueByName() error : attribute %s not found", attrib)) +} + +// ToText() return a string representating self LdapEntry +func (self *LdapEntry) ToText() string{ + + txt := fmt.Sprintf("dn: %s\n", self.dn) + + for _, a := range self.values{ + txt = txt + fmt.Sprintf("%s\n", a.ToText()) + } + + return txt +} + +// Append() add e to LdapSearchResult array +func (self *LdapSearchResult) Append(e LdapEntry){ + self.entries = append(self.entries, e) +} + +// ToText() : a quick way to print an LdapSearchResult +func (self *LdapSearchResult) ToText() string{ + + txt := fmt.Sprintf("# query : %s\n", self.filter) + txt = txt + fmt.Sprintf("# num results : %d\n", self.Count()) + txt = txt + fmt.Sprintf("# search : %s\n", self.Filter()) + txt = txt + fmt.Sprintf("# base : %s\n", self.Base()) + txt = txt + fmt.Sprintf("# attributes : [%s]\n", strings.Join(self.Attributes(), ", ")) + + for _, e := range self.entries{ + 
txt = txt + fmt.Sprintf("%s\n", e.ToText()) + } + + return txt +} + +// String() : used for fmt.Println(self) +func (self *LdapSearchResult) String() string{ + return self.ToText() +} + +// Entries() : returns an array of LdapEntry for self +func (self *LdapSearchResult) Entries() []LdapEntry{ + return self.entries +} + +// Count() : returns number of results for self search. +func (self *LdapSearchResult) Count() int{ + return len(self.entries) +} + +// Filter() : returns filter for self search +func (self *LdapSearchResult) Filter() string{ + return self.filter +} + +// Filter() : returns base DN for self search +func (self *LdapSearchResult) Base() string{ + return self.base +} + +// Filter() : returns scope for self search +func (self *LdapSearchResult) Scope() int{ + return self.scope +} + +// Filter() : returns an array of attributes used for this actual search +func (self *LdapSearchResult) Attributes() []string{ + return self.attributes +} + +// SearchAll() : a quick way to make search. This method returns an LdapSearchResult with all necessary methods to +// access data. Result is a collection (tree) of []LdapEntry / []LdapAttribute. 
+// +func (self *Ldap) SearchAll(base string, scope int, filter string, attributes []string) (*LdapSearchResult, error) { + + sr := new(LdapSearchResult) + + sr.ldap = self + sr.base = base + sr.scope = scope + sr.filter = filter + sr.attributes = attributes + + // Search(base string, scope int, filter string, attributes []string) (*LDAPMessage, error) + result, err := self.Search(base, scope, filter, attributes) + + if err != nil { + fmt.Println(err) + return sr, err + } + + // Free LDAP::Result() allocated data + defer result.MsgFree() + + e := result.FirstEntry() + + for e != nil { + _e := new(LdapEntry) + + _e.dn = e.GetDn() + + attr, _ := e.FirstAttribute() + for attr != "" { + + _attr := LdapAttributeNew(attr, e.GetValues(attr)) + _e.Append(*_attr) + + attr, _ = e.NextAttribute() + + } + + sr.Append(*_e) + + e = e.NextEntry() + } + + return sr, nil +} diff --git a/Godeps/_workspace/src/github.com/mqu/openldap/options-errors.go b/Godeps/_workspace/src/github.com/mqu/openldap/options-errors.go new file mode 100644 index 000000000..04f926843 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mqu/openldap/options-errors.go @@ -0,0 +1,118 @@ +/* + * + * Copyright (C) 2012 - Marc Quinton. + * + * Use of this source code is governed by the MIT Licence : + * http://opensource.org/licenses/mit-license.php + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +package openldap + +/* +#include + +static inline char* to_charptr(const void* s) { return (char*)s; } + +*/ +// #cgo CFLAGS: -DLDAP_DEPRECATED=1 +// #cgo linux CFLAGS: -DLINUX=1 +// #cgo LDFLAGS: -lldap -llber +import "C" + +import ( + "errors" + "fmt" + "unsafe" +) + +// FIXME : support all kind of option (int, int*, ...) +func (self *Ldap) SetOption(opt int, val int) error { + + // API: ldap_set_option (LDAP *ld,int option, LDAP_CONST void *invalue)); + rv := C.ldap_set_option(self.conn, C.int(opt), unsafe.Pointer(&val)) + + if rv == LDAP_OPT_SUCCESS { + return nil + } + + return errors.New(fmt.Sprintf("LDAP::SetOption() error (%d) : %s", int(rv), ErrorToString(int(rv)))) +} + +// FIXME : support all kind of option (int, int*, ...) should take care of all return type for ldap_get_option +func (self *Ldap) GetOption(opt int) (val int, err error) { + + // API: int ldap_get_option (LDAP *ld,int option, LDAP_CONST void *invalue)); + rv := C.ldap_get_option(self.conn, C.int(opt), unsafe.Pointer(&val)) + + if rv == LDAP_OPT_SUCCESS { + return val, nil + } + + return 0, errors.New(fmt.Sprintf("LDAP::GetOption() error (%d) : %s", rv, ErrorToString(int(rv)))) +} + +/* +** WORK IN PROGRESS! +** +** OpenLDAP reentrancy/thread-safeness should be dynamically +** checked using ldap_get_option(). +** +** The -lldap implementation is not thread-safe. 
+** +** The -lldap_r implementation is: +** LDAP_API_FEATURE_THREAD_SAFE (basic thread safety) +** but also be: +** LDAP_API_FEATURE_SESSION_THREAD_SAFE +** LDAP_API_FEATURE_OPERATION_THREAD_SAFE +** +** The preprocessor flag LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE +** can be used to determine if -lldap_r is available at compile +** time. You must define LDAP_THREAD_SAFE if and only if you +** link with -lldap_r. +** +** If you fail to define LDAP_THREAD_SAFE when linking with +** -lldap_r or define LDAP_THREAD_SAFE when linking with -lldap, +** provided header definations and declarations may be incorrect. +** + */ + +func (self *Ldap) IsThreadSafe() bool { + // fmt.Println("IsThreadSafe()") + // opt, err := self.GetOption(LDAP_API_FEATURE_THREAD_SAFE) ; fmt.Println(opt, err) + // opt, err = self.GetOption(LDAP_THREAD_SAFE) ; fmt.Println(opt, err) + // opt, err = self.GetOption(LDAP_API_FEATURE_X_OPENLDAP_THREAD_SAFE) ; fmt.Println(opt, err) + + //FIXME: need to implement LDAP::GetOption(LDAP_OPT_API_FEATURE_INFO) + return false +} + +func ErrorToString(err int) string { + + // API: char * ldap_err2string (int err ) + result := C.GoString(C.to_charptr(unsafe.Pointer(C.ldap_err2string(C.int(err))))) + return result +} + +func (self *Ldap) Errno() int { + opt, _ := self.GetOption(LDAP_OPT_ERROR_NUMBER) + return opt +} diff --git a/Godeps/_workspace/src/github.com/mqu/openldap/results.go b/Godeps/_workspace/src/github.com/mqu/openldap/results.go new file mode 100644 index 000000000..14460b848 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mqu/openldap/results.go @@ -0,0 +1,306 @@ +/* + * + * Copyright (C) 2012 - Marc Quinton. 
+ * + * Use of this source code is governed by the MIT Licence : + * http://opensource.org/licenses/mit-license.php + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +package openldap + +/*#include +#include +#include +#include + +int _berval_get_len(struct berval **ber, int i){ + return ber[i]->bv_len; +} + +char* _berval_get_value(struct berval **ber, int i){ + return ber[i]->bv_val; +} + +*/ +// #cgo CFLAGS: -DLDAP_DEPRECATED=1 +// #cgo linux CFLAGS: -DLINUX=1 +// #cgo LDFLAGS: -lldap -llber +import "C" + +import ( + "errors" + "fmt" + "unsafe" +) + +// ------------------------------------------ RESULTS methods --------------------------------------------- +/* + + openldap C API : + + int ldap_count_messages( LDAP *ld, LdapMessage *result ) + LdapMessage *ldap_first_message( LDAP *ld, LdapMessage *result ) + LdapMessage *ldap_next_message ( LDAP *ld, LdapMessage *message ) + + int ldap_count_entries( LDAP *ld, LdapMessage *result ) + LdapMessage *ldap_first_entry( LDAP *ld, LdapMessage *result ) + LdapMessage *ldap_next_entry ( LDAP *ld, LdapMessage *entry ) + + char *ldap_first_attribute(LDAP *ld, LdapMessage *entry, BerElement **berptr ) + char *ldap_next_attribute (LDAP *ld, LdapMessage *entry, BerElement *ber ) + + char **ldap_get_values(LDAP *ld, LdapMessage *entry, char *attr) + struct berval **ldap_get_values_len(LDAP *ld, LdapMessage *entry,char *attr) + + int ldap_count_values(char **vals) + int ldap_count_values_len(struct berval **vals) + void ldap_value_free(char **vals) + void ldap_value_free_len(struct berval **vals) + +*/ + +func (self *LdapMessage) Count() int { + // API : int ldap_count_messages(LDAP *ld, LDAPMessage *chain ) + // err : (count = -1) + count := int(C.ldap_count_messages(self.ldap.conn, self.msg)) + if count == -1 { + panic("LDAP::Count() (ldap_count_messages) error (-1)") + } + return count + +} + +func (self *LdapMessage) FirstMessage() *LdapMessage { + + var msg *C.LDAPMessage + msg = C.ldap_first_message(self.ldap.conn, self.msg) + if msg == nil { + return nil + } + _msg := new(LdapMessage) + _msg.ldap = self.ldap + _msg.errno = 0 + _msg.msg = msg + return _msg +} + +func (self 
*LdapMessage) NextMessage() *LdapMessage { + var msg *C.LDAPMessage + msg = C.ldap_next_message(self.ldap.conn, self.msg) + + if msg == nil { + return nil + } + _msg := new(LdapMessage) + _msg.ldap = self.ldap + _msg.errno = 0 + _msg.msg = msg + return _msg +} + +/* an alias to ldap_count_message() ? */ +func (self *LdapEntry) CountEntries() int { + // API : int ldap_count_messages(LDAP *ld, LDAPMessage *chain ) + // err : (count = -1) + return int(C.ldap_count_entries(self.ldap.conn, self.entry)) +} + +func (self *LdapMessage) FirstEntry() *LdapEntry { + + var msg *C.LDAPMessage + // API: LdapMessage *ldap_first_entry( LDAP *ld, LdapMessage *result ) + msg = C.ldap_first_entry(self.ldap.conn, self.msg) + if msg == nil { + return nil + } + _msg := new(LdapEntry) + _msg.ldap = self.ldap + _msg.errno = 0 + _msg.entry = msg + return _msg +} + +func (self *LdapEntry) NextEntry() *LdapEntry { + var msg *C.LDAPMessage + // API: LdapMessage *ldap_next_entry ( LDAP *ld, LdapMessage *entry ) + msg = C.ldap_next_entry(self.ldap.conn, self.entry) + + if msg == nil { + return nil + } + _msg := new(LdapEntry) + _msg.ldap = self.ldap + _msg.errno = 0 + _msg.entry = msg + return _msg +} + +func (self *LdapEntry) FirstAttribute() (string, error) { + + var ber *C.BerElement + + // API: char *ldap_first_attribute(LDAP *ld, LdapMessage *entry, BerElement **berptr ) + rv := C.ldap_first_attribute(self.ldap.conn, self.entry, &ber) + + if rv == nil { + // error + return "", nil + } + self.ber = ber + return C.GoString(rv), nil +} + +func (self *LdapEntry) NextAttribute() (string, error) { + + // API: char *ldap_next_attribute (LDAP *ld, LdapMessage *entry, BerElement *ber ) + rv := C.ldap_next_attribute(self.ldap.conn, self.entry, self.ber) + + if rv == nil { + // error + return "", nil + } + return C.GoString(rv), nil +} + +// private func +func sptr(p uintptr) *C.char { + return *(**C.char)(unsafe.Pointer(p)) +} + +// private func used to convert null terminated char*[] to go []string 
+func cstrings_array(x **C.char) []string { + var s []string + for p := uintptr(unsafe.Pointer(x)); sptr(p) != nil; p += unsafe.Sizeof(uintptr(0)) { + s = append(s, C.GoString(sptr(p))) + } + return s +} + +// GetValues() return an array of string containing values for LDAP attribute "attr". +// Binary data are supported. +func (self *LdapEntry) GetValues(attr string) []string { + var s []string + + _attr := C.CString(attr) + defer C.free(unsafe.Pointer(_attr)) + + var bv **C.struct_berval + + //API: struct berval **ldap_get_values_len(LDAP *ld, LDAPMessage *entry, char *attr) + bv = C.ldap_get_values_len(self.ldap.conn, self.entry, _attr) + + var i int + count := int(C.ldap_count_values_len(bv)) + + for i = 0 ; i < count; i++ { + s = append(s, C.GoStringN(C._berval_get_value(bv, C.int(i)), C._berval_get_len(bv, C.int(i)))) + } + + // free allocated array (bv) + C.ldap_value_free_len(bv) + + return s +} + +// ------------------------------------------------ RESULTS ----------------------------------------------- +/* + int ldap_result ( LDAP *ld, int msgid, int all, struct timeval *timeout, LdapMessage **result ); + int ldap_msgfree( LdapMessage *msg ); + int ldap_msgtype( LdapMessage *msg ); + int ldap_msgid ( LdapMessage *msg ); + +*/ + +// Result() +// take care to free LdapMessage result with MsgFree() +// +func (self *Ldap) Result() (*LdapMessage, error) { + + var msgid int = 1 + var all int = 1 + + var tv C.struct_timeval + tv.tv_sec = 30 + + var msg *C.LDAPMessage + + // API: int ldap_result( LDAP *ld, int msgid, int all, struct timeval *timeout, LDAPMessage **result ); + rv := C.ldap_result(self.conn, C.int(msgid), C.int(all), &tv, &msg) + + if rv != LDAP_OPT_SUCCESS { + return nil, errors.New(fmt.Sprintf("LDAP::Result() error : %d (%s)", rv, ErrorToString(int(rv)))) + } + + _msg := new(LdapMessage) + _msg.ldap = self + _msg.errno = int(rv) + _msg.msg = msg + + return _msg, nil +} + +// MsgFree() is used to free LDAP::Result() allocated data +// +// returns 
-1 on error. +// +func (self *LdapMessage) MsgFree() int{ + if self.msg != nil { + rv := C.ldap_msgfree(self.msg) + self.msg = nil + return int(rv) + } + return -1 +} + + +// ---------------------------------------- DN Methods --------------------------------------------------- +/* + + char *ldap_get_dn( LDAP *ld, LdapMessage *entry) + int ldap_str2dn( const char *str, LDAPDN *dn, unsigned flags) + void ldap_dnfree( LDAPDN dn) + int ldap_dn2str( LDAPDN dn, char **str, unsigned flags) + + char **ldap_explode_dn( const char *dn, int notypes) + char **ldap_explode_rdn( const char *rdn, int notypes) + + char *ldap_dn2ufn ( const char * dn ) + char *ldap_dn2dcedn( const char * dn ) + char *ldap_dcedn2dn( const char * dn ) + char *ldap_dn2ad_canonical( const char * dn ) + +*/ + +// GetDn() return the DN (Distinguish Name) for self LdapEntry +func (self *LdapEntry) GetDn() string { + // API: char *ldap_get_dn( LDAP *ld, LDAPMessage *entry ) + rv := C.ldap_get_dn(self.ldap.conn, self.entry) + defer C.free(unsafe.Pointer(rv)) + + if rv == nil { + err := self.ldap.Errno() + panic(fmt.Sprintf("LDAP::GetDn() error %d (%s)", err, ErrorToString(err))) + } + + val := C.GoString(rv) + return val +} diff --git a/Godeps/_workspace/src/github.com/mqu/openldap/types.go b/Godeps/_workspace/src/github.com/mqu/openldap/types.go new file mode 100644 index 000000000..d78628e65 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mqu/openldap/types.go @@ -0,0 +1,76 @@ +/* + * + * Copyright (C) 2012 - Marc Quinton. 
+ * + * Use of this source code is governed by the MIT Licence : + * http://opensource.org/licenses/mit-license.php + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +package openldap + +/* +# include + +*/ +// #cgo CFLAGS: -DLDAP_DEPRECATED=1 +// #cgo linux CFLAGS: -DLINUX=1 +// #cgo LDFLAGS: -lldap -llber +import "C" + +type Ldap struct { + conn *C.LDAP +} + +type LdapMessage struct { + ldap *Ldap + // conn *C.LDAP + msg *C.LDAPMessage + errno int +} + +type LdapAttribute struct{ + name string + values []string +} + + +type LdapEntry struct { + ldap *Ldap + // conn *C.LDAP + entry *C.LDAPMessage + errno int + ber *C.BerElement + + dn string + values []LdapAttribute +} + +type LdapSearchResult struct{ + ldap *Ldap + + scope int + filter string + base string + attributes []string + + entries []LdapEntry +} diff --git a/Godeps/_workspace/src/golang.org/x/blog/LICENSE b/Godeps/_workspace/src/golang.org/x/blog/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/blog/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/blog/PATENTS b/Godeps/_workspace/src/golang.org/x/blog/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/blog/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/Godeps/_workspace/src/golang.org/x/blog/content/third-party-libraries-goprotobuf-and.article b/Godeps/_workspace/src/golang.org/x/blog/content/third-party-libraries-goprotobuf-and.article new file mode 100644 index 000000000..70791b885 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/blog/content/third-party-libraries-goprotobuf-and.article @@ -0,0 +1,26 @@ +Third-party libraries: goprotobuf and beyond +20 Apr 2010 +Tags: protobuf, community + +Andrew Gerrand + +* Introduction + +On March 24, Rob Pike announced [[http://code.google.com/p/goprotobuf/][goprotobuf]], the Go bindings of Google's data interchange format [[http://code.google.com/apis/protocolbuffers/docs/overview.html][Protocol Buffers]], called protobufs for short. With this announcement, Go joins C++, Java, and Python as languages providing official protobuf implementations. This marks an important milestone in enabling the interoperability between existing systems and those built in Go. + +The goprotobuf project consists of two parts: a 'protocol compiler plugin' that generates Go source files that, once compiled, can access and manage protocol buffers; and a Go package that implements run-time support for encoding (marshaling), decoding (unmarshaling), and accessing protocol buffers. + +To use goprotobuf, you first need to have both Go and [[http://code.google.com/p/protobuf/][protobuf]] installed. 
You can then install the 'proto' package with [[http://golang.org/cmd/goinstall/][goinstall]]: + + goinstall goprotobuf.googlecode.com/hg/proto + +And then install the protobuf compiler plugin: + + cd $GOROOT/src/pkg/goprotobuf.googlecode.com/hg/compiler + make install + +For more detail see the project's [[http://code.google.com/p/goprotobuf/source/browse/README][README]] file. + +This is one of a growing list of third-party [[http://godashboard.appspot.com/package][Go projects]]. Since the announcement of goprotobuf, the X Go bindings have been spun off from the standard library to the [[http://code.google.com/p/x-go-binding/][x-go-binding]] project, and work has begun on a [[http://www.freetype.org/][Freetype]] port, [[http://code.google.com/p/freetype-go/][freetype-go]]. Other popular third-party projects include the lightweight web framework [[http://github.com/hoisie/web.go][web.go]], and the Go GTK bindings [[http://github.com/mattn/go-gtk][gtk-go]]. + +We wish to encourage the development of other useful packages by the open source community. If you're working on something, don't keep it to yourself - let us know through our mailing list [[http://groups.google.com/group/golang-nuts][golang-nuts]]. diff --git a/Godeps/_workspace/src/golang.org/x/crypto/LICENSE b/Godeps/_workspace/src/golang.org/x/crypto/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/crypto/PATENTS b/Godeps/_workspace/src/golang.org/x/crypto/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/Godeps/_workspace/src/golang.org/x/crypto/pbkdf2/pbkdf2.go b/Godeps/_workspace/src/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 000000000..c02b4d5a7 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. 
+ +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/Godeps/_workspace/src/golang.org/x/exp/LICENSE b/Godeps/_workspace/src/golang.org/x/exp/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/exp/PATENTS b/Godeps/_workspace/src/golang.org/x/exp/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/Godeps/_workspace/src/golang.org/x/image/LICENSE b/Godeps/_workspace/src/golang.org/x/image/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/image/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/image/PATENTS b/Godeps/_workspace/src/golang.org/x/image/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/image/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/Godeps/_workspace/src/golang.org/x/mobile/LICENSE b/Godeps/_workspace/src/golang.org/x/mobile/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/mobile/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/mobile/PATENTS b/Godeps/_workspace/src/golang.org/x/mobile/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/mobile/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/Godeps/_workspace/src/golang.org/x/net/LICENSE b/Godeps/_workspace/src/golang.org/x/net/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/net/PATENTS b/Godeps/_workspace/src/golang.org/x/net/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/Godeps/_workspace/src/golang.org/x/net/context/context.go b/Godeps/_workspace/src/golang.org/x/net/context/context.go new file mode 100644 index 000000000..66aff7cb4 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/net/context/context.go @@ -0,0 +1,432 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. 
Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // DoSomething calls DoSomethingSlow and returns as soon as + // // it returns or ctx.Done is closed. + // func DoSomething(ctx context.Context) (Result, error) { + // c := make(chan Result, 1) + // go func() { c <- DoSomethingSlow(ctx) }() + // select { + // case res := <-c: + // return res, nil + // case <-ctx.Done(): + // return nil, ctx.Err() + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. 
+ Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. 
+ // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it's is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. 
+func TODO() Context { + return todo +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, &c) + return &c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) cancelCtx { + return cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return &c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// A canceler is a context type that can be canceled directly. 
The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + if p, ok := parentCancelCtx(c.Context); ok { + p.mu.Lock() + if p.children != nil { + delete(p.children, c) + } + p.mu.Unlock() + } + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. 
+// +// Canceling this context releases resources associated with the deadline +// timer, so code should call cancel as soon as the operations running in this +// Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(removeFromParent, err) + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with the deadline +// timer, so code should call cancel as soon as the operations running in this +// Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/Godeps/_workspace/src/golang.org/x/sys/LICENSE b/Godeps/_workspace/src/golang.org/x/sys/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/sys/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/sys/PATENTS b/Godeps/_workspace/src/golang.org/x/sys/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/sys/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/Godeps/_workspace/src/golang.org/x/talks/LICENSE b/Godeps/_workspace/src/golang.org/x/talks/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/talks/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/talks/PATENTS b/Godeps/_workspace/src/golang.org/x/talks/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/talks/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/Godeps/_workspace/src/golang.org/x/text/LICENSE b/Godeps/_workspace/src/golang.org/x/text/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/text/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/text/PATENTS b/Godeps/_workspace/src/golang.org/x/text/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/text/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/Godeps/_workspace/src/golang.org/x/tools/LICENSE b/Godeps/_workspace/src/golang.org/x/tools/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/golang.org/x/tools/PATENTS b/Godeps/_workspace/src/golang.org/x/tools/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/LICENSE b/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 000000000..292a4e09d --- /dev/null +++ b/README.md @@ -0,0 +1,25 @@ +# Harbor + +Harbor is a project to provide enterprise capabilities for Docker Registry V2. It wraps the registry server to provide authorization and a user interface. + +### Features +* **Role Based Access Control**: Users and Docker repositories are organized via "projects"; a user can have different permissions for images under a namespace. +* **Convenient user interface**: Users can easily browse and search Docker repositories, and manage projects/namespaces. +* **LDAP support**: Harbor can easily integrate with an existing enterprise LDAP. +* **Auditing**: All access to the repositories hosted on Harbor is immediately recorded and can be used for auditing purposes. + +### Try it +Harbor is self-contained and can be easily deployed via docker-compose. ```sh +$ cd Deploy +# make updates to the parameters in ./prepare +$ ./prepare +Generated configuration file: ./config/ui/env +Generated configuration file: ./config/ui/app.conf +Generated configuration file: ./config/registry/config.yml +$ docker-compose up +``` + +### License +Harbor is available under the [Apache 2 license](LICENSE). + diff --git a/api/base.go b/api/base.go new file mode 100644 index 000000000..92578bb78 --- /dev/null +++ b/api/base.go @@ -0,0 +1,65 @@ +/* + Copyright (c) 2016 VMware, Inc. 
All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package api + +import ( + "encoding/json" + "net/http" + + "github.com/vmware/harbor/dao" + "github.com/vmware/harbor/models" + + "github.com/astaxie/beego" +) + +type BaseAPI struct { + beego.Controller +} + +func (b *BaseAPI) Render() error { + return nil +} + +func (b *BaseAPI) RenderError(code int, text string) { + http.Error(b.Ctx.ResponseWriter, text, code) +} + +func (b *BaseAPI) DecodeJsonReq(v interface{}) { + err := json.Unmarshal(b.Ctx.Input.CopyBody(1<<32), v) + if err != nil { + beego.Error("Error while decoding the json request:", err) + b.CustomAbort(400, "Invalid json request") + } +} + +func (b *BaseAPI) ValidateUser() int { + + sessionUserId := b.GetSession("userId") + if sessionUserId == nil { + beego.Warning("No user id in session, canceling request") + b.CustomAbort(401, "") + } + userId := sessionUserId.(int) + u, err := dao.GetUser(models.User{UserId: userId}) + if err != nil { + beego.Error("Error occurred in GetUser:", err) + b.CustomAbort(500, "Internal error.") + } + if u == nil { + beego.Warning("User was deleted already, user id: ", userId, " canceling request.") + b.CustomAbort(401, "") + } + return userId +} diff --git a/api/project.go b/api/project.go new file mode 100644 index 000000000..9f92befa6 --- /dev/null +++ b/api/project.go @@ -0,0 +1,195 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. 
+ Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package api + +import ( + "fmt" + "log" + + "github.com/vmware/harbor/dao" + "github.com/vmware/harbor/models" + + "strconv" + "time" + + "github.com/astaxie/beego" +) + +type ProjectAPI struct { + BaseAPI + userId int + projectId int64 +} + +type projectReq struct { + ProjectName string `json:"project_name"` + Public bool `json:"public"` +} + +func (p *ProjectAPI) Prepare() { + p.userId = p.ValidateUser() + id_str := p.Ctx.Input.Param(":id") + if len(id_str) > 0 { + var err error + p.projectId, err = strconv.ParseInt(id_str, 10, 64) + if err != nil { + log.Printf("Error parsing project id: %s, error: %v", id_str, err) + p.CustomAbort(400, "invalid project id") + } + proj, err := dao.GetProjectById(models.Project{ProjectId: p.projectId}) + if err != nil { + log.Printf("Error occurred in GetProjectById: %v", err) + p.CustomAbort(500, "Internal error.") + } + if proj == nil { + p.CustomAbort(404, fmt.Sprintf("project does not exist, id: %v", p.projectId)) + } + } +} + +func (p *ProjectAPI) Post() { + var req projectReq + var public int + p.DecodeJsonReq(&req) + if req.Public { + public = 1 + } + projectName := req.ProjectName + exist, err := dao.ProjectExists(projectName) + if err != nil { + beego.Error("Error happened checking project existence in db:", err, ", project name:", projectName) + } + if exist { + p.RenderError(409, "") + return + } + project := models.Project{OwnerId: p.userId, Name: projectName, CreationTime: 
time.Now(), Public: public} + dao.AddProject(project) +} + +func (p *ProjectAPI) Head() { + projectName := p.GetString("project_name") + result, err := dao.ProjectExists(projectName) + if err != nil { + p.RenderError(500, "Error while communicating with DB") + return + } + if !result { + p.RenderError(404, "") + return + } +} + +func (p *ProjectAPI) Get() { + queryProject := models.Project{UserId: p.userId} + projectName := p.GetString("project_name") + if len(projectName) > 0 { + queryProject.Name = "%" + projectName + "%" + } + public, _ := p.GetInt("is_public") + queryProject.Public = public + + projectList, err := dao.QueryProject(queryProject) + if err != nil { + beego.Error("Error occurred in QueryProject:", err) + p.CustomAbort(500, "Internal error.") + } + for i := 0; i < len(projectList); i++ { + if isProjectAdmin(p.userId, projectList[i].ProjectId) { + projectList[i].Togglable = true + } + } + p.Data["json"] = projectList + p.ServeJSON() +} + +func (p *ProjectAPI) Put() { + var req projectReq + var public int + + projectId, err := strconv.ParseInt(p.Ctx.Input.Param(":id"), 10, 64) + if err != nil { + beego.Error("Error parsing project id:", projectId, ", error: ", err) + p.RenderError(400, "invalid project id") + return + } + + p.DecodeJsonReq(&req) + if req.Public { + public = 1 + } + + proj, err := dao.GetProjectById(models.Project{ProjectId: projectId}) + if err != nil { + beego.Error("Error occurred in GetProjectById:", err) + p.CustomAbort(500, "Internal error.") + } + if proj == nil { + p.RenderError(404, "") + return + } + if !isProjectAdmin(p.userId, projectId) { + beego.Warning("Current user, id:", p.userId, ", does not have project admin role for project, id:", projectId) + p.RenderError(403, "") + return + } + err = dao.ToggleProjectPublicity(p.projectId, public) + if err != nil { + beego.Error("Error while updating project, project id:", projectId, ", error:", err) + p.RenderError(500, "Failed to update project") + } +} + +func (p *ProjectAPI) 
ListAccessLog() { + + query := models.AccessLog{ProjectId: p.projectId} + accessLogList, err := dao.GetAccessLogs(query) + if err != nil { + log.Printf("Error occurred in GetAccessLogs: %v", err) + p.CustomAbort(500, "Internal error.") + } + p.Data["json"] = accessLogList + p.ServeJSON() +} + +func (p *ProjectAPI) FilterAccessLog() { + + var filter models.AccessLog + p.DecodeJsonReq(&filter) + + username := filter.Username + keywords := filter.Keywords + beginTime := filter.BeginTime + endTime := filter.EndTime + + query := models.AccessLog{ProjectId: p.projectId, Username: "%" + username + "%", Keywords: keywords, BeginTime: beginTime, EndTime: endTime} + accessLogList, err := dao.GetAccessLogs(query) + if err != nil { + log.Printf("Error occurred in GetAccessLogs: %v", err) + p.CustomAbort(500, "Internal error.") + } + p.Data["json"] = accessLogList + p.ServeJSON() +} + +func isProjectAdmin(userId int, pid int64) bool { + userQuery := models.User{UserId: userId, RoleId: models.PROJECTADMIN} + rolelist, err := dao.GetUserProjectRoles(userQuery, pid) + if err != nil { + beego.Error("Error occurred in GetUserProjectRoles:", err, ", returning false") + return false + } + return len(rolelist) > 0 +} diff --git a/api/project_member.go b/api/project_member.go new file mode 100644 index 000000000..b387958e7 --- /dev/null +++ b/api/project_member.go @@ -0,0 +1,212 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package api + +import ( + "github.com/vmware/harbor/dao" + "github.com/vmware/harbor/models" + + "github.com/astaxie/beego" + + "strconv" +) + +type ProjectMemberAPI struct { + BaseAPI + memberId int + currentUserId int + project *models.Project +} + +type memberReq struct { + Username string `json:"user_name"` + UserId int `json:"user_id"` + Roles []int `json:"roles"` +} + +func (pma *ProjectMemberAPI) Prepare() { + pid, err := strconv.ParseInt(pma.Ctx.Input.Param(":pid"), 10, 64) + if err != nil { + beego.Error("Error parsing project id:", pid, ", error:", err) + pma.CustomAbort(400, "invalid project Id") + return + } + p, err := dao.GetProjectById(models.Project{ProjectId: pid}) + if err != nil { + beego.Error("Error occurred in GetProjectById:", err) + pma.CustomAbort(500, "Internal error.") + } + + if p == nil { + beego.Warning("Project with id:", pid, "does not exist.") + pma.CustomAbort(404, "Project does not exist") + } + pma.project = p + pma.currentUserId = pma.ValidateUser() + mid := pma.Ctx.Input.Param(":mid") + if mid == "current" { + pma.memberId = pma.currentUserId + } else if len(mid) == 0 { + pma.memberId = 0 + } else if len(mid) > 0 { + memberId, err := strconv.Atoi(mid) + if err != nil { + beego.Error("Invalid member Id, error:", err) + pma.CustomAbort(400, "Invalid member id") + } + pma.memberId = memberId + } +} + +func (pma *ProjectMemberAPI) Get() { + pid := pma.project.ProjectId + if !CheckProjectPermission(pma.currentUserId, pid) { + beego.Warning("Current user, user id :", pma.currentUserId, "does not have permission for project, id:", pid) + pma.RenderError(403, "") + return + } + if pma.memberId == 0 { //member id not set return list of the members + queryProject := models.Project{ProjectId: pid} + username := pma.GetString("username") + queryUser := models.User{Username: "%" + username + "%"} + userList, err := dao.GetUserByProject(queryProject, queryUser) + if err != nil { + beego.Error("Failed to query database for member list, 
error:", err) + pma.RenderError(500, "Internal Server Error") + return + } + pma.Data["json"] = userList + } else { //return detail of a member + roleList, err := dao.GetUserProjectRoles(models.User{UserId: pma.memberId}, pid) + if err != nil { + beego.Error("Error occurred in GetUserProjectRoles:", err) + pma.CustomAbort(500, "Internal error.") + } + //return empty role list to indicate if a user is not a member + result := make(map[string]interface{}) + user, err := dao.GetUser(models.User{UserId: pma.memberId}) + if err != nil { + beego.Error("Error occurred in GetUser:", err) + pma.CustomAbort(500, "Internal error.") + } + result["user_name"] = user.Username + result["user_id"] = pma.memberId + result["roles"] = roleList + pma.Data["json"] = result + } + pma.ServeJSON() +} + +func (pma *ProjectMemberAPI) Post() { + pid := pma.project.ProjectId + userQuery := models.User{UserId: pma.currentUserId, RoleId: models.PROJECTADMIN} + rolelist, err := dao.GetUserProjectRoles(userQuery, pid) + if err != nil { + beego.Error("Error occurred in GetUserProjectRoles:", err) + pma.CustomAbort(500, "Internal error.") + } + if len(rolelist) == 0 { + beego.Warning("Current user, id:", pma.currentUserId, "does not have project admin role for project, id:", pid) + pma.RenderError(403, "") + return + } + var req memberReq + pma.DecodeJsonReq(&req) + username := req.Username + userId := CheckUserExists(username) + if userId <= 0 { + beego.Warning("User does not exist, user name:", username) + pma.RenderError(404, "User does not exist") + return + } + rolelist, err = dao.GetUserProjectRoles(models.User{UserId: userId}, pid) + if err != nil { + beego.Error("Error occurred in GetUserProjectRoles:", err) + pma.CustomAbort(500, "Internal error.") + } + if len(rolelist) > 0 { + beego.Warning("user is already added to project, user id:", userId, ", project id:", pid) + pma.RenderError(409, "user is ready in project") + return + } + + for _, rid := range req.Roles { + err = 
dao.AddUserProjectRole(userId, pid, int(rid))
		if err != nil {
			beego.Error("Failed to update DB to add project user role, project id:", pid, ", user id:", userId, ", role id:", rid)
			pma.RenderError(500, "Failed to update data in database")
			return
		}
	}
}

// Put replaces the roles of an existing project member with the roles in
// the request body. Only a project admin of the target project may call it;
// returns 404 when the target user is not a member.
func (pma *ProjectMemberAPI) Put() {
	pid := pma.project.ProjectId
	mid := pma.memberId

	// The caller must hold the project-admin role on this project.
	userQuery := models.User{UserId: pma.currentUserId, RoleId: models.PROJECTADMIN}
	rolelist, err := dao.GetUserProjectRoles(userQuery, pid)
	if err != nil {
		beego.Error("Error occurred in GetUserProjectRoles:", err)
		pma.CustomAbort(500, "Internal error.")
	}
	if len(rolelist) == 0 {
		beego.Warning("Current user, id:", pma.currentUserId, ", does not have project admin role for project, id:", pid)
		pma.RenderError(403, "")
		return
	}
	var req memberReq
	pma.DecodeJsonReq(&req)
	roleList, err := dao.GetUserProjectRoles(models.User{UserId: mid}, pid)
	// Fix: this error was previously ignored, so a DB failure was
	// misreported as "user not in project".
	if err != nil {
		beego.Error("Error occurred in GetUserProjectRoles:", err)
		pma.CustomAbort(500, "Internal error.")
	}
	if len(roleList) == 0 {
		beego.Warning("User is not in project, user id:", mid, ", project id:", pid)
		pma.RenderError(404, "user not exist in project")
		return
	}
	//TODO: delete and insert should in one transaction
	//delete user project role record for the given user
	err = dao.DeleteUserProjectRoles(mid, pid)
	if err != nil {
		beego.Error("Failed to delete project roles for user, user id:", mid, ", project id: ", pid, ", error: ", err)
		pma.RenderError(500, "Failed to update data in DB")
		return
	}
	//insert roles in request
	for _, rid := range req.Roles {
		err = dao.AddUserProjectRole(mid, pid, int(rid))
		if err != nil {
			beego.Error("Failed to update DB to add project user role, project id:", pid, ", user id:", mid, ", role id:", rid)
			pma.RenderError(500, "Failed to update data in database")
			return
		}
	}
}

// Delete removes all project roles of a member, i.e. removes the member
// from the project. Only a project admin of the target project may call it.
func (pma *ProjectMemberAPI) Delete() {
	pid := pma.project.ProjectId
	mid := pma.memberId
	userQuery := models.User{UserId: pma.currentUserId, RoleId: models.PROJECTADMIN}
	rolelist, err := dao.GetUserProjectRoles(userQuery, pid)
	// Fix: this error was previously ignored, silently treating a DB
	// failure as "no admin role".
	if err != nil {
		beego.Error("Error occurred in GetUserProjectRoles:", err)
		pma.CustomAbort(500, "Internal error.")
	}
	if len(rolelist) == 0 {
		beego.Warning("Current user, id:", pma.currentUserId, ", does not have project admin role for project, id:", pid)
		pma.RenderError(403, "")
		return
	}
	err = dao.DeleteUserProjectRoles(mid, pid)
	if err != nil {
		beego.Error("Failed to delete project roles for user, user id:", mid, ", project id:", pid, ", error:", err)
		pma.RenderError(500, "Failed to update data in DB")
		return
	}
}
diff --git a/api/repository.go b/api/repository.go new file mode 100644 index 000000000..284a73515 --- /dev/null +++ b/api/repository.go @@ -0,0 +1,175 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
+*/ +package api + +import ( + "encoding/json" + "strconv" + "strings" + "time" + + "github.com/vmware/harbor/dao" + "github.com/vmware/harbor/models" + svc_utils "github.com/vmware/harbor/service/utils" + + "github.com/astaxie/beego" +) + +//For repostiories, we won't check the session in this API due to search functionality, querying manifest will be contorlled by +//the security of registry + +type RepositoryAPI struct { + BaseAPI + userId int + username string +} + +func (ra *RepositoryAPI) Prepare() { + userId, ok := ra.GetSession("userId").(int) + if !ok { + ra.userId = dao.NON_EXIST_USER_ID + } else { + ra.userId = userId + } + username, ok := ra.GetSession("username").(string) + if !ok { + beego.Warning("failed to get username from session") + ra.username = "" + } else { + ra.username = username + } +} + +func (ra *RepositoryAPI) Get() { + projectId, err0 := ra.GetInt64("project_id") + if err0 != nil { + beego.Error("Failed to get project id, error:", err0) + ra.RenderError(400, "Invalid project id") + return + } + projectQuery := models.Project{ProjectId: projectId} + p, err := dao.GetProjectById(projectQuery) + if err != nil { + beego.Error("Error occurred in GetProjectById:", err) + ra.CustomAbort(500, "Internal error.") + } + if p == nil { + beego.Warning("Project with Id:", projectId, ", does not exist", projectId) + ra.RenderError(404, "") + return + } + if p.Public == 0 && !CheckProjectPermission(ra.userId, projectId) { + ra.RenderError(403, "") + return + } + repoList, err := svc_utils.GetRepoFromCache() + if err != nil { + beego.Error("Failed to get repo from cache, error:", err) + ra.RenderError(500, "internal sever error") + } + projectName := p.Name + q := ra.GetString("q") + var resp []string + if len(q) > 0 { + for _, r := range repoList { + if strings.Contains(r, "/") && strings.Contains(r[strings.LastIndex(r, "/")+1:], q) && r[0:strings.LastIndex(r, "/")] == projectName { + resp = append(resp, r) + } + } + ra.Data["json"] = resp + } else if 
len(projectName) > 0 { + for _, r := range repoList { + if strings.Contains(r, "/") && r[0:strings.LastIndex(r, "/")] == projectName { + resp = append(resp, r) + } + } + ra.Data["json"] = resp + } else { + ra.Data["json"] = repoList + } + ra.ServeJSON() +} + +type Tag struct { + Name string `json: "name"` + Tags []string `json:"tags"` +} + +type HistroyItem struct { + V1Compatibility string `json:"v1Compatibility"` +} + +type Manifest struct { + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + SchemaVersion int `json:"schemaVersion"` + History []HistroyItem `json:"history"` +} + +func (ra *RepositoryAPI) GetTags() { + + var tags []string + + repoName := ra.GetString("repo_name") + result, err := svc_utils.RegistryApiGet(svc_utils.BuildRegistryUrl(repoName, "tags", "list"), ra.username) + if err != nil { + beego.Error("Failed to get repo tags, repo name:", repoName, ", error: ", err) + ra.RenderError(500, "Failed to get repo tags") + } else { + t := Tag{} + json.Unmarshal(result, &t) + tags = t.Tags + } + ra.Data["json"] = tags + ra.ServeJSON() +} + +func (ra *RepositoryAPI) GetManifests() { + repoName := ra.GetString("repo_name") + tag := ra.GetString("tag") + + item := models.RepoItem{} + + result, err := svc_utils.RegistryApiGet(svc_utils.BuildRegistryUrl(repoName, "manifests", tag), ra.username) + if err != nil { + beego.Error("Failed to get manifests for repo, repo name:", repoName, ", tag:", tag, ", error:", err) + ra.RenderError(500, "Internal Server Error") + return + } else { + mani := Manifest{} + err = json.Unmarshal(result, &mani) + if err != nil { + beego.Error("Failed to decode json from response for manifests, repo name:", repoName, ", tag:", tag, ", error:", err) + ra.RenderError(500, "Internal Server Error") + return + } else { + v1Compatibility := mani.History[0].V1Compatibility + + err = json.Unmarshal([]byte(v1Compatibility), &item) + if err != nil { + beego.Error("Failed to decode V1 field for 
repo, repo name:", repoName, ", tag:", tag, ", error:", err) + ra.RenderError(500, "Internal Server Error") + return + } else { + item.CreatedStr = item.Created.Format("2006-01-02 15:04:05") + item.DurationDays = strconv.Itoa(int(time.Since(item.Created).Hours()/24)) + " days" + } + } + } + + ra.Data["json"] = item + ra.ServeJSON() +} diff --git a/api/search.go b/api/search.go new file mode 100644 index 000000000..805647596 --- /dev/null +++ b/api/search.go @@ -0,0 +1,103 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package api + +import ( + "sort" + "strings" + + "github.com/vmware/harbor/dao" + "github.com/vmware/harbor/models" + svc_utils "github.com/vmware/harbor/service/utils" + "github.com/vmware/harbor/utils" + + "github.com/astaxie/beego" +) + +type SearchAPI struct { + BaseAPI +} + +type SearchResult struct { + Project []map[string]interface{} `json:"project"` + Repository []map[string]interface{} `json:"repository"` +} + +func (n *SearchAPI) Get() { + userId, ok := n.GetSession("userId").(int) + if !ok { + userId = dao.NON_EXIST_USER_ID + } + keyword := n.GetString("q") + projects, err := dao.QueryRelevantProjects(userId) + if err != nil { + beego.Error("Failed to get projects of user id:", userId, ", error:", err) + n.CustomAbort(500, "Failed to get project search result") + } + projectSorter := &utils.ProjectSorter{Projects: projects} + sort.Sort(projectSorter) + projectResult := []map[string]interface{}{} + for _, p := range projects { + match := true + if len(keyword) > 0 && !strings.Contains(p.Name, keyword) { + match = false + } + if match { + entry := make(map[string]interface{}) + entry["id"] = p.ProjectId + entry["name"] = p.Name + entry["public"] = p.Public + projectResult = append(projectResult, entry) + } + } + + repositories, err2 := svc_utils.GetRepoFromCache() + if err2 != nil { + beego.Error("Failed to get repos from cache, error :", err2) + n.CustomAbort(500, "Failed to get repositories search result") + } + sort.Strings(repositories) + repositoryResult := filterRepositories(repositories, projects, keyword) + result := &SearchResult{Project: projectResult, Repository: repositoryResult} + n.Data["json"] = result + n.ServeJSON() +} + +func filterRepositories(repositories []string, projects []models.Project, keyword string) []map[string]interface{} { + var i, j int = 0, 0 + result := []map[string]interface{}{} + for i < len(repositories) && j < len(projects) { + r := &utils.Repository{Name: repositories[i]} + d := strings.Compare(r.GetProject(), 
projects[j].Name) + if d < 0 { + i++ + continue + } else if d == 0 { + i++ + if len(keyword) != 0 && !strings.Contains(r.Name, keyword) { + continue + } + entry := make(map[string]interface{}) + entry["repository_name"] = r.Name + entry["project_name"] = projects[j].Name + entry["project_id"] = projects[j].ProjectId + entry["project_public"] = projects[j].Public + result = append(result, entry) + } else { + j++ + } + } + return result +} diff --git a/api/user.go b/api/user.go new file mode 100644 index 000000000..2df9bf454 --- /dev/null +++ b/api/user.go @@ -0,0 +1,132 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package api + +import ( + "strconv" + + "github.com/vmware/harbor/dao" + "github.com/vmware/harbor/models" + + "github.com/astaxie/beego" +) + +type UserAPI struct { + BaseAPI + currentUid int + userId int +} + +func (ua *UserAPI) Prepare() { + + ua.currentUid = ua.ValidateUser() + id := ua.Ctx.Input.Param(":id") + if id == "current" { + ua.userId = ua.currentUid + } else if len(id) > 0 { + var err error + ua.userId, err = strconv.Atoi(id) + if err != nil { + beego.Error("Invalid user id, error:", err) + ua.CustomAbort(400, "Invalid user Id") + } + userQuery := models.User{UserId: ua.userId} + u, err := dao.GetUser(userQuery) + if err != nil { + beego.Error("Error occurred in GetUser:", err) + ua.CustomAbort(500, "Internal error.") + } + if u == nil { + beego.Error("User with Id:", ua.userId, "does not exist") + ua.CustomAbort(404, "") + } + } +} + +func (ua *UserAPI) Get() { + exist, err := dao.IsAdminRole(ua.currentUid) + if err != nil { + beego.Error("Error occurred in IsAdminRole:", err) + ua.CustomAbort(500, "Internal error.") + } + + if ua.userId == 0 { //list users + if !exist { + beego.Error("Current user, id:", ua.currentUid, ", does not have admin role, can not list users") + ua.RenderError(403, "User does not have admin role") + return + } + username := ua.GetString("username") + userQuery := models.User{} + if len(username) > 0 { + userQuery.Username = "%" + username + "%" + } + userList, err := dao.ListUsers(userQuery) + if err != nil { + beego.Error("Failed to get data from database, error:", err) + ua.RenderError(500, "Failed to query from database") + return + } + ua.Data["json"] = userList + + } else if ua.userId == ua.currentUid || exist { + userQuery := models.User{UserId: ua.userId} + u, err := dao.GetUser(userQuery) + if err != nil { + beego.Error("Error occurred in GetUser:", err) + ua.CustomAbort(500, "Internal error.") + } + ua.Data["json"] = u + } else { + beego.Error("Current user, id:", ua.currentUid, "does not have admin role, can 
not view other user's detail") + ua.RenderError(403, "User does not have admin role") + return + } + ua.ServeJSON() +} + +func (ua *UserAPI) Put() { //currently only for toggle admin, so no request body + exist, err := dao.IsAdminRole(ua.currentUid) + if err != nil { + beego.Error("Error occurred in IsAdminRole:", err) + ua.CustomAbort(500, "Internal error.") + } + if !exist { + beego.Warning("current user, id:", ua.currentUid, ", does not have admin role, can not update other user's role") + ua.RenderError(403, "User does not have admin role") + return + } + userQuery := models.User{UserId: ua.userId} + dao.ToggleUserAdminRole(userQuery) +} + +func (ua *UserAPI) Delete() { + exist, err := dao.IsAdminRole(ua.currentUid) + if err != nil { + beego.Error("Error occurred in IsAdminRole:", err) + ua.CustomAbort(500, "Internal error.") + } + if !exist { + beego.Warning("current user, id:", ua.currentUid, ", does not have admin role, can not remove user") + ua.RenderError(403, "User does not have admin role") + return + } + err = dao.DeleteUser(ua.userId) + if err != nil { + beego.Error("Failed to delete data from database, error:", err) + ua.RenderError(500, "Failed to delete User") + return + } +} diff --git a/api/utils.go b/api/utils.go new file mode 100644 index 000000000..573c9837c --- /dev/null +++ b/api/utils.go @@ -0,0 +1,51 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package api + +import ( + "github.com/vmware/harbor/dao" + "github.com/vmware/harbor/models" + + "github.com/astaxie/beego" +) + +func CheckProjectPermission(userId int, projectId int64) bool { + exist, err := dao.IsAdminRole(userId) + if err != nil { + beego.Error("Error occurred in IsAdminRole:", err) + return false + } + if exist { + return true + } + roleList, err := dao.GetUserProjectRoles(models.User{UserId: userId}, projectId) + if err != nil { + beego.Error("Error occurred in GetUserProjectRoles:", err) + return false + } + return len(roleList) > 0 +} + +func CheckUserExists(name string) int { + u, err := dao.GetUser(models.User{Username: name}) + if err != nil { + beego.Error("Error occurred in GetUser:", err) + return 0 + } + if u != nil { + return u.UserId + } + return 0 +} diff --git a/conf/app.conf b/conf/app.conf new file mode 100644 index 000000000..c80279df4 --- /dev/null +++ b/conf/app.conf @@ -0,0 +1,9 @@ +appname = harbor +runmode = dev + +[lang] +types = en-US|zh-CN +names = en-US|zh-CN + +[dev] +httpport = 80 \ No newline at end of file diff --git a/conf/private_key.pem b/conf/private_key.pem new file mode 100644 index 000000000..6c68cacb3 --- /dev/null +++ b/conf/private_key.pem @@ -0,0 +1,15 @@ +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQClak/4HO7EeLU0w/BhtVENPLOqU0AP2QjVUdg1qhNiDWVrbWx9 +KYHqz5Kn0n2+fxdZo3o7ZY5/2+hhgkKh1z6Kge9XGgune6z4fx2J/X2Se8WsGeQU +TiND8ngSnsCANtYFwW50SbUZPtyf5XjAfKRofZem51OxbxzN3217L/ubKwIDAQAB +AoGBAITMMuNYJwAogCGaZHOs4yMjZoIJT9bpQMQxbsi2f9UqOA/ky0I4foqKloyQ +2k6DLbXTHqBsydgwLgGKWAAiE5xIR2bPMUNSLgjbA2eLly3aOR/0FJ5n09k2EmGg +Am7tLP+6yneXWKVi3HI3NzXriVjWK94WHGGC1b9F+n5CY/2RAkEA1d62OJUNve2k +IY6/b6T0BdssFo3VFcm22vnayEL/wcYrnRfF9Pb5wM4HUUqwVelKTouivXg60GNK +ZKYAx5CtHwJBAMYAEf5u0CQ/8URcwBuMkm0LzK4AM2x1nGs7gIxAEFhu1Z4xPjVe +MtIxuHhDhlLvD760uccmo5yE72QJ1ZrYBHUCQQCAxLZMPRpoB4QyHEOREe1G9V6H +OeBZXPk2wQcEWqqo3gt2a1DqHCXl+2aWgHTJVUxDHHngwFoRDCdHkFeZ0LcbAkAj 
+T8/luI2WaXD16DS6tQ9IM1qFjbOeHDuRRENgv+wqWVnvpIibq/kUU5m6mRBTqh78 +u+6F/fYf6/VluftGalAhAkAukdMtt+sksq2e7Qw2dRr5GXtXjt+Otjj0NaJENmWk +a7SgAs34EOWtbd0XGYpZFrg134MzQGbweFeEUTj++e8p +-----END RSA PRIVATE KEY----- diff --git a/controllers/base.go b/controllers/base.go new file mode 100644 index 000000000..1f95f830e --- /dev/null +++ b/controllers/base.go @@ -0,0 +1,140 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package controllers + +import ( + "os" + "strings" + + "github.com/astaxie/beego" + "github.com/beego/i18n" +) + +type CommonController struct { + BaseController +} + +func (c *CommonController) Render() error { + return nil +} + +type BaseController struct { + beego.Controller + i18n.Locale +} + +type langType struct { + Lang string + Name string +} + +const ( + DEFAULT_LANG = "en-US" +) + +var supportLanguages map[string]langType + +func (b *BaseController) Prepare() { + + var lang string = "" + al := b.Ctx.Request.Header.Get("Accept-Language") + + if len(al) > 4 { + al = al[:5] // Only compare first 5 letters. + if i18n.IsExist(al) { + lang = al + } + } + + if _, exist := supportLanguages[lang]; exist == false { //Check if support the request language. + lang = DEFAULT_LANG //Set default language if not supported. 
+ } + + sessionLang := b.GetSession("lang") + if sessionLang != nil { + b.SetSession("Lang", lang) + lang = sessionLang.(string) + } + + curLang := langType{ + Lang: lang, + } + + restLangs := make([]*langType, 0, len(langTypes)-1) + for _, v := range langTypes { + if lang != v.Lang { + restLangs = append(restLangs, v) + } else { + curLang.Name = v.Name + } + } + + // Set language properties. + b.Lang = lang + b.Data["Lang"] = curLang.Lang + b.Data["CurLang"] = curLang.Name + b.Data["RestLangs"] = restLangs + + sessionUserId := b.GetSession("userId") + if sessionUserId != nil { + b.Data["Username"] = b.GetSession("username") + } + authMode := os.Getenv("AUTH_MODE") + if authMode == "" { + authMode = "db_auth" + } + b.Data["AuthMode"] = authMode +} + +func (b *BaseController) ForwardTo(pageTitle string, pageName string) { + b.Layout = "segment/base-layout.tpl" + b.TplName = "segment/base-layout.tpl" + b.Data["PageTitle"] = b.Tr(pageTitle) + b.LayoutSections = make(map[string]string) + b.LayoutSections["HeaderInc"] = "segment/header-include.tpl" + b.LayoutSections["HeaderContent"] = "segment/header-content.tpl" + b.LayoutSections["BodyContent"] = pageName + ".tpl" + b.LayoutSections["ModalDialog"] = "segment/modal-dialog.tpl" + b.LayoutSections["FootContent"] = "segment/foot-content.tpl" +} + +var langTypes []*langType + +func init() { + + beego.AddFuncMap("i18n", i18n.Tr) + + langs := strings.Split(beego.AppConfig.String("lang::types"), "|") + names := strings.Split(beego.AppConfig.String("lang::names"), "|") + + supportLanguages = make(map[string]langType) + + langTypes = make([]*langType, 0, len(langs)) + for i, v := range langs { + t := langType{ + Lang: v, + Name: names[i], + } + langTypes = append(langTypes, &t) + supportLanguages[v] = t + } + + for _, lang := range langs { + if err := i18n.SetMessage(lang, "static/i18n/"+"locale_"+lang+".ini"); err != nil { + beego.Error("Fail to set message file:" + err.Error()) + return + } + } +} diff --git 
a/controllers/item_detail.go b/controllers/item_detail.go new file mode 100644 index 000000000..391fbc04e --- /dev/null +++ b/controllers/item_detail.go @@ -0,0 +1,109 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package controllers + +import ( + "net/url" + "os" + + "github.com/vmware/harbor/dao" + "github.com/vmware/harbor/models" + + "github.com/astaxie/beego" +) + +type ItemDetailController struct { + BaseController +} + +var SYS_ADMIN int = 1 +var PROJECT_ADMIN int = 2 +var DEVELOPER int = 3 +var GUEST int = 4 + +func CheckProjectRole(userId int, projectId int64) bool { + if projectId == 0 { + return false + } + userQuery := models.User{UserId: int(userId)} + if userId == SYS_ADMIN { + return true + } + roleList, err := dao.GetUserProjectRoles(userQuery, projectId) + if err != nil { + beego.Error("Error occurred in GetUserProjectRoles:", err) + return false + } + return len(roleList) > 0 +} + +func CheckPublicProject(projectId int64) bool { + projectQuery := models.Project{ProjectId: projectId} + project, err := dao.GetProjectById(projectQuery) + if err != nil { + beego.Error("Error occurred in GetProjectById:", err) + return false + } + if project != nil && project.Public == 1 { + return true + } + return false +} + +func (idc *ItemDetailController) Get() { + + sessionUserId := idc.GetSession("userId") + projectId, _ := idc.GetInt64("project_id") + + if CheckPublicProject(projectId) == false && 
(sessionUserId == nil || !CheckProjectRole(sessionUserId.(int), projectId)) { + idc.Redirect("/signIn?uri="+url.QueryEscape(idc.Ctx.Input.URI()), 302) + } + + projectQuery := models.Project{ProjectId: projectId} + project, err := dao.GetProjectById(projectQuery) + + if err != nil { + beego.Error("Error occurred in GetProjectById:", err) + idc.CustomAbort(500, "Internal error.") + } + + if project == nil { + idc.Redirect("/signIn", 302) + } + + idc.Data["ProjectId"] = project.ProjectId + idc.Data["ProjectName"] = project.Name + idc.Data["OwnerName"] = project.OwnerName + idc.Data["OwnerId"] = project.OwnerId + + if sessionUserId != nil { + idc.Data["Username"] = idc.GetSession("username") + idc.Data["UserId"] = sessionUserId.(int) + roleList, err := dao.GetUserProjectRoles(models.User{UserId: sessionUserId.(int)}, projectId) + if err != nil { + beego.Error("Error occurred in GetUserProjectRoles:", err) + idc.CustomAbort(500, "Internal error.") + } + if len(roleList) > 0 { + idc.Data["RoleId"] = roleList[0].RoleId + } + } + + idc.Data["HarborRegUrl"] = os.Getenv("HARBOR_REG_URL") + idc.Data["RepoName"] = idc.GetString("repo_name") + + idc.ForwardTo("page_title_item_details", "item-detail") + +} diff --git a/controllers/login.go b/controllers/login.go new file mode 100644 index 000000000..d907018d4 --- /dev/null +++ b/controllers/login.go @@ -0,0 +1,70 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package controllers + +import ( + "github.com/vmware/harbor/models" + "github.com/vmware/harbor/opt_auth" + + "github.com/astaxie/beego" +) + +type IndexController struct { + BaseController +} + +func (c *IndexController) Get() { + c.Data["Username"] = c.GetSession("username") + c.ForwardTo("page_title_index", "index") +} + +type SignInController struct { + BaseController +} + +func (sic *SignInController) Get() { + sic.ForwardTo("page_title_sign_in", "sign-in") +} + +func (c *CommonController) Login() { + principal := c.GetString("principal") + password := c.GetString("password") + + user, err := opt_auth.Login(models.AuthModel{principal, password}) + if err != nil { + beego.Error("Error occurred in UserLogin:", err) + c.CustomAbort(500, "Internal error.") + } + + if user == nil { + c.CustomAbort(401, "") + } + + c.SetSession("userId", user.UserId) + c.SetSession("username", user.Username) +} + +func (c *CommonController) SwitchLanguage() { + lang := c.GetString("lang") + if lang == "en-US" || lang == "zh-CN" { + c.SetSession("lang", lang) + c.Data["Lang"] = lang + } + c.Redirect(c.Ctx.Request.Header.Get("Referer"), 302) +} + +func (c *CommonController) Logout() { + c.DestroySession() +} diff --git a/controllers/password.go b/controllers/password.go new file mode 100644 index 000000000..e6211d413 --- /dev/null +++ b/controllers/password.go @@ -0,0 +1,202 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package controllers + +import ( + "bytes" + "os" + "regexp" + "text/template" + + "github.com/vmware/harbor/dao" + "github.com/vmware/harbor/models" + "github.com/vmware/harbor/utils" + + "github.com/astaxie/beego" +) + +type ChangePasswordController struct { + BaseController +} + +func (cpc *ChangePasswordController) Get() { + sessionUserId := cpc.GetSession("userId") + if sessionUserId == nil { + cpc.Redirect("/signIn", 302) + } + cpc.Data["Username"] = cpc.GetSession("username") + cpc.ForwardTo("page_title_change_password", "change-password") +} + +func (cpc *CommonController) UpdatePassword() { + + sessionUserId := cpc.GetSession("userId") + sessionUsername := cpc.GetSession("username") + + if sessionUserId == nil || sessionUsername == nil { + beego.Warning("User does not login.") + cpc.CustomAbort(401, "please_login_first") + } + + oldPassword := cpc.GetString("old_password") + queryUser := models.User{UserId: sessionUserId.(int), Username: sessionUsername.(string), Password: oldPassword} + user, err := dao.CheckUserPassword(queryUser) + if err != nil { + beego.Error("Error occurred in CheckUserPassword:", err) + cpc.CustomAbort(500, "Internal error.") + } + + if user == nil { + beego.Warning("Password input is not correct") + cpc.CustomAbort(403, "old_password_is_not_correct") + } + + password := cpc.GetString("password") + if password != "" { + updateUser := models.User{UserId: sessionUserId.(int), Username: sessionUsername.(string), Password: password, Salt: user.Salt} + dao.ChangeUserPassword(updateUser) + } else { + cpc.CustomAbort(404, "please_input_new_password") + } +} + +type ForgotPasswordController struct { + BaseController +} + +type MessageDetail struct { + Hint string + Url string + Uuid string +} + +func (fpc *ForgotPasswordController) Get() { + fpc.ForwardTo("page_title_forgot_password", "forgot-password") +} + +func (fpc *CommonController) SendEmail() { + + email := fpc.GetString("email") + + if ok, _ := 
regexp.MatchString(`^(([^<>()[\]\\.,;:\s@\"]+(\.[^<>()[\]\\.,;:\s@\"]+)*)|(\".+\"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$`, email); ok { + + queryUser := models.User{Email: email} + exist, err := dao.UserExists(queryUser, "email") + if err != nil { + beego.Error("Error occurred in UserExists:", err) + fpc.CustomAbort(500, "Internal error.") + } + if !exist { + fpc.CustomAbort(404, "email_does_not_exist") + } + + messageTemplate, err := template.ParseFiles("views/reset-password-mail.tpl") + if err != nil { + beego.Error("Parse email template file failed:", err) + fpc.CustomAbort(500, err.Error()) + } + + message := new(bytes.Buffer) + + harborUrl := os.Getenv("HARBOR_URL") + if harborUrl == "" { + harborUrl = "localhost" + } + uuid, err := dao.GenerateRandomString() + if err != nil { + beego.Error("Error occurred in GenerateRandomString:", err) + fpc.CustomAbort(500, "Internal error.") + } + err = messageTemplate.Execute(message, MessageDetail{ + Hint: fpc.Tr("reset_email_hint"), + Url: harborUrl, + Uuid: uuid, + }) + + if err != nil { + beego.Error("message template error:", err) + fpc.CustomAbort(500, "internal_error") + } + + config, err := beego.AppConfig.GetSection("mail") + if err != nil { + beego.Error("Can not load app.conf:", err) + fpc.CustomAbort(500, "internal_error") + } + + mail := utils.Mail{ + From: config["from"], + To: []string{email}, + Subject: fpc.Tr("reset_email_subject"), + Message: message.String()} + + err = mail.SendMail() + + if err != nil { + beego.Error("send email failed:", err) + fpc.CustomAbort(500, "send_email_failed") + } + + user := models.User{ResetUuid: uuid, Email: email} + dao.UpdateUserResetUuid(user) + + } else { + fpc.CustomAbort(409, "email_content_illegal") + } + +} + +type ResetPasswordController struct { + BaseController +} + +func (rpc *ResetPasswordController) Get() { + + q := rpc.GetString("q") + queryUser := models.User{ResetUuid: q} + user, err := 
dao.GetUser(queryUser) + if err != nil { + beego.Error("Error occurred in GetUser:", err) + rpc.CustomAbort(500, "Internal error.") + } + + if user != nil { + rpc.Data["ResetUuid"] = user.ResetUuid + rpc.ForwardTo("page_title_reset_password", "reset-password") + } else { + rpc.Redirect("/", 302) + } +} + +func (rpc *CommonController) ResetPassword() { + + resetUuid := rpc.GetString("reset_uuid") + + queryUser := models.User{ResetUuid: resetUuid} + user, err := dao.GetUser(queryUser) + if err != nil { + beego.Error("Error occurred in GetUser:", err) + rpc.CustomAbort(500, "Internal error.") + } + + password := rpc.GetString("password") + + if password != "" { + user.Password = password + dao.ResetUserPassword(*user) + } else { + rpc.CustomAbort(404, "password_is_required") + } +} diff --git a/controllers/project.go b/controllers/project.go new file mode 100644 index 000000000..4ba4ebff7 --- /dev/null +++ b/controllers/project.go @@ -0,0 +1,24 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package controllers + +type ProjectController struct { + BaseController +} + +func (p *ProjectController) Get() { + p.Data["Username"] = p.GetSession("username") + p.ForwardTo("page_title_project", "project") +} diff --git a/controllers/register.go b/controllers/register.go new file mode 100644 index 000000000..244770419 --- /dev/null +++ b/controllers/register.go @@ -0,0 +1,75 @@ +/* + Copyright (c) 2016 VMware, Inc. 
All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package controllers + +import ( + "os" + "strings" + + "github.com/vmware/harbor/dao" + "github.com/vmware/harbor/models" + + "github.com/astaxie/beego" +) + +type RegisterController struct { + BaseController +} + +func (rc *RegisterController) Get() { + authMode := os.Getenv("AUTH_MODE") + if authMode == "" || authMode == "db_auth" { + rc.ForwardTo("page_title_registration", "register") + } else { + rc.Redirect("/signIn", 404) + } +} + +func (rc *CommonController) SignUp() { + username := strings.TrimSpace(rc.GetString("username")) + email := strings.TrimSpace(rc.GetString("email")) + realname := strings.TrimSpace(rc.GetString("realname")) + password := strings.TrimSpace(rc.GetString("password")) + comment := strings.TrimSpace(rc.GetString("comment")) + + user := models.User{Username: username, Email: email, Realname: realname, Password: password, Comment: comment} + + _, err := dao.Register(user) + if err != nil { + beego.Error("Error occurred in Register:", err) + rc.CustomAbort(500, "Internal error.") + } +} + +func (rc *CommonController) UserExists() { + target := rc.GetString("target") + value := rc.GetString("value") + + user := models.User{} + switch target { + case "username": + user.Username = value + case "email": + user.Email = value + } + + exist, err := dao.UserExists(user, target) + if err != nil { + beego.Error("Error occurred in UserExists:", err) + rc.CustomAbort(500, "Internal error.") + } + 
rc.Data["json"] = exist + rc.ServeJSON() +} diff --git a/controllers/search.go b/controllers/search.go new file mode 100644 index 000000000..ebd228fbd --- /dev/null +++ b/controllers/search.go @@ -0,0 +1,25 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package controllers + +type SearchController struct { + BaseController +} + +func (sc *SearchController) Get() { + sc.Data["Username"] = sc.GetSession("username") + sc.Data["QueryParam"] = sc.GetString("q") + sc.ForwardTo("page_title_search", "search") +} diff --git a/dao/access_log.go b/dao/access_log.go new file mode 100644 index 000000000..4e99cfdb8 --- /dev/null +++ b/dao/access_log.go @@ -0,0 +1,103 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package dao + +import ( + "strings" + + "github.com/vmware/harbor/models" + + "github.com/astaxie/beego/orm" +) + +func AddAccessLog(accessLog models.AccessLog) error { + o := orm.NewOrm() + p, err := o.Raw(`insert into access_log + (user_id, project_id, repo_name, guid, operation, op_time) + values (?, ?, ?, ?, ?, now())`).Prepare() + if err != nil { + return err + } + defer p.Close() + + _, err = p.Exec(accessLog.UserId, accessLog.ProjectId, accessLog.RepoName, accessLog.Guid, accessLog.Operation) + + return err +} + +func GetAccessLogs(accessLog models.AccessLog) ([]models.AccessLog, error) { + + o := orm.NewOrm() + sql := `select a.log_id, u.username, a.repo_name, a.operation, a.op_time + from access_log a left join user u on a.user_id = u.user_id + where a.project_id = ? ` + queryParam := make([]interface{}, 1) + queryParam = append(queryParam, accessLog.ProjectId) + + if accessLog.UserId != 0 { + sql += ` and a.user_id = ? ` + queryParam = append(queryParam, accessLog.UserId) + } + if accessLog.Operation != "" { + sql += ` and a.operation = ? ` + queryParam = append(queryParam, accessLog.Operation) + } + if accessLog.Username != "" { + sql += ` and u.username like ? 
` + queryParam = append(queryParam, accessLog.Username) + } + if accessLog.Keywords != "" { + sql += ` and a.operation in ( ` + keywordList := strings.Split(accessLog.Keywords, "/") + num := len(keywordList) + for i := 0; i < num; i++ { + if keywordList[i] != "" { + if i == num-1 { + sql += `?)` + } else { + sql += `?,` + } + queryParam = append(queryParam, keywordList[i]) + } + } + } + if accessLog.BeginTime != "" { + sql += ` and a.op_time >= str_to_date(?, '%Y-%m-%d %H:%i:%s') ` + queryParam = append(queryParam, accessLog.BeginTime+" 00:00:00.000000") + } + if accessLog.EndTime != "" { + sql += ` and a.op_time <= str_to_date(?, '%Y-%m-%d %H:%i:%s') ` + queryParam = append(queryParam, accessLog.EndTime+" 23:59:59.99999") + } + + sql += ` order by a.op_time desc ` + + var accessLogList []models.AccessLog + _, err := o.Raw(sql, queryParam).QueryRows(&accessLogList) + if err != nil { + return nil, err + } + return accessLogList, nil +} + +func AccessLog(username, projectName, repoName, action string) error { + o := orm.NewOrm() + sql := "insert into access_log (user_id, project_id, repo_name, operation, op_time) " + + "select (select user_id as user_id from user where username=?), " + + "(select project_id as project_id from project where name=?), ?, ?, now() " + _, err := o.Raw(sql, username, projectName, repoName, action).Exec() + + return err +} diff --git a/dao/base.go b/dao/base.go new file mode 100644 index 000000000..d6e8f37a9 --- /dev/null +++ b/dao/base.go @@ -0,0 +1,119 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+package dao
+
+import (
+	"log"
+	"net"
+
+	"os"
+	"strings"
+	"time"
+
+	"github.com/astaxie/beego"
+	"github.com/astaxie/beego/orm"
+	_ "github.com/go-sql-driver/mysql"
+)
+
+const NON_EXIST_USER_ID = 0
+
+// isIllegalLength reports whether len(s) falls outside [min, max];
+// min == -1 means "no lower bound", max == -1 means "must be longer than min".
+func isIllegalLength(s string, min int, max int) bool {
+	if min == -1 {
+		return (len(s) > max)
+	}
+	if max == -1 {
+		return (len(s) <= min)
+	}
+	return (len(s) < min || len(s) > max)
+}
+
+// isContainIllegalChar reports whether s contains any of the given substrings.
+func isContainIllegalChar(s string, illegalChar []string) bool {
+	for _, c := range illegalChar {
+		if strings.Index(s, c) >= 0 {
+			return true
+		}
+	}
+	return false
+}
+
+// GenerateRandomString returns a random string produced by MySQL's uuid().
+func GenerateRandomString() (string, error) {
+	o := orm.NewOrm()
+	var salt string
+	err := o.Raw(`select uuid() as uuid`).QueryRow(&salt)
+	if err != nil {
+		return "", err
+	}
+	return salt, nil
+
+}
+
+// init reads the MySQL connection settings from the environment, waits up to
+// 30 seconds for the database to accept TCP connections, then registers the
+// "default" database with beego's ORM.
+func init() {
+	orm.RegisterDriver("mysql", orm.DRMySQL)
+
+	addr := os.Getenv("MYSQL_HOST")
+	if len(addr) == 0 {
+		addr = os.Getenv("MYSQL_PORT_3306_TCP_ADDR")
+	}
+
+	port := os.Getenv("MYSQL_PORT_3306_TCP_PORT")
+	username := os.Getenv("MYSQL_USR")
+
+	password := os.Getenv("MYSQL_ENV_MYSQL_ROOT_PASSWORD")
+	if len(password) == 0 {
+		password = os.Getenv("MYSQL_PWD")
+	}
+
+	var flag bool = true
+	if addr == "" {
+		beego.Error("Unset env of MYSQL_HOST")
+		flag = false
+	} else if port == "" {
+		beego.Error("Unset env of MYSQL_PORT_3306_TCP_PORT")
+		flag = false
+	} else if username == "" {
+		beego.Error("Unset env of MYSQL_USR")
+		flag = false
+	} else if password == "" {
+		beego.Error("Unset env of MYSQL_PWD")
+		flag = false
+	}
+
+	if !flag {
+		os.Exit(1)
+	}
+
+	db_str := username + ":" + password + "@tcp(" + addr + ":" + port + ")/registry"
+	ch := make(chan int, 1)
+	go func() {
+		var err error
+		var c net.Conn
+		for {
+			c, err = net.Dial("tcp", addr+":"+port)
+			if err == nil {
+				c.Close()
+				ch <- 1
+				// Stop probing once the DB is reachable. Without this break
+				// the goroutine kept re-dialing forever and then blocked on
+				// the full size-1 channel — a leaked, busy goroutine.
+				break
+			} else {
+				log.Printf("failed to connect to db, retry after 2 seconds...")
+				time.Sleep(2 * time.Second)
+			}
+		}
+	}()
+	select {
+	case <-ch:
+	case <-time.After(30 * time.Second):
+		panic("Failed to connect to DB after 30 seconds")
+	}
+	orm.RegisterDataBase("default", "mysql", db_str)
+}
diff --git a/dao/item_detail.go b/dao/item_detail.go
new file mode 100644
index 000000000..17a070471
--- /dev/null
+++ b/dao/item_detail.go
@@ -0,0 +1,50 @@
+/*
+   Copyright (c) 2016 VMware, Inc. All Rights Reserved.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+package dao
+
+import (
+	"github.com/vmware/harbor/models"
+
+	"github.com/astaxie/beego/orm"
+)
+
+func GetUserByProject(queryProject models.Project, queryUser models.User) ([]models.User, error) {
+	o := orm.NewOrm()
+	u := []models.User{}
+	sql := `select
+		u.user_id, u.username, r.name rolename, r.role_id
+	 from user u left join user_project_role upr
+	 on u.user_id = upr.user_id
+	 left join project_role pr
+	 on pr.pr_id = upr.pr_id
+	 left join role r
+	 on r.role_id = pr.role_id
+	 where u.deleted = 0
+	 and pr.project_id = ? `
+
+	queryParam := make([]interface{}, 1)
+	queryParam = append(queryParam, queryProject.ProjectId)
+
+	if queryUser.Username != "" {
+		sql += " and u.username like ? 
"
+		queryParam = append(queryParam, queryUser.Username)
+	} else if queryUser.RoleId != 0 {
+		sql += ` and r.role_id <= ? `
+		queryParam = append(queryParam, queryUser.RoleId)
+	}
+	sql += ` order by u.user_id `
+	_, err := o.Raw(sql, queryParam).QueryRows(&u)
+	return u, err
+}
diff --git a/dao/project.go b/dao/project.go
new file mode 100644
index 000000000..f177bddc3
--- /dev/null
+++ b/dao/project.go
@@ -0,0 +1,234 @@
+/*
+   Copyright (c) 2016 VMware, Inc. All Rights Reserved.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+package dao
+
+import (
+	"github.com/vmware/harbor/models"
+
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/astaxie/beego"
+	"github.com/astaxie/beego/orm"
+)
+
+//TODO:transaction, return err
+// AddProject validates the project name, inserts the project row, creates the
+// three project roles (admin/developer/guest), assigns the owner as project
+// admin and records a "create" access-log entry.
+func AddProject(project models.Project) error {
+
+	// isIllegalLength(name, 4, 30) is true when len < 4 or len > 30;
+	// the previous message stated the bounds backwards.
+	if isIllegalLength(project.Name, 4, 30) {
+		return errors.New("project name is illegal in length. (less than 4 or greater than 30)")
+	}
+	if isContainIllegalChar(project.Name, []string{"~", "-", "$", "\\", "[", "]", "{", "}", "(", ")", "&", "^", "%", "*", "<", ">", "\"", "'", "/", "?", "@"}) {
+		return errors.New("project name contains illegal characters")
+	}
+
+	o := orm.NewOrm()
+
+	p, err := o.Raw("insert into project (owner_id, name, creation_time, deleted, public) values (?, ?, now(), ?, ?)").Prepare()
+	if err != nil {
+		return err
+	}
+	// Release the prepared statement; sibling helpers (e.g. AddProjectRole)
+	// already do this, and omitting it leaks a server-side statement handle.
+	defer p.Close()
+
+	r, err := p.Exec(project.OwnerId, project.Name, project.Deleted, project.Public)
+	if err != nil {
+		return err
+	}
+
+	projectId, err := r.LastInsertId()
+	if err != nil {
+		return err
+	}
+
+	projectAdminRole := models.ProjectRole{ProjectId: projectId, RoleId: models.PROJECTADMIN}
+	_, err = AddProjectRole(projectAdminRole)
+	if err != nil {
+		return err
+	}
+
+	projectDeveloperRole := models.ProjectRole{ProjectId: projectId, RoleId: models.DEVELOPER}
+	_, err = AddProjectRole(projectDeveloperRole)
+	if err != nil {
+		return err
+	}
+
+	projectGuestRole := models.ProjectRole{ProjectId: projectId, RoleId: models.GUEST}
+	_, err = AddProjectRole(projectGuestRole)
+	if err != nil {
+		return err
+	}
+
+	//Add all project roles, after that when assigning a user to a project just update the upr table
+	err = AddUserProjectRole(project.OwnerId, projectId, models.PROJECTADMIN)
+	if err != nil {
+		return err
+	}
+
+	accessLog := models.AccessLog{UserId: project.OwnerId, ProjectId: projectId, RepoName: project.Name + "/", Guid: "N/A", Operation: "create", OpTime: time.Now()}
+	err = AddAccessLog(accessLog)
+
+	return err
+}
+
+// IsProjectPublic reports whether the named project exists and is public;
+// lookup errors are logged and treated as "not public".
+func IsProjectPublic(projectName string) bool {
+	project, err := GetProjectByName(projectName)
+	if err != nil {
+		beego.Error("Error occurred in GetProjectByName:", err)
+		return false
+	}
+	if project == nil {
+		return false
+	}
+	return project.Public == 1
+}
+
+func QueryProject(query models.Project) ([]models.Project, error) {
+	o := orm.NewOrm()
+
+	sql := `select distinct
+		p.project_id,
p.owner_id, p.name,p.creation_time, p.public + from project p + left join project_role pr on p.project_id = pr.project_id + left join user_project_role upr on upr.pr_id = pr.pr_id + left join user u on u.user_id = upr.user_id + where p.deleted = 0 ` + + queryParam := make([]interface{}, 1) + + if query.Public == 1 { + sql += ` and p.public = ?` + queryParam = append(queryParam, query.Public) + } else if isAdmin, _ := IsAdminRole(query.UserId); isAdmin == false { + sql += ` and (p.owner_id = ? or u.user_id = ?) ` + queryParam = append(queryParam, query.UserId) + queryParam = append(queryParam, query.UserId) + } + + if query.Name != "" { + sql += " and p.name like ? " + queryParam = append(queryParam, query.Name) + } + + sql += " order by p.creation_time desc " + + var r []models.Project + _, err := o.Raw(sql, queryParam).QueryRows(&r) + + if err != nil { + return nil, err + } + return r, nil +} + +func ProjectExists(nameOrId interface{}) (bool, error) { + o := orm.NewOrm() + type dummy struct{} + sql := `select project_id from project where deleted = 0 and ` + switch nameOrId.(type) { + case int64: + sql += `project_id = ?` + case string: + sql += `name = ?` + default: + return false, errors.New(fmt.Sprintf("Invalid nameOrId: %v", nameOrId)) + } + + var d []dummy + num, err := o.Raw(sql, nameOrId).QueryRows(&d) + if err != nil { + return false, err + } + return num > 0, nil + +} + +func GetProjectById(query models.Project) (*models.Project, error) { + o := orm.NewOrm() + + sql := `select p.project_id, p.name, u.username as owner_name, p.owner_id, p.creation_time, p.public + from project p left join user u on p.owner_id = u.user_id where p.deleted = 0 and p.project_id = ?` + queryParam := make([]interface{}, 1) + queryParam = append(queryParam, query.ProjectId) + if query.Public != 0 { + sql += " and p.public = ? 
"
+		queryParam = append(queryParam, query.Public)
+	}
+
+	p := []models.Project{}
+	count, err := o.Raw(sql, queryParam).QueryRows(&p)
+
+	if err != nil {
+		return nil, err
+	} else if count == 0 {
+		return nil, nil
+	} else {
+		return &p[0], nil
+	}
+}
+
+// GetProjectByName returns the non-deleted project with the given name,
+// or (nil, nil) when no such project exists.
+func GetProjectByName(projectName string) (*models.Project, error) {
+	o := orm.NewOrm()
+	var p []models.Project
+	n, err := o.Raw(`select project_id, owner_id, name, deleted, public from project where name = ? and deleted = 0`, projectName).QueryRows(&p)
+	if err != nil {
+		return nil, err
+	} else if n == 0 {
+		return nil, nil
+	} else {
+		return &p[0], nil
+	}
+}
+
+// GetPermission returns the role code the user holds on the named project,
+// or "" when the user has no role there.
+func GetPermission(username, projectName string) (string, error) {
+	o := orm.NewOrm()
+
+	sql := "select r.role_code from role as r " +
+		"inner join project_role as pr on r.role_id = pr.role_id " +
+		"inner join user_project_role as ur on pr.pr_id = ur.pr_id " +
+		"inner join user as u on u.user_id = ur.user_id " +
+		"inner join project p on p.project_id = pr.project_id " +
+		"where u.username = ? and p.name = ? and u.deleted = 0 and p.deleted = 0"
+
+	var r []models.Role
+	n, err := o.Raw(sql, username, projectName).QueryRows(&r)
+	if err != nil {
+		return "", err
+	} else if n == 0 {
+		return "", nil
+	} else {
+		return r[0].RoleCode, nil
+	}
+}
+
+func ToggleProjectPublicity(projectId int64, publicity int) error {
+	o := orm.NewOrm()
+	sql := "update project set public = ? where project_id = ?"
+	_, err := o.Raw(sql, publicity, projectId).Exec()
+	return err
+}
+
+// QueryRelevantProjects lists the projects the user participates in plus all
+// public projects, excluding deleted ones.
+func QueryRelevantProjects(userId int) ([]models.Project, error) {
+	o := orm.NewOrm()
+	// Parenthesize the OR: SQL's AND binds tighter, so the previous form
+	// `upr.user_id = ? or p.public = 1 and p.deleted = 0` returned the
+	// user's own projects even after they were deleted.
+	sql := `SELECT distinct p.project_id, p.name, p.public FROM registry.project p
+		left join project_role pr on p.project_id = pr.project_id
+		left join user_project_role upr on upr.pr_id = pr.pr_id
+		where (upr.user_id = ? or p.public = 1) and p.deleted = 0`
+	var res []models.Project
+	_, err := o.Raw(sql, userId).QueryRows(&res)
+	if err != nil {
+		return nil, err
+	}
+	return res, err
+}
diff --git a/dao/project_role.go b/dao/project_role.go
new file mode 100644
index 000000000..bbc9217e0
--- /dev/null
+++ b/dao/project_role.go
@@ -0,0 +1,89 @@
+/*
+   Copyright (c) 2016 VMware, Inc. All Rights Reserved.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+package dao
+
+import (
+	"github.com/vmware/harbor/models"
+
+	"github.com/astaxie/beego/orm"
+)
+
+func AddProjectRole(projectRole models.ProjectRole) (int64, error) {
+	o := orm.NewOrm()
+	p, err := o.Raw("insert into project_role (project_id, role_id) values (?, ?)").Prepare()
+	if err != nil {
+		return 0, err
+	}
+	defer p.Close()
+	r, err := p.Exec(projectRole.ProjectId, projectRole.RoleId)
+	if err != nil {
+		return 0, err
+	}
+	id, err := r.LastInsertId()
+	return id, err
+}
+
+func AddUserProjectRole(userId int, projectId int64, roleId int) error {
+
+	o := orm.NewOrm()
+
+	var pr []models.ProjectRole
+
+	var prId int
+
+	sql := `select pr.pr_id, pr.project_id, pr.role_id from project_role pr where pr.project_id = ?
and pr.role_id = ?`
+	n, err := o.Raw(sql, projectId, roleId).QueryRows(&pr)
+	if err != nil {
+		return err
+	}
+
+	if n == 0 { //project role not found, insert a pr record
+		p, err := o.Raw("insert into project_role (project_id, role_id) values (?, ?)").Prepare()
+		if err != nil {
+			return err
+		}
+		defer p.Close()
+		r, err := p.Exec(projectId, roleId)
+		if err != nil {
+			return err
+		}
+		id, err := r.LastInsertId()
+		if err != nil {
+			return err
+		}
+		prId = int(id)
+	} else if n > 0 {
+		prId = pr[0].PrId
+	}
+	p, err := o.Raw("insert into user_project_role (user_id, pr_id) values (?, ?)").Prepare()
+	if err != nil {
+		return err
+	}
+	defer p.Close()
+	_, err = p.Exec(userId, prId)
+	return err
+}
+
+// DeleteUserProjectRoles removes every role binding the user holds on the
+// given project.
+func DeleteUserProjectRoles(userId int, projectId int64) error {
+	o := orm.NewOrm()
+	sql := `delete from user_project_role where user_id = ? and pr_id in
+		(select pr_id from project_role where project_id = ?)`
+	p, err := o.Raw(sql).Prepare()
+	if err != nil {
+		return err
+	}
+	// Close the prepared statement, matching the other helpers in this file;
+	// it was previously leaked.
+	defer p.Close()
+	_, err = p.Exec(userId, projectId)
+	return err
+}
diff --git a/dao/register.go b/dao/register.go
new file mode 100644
index 000000000..a8311ebf5
--- /dev/null
+++ b/dao/register.go
@@ -0,0 +1,131 @@
+/*
+   Copyright (c) 2016 VMware, Inc. All Rights Reserved.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ +package dao + +import ( + "errors" + "regexp" + + "github.com/vmware/harbor/models" + "github.com/vmware/harbor/utils" + + "github.com/astaxie/beego/orm" +) + +func Register(user models.User) (int64, error) { + + err := validate(user) + if err != nil { + return 0, err + } + + o := orm.NewOrm() + p, err := o.Raw("insert into user (username, password, realname, email, comment, salt) values (?, ?, ?, ?, ?, ?)").Prepare() + if err != nil { + return 0, err + } + defer p.Close() + + salt, err := GenerateRandomString() + if err != nil { + return 0, err + } + + r, err := p.Exec(user.Username, utils.Encrypt(user.Password, salt), user.Realname, user.Email, user.Comment, salt) + + if err != nil { + return 0, err + } + userId, err := r.LastInsertId() + if err != nil { + return 0, err + } + + return userId, nil +} + +func validate(user models.User) error { + + if isIllegalLength(user.Username, 0, 20) { + return errors.New("Username with illegal length.") + } + if isContainIllegalChar(user.Username, []string{",", "~", "#", "$", "%"}) { + return errors.New("Username contains illegal characters.") + } + + if exist, _ := UserExists(models.User{Username: user.Username}, "username"); exist { + return errors.New("Username already exists.") + } + + if m, _ := regexp.MatchString(`^(([^<>()[\]\\.,;:\s@\"]+(\.[^<>()[\]\\.,;:\s@\"]+)*)|(\".+\"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$`, user.Email); !m { + return errors.New("Email with illegal format.") + } + + if isIllegalLength(user.Email, 0, -1) { + return errors.New("Email cannot empty.") + } + + if exist, _ := UserExists(models.User{Email: user.Email}, "email"); exist { + return errors.New("Email already exists.") + } + + if isIllegalLength(user.Realname, 0, 20) { + return errors.New("Realname with illegal length.") + } + + if isContainIllegalChar(user.Realname, []string{",", "~", "#", "$", "%"}) { + return errors.New("Realname contains illegal characters.") + } + + if 
isIllegalLength(user.Password, 0, 20) { + return errors.New("Password with illegal length.") + } + + if isIllegalLength(user.Comment, -1, 30) { + return errors.New("Comment with illegal length.") + } + return nil +} + +func UserExists(user models.User, target string) (bool, error) { + + if user.Username == "" && user.Email == "" { + return false, errors.New("User name and email are blank.") + } + + o := orm.NewOrm() + + sql := `select user_id from user where 1=1 ` + queryParam := make([]interface{}, 1) + + switch target { + case "username": + sql += ` and username = ? ` + queryParam = append(queryParam, user.Username) + case "email": + sql += ` and email = ? ` + queryParam = append(queryParam, user.Email) + } + + var u []models.User + n, err := o.Raw(sql, queryParam).QueryRows(&u) + if err != nil { + return false, err + } else if n == 0 { + return false, nil + } else { + return true, nil + } +} diff --git a/dao/role.go b/dao/role.go new file mode 100644 index 000000000..2379b149b --- /dev/null +++ b/dao/role.go @@ -0,0 +1,63 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package dao + +import ( + "github.com/vmware/harbor/models" + + "github.com/astaxie/beego/orm" +) + +func GetUserProjectRoles(userQuery models.User, projectId int64) ([]models.Role, error) { + + o := orm.NewOrm() + + sql := `select distinct r.role_id, r.role_code, r.name + from role r + left join project_role pr on r.role_id = pr.role_id + left join user_project_role upr on pr.pr_id = upr.pr_id + left join user u on u.user_id = upr.user_id + where u.deleted = 0 + and u.user_id = ? ` + queryParam := make([]interface{}, 1) + queryParam = append(queryParam, userQuery.UserId) + + if projectId > 0 { + sql += ` and pr.project_id = ? ` + queryParam = append(queryParam, projectId) + } + if userQuery.RoleId > 0 { + sql += ` and r.role_id = ? ` + queryParam = append(queryParam, userQuery.RoleId) + } + + var roleList []models.Role + _, err := o.Raw(sql, queryParam).QueryRows(&roleList) + + if err != nil { + return nil, err + } + return roleList, nil +} + +func IsAdminRole(userId int) (bool, error) { + //role_id == 1 means the user is system admin + userQuery := models.User{UserId: userId, RoleId: models.SYSADMIN} + adminRoleList, err := GetUserProjectRoles(userQuery, 0) + if err != nil { + return false, err + } + return len(adminRoleList) > 0, nil +} diff --git a/dao/user.go b/dao/user.go new file mode 100644 index 000000000..e215712f4 --- /dev/null +++ b/dao/user.go @@ -0,0 +1,198 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ +package dao + +import ( + "github.com/vmware/harbor/models" + "github.com/vmware/harbor/utils" + + "github.com/astaxie/beego" + "github.com/astaxie/beego/orm" +) + +func GetUser(query models.User) (*models.User, error) { + + o := orm.NewOrm() + + sql := `select user_id, username, email, realname, reset_uuid, salt, + ifnull((select pr.role_id + from project_role pr + left join user_project_role upr on upr.pr_id = pr.pr_id + where pr.role_id = 1 + and upr.user_id = u.user_id),0) as has_admin_role + from user u + where deleted = 0 ` + queryParam := make([]interface{}, 1) + if query.UserId != 0 { + sql += ` and user_id = ? ` + queryParam = append(queryParam, query.UserId) + } + + if query.Username != "" { + sql += ` and username = ? ` + queryParam = append(queryParam, query.Username) + } + + if query.ResetUuid != "" { + sql += ` and reset_uuid = ? ` + queryParam = append(queryParam, query.ResetUuid) + } + + var u []models.User + n, err := o.Raw(sql, queryParam).QueryRows(&u) + + if err != nil { + return nil, err + } else if n == 0 { + return nil, nil + } else { + return &u[0], nil + } +} + +func LoginByDb(auth models.AuthModel) (*models.User, error) { + + query := models.User{Username: auth.Principal, Email: auth.Principal} + + o := orm.NewOrm() + var u []models.User + n, err := o.Raw(`select username from user where (username = ? or email = ?)`, query.Username, query.Email).QueryRows(&u) + if err != nil { + return nil, err + } else if n == 0 { + beego.Warning("User does not exist. 
Principal:", auth.Principal) + return nil, nil + } else { + u[0].Password = auth.Password + return CheckUserPassword(u[0]) + } + +} + +func ListUsers(query models.User) ([]models.User, error) { + o := orm.NewOrm() + u := []models.User{} + sql := `select u.user_id, u.username, u.email, ifnull((select pr.role_id + from project_role pr + left join user_project_role upr on upr.pr_id = pr.pr_id + where pr.role_id = 1 + and upr.user_id = u.user_id),0) as has_admin_role + from user u + where u.deleted = 0 and u.user_id != 1 ` + + queryParam := make([]interface{}, 1) + if query.Username != "" { + sql += ` and u.username like ? ` + queryParam = append(queryParam, query.Username) + } + sql += ` order by u.user_id desc ` + + _, err := o.Raw(sql, queryParam).QueryRows(&u) + return u, err +} + +func ToggleUserAdminRole(u models.User) error { + + projectRole := models.ProjectRole{PrId: 1} //admin project role + + o := orm.NewOrm() + + var pr []models.ProjectRole + + n, err := o.Raw(`select user_id from user_project_role where user_id = ? and pr_id = ? `, u.UserId, projectRole.PrId).QueryRows(&pr) + if err != nil { + return err + } + + var sql string + if n == 0 { + sql = `insert into user_project_role (user_id, pr_id) values (?, ?)` + } else { + sql = `delete from user_project_role where user_id = ? and pr_id = ?` + } + + p, err := o.Raw(sql).Prepare() + if err != nil { + return err + } + defer p.Close() + _, err = p.Exec(u.UserId, projectRole.PrId) + + return err +} + +func ChangeUserPassword(u models.User) error { + o := orm.NewOrm() + _, err := o.Raw(`update user set password=?, salt=? where user_id=?`, utils.Encrypt(u.Password, u.Salt), u.Salt, u.UserId).Exec() + return err +} + +func ResetUserPassword(u models.User) error { + o := orm.NewOrm() + _, err := o.Raw(`update user set password=?, reset_uuid=? 
where reset_uuid=?`, utils.Encrypt(u.Password, u.Salt), "", u.ResetUuid).Exec() + return err +} + +func UpdateUserResetUuid(u models.User) error { + o := orm.NewOrm() + _, err := o.Raw(`update user set reset_uuid=? where email=?`, u.ResetUuid, u.Email).Exec() + return err +} + +func CheckUserPassword(query models.User) (*models.User, error) { + + currentUser, err := GetUser(query) + + if err != nil { + return nil, err + } + + if currentUser == nil { + return nil, nil + } + + sql := `select user_id, username, salt from user where deleted = 0` + + queryParam := make([]interface{}, 1) + + if query.UserId != 0 { + sql += ` and password = ? and user_id = ?` + queryParam = append(queryParam, utils.Encrypt(query.Password, currentUser.Salt)) + queryParam = append(queryParam, query.UserId) + } else { + sql += ` and username = ? and password = ?` + queryParam = append(queryParam, currentUser.Username) + queryParam = append(queryParam, utils.Encrypt(query.Password, currentUser.Salt)) + } + o := orm.NewOrm() + var user []models.User + + n, err := o.Raw(sql, queryParam).QueryRows(&user) + + if err != nil { + return nil, err + } else if n == 0 { + beego.Warning("User principal does not match password. Current:", currentUser) + return nil, nil + } else { + return &user[0], nil + } +} + +func DeleteUser(userId int) error { + o := orm.NewOrm() + _, err := o.Raw(`update user set deleted = 1 where user_id = ?`, userId).Exec() + return err +} diff --git a/main.go b/main.go new file mode 100644 index 000000000..86e8ee87d --- /dev/null +++ b/main.go @@ -0,0 +1,81 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+package main
+
+import (
+	"errors"
+	"fmt"
+	"log"
+
+	"github.com/vmware/harbor/dao"
+	"github.com/vmware/harbor/models"
+	_ "github.com/vmware/harbor/opt_auth/db"
+	_ "github.com/vmware/harbor/opt_auth/ldap"
+	_ "github.com/vmware/harbor/routers"
+
+	"os"
+
+	"github.com/astaxie/beego"
+)
+
+const (
+	ADMIN_USER_ID = 1
+)
+
+// updateInitPassword encrypts and stores the given password for the user the
+// first time the service starts (detected by an empty salt); on later starts
+// it is a no-op for that user.
+func updateInitPassword(userId int, password string) error {
+	queryUser := models.User{UserId: userId}
+	user, err := dao.GetUser(queryUser)
+	if err != nil {
+		log.Println("Failed to get user in initial password, userId:", userId)
+		return err
+	}
+	if user == nil {
+		log.Printf("User id: %d does not exist.", userId)
+		// %d, not %s: userId is an int and %s rendered as "%!s(int=1)".
+		return errors.New(fmt.Sprintf("User id: %d does not exist.", userId))
+	} else if user.Salt == "" {
+		salt, err := dao.GenerateRandomString()
+		if err != nil {
+			log.Printf("Failed to generate salt for encrypting password, %v", err)
+			return err
+		}
+		user.Salt = salt
+		user.Password = password
+		err = dao.ChangeUserPassword(*user)
+		if err != nil {
+			log.Printf("Failed to update user encrypted password, userId: %d, err: %v", userId, err)
+			return err
+		}
+		log.Printf("User id: %d updated its encypted password successfully.", userId)
+	} else {
+		log.Printf("User id: %d already has its encrypted password.", userId)
+	}
+	return nil
+}
+
+func main() {
+
+	beego.BConfig.WebConfig.Session.SessionOn = true
+
+	//conf/app.conf -> os.Getenv("config_path")
+	configPath := os.Getenv("CONFIG_PATH")
+	if len(configPath) != 0 {
+		beego.Debug(fmt.Sprintf("Config path: %s", configPath))
+		beego.AppConfigPath = configPath
+	}
+
updateInitPassword(ADMIN_USER_ID, os.Getenv("HARBOR_ADMIN_PASSWORD")) + + beego.Run() +} diff --git a/models/access_log.go b/models/access_log.go new file mode 100644 index 000000000..72d15253e --- /dev/null +++ b/models/access_log.go @@ -0,0 +1,35 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package models + +import ( + "time" +) + +type AccessLog struct { + LogId int + UserId int + ProjectId int64 + RepoName string + Guid string + Operation string + OpTime time.Time + OpTimeStr string + + Username string + Keywords string + BeginTime string + EndTime string +} diff --git a/models/auth_model.go b/models/auth_model.go new file mode 100644 index 000000000..e3ad85314 --- /dev/null +++ b/models/auth_model.go @@ -0,0 +1,20 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package models + +type AuthModel struct { + Principal string + Password string +} diff --git a/models/notification.go b/models/notification.go new file mode 100644 index 000000000..853d80bd0 --- /dev/null +++ b/models/notification.go @@ -0,0 +1,49 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package models + +import ( + "time" +) + +type Notification struct { + Events []Event +} + +type Event struct { + Id string + TimeStamp time.Time + Action string + Target *Target + Request *Request + Actor *Actor +} + +type Target struct { + MediaType string + Digest string + Repository string + Url string +} + +type Actor struct { + Name string +} + +type Request struct { + Id string + Method string + UserAgent string +} diff --git a/models/project.go b/models/project.go new file mode 100644 index 000000000..126ee34f4 --- /dev/null +++ b/models/project.go @@ -0,0 +1,33 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ +package models + +import ( + "time" +) + +type Project struct { + ProjectId int64 + OwnerId int + Name string + CreationTime time.Time + CreationTimeStr string + Deleted int + UserId int + OwnerName string + Public int + //This field does not have correspondent column in DB, this is just for UI to disable button + Togglable bool +} diff --git a/models/project_role.go b/models/project_role.go new file mode 100644 index 000000000..dd70feacf --- /dev/null +++ b/models/project_role.go @@ -0,0 +1,21 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package models + +type ProjectRole struct { + PrId int + ProjectId int64 + RoleId int +} diff --git a/models/repo.go b/models/repo.go new file mode 100644 index 000000000..a247f0012 --- /dev/null +++ b/models/repo.go @@ -0,0 +1,25 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+package models
+
+type V1Repo struct {
+	NumResults int
+	Query      string
+	Results    []RepoItem
+}
+
+type Repo struct {
+	Repositories []string `json:"repositories"` // fix: tag value must be quoted; `json:repositories` is malformed and silently ignored (go vet: structtag)
+}
diff --git a/models/repo_item.go b/models/repo_item.go
new file mode 100644
index 000000000..6315893b8
--- /dev/null
+++ b/models/repo_item.go
@@ -0,0 +1,32 @@
+/*
+   Copyright (c) 2016 VMware, Inc. All Rights Reserved.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+package models
+
+import (
+	"time"
+)
+
+type RepoItem struct {
+	Id             string    `json:"Id"`
+	Parent         string    `json:"Parent"`
+	Created        time.Time `json:"Created"`
+	CreatedStr     string    `json:"CreatedStr"`
+	DurationDays   string    `json:"Duration Days"`
+	Author         string    `json:"Author"`
+	Architecture   string    `json:"Architecture"`
+	Docker_version string    `json:"Docker Version"`
+	Os             string    `json:"OS"`
+	//Size int `json:"Size"`
+}
diff --git a/models/role.go b/models/role.go
new file mode 100644
index 000000000..847fb3347
--- /dev/null
+++ b/models/role.go
@@ -0,0 +1,28 @@
+/*
+   Copyright (c) 2016 VMware, Inc. All Rights Reserved.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+package models
+
+const (
+	SYSADMIN     = 1
+	PROJECTADMIN = 2
+	DEVELOPER    = 3
+	GUEST        = 4
+)
+
+type Role struct {
+	RoleId   int    `json:"role_id"`
+	RoleCode string `json:"role_code"`
+	Name     string `json:"role_name"`
+}
diff --git a/models/tag.go b/models/tag.go
new file mode 100644
index 000000000..f9bc25fb8
--- /dev/null
+++ b/models/tag.go
@@ -0,0 +1,20 @@
+/*
+   Copyright (c) 2016 VMware, Inc. All Rights Reserved.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+package models
+
+type Tag struct {
+	Version string `json:"version"`  // fix: `json:version` was a malformed tag and ignored (go vet: structtag)
+	ImageId string `json:"image_id"` // fix: without a valid tag, key "image_id" never unmarshals into ImageId (json matching is case-insensitive, not underscore-insensitive)
+}
diff --git a/models/user.go b/models/user.go
new file mode 100644
index 000000000..666723dee
--- /dev/null
+++ b/models/user.go
@@ -0,0 +1,31 @@
+/*
+   Copyright (c) 2016 VMware, Inc. All Rights Reserved.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package models + +type User struct { + UserId int + Username string + Email string + Password string + Realname string + Comment string + Deleted int + Rolename string + RoleId int + RoleList []Role + HasAdminRole int + ResetUuid string + Salt string +} diff --git a/models/user_project_role.go b/models/user_project_role.go new file mode 100644 index 000000000..fcd5b1344 --- /dev/null +++ b/models/user_project_role.go @@ -0,0 +1,21 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package models + +type UserProjectRole struct { + UprId int + UserId int + PrId int64 +} diff --git a/opt_auth/db/db.go b/opt_auth/db/db.go new file mode 100644 index 000000000..f9b76d7f6 --- /dev/null +++ b/opt_auth/db/db.go @@ -0,0 +1,35 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package db + +import ( + "github.com/vmware/harbor/dao" + "github.com/vmware/harbor/models" + "github.com/vmware/harbor/opt_auth" +) + +type DbAuth struct{} + +func (d *DbAuth) Validate(auth models.AuthModel) (*models.User, error) { + u, err := dao.LoginByDb(auth) + if err != nil { + return nil, err + } + return u, nil +} + +func init() { + opt_auth.Register("db_auth", &DbAuth{}) +} diff --git a/opt_auth/ldap/ldap.go b/opt_auth/ldap/ldap.go new file mode 100644 index 000000000..214ff764e --- /dev/null +++ b/opt_auth/ldap/ldap.go @@ -0,0 +1,126 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package ldap + +import ( + "errors" + "fmt" + "log" + "os" + "strings" + + "github.com/vmware/harbor/dao" + "github.com/vmware/harbor/models" + "github.com/vmware/harbor/opt_auth" + + "github.com/astaxie/beego" + "github.com/mqu/openldap" +) + +type LdapAuth struct{} + +const META_CHARS = "&|!=~*<>()" + +func (l *LdapAuth) Validate(auth models.AuthModel) (*models.User, error) { + + ldapUrl := os.Getenv("LDAP_URL") + if ldapUrl == "" { + return nil, errors.New("Can not get any available LDAP_URL.") + } + beego.Debug("ldapUrl:", ldapUrl) + + p := auth.Principal + for _, c := range META_CHARS { + if strings.ContainsRune(p, c) { + log.Printf("The principal contains meta char: %q", c) + return nil, nil + } + } + + ldap, err := openldap.Initialize(ldapUrl) + if err != nil { + return nil, err + } + defer ldap.Close() + + ldap.SetOption(openldap.LDAP_OPT_PROTOCOL_VERSION, openldap.LDAP_VERSION3) + + ldapBaseDn := os.Getenv("LDAP_BASE_DN") + if ldapBaseDn == "" { + return nil, errors.New("Can not get any available LDAP_BASE_DN.") + } + + baseDn := fmt.Sprintf(ldapBaseDn, auth.Principal) + beego.Debug("baseDn:", baseDn) + + err = ldap.Bind(baseDn, auth.Password) + if err != nil { + return nil, err + } + + scope := openldap.LDAP_SCOPE_SUBTREE // LDAP_SCOPE_BASE, LDAP_SCOPE_ONELEVEL, LDAP_SCOPE_SUBTREE + filter := "objectClass=*" + attributes := []string{"cn", "mail", "uid"} + + result, err := ldap.SearchAll(baseDn, scope, filter, attributes) + if err != nil { + return nil, err + } + if len(result.Entries()) != 1 { + log.Printf("Found more than one entry.") + return nil, nil + } + en := result.Entries()[0] + u := models.User{} + for _, attr := range en.Attributes() { + val := attr.Values()[0] + switch attr.Name() { + case "uid": + u.Username = val + case "mail": + u.Email = val + case "cn": + u.Realname = val + } + } + + beego.Debug("username:", u.Username, ",email:", u.Email, ",realname:", u.Realname) + + exist, err := dao.UserExists(u, "username") + if err != nil { + 
return nil, err + } + + if exist { + currentUser, err := dao.GetUser(u) + if err != nil { + return nil, err + } + u.UserId = currentUser.UserId + } else { + u.Password = "12345678AbC" + u.Comment = "registered from LDAP." + userId, err := dao.Register(u) + if err != nil { + return nil, err + } + u.UserId = int(userId) + } + return &u, nil +} + +func init() { + opt_auth.Register("ldap_auth", &LdapAuth{}) +} diff --git a/opt_auth/opt_auth.go b/opt_auth/opt_auth.go new file mode 100644 index 000000000..164a19831 --- /dev/null +++ b/opt_auth/opt_auth.go @@ -0,0 +1,53 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+package opt_auth
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/vmware/harbor/models"
+
+	"github.com/astaxie/beego"
+)
+
+type OptAuth interface {
+	Validate(auth models.AuthModel) (*models.User, error)
+}
+
+var registry = make(map[string]OptAuth)
+
+func Register(name string, optAuth OptAuth) {
+	if _, dup := registry[name]; dup {
+		// fix: a `return` after panic is unreachable (go vet: unreachable); panic alone aborts registration
+		panic(name + " already exist.")
+	}
+	registry[name] = optAuth
+}
+
+func Login(auth models.AuthModel) (*models.User, error) {
+
+	authMode := os.Getenv("AUTH_MODE") // simplified from `var authMode string = ...` (redundant type, staticcheck S1021)
+	if authMode == "" || auth.Principal == "admin" {
+		authMode = "db_auth"
+	}
+	beego.Debug("Current AUTH_MODE is ", authMode)
+
+	optAuth := registry[authMode]
+	if optAuth == nil {
+		return nil, fmt.Errorf("Unrecognized auth_mode: %s", authMode)
+	}
+	return optAuth.Validate(auth)
+}
diff --git a/routers/router.go b/routers/router.go
new file mode 100644
index 000000000..44082951e
--- /dev/null
+++ b/routers/router.go
@@ -0,0 +1,66 @@
+/*
+   Copyright (c) 2016 VMware, Inc. All Rights Reserved.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ +package routers + +import ( + "github.com/vmware/harbor/api" + "github.com/vmware/harbor/controllers" + "github.com/vmware/harbor/service" + + "github.com/astaxie/beego" +) + +func init() { + + beego.SetStaticPath("registry/static/i18n", "static/i18n") + beego.SetStaticPath("registry/static/resources", "static/resources") + beego.SetStaticPath("registry/static/vendors", "static/vendors") + + beego.Router("/login", &controllers.CommonController{}, "post:Login") + beego.Router("/logout", &controllers.CommonController{}, "get:Logout") + beego.Router("/language", &controllers.CommonController{}, "get:SwitchLanguage") + beego.Router("/signUp", &controllers.CommonController{}, "post:SignUp") + beego.Router("/userExists", &controllers.CommonController{}, "post:UserExists") + beego.Router("/reset", &controllers.CommonController{}, "post:ResetPassword") + beego.Router("/sendEmail", &controllers.CommonController{}, "get:SendEmail") + beego.Router("/updatePassword", &controllers.CommonController{}, "post:UpdatePassword") + + beego.Router("/", &controllers.IndexController{}) + beego.Router("/signIn", &controllers.SignInController{}) + beego.Router("/register", &controllers.RegisterController{}) + beego.Router("/forgotPassword", &controllers.ForgotPasswordController{}) + beego.Router("/resetPassword", &controllers.ResetPasswordController{}) + beego.Router("/changePassword", &controllers.ChangePasswordController{}) + + beego.Router("/registry/project", &controllers.ProjectController{}) + beego.Router("/registry/detail", &controllers.ItemDetailController{}) + + beego.Router("/search", &controllers.SearchController{}) + + //API: + beego.Router("/api/search", &api.SearchAPI{}) + beego.Router("/api/projects/:pid/members/?:mid", &api.ProjectMemberAPI{}) + beego.Router("/api/projects/?:id", &api.ProjectAPI{}) + beego.Router("/api/projects/:id/logs", &api.ProjectAPI{}, "get:ListAccessLog") + beego.Router("/api/projects/:id/logs/filter", &api.ProjectAPI{}, "post:FilterAccessLog") + 
beego.Router("/api/users/?:id", &api.UserAPI{}) + beego.Router("/api/repositories", &api.RepositoryAPI{}) + beego.Router("/api/repositories/tags", &api.RepositoryAPI{}, "get:GetTags") + beego.Router("/api/repositories/manifests", &api.RepositoryAPI{}, "get:GetManifests") + + //external service that hosted on harbor process: + beego.Router("/service/notifications", &service.NotificationHandler{}) + beego.Router("/service/token", &service.AuthController{}, "get:Auth") +} diff --git a/service/auth.go b/service/auth.go new file mode 100644 index 000000000..f405ee9ce --- /dev/null +++ b/service/auth.go @@ -0,0 +1,84 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package service + +import ( + "log" + + "github.com/vmware/harbor/models" + "github.com/vmware/harbor/opt_auth" + svc_utils "github.com/vmware/harbor/service/utils" + "github.com/vmware/harbor/utils" + + "github.com/astaxie/beego" + "github.com/docker/distribution/registry/auth/token" +) + +type AuthController struct { + beego.Controller +} + +//handle request +func (a *AuthController) Auth() { + + request := a.Ctx.Request + + log.Println("request url: " + request.URL.String()) + authorization := request.Header["Authorization"] + log.Println("authorization:", authorization) + username, password := utils.ParseBasicAuth(authorization) + authenticated := authenticate(username, password) + + service := a.GetString("service") + scope := a.GetString("scope") + + if len(scope) == 0 && !authenticated { + log.Printf("login request with invalid credentials") + a.CustomAbort(401, "") + } + access := svc_utils.GetResourceActions(scope) + for _, a := range access { + svc_utils.FilterAccess(username, authenticated, a) + } + a.serveToken(username, service, access) +} + +func (a *AuthController) serveToken(username, service string, access []*token.ResourceActions) { + writer := a.Ctx.ResponseWriter + //create token + rawToken, err := svc_utils.MakeToken(username, service, access) + if err != nil { + log.Printf("Failed to make token, error: %v", err) + writer.WriteHeader(500) + return + } + tk := make(map[string]string) + tk["token"] = rawToken + a.Data["json"] = tk + a.ServeJSON() +} + +func authenticate(principal, password string) bool { + user, err := opt_auth.Login(models.AuthModel{principal, password}) + if err != nil { + log.Printf("Error occurred in UserLogin: %v", err) + return false + } + if user == nil { + return false + } else { + return true + } +} diff --git a/service/notification.go b/service/notification.go new file mode 100644 index 000000000..3419fb8ea --- /dev/null +++ b/service/notification.go @@ -0,0 +1,72 @@ +/* + Copyright (c) 2016 VMware, Inc. 
All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package service + +import ( + "encoding/json" + "strings" + + "github.com/vmware/harbor/dao" + "github.com/vmware/harbor/models" + svc_utils "github.com/vmware/harbor/service/utils" + + "github.com/astaxie/beego" +) + +type NotificationHandler struct { + beego.Controller +} + +const MEDIA_TYPE_MANIFEST = "application/vnd.docker.distribution.manifest.v1+json" + +func (n *NotificationHandler) Post() { + var notification models.Notification + // log.Printf("Notification Handler triggered!\n") + // log.Printf("request body in string: %s", string(n.Ctx.Input.CopyBody())) + err := json.Unmarshal(n.Ctx.Input.CopyBody(1<<32), ¬ification) + + if err != nil { + beego.Error("error while decoding json: ", err) + return + } + var username, action, repo, project string + for _, e := range notification.Events { + if e.Target.MediaType == MEDIA_TYPE_MANIFEST && strings.HasPrefix(e.Request.UserAgent, "docker") { + username = e.Actor.Name + action = e.Action + repo = e.Target.Repository + if strings.Contains(repo, "/") { + project = repo[0:strings.LastIndex(repo, "/")] + } + if username == "" { + username = "anonymous" + } + go dao.AccessLog(username, project, repo, action) + if action == "push" { + go func() { + err2 := svc_utils.RefreshCatalogCache() + if err2 != nil { + beego.Error("Error happens when refreshing cache:", err2) + } + }() + } + } + } + +} + +func (n *NotificationHandler) Render() error { + return nil +} diff 
--git a/service/utils/auth_utils.go b/service/utils/auth_utils.go new file mode 100644 index 000000000..54e83047b --- /dev/null +++ b/service/utils/auth_utils.go @@ -0,0 +1,190 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package utils + +import ( + "crypto" + "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + "log" + "strings" + "time" + + "github.com/vmware/harbor/dao" + + "github.com/docker/distribution/registry/auth/token" + "github.com/docker/libtrust" +) + +const ( + issuer = "registry-token-issuer" + privateKey = "conf/private_key.pem" + expiration = 5 //minute +) + +func GetResourceActions(scope string) []*token.ResourceActions { + var res []*token.ResourceActions + if scope == "" { + return res + } + items := strings.Split(scope, ":") + res = append(res, &token.ResourceActions{ + Type: items[0], + Name: items[1], + Actions: strings.Split(items[2], ","), + }) + return res +} + +//Try to modify the action list in access based on permission +//determine if the request needs to be authenticated. +//for details see:https://github.com/docker/docker/issues/15640 +func FilterAccess(username string, authenticated bool, a *token.ResourceActions) { + + if a.Type == "registry" && a.Name == "catalog" { + return + } else { + //clear action list to assign to new acess element after perm check. 
+ a.Actions = []string{} + if a.Type == "repository" { + if strings.Contains(a.Name, "/") { //Only check the permission when the requested image has a namespace, i.e. project + projectName := a.Name[0:strings.LastIndex(a.Name, "/")] + var permission string + var err error + if authenticated { + if username == "admin" { + exist, err := dao.ProjectExists(projectName) + if err != nil { + log.Printf("Error occurred in CheckExistProject: %v", err) + return + } + if exist { + permission = "RW" + } else { + permission = "" + log.Printf("project %s does not exist, set empty permission for admin", projectName) + } + } else { + permission, err = dao.GetPermission(username, projectName) + if err != nil { + log.Printf("Error occurred in GetPermission: %v", err) + return + } + } + } + if strings.Contains(permission, "W") { + a.Actions = append(a.Actions, "push") + } + if strings.Contains(permission, "R") || dao.IsProjectPublic(projectName) { + a.Actions = append(a.Actions, "pull") + } + } + } + log.Printf("current access, type: %s, name:%s, actions:%v \n", a.Type, a.Name, a.Actions) + } +} + +//For the UI process to call, so it won't establish a https connection from UI to proxy. 
+func GenTokenForUI(username, service, scope string) (string, error) { + access := GetResourceActions(scope) + for _, a := range access { + FilterAccess(username, true, a) + } + return MakeToken(username, service, access) +} + +func MakeToken(username, service string, access []*token.ResourceActions) (string, error) { + pk, err := libtrust.LoadKeyFile(privateKey) + if err != nil { + return "", err + } + tk, err := makeTokenCore(issuer, username, service, expiration, access, pk) + if err != nil { + return "", err + } + rs := fmt.Sprintf("%s.%s", tk.Raw, base64UrlEncode(tk.Signature)) + return rs, nil +} + +//make token core +func makeTokenCore(issuer, subject, audience string, expiration int, + access []*token.ResourceActions, signingKey libtrust.PrivateKey) (*token.Token, error) { + + joseHeader := &token.Header{ + Type: "JWT", + SigningAlg: "RS256", + KeyID: signingKey.KeyID(), + } + + jwtID, err := randString(16) + if err != nil { + return nil, fmt.Errorf("Error to generate jwt id: %s", err) + } + + now := time.Now() + + claimSet := &token.ClaimSet{ + Issuer: issuer, + Subject: subject, + Audience: audience, + Expiration: now.Add(time.Duration(expiration) * time.Minute).Unix(), + NotBefore: now.Unix(), + IssuedAt: now.Unix(), + JWTID: jwtID, + Access: access, + } + + var joseHeaderBytes, claimSetBytes []byte + + if joseHeaderBytes, err = json.Marshal(joseHeader); err != nil { + return nil, fmt.Errorf("unable to marshal jose header: %s", err) + } + if claimSetBytes, err = json.Marshal(claimSet); err != nil { + return nil, fmt.Errorf("unable to marshal claim set: %s", err) + } + + encodedJoseHeader := base64UrlEncode(joseHeaderBytes) + encodedClaimSet := base64UrlEncode(claimSetBytes) + payload := fmt.Sprintf("%s.%s", encodedJoseHeader, encodedClaimSet) + + var signatureBytes []byte + if signatureBytes, _, err = signingKey.Sign(strings.NewReader(payload), crypto.SHA256); err != nil { + return nil, fmt.Errorf("unable to sign jwt payload: %s", err) + } + + signature 
:= base64UrlEncode(signatureBytes) + tokenString := fmt.Sprintf("%s.%s", payload, signature) + //log.Printf("token: %s", tokenString) + return token.NewToken(tokenString) +} + +func randString(length int) (string, error) { + const alphanum = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + rb := make([]byte, length) + _, err := rand.Read(rb) + if err != nil { + return "", err + } + for i, b := range rb { + rb[i] = alphanum[int(b)%len(alphanum)] + } + return string(rb), nil +} + +func base64UrlEncode(b []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") +} diff --git a/service/utils/cache.go b/service/utils/cache.go new file mode 100644 index 000000000..cb3ae7548 --- /dev/null +++ b/service/utils/cache.go @@ -0,0 +1,69 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package utils + +import ( + "encoding/json" + "time" + + "github.com/vmware/harbor/models" + + "github.com/astaxie/beego" + + "github.com/astaxie/beego/cache" +) + +var Cache cache.Cache + +const CATALOG string = "catalog" + +func init() { + var err error + Cache, err = cache.NewCache("memory", `{"interval":720}`) + if err != nil { + beego.Error("Failed to initialize cache, error:", err) + } +} + +func RefreshCatalogCache() error { + result, err := RegistryApiGet(BuildRegistryUrl("_catalog"), "") + if err != nil { + return err + } + repoResp := models.Repo{} + err = json.Unmarshal(result, &repoResp) + if err != nil { + return err + } + Cache.Put(CATALOG, repoResp.Repositories, 600*time.Second) + return nil +} + +func GetRepoFromCache() ([]string, error) { + + result := Cache.Get(CATALOG) + if result == nil { + err := RefreshCatalogCache() + if err != nil { + return nil, err + } + cached := Cache.Get(CATALOG) + if cached != nil { + return cached.([]string), nil + } + return nil, nil + } + return result.([]string), nil +} diff --git a/service/utils/registry_utils.go b/service/utils/registry_utils.go new file mode 100644 index 000000000..8c8892d5b --- /dev/null +++ b/service/utils/registry_utils.go @@ -0,0 +1,111 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package utils + +import ( + "errors" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "strings" +) + +func BuildRegistryUrl(segments ...string) string { + registryURL := os.Getenv("REGISTRY_URL") + if registryURL == "" { + registryURL = "http://localhost:5000" + } + url := registryURL + "/v2" + for _, s := range segments { + if s == "v2" { + log.Printf("unnecessary v2 in %v", segments) + continue + } + url += "/" + s + } + return url +} + +func RegistryApiGet(url, username string) ([]byte, error) { + response, err := http.Get(url) + if err != nil { + return nil, err + } + result, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, err + } + defer response.Body.Close() + if response.StatusCode == 200 { + return result, nil + } else if response.StatusCode == 401 { + authenticate := response.Header.Get("WWW-Authenticate") + str := strings.Split(authenticate, " ")[1] + log.Println("url: " + url) + var service string + var scope string + strs := strings.Split(str, ",") + for _, s := range strs { + if strings.Contains(s, "service") { + service = s + } else if strings.Contains(s, "scope") { + scope = s + } + } + service = strings.Split(service, "\"")[1] + scope = strings.Split(scope, "\"")[1] + token, err := GenTokenForUI(username, service, scope) + if err != nil { + return nil, err + } + request, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + request.Header.Add("Authorization", "Bearer "+token) + client := &http.Client{} + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + // log.Printf("via length: %d\n", len(via)) + if len(via) >= 10 { + return fmt.Errorf("too many redirects") + } + for k, v := range via[0].Header { + if _, ok := req.Header[k]; !ok { + req.Header[k] = v + } + } + return nil + } + response, err = client.Do(request) + if err != nil { + return nil, err + } + if response.StatusCode != 200 { + errMsg := fmt.Sprintf("Unexpected return code from registry: %d", 
response.StatusCode) + log.Printf(errMsg) + return nil, fmt.Errorf(errMsg) + } + result, err = ioutil.ReadAll(response.Body) + if err != nil { + return nil, err + } + defer response.Body.Close() + return result, nil + } else { + return nil, errors.New(string(result)) + } +} diff --git a/static/i18n/locale_en-US.ini b/static/i18n/locale_en-US.ini new file mode 100644 index 000000000..ee0b06600 --- /dev/null +++ b/static/i18n/locale_en-US.ini @@ -0,0 +1,84 @@ +page_title_index = Harbor +page_title_sign_in = Sign In - Harbor +page_title_project = Project - Harbor +page_title_item_details = Item Details - Harbor +page_title_registration = Registration - Harbor +page_title_forgot_password = Forgot Password - Harbor +title_forgot_password = Forgot Password +page_title_reset_password = Reset Password - Harbor +title_reset_password = Reset Password +page_title_change_password = Change Password - Harbor +title_change_password = Change Password +page_title_search = Search - Harbor +sign_in = Sign In +sign_up = Sign Up +log_out = Log Out +search_placeholder = Search for projects or repositories +change_password = Change Password +username_email = Username/Email +password = Password +forgot_password = Forgot Password +welcome = Welcome +my_projects = My Projects +public_projects = Public Projects +admin_options = Admin Options +project_name = Project Name +creation_time = Creation Time +publicity = Publicity +add_project = Add Project +check_for_publicity = Check this checkbox and the project will be public. +button_save = Save +button_close = Close +button_submit = Submit +username = Username +email = Email +system_admin = System Admin +dlg_button_ok = OK +dlg_button_cancel = Cancel +registration = Registration +username_description = This will be your username. +email_description = You will occasionally receive account related emails. We promise not to share your email with anyone. +full_name = Full Name +full_name_description = First name & Last name. 
+password_description = Use more than seven characters with at least one lowercase letter, one capital letter and one numeral. +confirm_password = Confirm Password +note_to_the_admin = Notes to the Administrator +old_password = Old Password +new_password = New Password +forgot_password_description = Please input the email for your account, and a link to reset password will be sent. + +projects = Projects +repositories = Repositories +search = Search +home = Home +project = Project +owner = Owner +repo = Repositories +user = Users +logs = Logs +repo_name = Repository Name +add_members = Add Members +role_info = Role Info +operation = Operation +advance = Advance +all = All +others = Others +start_date = Start Date +end_date = End Date +timestamp = Timestamp +role = Role +reset_email_hint = Please use this link to reset your password +reset_email_subject = Reset your password +language = English +language_en-US = English +language_zh-CN = 中文 +copyright = Copyright +all_rights_reserved = All rights reserved. +container_registry_management = Container Registry Management +index_desc = Project Harbor is to build an enterprise-class, reliable registry server. Enterprises can set up a private registry server in their own environment to improve productivity as well as security. Project Harbor can be used in both development and production environment. +index_desc_0 = Key benefits of Project Harbor: +index_desc_1 = 1. Security: Keep their intellectual properties within their organizations. +index_desc_2 = 2. Efficiency: A private registry server is set up within the organization's network and can reduce significantly the internet traffic to the public service. +index_desc_3 = 3. Access Control: RBAC(Role Based Access Control) is provided. User management can be integrated with existing enterprise identity services like AD/LDAP. +index_desc_4 = 4. Audit: All access to the registry are logged and can be used for audit purpose. 
+ diff --git a/static/i18n/locale_messages.js b/static/i18n/locale_messages.js new file mode 100644 index 000000000..ab8603491 --- /dev/null +++ b/static/i18n/locale_messages.js @@ -0,0 +1,264 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +var global_messages = { + "username_is_required" : { + "en-US": "Username is required!", + "zh-CN": "用户名为必填项。" + }, + "username_has_been_taken" : { + "en-US": "Username has been taken!", + "zh-CN": "用户名已被占用!" + }, + "username_is_too_long" : { + "en-US": "Username is too long (maximum is 20 characters)", + "zh-CN": "用户名内容长度超出字符数限制。(最长为20个字符)" + }, + "username_contains_illegal_chars": { + "en-US": "Username contains illegal characters.", + "zh-CN": "用户名包含不合法的字符。" + }, + "email_is_required" : { + "en-US": "Email is required!", + "zh-CN": "邮箱为必填项。" + }, + "email_contains_illegal_chars" : { + "en-US": "Email contains illegal characters.", + "zh-CN": "邮箱包含不合法的字符。" + }, + "email_has_been_taken" : { + "en-US": "Email has been taken!", + "zh-CN": "邮箱已被占用!" + }, + "email_content_illegal" : { + "en-US": "Email content is illegal.", + "zh-CN": "邮箱格式不合法!" 
+ }, + "email_does_not_exist" : { + "en-US": "Email does not exist!", + "zh-CN": "邮箱不存在。" + }, + "realname_is_required" : { + "en-US": "Realname is required!", + "zh-CN": "全名为必填项。" + }, + "realname_is_too_long" : { + "en-US": "Realname is too long (maximum is 20 characters)", + "zh-CN": "全名内容长度超出字符数限制。(最长为20个字符)" + }, + "realname_contains_illegal_chars" : { + "en-US": "Realname contains illegal characters.", + "zh-CN": "全名包含不合法的字符。" + }, + "password_is_required" : { + "en-US": "Password is required!", + "zh-CN": "密码为必填项。" + }, + "password_is_invalid" : { + "en-US": "Password is invalid. Use more than seven characters with at least one lowercase letter, one capital letter and one numeral.", + "zh-CN": "密码无效。至少输入 7个字符且包含 1个小写字母,1个大写字母和数字。" + }, + "password_is_too_long" : { + "en-US": "Password is too long (maximum is 20 characters)", + "zh-CN": "密码内容长度超出字符数限制。(最长为20个字符)" + }, + "password_does_not_match" : { + "en-US": "Password does not match the confirmation.", + "zh-CN": "两次密码输入内容不一致。" + }, + "comment_is_too_long" : { + "en-US": "Comment is too long (maximum is 20 characters)", + "zh-CN": "留言内容长度超过字符数限制。(最长为20个字符)" + }, + "comment_contains_illegal_chars" : { + "en-US": "Comment contains illegal characters.", + "zh-CN": "留言内容包含不合法的字符。" + }, + "project_name_is_required" : { + "en-US": "Project name is required!", + "zh-CN": "项目名称为必填项。" + }, + "project_name_is_too_short" : { + "en-US": "Project name is too short (minimum is 4 characters)", + "zh-CN": "项目名称内容过于简短。(最少要求4个字符)" + }, + "project_name_is_too_long" : { + "en-US": "Project name is too long (maximum is 30 characters)", + "zh-CN": "项目名称内容长度超出字符数限制。(最长为30个字符)" + }, + "project_name_contains_illegal_chars" : { + "en-US": "Project name contains illegal characters.", + "zh-CN": "项目名称内容包含不合法的字符。" + }, + "project_exists" : { + "en-US": "Project exists.", + "zh-CN": "项目已存在。" + }, + "delete_user" : { + "en-US": "Delete User", + "zh-CN": "删除用户" + }, + "are_you_sure_to_delete_user" : { + "en-US": "Are you sure to delete ", 
+ "zh-CN": "确认要删除用户 " + }, + "input_your_username_and_password" : { + "en-US": "Please input your username and password.", + "zh-CN": "请输入用户名和密码。" + }, + "check_your_username_or_password" : { + "en-US": "Please check your username or password!", + "zh-CN": "请输入正确的用户名或密码。" + }, + "title_login_failed" : { + "en-US": "Login Failed", + "zh-CN": "登录失败" + }, + "title_change_password" : { + "en-US": "Change Password", + "zh-CN": "修改密码" + }, + "change_password_successfully" : { + "en-US": "Changed password successfully.", + "zh-CN": "修改密码操作成功。" + }, + "title_forgot_password" : { + "en-US": "Forgot Password", + "zh-CN": "忘记密码" + }, + "email_has_been_sent" : { + "en-US": "Email has been sent.", + "zh-CN": "重置密码邮件已发送。" + }, + "send_email_failed" : { + "en-US": "Send email failed.", + "zh-CN": "邮件发送失败。" + }, + "please_login_first" : { + "en-US": "Please login first!", + "zh-CN": "请先登录。" + }, + "old_password_is_not_correct" : { + "en-US": "Old password input is not correct.", + "zh-CN": "原密码输入不正确。" + }, + "please_input_new_password" : { + "en-US": "Please input new password.", + "zh-CN": "请输入新密码。" + }, + "invalid_reset_url": { + "en-US": "Invalid reset url", + "zh-CN": "无效的重置链接" + }, + "reset_password_successfully" : { + "en-US": "Reset password successfully.", + "zh-CN": "密码重置成功。" + }, + "internal_error": { + "en-US": "Internal error, please contact sysadmin.", + "zh-CN": "内部错误,请联系系统管理员。" + }, + "title_reset_password" : { + "en-US": "Reset Password", + "zh-CN": "重置密码" + }, + "title_sign_up" : { + "en-US": "Sign Up", + "zh-CN": "注册" + }, + "registered_successfully": { + "en-US": "Registered successfully.", + "zh-CN": "注册成功。" + }, + "registered_failed" : { + "en-US": "Registered failed.", + "zh-CN": "注册失败。" + }, + "projects" : { + "en-US": "Projects", + "zh-CN": "项目" + }, + "repositories" : { + "en-US": "Repositories", + "zh-CN": "镜像资源" + }, + "no_repo_exists" :{ + "en-US": "No repositories exist.", + "zh-CN": "没有镜像资源。" + }, + "tag" : { + "en-US": "Tag", + "zh-CN": "标签" + }, + 
"pull_command": { + "en-US": "Pull Command", + "zh-CN": "Pull 命令" + }, + "image_details" : { + "en-US": "Image Details", + "zh-CN": "镜像详细信息" + }, + "add_members" : { + "en-US": "Add Members", + "zh-CN": "添加成员" + }, + "edit_members" : { + "en-US": "Edit Members", + "zh-CN": "编辑成员" + }, + "add_member_failed" : { + "en-US": "Add Member Failed", + "zh-CN": "添加成员失败" + }, + "please_input_username" : { + "en-US": "Please input a username.", + "zh-CN": "请输入用户名。" + }, + "please_assign_a_role_to_user" : { + "en-US": "Please assign a role to the user.", + "zh-CN": "请为用户分配角色。" + }, + "user_id_exists" : { + "en-US": "User ID exists.", + "zh-CN": "用户ID已存在。" + }, + "user_id_does_not_exist" : { + "en-US": "User ID does not exist.", + "zh-CN": "不存在此用户ID。" + }, + "insuffient_authority" : { + "en-US": "Insufficient authority.", + "zh-CN": "权限不足。" + }, + "operation_failed" : { + "en-US": "Operation Failed", + "zh-CN": "操作失败" + }, + "network_error" : { + "en-US": "Network Error", + "zh-CN": "网络故障" + }, + "network_error_description" : { + "en-US": "Network error, please contact sysadmin.", + "zh-CN": "网络故障, 请联系系统管理员。" + }, + "button_on" : { + "en-US": "On", + "zh-CN": "打开" + }, + "button_off" : { + "en-US": "Off", + "zh-CN": "关闭" + } +}; \ No newline at end of file diff --git a/static/i18n/locale_zh-CN.ini b/static/i18n/locale_zh-CN.ini new file mode 100644 index 000000000..3c154c1d6 --- /dev/null +++ b/static/i18n/locale_zh-CN.ini @@ -0,0 +1,82 @@ +page_title_index = Harbor +page_title_sign_in = 登录 - Harbor +page_title_project = 项目 - Harbor +page_title_item_details = 详细信息 - Harbor +page_title_registration = 注册 - Harbor +page_title_forgot_password = 忘记密码 - Harbor +title_forgot_password = 忘记密码 +page_title_reset_password = 重置密码 - Harbor +title_reset_password = 重置密码 +page_title_change_password = 修改密码 - Harbor +title_change_password = 修改密码 +page_title_search = 搜索 - Harbor +sign_in = 登录 +sign_up = 注册 +log_out = 注销 +search_placeholder = 搜索项目或镜像资源 +change_password = 修改密码 +username_email = 
用户名/邮箱 +password = 密码 +forgot_password = 忘记密码 +welcome = 欢迎 +my_projects = 我的项目 +public_projects = 公开项目 +admin_options = 管理员选项 +project_name = 项目名称 +creation_time = 创建时间 +publicity = 公开 +add_project = 新增项目 +check_for_publicity = 选中此项会使该项目对其他人公开。 +button_save = 保存 +button_close = 关闭 +button_submit = 提交 +username = 用户名 +email = 邮箱 +system_admin = 系统管理员 +dlg_button_ok = 确定 +dlg_button_cancel = 取消 +registration = 注册 +username_description = 在此填入登录时的用户名 +email_description = 您将定期通过邮件收到相关信息,我们保证不会将此邮箱告知他人。 +full_name = 全名 +full_name_description = 姓氏和名称 +password_description = 至少输入7个字符且包含1个小写字母,1个大写字母和数字。 +confirm_password = 确认密码 +note_to_the_admin = 留言 +old_password = 原密码 +new_password = 新密码 +forgot_password_description = 输入您注册时使用的邮箱,我们将发送重置邮件链接到此邮箱。 +projects = 项目 +repositories = 镜像资源 +search = 搜索 +home = 首页 +project = 项目 +owner = 所有者 +repo = 镜像 +user = 用户 +logs = 日志 +repo_name = 镜像名称 +add_members = 添加成员 +role_info = 角色信息 +operation = 操作 +advance = 高级检索 +all = 全部 +others = 其他 +start_date = 开始日期 +end_date = 结束日期 +timestamp = 时间戳 +role = 角色 +reset_email_hint = 请点击下面的链接进行重置密码操作 +reset_email_subject = 重置您的密码 +language = 中文 +language_en-US = English +language_zh-CN = 中文 +copyright = 版权所有 +all_rights_reserved = 保留所有权利。 +container_registry_management = 容器注册管理 +index_desc = Harbor是用于构建企业级、可信赖的容器注册管理系统。企业用户通过搭建Harbor,创建私有容器注册服务,提高生产力和安全管控。Harbor可同时应用于生产或开发环境。 +index_desc_0 = 使用Harbor的关键优势在于: +index_desc_1 = 1. 安全: 确保知识产权在自己组织的管控之下。 +index_desc_2 = 2. 效率: 搭建于组织内部网络的私有容器注册系统可明显降低访问公共容器注册服务的带宽损耗。 +index_desc_3 = 3. 访问控制: 提供基于角色的访问控制,可集成企业目前拥有的用户管理系统(如:AD/LDAP)。 +index_desc_4 = 4. 审计: 所有访问注册服务的行为均被记录且可被用于日后审计。 diff --git a/static/resources/css/base.css b/static/resources/css/base.css new file mode 100644 index 000000000..0f8b9bd14 --- /dev/null +++ b/static/resources/css/base.css @@ -0,0 +1,39 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +.footer { + margin-top: 60px; + width: 100%; + /* Set the fixed height of the footer here */ + height: 60px; +} +.div-height { + overflow-y: auto; +} +.blue { + color: #428bca; +} +sub { + font-size: 6pt; + color: blue; +} + +.table-responsive { + height: 430px; + overflow-y: auto; +} + +table > tbody > tr > td { + vertical-align: bottom; +} \ No newline at end of file diff --git a/static/resources/css/sign-in.css b/static/resources/css/sign-in.css new file mode 100644 index 000000000..2bea229cf --- /dev/null +++ b/static/resources/css/sign-in.css @@ -0,0 +1,27 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +body { + background-color: #fff; +} + +.form-signin { + padding-top: 40px; + padding-bottom: 40px; + max-width: 380px; + margin: 10% auto; +} + + + diff --git a/static/resources/js/change-password.js b/static/resources/js/change-password.js new file mode 100644 index 000000000..a09e298b3 --- /dev/null +++ b/static/resources/js/change-password.js @@ -0,0 +1,98 @@ +/* + Copyright (c) 2016 VMware, Inc. 
All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +jQuery(function(){ + + new AjaxUtil({ + url: "/api/users/current", + type: "get", + success: function(data, status, xhr){}, + errors: { + 403: "" + }, + error: function(jqXhr){ + if(jqXhr && jqXhr.status == 401){ + document.location = "/signIn"; + } + } + }).exec(); + + $("#divErrMsg").css({"display": "none"}); + + $("#OldPassword,#Password,#ConfirmedPassword").on("blur", validateCallback); + validateOptions.Items = ["#OldPassword", "#Password", "#ConfirmedPassword"]; + + function bindEnterKey(){ + $(document).on("keydown", function(e){ + if(e.keyCode == 13){ + e.preventDefault(); + if($("#txtCommonSearch").is(":focus")){ + document.location = "/search?q=" + $("#txtCommonSearch").val(); + }else{ + $("#btnSubmit").trigger("click"); + } + } + }); + } + function unbindEnterKey(){ + $(document).off("keydown"); + } + bindEnterKey(); + + var spinner = new Spinner({scale:1}).spin(); + + $("#btnSubmit").on("click", function(){ + validateOptions.Validate(function(){ + var oldPassword = $("#OldPassword").val(); + var password = $("#Password").val(); + $.ajax({ + "url": "/updatePassword", + "type": "post", + "data": {"old_password": oldPassword, "password" : password}, + "beforeSend": function(e){ + unbindEnterKey(); + $("h1").append(spinner.el); + $("#btnSubmit").prop("disabled", true); + }, + "success": function(data, status, xhr){ + if(xhr && xhr.status == 200){ + $("#dlgModal") + .dialogModal({ + "title": 
i18n.getMessage("title_change_password"), + "content": i18n.getMessage("change_password_successfully"), + "callback": function(){ + window.close(); + } + }); + } + }, + "error": function(jqXhr, status, error){ + $("#dlgModal") + .dialogModal({ + "title": i18n.getMessage("title_change_password"), + "content": i18n.getMessage(jqXhr.responseText), + "callback": function(){ + bindEnterKey(); + return; + } + }); + }, + "complete": function(){ + spinner.stop(); + $("#btnSubmit").prop("disabled", false); + } + }); + }); + }); +}); \ No newline at end of file diff --git a/static/resources/js/common.js b/static/resources/js/common.js new file mode 100644 index 000000000..850d1b9ab --- /dev/null +++ b/static/resources/js/common.js @@ -0,0 +1,160 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +var AjaxUtil = function(params){ + + this.url = params.url; + this.data = params.data; + this.dataRaw = params.dataRaw; + this.type = params.type; + this.errors = params.errors || {}; + + this.success = params.success; + this.complete = params.complete; + this.error = params.error; + +}; + +AjaxUtil.prototype.exec = function(){ + + var self = this; + + return $.ajax({ + url: self.url, + contentType: (self.dataRaw ? 
"application/x-www-form-urlencoded; charset=UTF-8" : "application/json; charset=utf-8"), + data: JSON.stringify(self.data) || self.dataRaw, + type: self.type, + dataType: "json", + success: function(data, status, xhr){ + if(self.success != null){ + self.success(data, status, xhr); + } + }, + complete: function(jqXhr, status) { + if(self.complete != null){ + self.complete(jqXhr, status); + } + }, + error: function(jqXhr){ + if(self.error != null){ + self.error(jqXhr); + }else{ + var errorMessage = self.errors[jqXhr.status] || jqXhr.responseText; + if(jqXhr.status == 401){ + var lastUri = location.pathname + location.search; + if(lastUri != ""){ + document.location = "/signIn?uri=" + encodeURIComponent(lastUri); + }else{ + document.location = "/signIn"; + } + }else if($.trim(errorMessage).length > 0){ + $("#dlgModal").dialogModal({"title": i18n.getMessage("operation_failed"), "content": errorMessage}); + } + } + } + }); +}; + +var SUPPORT_LANGUAGES = { + "en-US": "English", + "zh-CN": "Chinese" +}; + +var DEFAULT_LANGUAGE = "en-US"; + +var I18n = function(messages) { + this.messages = messages; +}; + +I18n.prototype.isSupportLanguage = function(lang){ + return (lang in SUPPORT_LANGUAGES); +} + +I18n.prototype.getLocale = function(){ + var lang = $("#currentLanguage").val(); + if(this.isSupportLanguage(lang)){ + return lang; + }else{ + return DEFAULT_LANGUAGE; + } +}; + +I18n.prototype.getMessage = function(key){ + return this.messages[key][this.getLocale()]; +}; + +var i18n = new I18n(global_messages); + +moment().locale(i18n.getLocale()); + +jQuery(function(){ + + $("#aLogout").on("click", function(){ + new AjaxUtil({ + url:'/logout', + dataRaw:{"timestamp" : new Date().getTime()}, + type: "get", + complete: function(jqXhr){ + if(jqXhr && jqXhr.status == 200){ + document.location = "/"; + } + } + }).exec(); + }); + + $.fn.dialogModal = function(options){ + var settings = $.extend({ + title: '', + content: '', + text: false, + callback: null, + enableCancel: false, + 
}, options || {}); + + if(settings.enableCancel){ + $("#dlgCancel").show(); + $("#dlgCancel").on("click", function(){ + $(self).modal('close'); + }); + } + + var self = this; + $("#dlgLabel", self).text(settings.title); + + if(options.text){ + $("#dlgBody", self).html(settings.content); + }else if(typeof settings.content == "object"){ + $(".modal-dialog", self).addClass("modal-lg"); + var lines = ['
']; + for(var item in settings.content){ + lines.push('
'+ + '' + + '

' + settings.content[item] + '

' + + '
'); + } + lines.push('
'); + $("#dlgBody", self).html(lines.join("")); + }else{ + $(".modal-dialog", self).removeClass("modal-lg"); + $("#dlgBody", self).text(settings.content); + } + + if(settings.callback != null){ + $("#dlgConfirm").on("click", function(){ + settings.callback(); + }); + } + $(self).modal('show'); + } +}); \ No newline at end of file diff --git a/static/resources/js/forgot-password.js b/static/resources/js/forgot-password.js new file mode 100644 index 000000000..b7c4be9e6 --- /dev/null +++ b/static/resources/js/forgot-password.js @@ -0,0 +1,84 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/
// Forgot-password page: validates the email field, then asks the backend to
// send a password-reset email and reports the outcome in a modal dialog.
jQuery(function(){

	// Hide the inline validation error area until a check fails.
	$("#divErrMsg").css({"display": "none"});

	// Only the email input participates in client-side validation.
	validateOptions.Items = ["#EmailF"];

	// Enter submits the form — unless focus is in the global search box, in
	// which case Enter performs a site search instead.
	function bindEnterKey(){
		$(document).on("keydown", function(e){
			if(e.keyCode == 13){
				e.preventDefault();
				if($("#txtCommonSearch").is(":focus")){
					document.location = "/search?q=" + $("#txtCommonSearch").val();
				}else{
					$("#btnSubmit").trigger("click");
				}
			}
		});
	}
	// Detach the Enter handler while a request is in flight so the form
	// cannot be double-submitted from the keyboard.
	function unbindEnterKey(){
		$(document).off("keydown");
	}
	bindEnterKey();
	// Spinner shown next to the page heading during the request.
	var spinner = new Spinner({scale:1}).spin();

	$("#btnSubmit").on("click", function(){
		// validateOptions.Validate only invokes the callback when all
		// registered items pass validation.
		validateOptions.Validate(function(){
			var username = $("#UsernameF").val();
			var email = $("#EmailF").val();
			$.ajax({
				"url":"/sendEmail",
				"type": "get",
				"data": {"username": username, "email": email},
				"beforeSend": function(e){
					// Lock the UI for the duration of the request.
					unbindEnterKey();
					$("h1").append(spinner.el);
					$("#btnSubmit").prop("disabled", true);
				},
				"success": function(data, status, xhr){
					if(xhr && xhr.status == 200){
						// Success: tell the user and return to the home page
						// when the dialog is confirmed.
						$("#dlgModal")
							.dialogModal({
								"title": i18n.getMessage("title_forgot_password"),
								"content": i18n.getMessage("email_has_been_sent"),
								"callback": function(){
									document.location="/";
								}
							});
					}

				},
				"complete": function(){
					// Always restore the UI, whether the call succeeded or not.
					spinner.stop();
					$("#btnSubmit").prop("disabled", false);
				},
				"error": function(jqXhr, status, error){
					if(jqXhr){
						// The server responds with a message key; look up its
						// localized text. Re-enable Enter so the user can retry.
						$("#dlgModal")
							.dialogModal({
								"title": i18n.getMessage("title_forgot_password"),
								"content": i18n.getMessage(jqXhr.responseText),
								"callback": function(){
									bindEnterKey();
									return;
								}
							});
					}
				}
			});
		});
	});
});
 You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
*/
// Header controls for anonymous (not signed-in) visitors: shows the
// sign-up button and wires the sign-in / sign-up / search interactions.
jQuery(function(){
	// The button is rendered hidden; reveal it once the script runs.
	$("#btnSignUp").css({"visibility": "visible"});

	// Enter in the global search box triggers a site search; Enter anywhere
	// else is swallowed (there is no form to submit on this header).
	$(document).on("keydown", function(e){
		if(e.keyCode == 13){
			e.preventDefault();
			if($("#txtCommonSearch").is(":focus")){
				document.location = "/search?q=" + $("#txtCommonSearch").val();
			}
		}
	});
	// Navigate to the sign-in page.
	$("#btnSignIn").on("click", function(){
		document.location = "/signIn";
	});
	// Navigate to the registration page.
	$("#btnSignUp").on("click", function(){
		document.location = "/register";
	});
});
+*/ +jQuery(function(){ + + $.when( + new AjaxUtil({ + url: "/api/users/current", + type: "get", + error: function(jqXhr){ + if(jqXhr){ + if(jqXhr.status == 403){ + return false; + } + } + } + }).exec() + ).then(function(){ + noNeedToLoginCallback(); + needToLoginCallback(); + }).fail(function(){ + noNeedToLoginCallback(); + }); + + function noNeedToLoginCallback(){ + + $("#tabItemDetail a:first").tab("show"); + $("#btnFilterOption button:first").addClass("active"); + $("#divErrMsg").hide(); + + if($("#public").val() == 1){ + $("#tabItemDetail li:eq(1)").hide(); + $("#tabItemDetail li:eq(2)").hide(); + } + + listRepo($("#repoName").val()); + + function listRepo(repoName){ + + $("#divErrMsg").hide(); + + new AjaxUtil({ + url: "/api/repositories?project_id=" + $("#projectId").val() + "&q=" + repoName, + type: "get", + success: function(data, status, xhr){ + if(xhr && xhr.status == 200){ + $("#accordionRepo").children().remove(); + if(data == null){ + $("#divErrMsg").show(); + $("#divErrMsg center").html(i18n.getMessage("no_repo_exists")); + return; + } + $.each(data, function(i, e){ + var targetId = e.replace(/\//g, "------"); + var row = '
' + + '' + + '
' + + '
' + + '
' + + '' + + '' + + '' + + '' + + '' + + '' + + '' + + '' + + '' + + '
' + i18n.getMessage("tag")+ ' ' + i18n.getMessage("pull_command") + '
' + '
' + + '
' + + '
' + + '
'; + $("#accordionRepo").append(row); + }); + if(repoName != ""){ + $("#txtRepoName").val(repoName); + $("#accordionRepo #heading0 a").trigger("click"); + } + } + } + }).exec(); + } + $("#btnSearchRepo").on("click", function(){ + listRepo($.trim($("#txtRepoName").val())); + }); + + $('#accordionRepo').on('show.bs.collapse', function (e) { + $('#accordionRepo .in').collapse('hide'); + var targetId = $(e.target).attr("targetId"); + var repoName = targetId.replace(/------/g, "/"); + new AjaxUtil({ + url: "/api/repositories/tags?repo_name=" + repoName, + type: "get", + success: function(data, status, xhr){ + $('#' + targetId +' table tbody tr').remove(); + var row = []; + for(var i in data){ + var tagName = data[i] + row.push('' + tagName + ''); + } + $('#' + targetId +' table tbody').append(row.join("")); + $('#' + targetId +' table tbody tr a').on("click", function(e){ + var imageId = $(this).attr("imageId"); + var repoName = $(this).attr("repoName"); + new AjaxUtil({ + url: "/api/repositories/manifests?repo_name=" + repoName + "&tag=" + imageId, + type: "get", + success: function(data, status, xhr){ + if(data){ + for(var i in data){ + if(data[i] == ""){ + data[i] = "N/A"; + } + } + data.Created = data.CreatedStr; + delete data.CreatedStr; + + $("#dlgModal").dialogModal({"title": i18n.getMessage("image_details"), "content": data}); + } + } + }).exec(); + }); + } + }).exec(); + }); + } + + function needToLoginCallback(){ + + var hasAuthorization = false; + + $.when( + new AjaxUtil({ + url: "/api/projects/" + $("#projectId").val() + "/members/current", + type: "get", + success: function(data, status, xhr){ + if(xhr && xhr.status == 200 && data.roles.length > 0){ + hasAuthorization = true; + } + } + }).exec()) + .done(function(){ + + if(!hasAuthorization) return false; + + $("#tabItemDetail a:eq(1)").css({"visibility": "visible"}); + $("#tabItemDetail a:eq(2)").css({"visibility": "visible"}); + + $(".glyphicon .glyphicon-pencil", "#tblUser").on("click", function(e){ + 
$("#txtUserName").hide(); + $("#lblUserName").show(); + $("#dlgUserTitle").text(i18n.getMessage("edit_members")); + }); + + $("#btnAddUser").on("click", function(){ + $("#operationType").val("add"); + $("#spnSearch").show(); + $("#txtUserName").prop("disabled", false) + $("#txtUserName").val(""); + $("#lstRole input[name=chooseRole]:radio").prop("checked", false); + $("#dlgUserTitle").text(i18n.getMessage("add_members")); + }); + + $("#btnSave").on("click", function(){ + + var username = $("#txtUserName").val(); + if($.trim(username).length == 0){ + $("#dlgModal").dialogModal({"title": i18n.getMessage("add_member_failed"), "content": i18n.getMessage("please_input_username")}); + return; + } + var projectId = $("#projectId").val(); + var operationType = $("#operationType").val(); + var userId = $("#editUserId").val(); + + var checkedRole = $("#lstRole input[name='chooseRole']:checked") + if(checkedRole.length == 0){ + $("#dlgModal").dialogModal({"title": i18n.getMessage("add_member_failed"), "content": i18n.getMessage("please_assign_a_role_to_user")}); + return; + } + + var checkedRoleItemList = []; + $.each(checkedRole, function(i, e){ + checkedRoleItemList.push(new Number($(this).val())); + }); + + var ajaxOpts = {}; + if(operationType == "add"){ + ajaxOpts.url = "/api/projects/" + projectId + "/members/"; + ajaxOpts.type = "post"; + ajaxOpts.data = {"roles" : checkedRoleItemList, "user_name": username}; + }else if(operationType == "edit"){ + ajaxOpts.url = "/api/projects/" + projectId + "/members/" + userId; + ajaxOpts.type = "put"; + ajaxOpts.data = {"roles" : checkedRoleItemList}; + } + + new AjaxUtil({ + url: ajaxOpts.url, + data: ajaxOpts.data, + type: ajaxOpts.type, + complete: function(jqXhr, status){ + if(jqXhr && jqXhr.status == 200){ + $("#btnClose").trigger("click"); + listUser(null); + } + }, + errors: { + 404: i18n.getMessage("user_id_does_not_exist"), + 409: i18n.getMessage("user_id_exists"), + 403: i18n.getMessage("insuffient_authority") + } + 
}).exec(); + }); + + var name_mapping = { + "projectAdmin": "Project Admin", + "developer": "Developer", + "guest": "Guest" + } + + function listUserByProjectCallback(userList){ + var loginedUserId = $("#userId").val(); + var loginedUserRoleId = $("#roleId").val(); + var ownerId = $("#ownerId").val(); + + $("#tblUser tbody tr").remove(); + for(var i = 0; i < userList.length; ){ + + var userId = userList[i].UserId; + var roleId = userList[i].RoleId; + var username = userList[i].Username; + var roleNameList = []; + + for(var j = i; j < userList.length; i++, j++){ + if(userList[j].UserId == userId){ + roleNameList.push(name_mapping[userList[j].Rolename]); + }else{ + break; + } + } + + var row = '' + + '' + username + '' + + '' + roleNameList.join(",") + '' + + ''; + var isShowOperations = true; + if(loginedUserRoleId >= 3 /*role: developer guest*/){ + isShowOperations = false; + }else if(ownerId == userId){ + isShowOperations = false; + }else if (loginedUserId == userId){ + isShowOperations = false; + } + if(isShowOperations){ + row += ' ' + + ''; + } + + row += ''; + $("#tblUser tbody").append(row); + + } + } + + function searchAccessLogCallback(LogList){ + $("#tabOperationLog tbody tr").remove(); + $.each(LogList || [], function(i, e){ + $("#tabOperationLog tbody").append( + '' + + '' + e.Username + '' + + '' + e.RepoName + '' + + '' + e.Operation + '' + + '' + moment(new Date(e.OpTime)).format("YYYY-MM-DD HH:mm:ss") + '' + + ''); + }); + } + + function getUserRoleCallback(userId){ + new AjaxUtil({ + url: "/api/projects/" + $("#projectId").val() + "/members/" + userId, + type: "get", + success: function(data, status, xhr){ + var user = data; + $("#operationType").val("edit"); + $("#editUserId").val(user.user_id); + $("#spnSearch").hide(); + $("#txtUserName").val(user.user_name); + $("#txtUserName").prop("disabled", true); + $("#btnSave").removeClass("disabled"); + $("#dlgUserTitle").text(i18n.getMessage("edit_members")); + $("#lstRole 
input[name=chooseRole]:radio").not('[value=' + user.role_id + ']').prop("checked", false) + $.each(user.roles, function(i, e){ + $("#lstRole input[name=chooseRole]:radio").filter('[value=' + e.role_id + ']').prop("checked", "checked"); + }); + } + }).exec(); + } + function listUser(username){ + $.when( + new AjaxUtil({ + url: "/api/projects/" + $("#projectId").val() + "/members?username=" + (username == null ? "" : username), + type: "get", + errors: { + 403: "" + }, + success: function(data, status, xhr){ + return data || []; + } + }).exec() + ).done(function(userList){ + listUserByProjectCallback(userList || []); + $("#tblUser .glyphicon-pencil").on("click", function(e){ + var userId = $(this).attr("userid") + getUserRoleCallback(userId); + }); + $("#tblUser .glyphicon-trash").on("click", function(){ + var roleId = $(this).attr("roleid"); + var userId = $(this).attr("userid"); + new AjaxUtil({ + url: "/api/projects/" + $("#projectId").val() + "/members/" + userId, + type: "delete", + complete: function(jqXhr, status){ + if(jqXhr && jqXhr.status == 200){ + listUser(null); + } + } + }).exec(); + }); + }); + } + listUser(null); + listOperationLogs(); + + function listOperationLogs(){ + var projectId = $("#projectId").val(); + + $.when( + new AjaxUtil({ + url : "/api/projects/" + projectId + "/logs", + dataRaw: {"timestamp": new Date().getTime()}, + type: "get", + success: function(data){ + return data || []; + } + }).exec() + ).done(function(operationLogs){ + searchAccessLogCallback(operationLogs); + }); + } + + $("#btnSearchUser").on("click", function(){ + var username = $("#txtSearchUser").val(); + if($.trim(username).length == 0){ + username = null; + } + listUser(username); + }); + + $("#btnFilterLog").on("click", function(){ + + var projectId = $("#projectId").val(); + var username = $("#txtSearchUserName").val(); + + var beginTime = ""; + var endTime = ""; + + if($("#begindatepicker").val() != ""){ + beginTime = moment(new 
Date($("#begindatepicker").val())).format("YYYY-MM-DD"); + } + if($("#enddatepicker").val() != ""){ + endTime = moment(new Date($("#enddatepicker").val())).format("YYYY-MM-DD"); + } + + new AjaxUtil({ + url: "/api/projects/" + projectId + "/logs/filter", + data:{"username":username, "project_id" : projectId, "keywords" : getKeyWords() , "beginTime" : beginTime, "endTime" : endTime}, + type: "post", + success: function(data, status, xhr){ + if(xhr && xhr.status == 200){ + searchAccessLogCallback(data); + } + } + }).exec(); + }); + + $("#spnFilterOption input[name=chkAll]").on("click", function(){ + $("#spnFilterOption input[name=chkOperation]").prop("checked", $(this).prop("checked")); + }); + + function getKeyWords(){ + var keywords = ""; + var checkedItemList=$("#spnFilterOption input[name=chkOperation]:checked"); + var keywords = []; + $.each(checkedItemList, function(i, e){ + var itemValue = $(e).val(); + if(itemValue == "others" && $.trim($("#txtOthers").val()).length > 0){ + keywords.push($("#txtOthers").val()); + }else{ + keywords.push($(e).val()); + } + }); + return keywords.join("/"); + } + + $('#datetimepicker1').datetimepicker({ + locale: i18n.getLocale(), + ignoreReadonly: true, + format: 'L', + showClear: true + }); + $('#datetimepicker2').datetimepicker({ + locale: i18n.getLocale(), + ignoreReadonly: true, + format: 'L', + showClear: true + }); + }); + } + + $(document).on("keydown", function(e){ + if(e.keyCode == 13){ + e.preventDefault(); + if($("#tabItemDetail li:eq(0)").is(":focus") || $("#txtRepoName").is(":focus")){ + $("#btnSearchRepo").trigger("click"); + }else if($("#tabItemDetail li:eq(1)").is(":focus") || $("#txtSearchUser").is(":focus")){ + $("#btnSearchUser").trigger("click"); + }else if($("#tabItemDetail li:eq(2)").is(":focus") || $("#txtSearchUserName").is(":focus")){ + $("#btnFilterLog").trigger("click"); + }else if($("#txtUserName").is(":focus") || $("#lstRole :radio").is(":focus")){ + $("#btnSave").trigger("click"); + } + } + }); +}) 
diff --git a/static/resources/js/login.js b/static/resources/js/login.js new file mode 100644 index 000000000..9b0bfda2b --- /dev/null +++ b/static/resources/js/login.js @@ -0,0 +1,31 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +jQuery(function(){ + + new AjaxUtil({ + url: "/api/users/current", + type: "get", + success: function(data, status, xhr){ + if(xhr && xhr.status == 200){ + document.location = "/registry/project"; + } + }, + error: function(jqXhr){ + if(jqXhr.status == 401) + return false; + } + }).exec(); + +}); \ No newline at end of file diff --git a/static/resources/js/project.js b/static/resources/js/project.js new file mode 100644 index 000000000..527dd950b --- /dev/null +++ b/static/resources/js/project.js @@ -0,0 +1,234 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +jQuery(function(){ + + new AjaxUtil({ + url: "/api/users/current", + type: "get", + success: function(data, status, xhr){ + if(xhr && xhr.status == 200){ + if(data.HasAdminRole == 1) { + renderForAdminRole(); + } + renderForAnyRole(); + } + } + }).exec(); + + function renderForAnyRole(){ + $("#tabProject a:first").tab("show"); + + $(document).on("keydown", function(e){ + if(e.keyCode == 13){ + e.preventDefault(); + if($("#tabProject li:eq(0)").is(":focus") || $("#txtSearchProject").is(":focus")){ + $("#btnSearch").trigger("click"); + }else if($("#tabProject li:eq(1)").is(":focus") || $("#txtSearchPublicProjects").is(":focus")){ + $("#btnSearchPublicProjects").trigger("click"); + }else if($("#tabProject li:eq(1)").is(":focus") || $("#txtSearchUsername").is(":focus")){ + $("#btnSearchUsername").trigger("click"); + }else if($("#dlgAddProject").is(":focus") || $("#projectName").is(":focus")){ + $("#btnSave").trigger("click"); + } + } + }); + + function listProject(projectName, isPublic){ + currentPublic = isPublic; + $.when( + new AjaxUtil({ + url: "/api/projects?is_public=" + isPublic + "&project_name=" + (projectName == null ? "" : projectName) + "×tamp=" + new Date().getTime(), + type: "get", + success: function(data, status, xhr){ + $("#tblProject tbody tr").remove(); + $.each(data || [], function(i, e){ + var row = '' + + '' + e.Name + '' + + '' + moment(new Date(e.CreationTime)).format("YYYY-MM-DD HH:mm:ss") + ''; + if(e.Public == 1 && e.Togglable){ + row += '' + } else if (e.Public == 1) { + row += ''; + } else if (e.Public == 0 && e.Togglable) { + row += ''; + } else if (e.Public == 0) { + row += ''; + row += ''; + } + $("#tblProject tbody").append(row); + }); + } + }).exec()) + .done(function() { + $("#tblProject tbody tr :button").on("click", function(){ + var projectId = $(this).attr("projectid"); + var self = this; + new AjaxUtil({ + url: "/api/projects/" + projectId, + data: {"public": ($(self).hasClass("btn-success") ? 
false : true)}, + type: "put", + complete: function(jqXhr, status) { + if($(self).hasClass("btn-success")){ + $(self).removeClass("btn-success").addClass("btn-danger"); + $(self).html(i18n.getMessage("button_off")); + }else{ + $(self).removeClass("btn-danger").addClass("btn-success"); + $(self).html(i18n.getMessage("button_on")); + } + } + }).exec(); + }); + }); + } + listProject(null, 0); + var currentPublic = 0; + + $("#tabProject a:eq(0)").on("click", function(){ + $("#btnAddProject").css({"visibility": "visible"}); + listProject(null, 0); + }); + + $("#tabProject a:eq(1)").on("click", function(){ + $("#btnAddProject").css({"visibility": "hidden"}); + listProject(null, 1); + }); + + $("#divErrMsg").css({"display": "none"}); + validateOptions.Items.push("#projectName"); + + $('#dlgAddProject').on('hide.bs.modal', function () { + $("#divErrMsg").css({"display": "none"}); + $("#projectName").val(""); + $("#projectName").parent().removeClass("has-feedback").removeClass("has-error").removeClass("has-success"); + $("#projectName").siblings("span").removeClass("glyphicon-warning-sign").removeClass("glyphicon-ok"); + }); + $('#dlgAddProject').on('show.bs.modal', function () { + $("#divErrMsg").css({"display": "none"}); + $("#projectName").val(""); + $("#projectName").parent().addClass("has-feedback"); + $("#projectName").siblings("span").removeClass("glyphicon-warning-sign").removeClass("glyphicon-ok"); + $("#isPublic").prop('checked', false); + }); + + $("#btnSave").on("click", function(){ + validateOptions.Validate(function() { + new AjaxUtil({ + url: "/api/projects", + data: {"project_name" : $.trim($("#projectName").val()), "public":$("#isPublic").prop('checked'), "timestamp" : new Date().getTime()}, + type: "post", + errors: { + 409: i18n.getMessage("project_exists") + }, + complete: function(jqXhr, status){ + $("#btnClose").trigger("click"); + listProject(null, currentPublic); + } + }).exec(); + }); + }); + + $("#btnSearch").on("click", function(){ + var 
projectName = $("#txtSearchProject").val(); + if($.trim(projectName).length == 0){ + projectName = null; + } + listProject(projectName, currentPublic); + }); + } + + + function renderForAdminRole(){ + $("#liAdminOption").css({"visibility": "visible"}); + $("#tabAdminOption").css({"visibility": "visible"}); + function listUserAdminRole(searchUsername){ + $.when( + new AjaxUtil({ + url: "/api/users?username=" + (searchUsername == null ? "" : searchUsername), + type: "get", + success: function(data){ + $("#tblUser tbody tr").remove(); + $.each(data || [], function(i, e){ + var row = '' + + '' + e.Username + '' + + '' + e.Email + ''; + if(e.HasAdminRole == 1){ + row += ''; + } else { + row += ''; + } + row += ''; + row += ''; + $("#tblUser tbody").append(row); + }); + } + }).exec() + ).done(function(){ + $("#tblUser tbody tr :button").on("click",function(){ + var userId = $(this).attr("userid"); + var self = this; + new AjaxUtil({ + url: "/api/users/" + userId, + type: "put", + complete: function(jqXhr, status){ + if(jqXhr && jqXhr.status == 200){ + if($(self).hasClass("btn-success")){ + $(self).removeClass("btn-success").addClass("btn-danger"); + $(self).html(i18n.getMessage("button_off")); + }else{ + $(self).removeClass("btn-danger").addClass("btn-success"); + $(self).html(i18n.getMessage("button_on")); + } + } + } + }).exec(); + }); + $("#tblUser tbody tr").on("mouseover", function(){ + $(".tdDeleteUser", this).css({"visibility":"visible"}); + }).on("mouseout", function(){ + $(".tdDeleteUser", this).css({"visibility":"hidden"}); + }); + $("#tblUser tbody tr .tdDeleteUser").on("click", function(){ + var userId = $(this).attr("userid"); + $("#dlgModal") + .dialogModal({ + "title": i18n.getMessage("delete_user"), + "content": i18n.getMessage("are_you_sure_to_delete_user") + $(this).attr("username") + " ?", + "enableCancel": true, + "callback": function(){ + new AjaxUtil({ + url: "/api/users/" + userId, + type: "delete", + complete: function(jqXhr, status){ + if(jqXhr 
&& jqXhr.status == 200){ + $("#btnSearchUsername").trigger("click"); + } + }, + error: function(jqXhr){} + }).exec(); + } + }); + }); + }); + } + listUserAdminRole(null); + $("#btnSearchUsername").on("click", function(){ + var username = $("#txtSearchUsername").val(); + if($.trim(username).length == 0){ + username = null; + } + listUserAdminRole(username); + }); + } +}) diff --git a/static/resources/js/register.js b/static/resources/js/register.js new file mode 100644 index 000000000..cbd133671 --- /dev/null +++ b/static/resources/js/register.js @@ -0,0 +1,74 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +jQuery(function(){ + $("#divErrMsg").css({"display": "none"}); + + $(document).on("keydown", function(e){ + + if(e.keyCode == 13){ + e.preventDefault(); + if(!$("#txtCommonSearch").is(":focus")){ + $("#btnPageSignUp").trigger("click"); + } + } + }); + + $("#Username,#Email,#Realname,#Password,#ConfirmedPassword").on("blur", validateCallback); + validateOptions.Items = ["#Username","#Email","#Realname","#Password","#ConfirmedPassword"]; + + $("#btnPageSignUp").on("click", function(){ + validateOptions.Validate(function() { + var username = $.trim($("#Username").val()); + var email = $.trim($("#Email").val()); + var password = $.trim($("#Password").val()); + var confirmedPassword = $.trim($("#ConfirmedPassword").val()); + var realname = $.trim($("#Realname").val()); + var comment = $.trim($("#Comment").val()); + $.ajax({ + url : '/signUp', + data:{username: username, password: password, realname: realname, comment: comment, email: email}, + type: "POST", + beforeSend: function(e){ + $("#btnPageSignUp").prop("disabled", true); + }, + success: function(data, status, xhr){ + if(xhr && xhr.status == 200){ + $("#dlgModal") + .dialogModal({ + "title": i18n.getMessage("title_sign_up"), + "content": i18n.getMessage("registered_successfully"), + "callback": function(){ + document.location = "/signIn"; + } + }); + } + }, + error:function(jqxhr, status, error){ + $("#dlgModal") + .dialogModal({ + "title": i18n.getMessage("title_sign_up"), + "content": i18n.getMessage("internal_error"), + "callback": function(){ + return; + } + }); + }, + complete: function(){ + $("#btnPageSignUp").prop("disabled", false); + } + }); + }); + }); +}); \ No newline at end of file diff --git a/static/resources/js/reset-password.js b/static/resources/js/reset-password.js new file mode 100644 index 000000000..0a3efc566 --- /dev/null +++ b/static/resources/js/reset-password.js @@ -0,0 +1,82 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. 
+ Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +jQuery(function(){ + $("#divErrMsg").css({"display": "none"}); + + $("#Password,#ConfirmedPassword").on("blur", validateCallback); + validateOptions.Items = ["#Password", "#ConfirmedPassword"]; + function bindEnterKey(){ + $(document).on("keydown", function(e){ + if(e.keyCode == 13){ + e.preventDefault(); + $("#btnSubmit").trigger("click"); + } + }); + } + function unbindEnterKey(){ + $(document).off("keydown"); + } + bindEnterKey(); + + + var spinner = new Spinner({scale:1}).spin(); + + $("#btnSubmit").on("click", function(){ + validateOptions.Validate(function(){ + var resetUuid = $("#resetUuid").val(); + var password = $("#Password").val(); + $.ajax({ + "url": "/reset", + "type": "post", + "data": {"reset_uuid": resetUuid, "password": password}, + "beforeSend": function(e){ + unbindEnterKey(); + $("h1").append(spinner.el); + $("#btnSubmit").prop("disabled", true); + }, + "success": function(data, status, xhr){ + if(xhr && xhr.status == 200){ + $("#dlgModal") + .dialogModal({ + "title": i18n.getMessage("title_reset_password"), + "content": i18n.getMessage("reset_password_successfully"), + "callback": function(){ + document.location="/signIn"; + } + }); + } + + }, + "complete": function(){ + spinner.stop(); + $("#btnSubmit").prop("disabled", false); + }, + "error": function(jqXhr, status, error){ + if(jqXhr){ + $("#dlgModal") + .dialogModal({ + "title": i18n.getMessage("title_reset_password"), + "content": 
i18n.getMessage(jqXhr.responseText), + "callback": function(){ + bindEnterKey(); + return; + } + }); + } + } + }); + }); + }); +}); \ No newline at end of file diff --git a/static/resources/js/search.js b/static/resources/js/search.js new file mode 100644 index 000000000..5284866f9 --- /dev/null +++ b/static/resources/js/search.js @@ -0,0 +1,79 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +jQuery(function(){ + + $(document).on("keydown", function(e){ + if(e.keyCode == 13){ + e.preventDefault(); + if($("#txtCommonSearch").is(":focus")){ + search($("#txtCommonSearch").val()); + } + } + }); + + var queryParam = $("#queryParam").val(); + $("#txtCommonSearch").val(queryParam); + + search(queryParam); + + function search(keyword){ + keyword = $.trim(keyword) + if(keyword.length > 0){ + $.ajax({ + url: "/api/search", + data: {"q": keyword}, + type: "get", + dataType: "json", + success: function(data, status, xhr){ + if(xhr && xhr.status == 200){ + $("#panelCommonSearchProjectsHeader").text(i18n.getMessage("projects") + " (" + data.project.length + ")"); + $("#panelCommonSearchRepositoriesHeader").text(i18n.getMessage("repositories") +" (" + data.repository.length + ")"); + + render($("#panelCommonSearchProjectsBody"), data.project, "project"); + render($("#panelCommonSearchRepositoriesBody"), data.repository, "repository"); + } + } + }); + } + } + + var Project = function(id, name, public){ + this.id = id; + this.name = 
name; + this.public = public; + } + + function render(element, data, discriminator){ + $(element).children().remove(); + $.each(data, function(i, e){ + var project, description, repoName; + switch(discriminator){ + case "project": + project = new Project(e.id, e.name, e.public); + description = project.name; + repoName = ""; + break; + case "repository": + project = new Project(e.project_id, e.project_name, e.project_public); + description = e.repository_name; + repoName = e.repository_name.substring(e.repository_name.lastIndexOf("/") + 1); + break; + } + if(project){ + $(element).append(''); + } + }); + } +}); \ No newline at end of file diff --git a/static/resources/js/sign-in.js b/static/resources/js/sign-in.js new file mode 100644 index 000000000..1602a6b04 --- /dev/null +++ b/static/resources/js/sign-in.js @@ -0,0 +1,79 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +jQuery(function(){ + + new AjaxUtil({ + url: "/api/users/current", + type: "get", + success: function(data, status, xhr){ + if(xhr && xhr.status == 200){ + document.location = "/registry/project"; + } + }, + error: function(jqxhr){ + return false; + } + }).exec(); + + $(document).on("keydown", function(e){ + if(e.keyCode == 13){ + e.preventDefault(); + if($("#Principal").is(":focus") || $("#Password").is(":focus") || $("#btnPageSignIn").is(":focus")){ + $("#btnPageSignIn").trigger("click"); + } + } + }); + $("#btnForgot").on("click", function(){ + document.location = "/forgotPassword"; + }); + + $("#btnPageSignIn").on("click", function(){ + + var principal = $.trim($("#Principal").val()); + var password = $.trim($("#Password").val()); + + if($.trim(principal).length <= 0 || $.trim(password).length <= 0) { + $("#dlgModal").dialogModal({"title": i18n.getMessage("title_login_failed"), "content": i18n.getMessage("input_your_username_and_password")}); + return; + } + + $.ajax({ + url:'/login', + data: {principal: principal, password: password}, + type: "post", + success: function(jqXhr, status){ + var lastUri = location.search; + if(lastUri != "" && lastUri.indexOf("=") > 0){ + document.location = decodeURIComponent(lastUri.split("=")[1]); + }else{ + document.location = "/registry/project"; + } + }, + error: function(jqXhr){ + var i18nKey = ""; + if(jqXhr.status == 500){ + i18nKey = "internal_error"; + }else{ + i18nKey = "check_your_username_or_password" + } + $("#dlgModal") + .dialogModal({ + "title": i18n.getMessage("title_login_failed"), + "content": i18n.getMessage(i18nKey) + }); + } + }); + }); +}); \ No newline at end of file diff --git a/static/resources/js/validate-options.js b/static/resources/js/validate-options.js new file mode 100644 index 000000000..34f28ead9 --- /dev/null +++ b/static/resources/js/validate-options.js @@ -0,0 +1,183 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. 
+ Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +var validateOptions = { + "Result" : [], + "Items" : [], + "Validate": function(callback){ + for(var i = 0; i < this.Items.length; i++){ + if(validateCallback(this.Items[i]) == false){ + return false; + } + } + callback(); + }, + "Username" :{ + "Required": { "value" : true, "errMsg" : i18n.getMessage("username_is_required")}, + "CheckExist": { "value" : function(value){ + var result = true; + $.ajax({ + url: "/userExists", + data: {"target": "username", "value" : value}, + dataType: "json", + type: "post", + async: false, + success: function(data){ + result = data; + } + }); + return result; + }, "errMsg" : i18n.getMessage("username_has_been_taken")}, + "MaxLength": {"value" : 20, "errMsg" : i18n.getMessage("username_is_too_long")}, + "IllegalChar": {"value": [",","~","#", "$", "%"] , "errMsg": i18n.getMessage("username_contains_illegal_chars")} + }, + "Email" :{ + "Required": { "value" : true, "errMsg" : i18n.getMessage("email_is_required")}, + "RegExp": {"value": /^(([^<>()[\]\\.,;:\s@\"]+(\.[^<>()[\]\\.,;:\s@\"]+)*)|(\".+\"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$/, + "errMsg": i18n.getMessage("email_contains_illegal_chars")}, + "CheckExist": { "value" : function(value){ + var result = true; + $.ajax({ + url: "/userExists", + data: {"target": "email", "value": value}, + dataType: "json", + type: "post", + async: false, + success: function(data){ + result = data; + } + }); + 
return result; + }, "errMsg" : i18n.getMessage("email_has_been_taken")} + }, + "EmailF" :{ + "Required": { "value" : true, "errMsg" : i18n.getMessage("email_is_required")}, + "RegExp": {"value": /^(([^<>()[\]\\.,;:\s@\"]+(\.[^<>()[\]\\.,;:\s@\"]+)*)|(\".+\"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$/, + "errMsg": i18n.getMessage("email_content_illegal")}, + "CheckIfNotExist": { "value" : function(value){ + var result = true; + $.ajax({ + url: "/userExists", + data: {"target": "email", "value": value}, + dataType: "json", + type: "post", + async: false, + success: function(data){ + result = data; + } + }); + return result; + }, "errMsg" : i18n.getMessage("email_does_not_exist")} + }, + "Realname" :{ + "Required": { "value" : true, "errMsg" : i18n.getMessage("realname_is_required")}, + "MaxLength": {"value" : 20, "errMsg" : i18n.getMessage("realname_is_too_long")}, + "IllegalChar": {"value": [",","~","#", "$", "%"] , "errMsg": i18n.getMessage("realname_contains_illegal_chars")} + }, + "OldPassword" :{ + "Required": { "value" : true, "errMsg" : i18n.getMessage("password_is_required")} + }, + "Password" :{ + "Required": { "value" : true, "errMsg" : i18n.getMessage("password_is_required")}, + "RegExp": {"value" : /^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{7,20}$/, "errMsg" : i18n.getMessage("password_is_invalid")}, + "MaxLength": {"value" : 20, "errMsg" : i18n.getMessage("password_is_too_long")} + }, + "ConfirmedPassword" :{ + "CompareWith": {"value" : "#Password", "errMsg" : i18n.getMessage("password_does_not_match")}, + "RegExp": {"value" : /^(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?!.*\s).{7,20}$/, "errMsg" : i18n.getMessage("password_is_invalid")} + }, + "Comment" :{ + "MaxLength": {"value" : 20, "errMsg" : i18n.getMessage("comment_is_too_long")}, + "IllegalChar": {"value": [",","~","#", "$", "%"] , "errMsg": i18n.getMessage("comment_contains_illegal_chars")} + }, + "projectName" :{ + "Required": { "value" : true, "errMsg" : 
// Validates the form field identified by `target` against the rules registered
// for its element id in the global `validateOptions` table.
//
// `target` is either a selector string or, when the function is bound as a
// jQuery event handler, the input element itself (via `this`).
//
// Supported rule keys: Required, CheckOldPasswordIsCorrect, CheckExist,
// CheckIfNotExist, RegExp, MinLength, MaxLength, CompareWith, IllegalChar.
// Rules are evaluated in declaration order and evaluation stops at the FIRST
// failing rule (the original code broke out of the loop unconditionally after
// an IllegalChar rule, silently skipping any rules declared after it — fixed
// here by only breaking on failure).
//
// Side effects: toggles Bootstrap has-error/has-success classes and the
// glyphicon on the field, shows/hides #divErrMsg, and pushes the outcome
// onto validateOptions.Result.
//
// Returns true when the value passes every rule, false otherwise.
function validateCallback(target) {

    // Event-handler invocation: `this` is the input element.
    if (typeof target !== "string") {
        target = this;
    }

    var inputValue = $.trim($(target).val());
    var rules = validateOptions[$(target).attr("id")];

    var isValid = true;
    var errMsg = "";

    for (var ruleName in rules) {

        var ruleValue = rules[ruleName].value;
        var failed = false;

        switch (ruleName) {
        case "Required":
            failed = ruleValue && inputValue.length === 0;
            break;
        case "CheckOldPasswordIsCorrect":
        case "CheckIfNotExist":
            // Validator callbacks return false to signal failure.
            failed = ruleValue(inputValue) === false;
            break;
        case "CheckExist":
            // Here an affirmative answer (value already exists) is the failure.
            failed = ruleValue(inputValue) === true;
            break;
        case "RegExp":
            failed = !ruleValue.test(inputValue);
            break;
        case "MinLength":
            failed = inputValue.length < ruleValue;
            break;
        case "MaxLength":
            failed = inputValue.length > ruleValue;
            break;
        case "CompareWith":
            // Only compare once the referenced field has been filled in.
            var otherValue = $.trim($(ruleValue).val());
            failed = otherValue.length > 0 && inputValue !== otherValue;
            break;
        case "IllegalChar":
            for (var i = 0; i < ruleValue.length; i++) {
                if (inputValue.indexOf(ruleValue[i]) > -1) {
                    failed = true;
                    break; // first offending character is enough
                }
            }
            break;
        }

        if (failed) {
            isValid = false;
            errMsg = rules[ruleName].errMsg;
            break; // stop at the first failing rule
        }
    }

    if (!isValid) {
        $(target).parent().removeClass("has-success").addClass("has-error");
        $(target).siblings("span").removeClass("glyphicon-ok").addClass("glyphicon-warning-sign");
        $("#divErrMsg").css({"display": "block"});
        $("#divErrMsg").text(errMsg);
    } else {
        $(target).parent().removeClass("has-error").addClass("has-success");
        $(target).siblings("span").removeClass("glyphicon-warning-sign").addClass("glyphicon-ok");
        $("#divErrMsg").css({"display": "none"});
    }
    validateOptions.Result.push(isValid);
    return isValid;
}
+ * Datetimepicker for Bootstrap 3 + * version : 4.17.37 + * https://github.com/Eonasdan/bootstrap-datetimepicker/ + */.bootstrap-datetimepicker-widget{list-style:none}.bootstrap-datetimepicker-widget.dropdown-menu{margin:2px 0;padding:4px;width:19em}@media (min-width:768px){.bootstrap-datetimepicker-widget.dropdown-menu.timepicker-sbs{width:38em}}@media (min-width:992px){.bootstrap-datetimepicker-widget.dropdown-menu.timepicker-sbs{width:38em}}@media (min-width:1200px){.bootstrap-datetimepicker-widget.dropdown-menu.timepicker-sbs{width:38em}}.bootstrap-datetimepicker-widget.dropdown-menu:before,.bootstrap-datetimepicker-widget.dropdown-menu:after{content:'';display:inline-block;position:absolute}.bootstrap-datetimepicker-widget.dropdown-menu.bottom:before{border-left:7px solid transparent;border-right:7px solid transparent;border-bottom:7px solid #ccc;border-bottom-color:rgba(0,0,0,0.2);top:-7px;left:7px}.bootstrap-datetimepicker-widget.dropdown-menu.bottom:after{border-left:6px solid transparent;border-right:6px solid transparent;border-bottom:6px solid white;top:-6px;left:8px}.bootstrap-datetimepicker-widget.dropdown-menu.top:before{border-left:7px solid transparent;border-right:7px solid transparent;border-top:7px solid #ccc;border-top-color:rgba(0,0,0,0.2);bottom:-7px;left:6px}.bootstrap-datetimepicker-widget.dropdown-menu.top:after{border-left:6px solid transparent;border-right:6px solid transparent;border-top:6px solid white;bottom:-6px;left:7px}.bootstrap-datetimepicker-widget.dropdown-menu.pull-right:before{left:auto;right:6px}.bootstrap-datetimepicker-widget.dropdown-menu.pull-right:after{left:auto;right:7px}.bootstrap-datetimepicker-widget .list-unstyled{margin:0}.bootstrap-datetimepicker-widget a[data-action]{padding:6px 0}.bootstrap-datetimepicker-widget a[data-action]:active{box-shadow:none}.bootstrap-datetimepicker-widget .timepicker-hour,.bootstrap-datetimepicker-widget .timepicker-minute,.bootstrap-datetimepicker-widget 
.timepicker-second{width:54px;font-weight:bold;font-size:1.2em;margin:0}.bootstrap-datetimepicker-widget button[data-action]{padding:6px}.bootstrap-datetimepicker-widget .btn[data-action="incrementHours"]::after{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0;content:"Increment Hours"}.bootstrap-datetimepicker-widget .btn[data-action="incrementMinutes"]::after{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0;content:"Increment Minutes"}.bootstrap-datetimepicker-widget .btn[data-action="decrementHours"]::after{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0;content:"Decrement Hours"}.bootstrap-datetimepicker-widget .btn[data-action="decrementMinutes"]::after{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0;content:"Decrement Minutes"}.bootstrap-datetimepicker-widget .btn[data-action="showHours"]::after{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0;content:"Show Hours"}.bootstrap-datetimepicker-widget .btn[data-action="showMinutes"]::after{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0;content:"Show Minutes"}.bootstrap-datetimepicker-widget .btn[data-action="togglePeriod"]::after{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0;content:"Toggle AM/PM"}.bootstrap-datetimepicker-widget .btn[data-action="clear"]::after{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0;content:"Clear the picker"}.bootstrap-datetimepicker-widget .btn[data-action="today"]::after{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0;content:"Set the date to 
today"}.bootstrap-datetimepicker-widget .picker-switch{text-align:center}.bootstrap-datetimepicker-widget .picker-switch::after{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0;content:"Toggle Date and Time Screens"}.bootstrap-datetimepicker-widget .picker-switch td{padding:0;margin:0;height:auto;width:auto;line-height:inherit}.bootstrap-datetimepicker-widget .picker-switch td span{line-height:2.5;height:2.5em;width:100%}.bootstrap-datetimepicker-widget table{width:100%;margin:0}.bootstrap-datetimepicker-widget table td,.bootstrap-datetimepicker-widget table th{text-align:center;border-radius:4px}.bootstrap-datetimepicker-widget table th{height:20px;line-height:20px;width:20px}.bootstrap-datetimepicker-widget table th.picker-switch{width:145px}.bootstrap-datetimepicker-widget table th.disabled,.bootstrap-datetimepicker-widget table th.disabled:hover{background:none;color:#777;cursor:not-allowed}.bootstrap-datetimepicker-widget table th.prev::after{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0;content:"Previous Month"}.bootstrap-datetimepicker-widget table th.next::after{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0;content:"Next Month"}.bootstrap-datetimepicker-widget table thead tr:first-child th{cursor:pointer}.bootstrap-datetimepicker-widget table thead tr:first-child th:hover{background:#eee}.bootstrap-datetimepicker-widget table td{height:54px;line-height:54px;width:54px}.bootstrap-datetimepicker-widget table td.cw{font-size:.8em;height:20px;line-height:20px;color:#777}.bootstrap-datetimepicker-widget table td.day{height:20px;line-height:20px;width:20px}.bootstrap-datetimepicker-widget table td.day:hover,.bootstrap-datetimepicker-widget table td.hour:hover,.bootstrap-datetimepicker-widget table td.minute:hover,.bootstrap-datetimepicker-widget table 
td.second:hover{background:#eee;cursor:pointer}.bootstrap-datetimepicker-widget table td.old,.bootstrap-datetimepicker-widget table td.new{color:#777}.bootstrap-datetimepicker-widget table td.today{position:relative}.bootstrap-datetimepicker-widget table td.today:before{content:'';display:inline-block;border:solid transparent;border-width:0 0 7px 7px;border-bottom-color:#337ab7;border-top-color:rgba(0,0,0,0.2);position:absolute;bottom:4px;right:4px}.bootstrap-datetimepicker-widget table td.active,.bootstrap-datetimepicker-widget table td.active:hover{background-color:#337ab7;color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25)}.bootstrap-datetimepicker-widget table td.active.today:before{border-bottom-color:#fff}.bootstrap-datetimepicker-widget table td.disabled,.bootstrap-datetimepicker-widget table td.disabled:hover{background:none;color:#777;cursor:not-allowed}.bootstrap-datetimepicker-widget table td span{display:inline-block;width:54px;height:54px;line-height:54px;margin:2px 1.5px;cursor:pointer;border-radius:4px}.bootstrap-datetimepicker-widget table td span:hover{background:#eee}.bootstrap-datetimepicker-widget table td span.active{background-color:#337ab7;color:#fff;text-shadow:0 -1px 0 rgba(0,0,0,0.25)}.bootstrap-datetimepicker-widget table td span.old{color:#777}.bootstrap-datetimepicker-widget table td span.disabled,.bootstrap-datetimepicker-widget table td span.disabled:hover{background:none;color:#777;cursor:not-allowed}.bootstrap-datetimepicker-widget.usetwentyfour td.hour{height:27px;line-height:27px}.bootstrap-datetimepicker-widget.wider{width:21em}.bootstrap-datetimepicker-widget .datepicker-decades .decade{line-height:1.8em !important}.input-group.date .input-group-addon{cursor:pointer}.sr-only{position:absolute;width:1px;height:1px;margin:-1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);border:0} \ No newline at end of file diff --git a/static/vendors/eonasdan-bootstrap-datetimepicker/build/js/bootstrap-datetimepicker.min.js 
b/static/vendors/eonasdan-bootstrap-datetimepicker/build/js/bootstrap-datetimepicker.min.js new file mode 100644 index 000000000..db3d085d4 --- /dev/null +++ b/static/vendors/eonasdan-bootstrap-datetimepicker/build/js/bootstrap-datetimepicker.min.js @@ -0,0 +1,9 @@ +/*! version : 4.17.37 + ========================================================= + bootstrap-datetimejs + https://github.com/Eonasdan/bootstrap-datetimepicker + Copyright (c) 2015 Jonathan Peterson + ========================================================= + */ +!function(a){"use strict";if("function"==typeof define&&define.amd)define(["jquery","moment"],a);else if("object"==typeof exports)a(require("jquery"),require("moment"));else{if("undefined"==typeof jQuery)throw"bootstrap-datetimepicker requires jQuery to be loaded first";if("undefined"==typeof moment)throw"bootstrap-datetimepicker requires Moment.js to be loaded first";a(jQuery,moment)}}(function(a,b){"use strict";if(!b)throw new Error("bootstrap-datetimepicker requires Moment.js to be loaded first");var c=function(c,d){var e,f,g,h,i,j,k,l={},m=!0,n=!1,o=!1,p=0,q=[{clsName:"days",navFnc:"M",navStep:1},{clsName:"months",navFnc:"y",navStep:1},{clsName:"years",navFnc:"y",navStep:10},{clsName:"decades",navFnc:"y",navStep:100}],r=["days","months","years","decades"],s=["top","bottom","auto"],t=["left","right","auto"],u=["default","top","bottom"],v={up:38,38:"up",down:40,40:"down",left:37,37:"left",right:39,39:"right",tab:9,9:"tab",escape:27,27:"escape",enter:13,13:"enter",pageUp:33,33:"pageUp",pageDown:34,34:"pageDown",shift:16,16:"shift",control:17,17:"control",space:32,32:"space",t:84,84:"t","delete":46,46:"delete"},w={},x=function(a){var c,e,f,g,h,i=!1;return void 0!==b.tz&&void 0!==d.timeZone&&null!==d.timeZone&&""!==d.timeZone&&(i=!0),void 
0===a||null===a?c=i?b().tz(d.timeZone).startOf("d"):b().startOf("d"):i?(e=b().tz(d.timeZone).utcOffset(),f=b(a,j,d.useStrict).utcOffset(),f!==e?(g=b().tz(d.timeZone).format("Z"),h=b(a,j,d.useStrict).format("YYYY-MM-DD[T]HH:mm:ss")+g,c=b(h,j,d.useStrict).tz(d.timeZone)):c=b(a,j,d.useStrict).tz(d.timeZone)):c=b(a,j,d.useStrict),c},y=function(a){if("string"!=typeof a||a.length>1)throw new TypeError("isEnabled expects a single character string parameter");switch(a){case"y":return-1!==i.indexOf("Y");case"M":return-1!==i.indexOf("M");case"d":return-1!==i.toLowerCase().indexOf("d");case"h":case"H":return-1!==i.toLowerCase().indexOf("h");case"m":return-1!==i.indexOf("m");case"s":return-1!==i.indexOf("s");default:return!1}},z=function(){return y("h")||y("m")||y("s")},A=function(){return y("y")||y("M")||y("d")},B=function(){var b=a("").append(a("").append(a("").addClass("prev").attr("data-action","previous").append(a("").addClass(d.icons.previous))).append(a("").addClass("picker-switch").attr("data-action","pickerSwitch").attr("colspan",d.calendarWeeks?"6":"5")).append(a("").addClass("next").attr("data-action","next").append(a("").addClass(d.icons.next)))),c=a("").append(a("").append(a("").attr("colspan",d.calendarWeeks?"8":"7")));return[a("
").addClass("datepicker-days").append(a("").addClass("table-condensed").append(b).append(a(""))),a("
").addClass("datepicker-months").append(a("
").addClass("table-condensed").append(b.clone()).append(c.clone())),a("
").addClass("datepicker-years").append(a("
").addClass("table-condensed").append(b.clone()).append(c.clone())),a("
").addClass("datepicker-decades").append(a("
").addClass("table-condensed").append(b.clone()).append(c.clone()))]},C=function(){var b=a(""),c=a(""),e=a("");return y("h")&&(b.append(a("
").append(a("").attr({href:"#",tabindex:"-1",title:d.tooltips.incrementHour}).addClass("btn").attr("data-action","incrementHours").append(a("").addClass(d.icons.up)))),c.append(a("").append(a("").addClass("timepicker-hour").attr({"data-time-component":"hours",title:d.tooltips.pickHour}).attr("data-action","showHours"))),e.append(a("").append(a("").attr({href:"#",tabindex:"-1",title:d.tooltips.decrementHour}).addClass("btn").attr("data-action","decrementHours").append(a("").addClass(d.icons.down))))),y("m")&&(y("h")&&(b.append(a("").addClass("separator")),c.append(a("").addClass("separator").html(":")),e.append(a("").addClass("separator"))),b.append(a("").append(a("").attr({href:"#",tabindex:"-1",title:d.tooltips.incrementMinute}).addClass("btn").attr("data-action","incrementMinutes").append(a("").addClass(d.icons.up)))),c.append(a("").append(a("").addClass("timepicker-minute").attr({"data-time-component":"minutes",title:d.tooltips.pickMinute}).attr("data-action","showMinutes"))),e.append(a("").append(a("").attr({href:"#",tabindex:"-1",title:d.tooltips.decrementMinute}).addClass("btn").attr("data-action","decrementMinutes").append(a("").addClass(d.icons.down))))),y("s")&&(y("m")&&(b.append(a("").addClass("separator")),c.append(a("").addClass("separator").html(":")),e.append(a("").addClass("separator"))),b.append(a("").append(a("").attr({href:"#",tabindex:"-1",title:d.tooltips.incrementSecond}).addClass("btn").attr("data-action","incrementSeconds").append(a("").addClass(d.icons.up)))),c.append(a("").append(a("").addClass("timepicker-second").attr({"data-time-component":"seconds",title:d.tooltips.pickSecond}).attr("data-action","showSeconds"))),e.append(a("").append(a("").attr({href:"#",tabindex:"-1",title:d.tooltips.decrementSecond}).addClass("btn").attr("data-action","decrementSeconds").append(a("").addClass(d.icons.down))))),h||(b.append(a("").addClass("separator")),c.append(a("").append(a("").addClass("separator"))),a("
").addClass("timepicker-picker").append(a("").addClass("table-condensed").append([b,c,e]))},D=function(){var b=a("
").addClass("timepicker-hours").append(a("
").addClass("table-condensed")),c=a("
").addClass("timepicker-minutes").append(a("
").addClass("table-condensed")),d=a("
").addClass("timepicker-seconds").append(a("
").addClass("table-condensed")),e=[C()];return y("h")&&e.push(b),y("m")&&e.push(c),y("s")&&e.push(d),e},E=function(){var b=[];return d.showTodayButton&&b.push(a("
").append(a("").attr({"data-action":"today",title:d.tooltips.today}).append(a("").addClass(d.icons.today)))),!d.sideBySide&&A()&&z()&&b.push(a("").append(a("").attr({"data-action":"togglePicker",title:d.tooltips.selectTime}).append(a("").addClass(d.icons.time)))),d.showClear&&b.push(a("").append(a("").attr({"data-action":"clear",title:d.tooltips.clear}).append(a("").addClass(d.icons.clear)))),d.showClose&&b.push(a("").append(a("").attr({"data-action":"close",title:d.tooltips.close}).append(a("").addClass(d.icons.close)))),a("").addClass("table-condensed").append(a("").append(a("").append(b)))},F=function(){var b=a("
").addClass("bootstrap-datetimepicker-widget dropdown-menu"),c=a("
").addClass("datepicker").append(B()),e=a("
").addClass("timepicker").append(D()),f=a("
    ").addClass("list-unstyled"),g=a("
  • ").addClass("picker-switch"+(d.collapse?" accordion-toggle":"")).append(E());return d.inline&&b.removeClass("dropdown-menu"),h&&b.addClass("usetwentyfour"),y("s")&&!h&&b.addClass("wider"),d.sideBySide&&A()&&z()?(b.addClass("timepicker-sbs"),"top"===d.toolbarPlacement&&b.append(g),b.append(a("
    ").addClass("row").append(c.addClass("col-md-6")).append(e.addClass("col-md-6"))),"bottom"===d.toolbarPlacement&&b.append(g),b):("top"===d.toolbarPlacement&&f.append(g),A()&&f.append(a("
  • ").addClass(d.collapse&&z()?"collapse in":"").append(c)),"default"===d.toolbarPlacement&&f.append(g),z()&&f.append(a("
  • ").addClass(d.collapse&&A()?"collapse":"").append(e)),"bottom"===d.toolbarPlacement&&f.append(g),b.append(f))},G=function(){var b,e={};return b=c.is("input")||d.inline?c.data():c.find("input").data(),b.dateOptions&&b.dateOptions instanceof Object&&(e=a.extend(!0,e,b.dateOptions)),a.each(d,function(a){var c="date"+a.charAt(0).toUpperCase()+a.slice(1);void 0!==b[c]&&(e[a]=b[c])}),e},H=function(){var b,e=(n||c).position(),f=(n||c).offset(),g=d.widgetPositioning.vertical,h=d.widgetPositioning.horizontal;if(d.widgetParent)b=d.widgetParent.append(o);else if(c.is("input"))b=c.after(o).parent();else{if(d.inline)return void(b=c.append(o));b=c,c.children().first().after(o)}if("auto"===g&&(g=f.top+1.5*o.height()>=a(window).height()+a(window).scrollTop()&&o.height()+c.outerHeight()a(window).width()?"right":"left"),"top"===g?o.addClass("top").removeClass("bottom"):o.addClass("bottom").removeClass("top"),"right"===h?o.addClass("pull-right"):o.removeClass("pull-right"),"relative"!==b.css("position")&&(b=b.parents().filter(function(){return"relative"===a(this).css("position")}).first()),0===b.length)throw new Error("datetimepicker component should be placed within a relative positioned container");o.css({top:"top"===g?"auto":e.top+c.outerHeight(),bottom:"top"===g?e.top+c.outerHeight():"auto",left:"left"===h?b===c?0:e.left:"auto",right:"left"===h?"auto":b.outerWidth()-c.outerWidth()-(b===c?0:e.left)})},I=function(a){"dp.change"===a.type&&(a.date&&a.date.isSame(a.oldDate)||!a.date&&!a.oldDate)||c.trigger(a)},J=function(a){"y"===a&&(a="YYYY"),I({type:"dp.update",change:a,viewDate:f.clone()})},K=function(a){o&&(a&&(k=Math.max(p,Math.min(3,k+a))),o.find(".datepicker > div").hide().filter(".datepicker-"+q[k].clsName).show())},L=function(){var b=a("
"),c=f.clone().startOf("w").startOf("d");for(d.calendarWeeks===!0&&b.append(a(""),d.calendarWeeks&&c.append('"),k.push(c)),g="",b.isBefore(f,"M")&&(g+=" old"),b.isAfter(f,"M")&&(g+=" new"),b.isSame(e,"d")&&!m&&(g+=" active"),Q(b,"d")||(g+=" disabled"),b.isSame(x(),"d")&&(g+=" today"),(0===b.day()||6===b.day())&&(g+=" weekend"),c.append('"),b.add(1,"d");i.find("tbody").empty().append(k),S(),T(),U()}},W=function(){var b=o.find(".timepicker-hours table"),c=f.clone().startOf("d"),d=[],e=a("");for(f.hour()>11&&!h&&c.hour(12);c.isSame(f,"d")&&(h||f.hour()<12&&c.hour()<12||f.hour()>11);)c.hour()%4===0&&(e=a(""),d.push(e)),e.append('"),c.add(1,"h");b.empty().append(d)},X=function(){for(var b=o.find(".timepicker-minutes table"),c=f.clone().startOf("h"),e=[],g=a(""),h=1===d.stepping?5:d.stepping;f.isSame(c,"h");)c.minute()%(4*h)===0&&(g=a(""),e.push(g)),g.append('"),c.add(h,"m");b.empty().append(e)},Y=function(){for(var b=o.find(".timepicker-seconds table"),c=f.clone().startOf("m"),d=[],e=a("");f.isSame(c,"m");)c.second()%20===0&&(e=a(""),d.push(e)),e.append('"),c.add(5,"s");b.empty().append(d)},Z=function(){var a,b,c=o.find(".timepicker span[data-time-component]");h||(a=o.find(".timepicker [data-action=togglePeriod]"),b=e.clone().add(e.hours()>=12?-12:12,"h"),a.text(e.format("A")),Q(b,"h")?a.removeClass("disabled"):a.addClass("disabled")),c.filter("[data-time-component=hours]").text(e.format(h?"HH":"hh")),c.filter("[data-time-component=minutes]").text(e.format("mm")),c.filter("[data-time-component=seconds]").text(e.format("ss")),W(),X(),Y()},$=function(){o&&(V(),Z())},_=function(a){var b=m?null:e;return 
a?(a=a.clone().locale(d.locale),1!==d.stepping&&a.minutes(Math.round(a.minutes()/d.stepping)*d.stepping%60).seconds(0),void(Q(a)?(e=a,f=e.clone(),g.val(e.format(i)),c.data("date",e.format(i)),m=!1,$(),I({type:"dp.change",date:e.clone(),oldDate:b})):(d.keepInvalid||g.val(m?"":e.format(i)),I({type:"dp.error",date:a})))):(m=!0,g.val(""),c.data("date",""),I({type:"dp.change",date:!1,oldDate:b}),void $())},aa=function(){var b=!1;return o?(o.find(".collapse").each(function(){var c=a(this).data("collapse");return c&&c.transitioning?(b=!0,!1):!0}),b?l:(n&&n.hasClass("btn")&&n.toggleClass("active"),o.hide(),a(window).off("resize",H),o.off("click","[data-action]"),o.off("mousedown",!1),o.remove(),o=!1,I({type:"dp.hide",date:e.clone()}),g.blur(),l)):l},ba=function(){_(null)},ca={next:function(){var a=q[k].navFnc;f.add(q[k].navStep,a),V(),J(a)},previous:function(){var a=q[k].navFnc;f.subtract(q[k].navStep,a),V(),J(a)},pickerSwitch:function(){K(1)},selectMonth:function(b){var c=a(b.target).closest("tbody").find("span").index(a(b.target));f.month(c),k===p?(_(e.clone().year(f.year()).month(f.month())),d.inline||aa()):(K(-1),V()),J("M")},selectYear:function(b){var c=parseInt(a(b.target).text(),10)||0;f.year(c),k===p?(_(e.clone().year(f.year())),d.inline||aa()):(K(-1),V()),J("YYYY")},selectDecade:function(b){var c=parseInt(a(b.target).data("selection"),10)||0;f.year(c),k===p?(_(e.clone().year(f.year())),d.inline||aa()):(K(-1),V()),J("YYYY")},selectDay:function(b){var c=f.clone();a(b.target).is(".old")&&c.subtract(1,"M"),a(b.target).is(".new")&&c.add(1,"M"),_(c.date(parseInt(a(b.target).text(),10))),z()||d.keepOpen||d.inline||aa()},incrementHours:function(){var a=e.clone().add(1,"h");Q(a,"h")&&_(a)},incrementMinutes:function(){var a=e.clone().add(d.stepping,"m");Q(a,"m")&&_(a)},incrementSeconds:function(){var a=e.clone().add(1,"s");Q(a,"s")&&_(a)},decrementHours:function(){var a=e.clone().subtract(1,"h");Q(a,"h")&&_(a)},decrementMinutes:function(){var 
a=e.clone().subtract(d.stepping,"m");Q(a,"m")&&_(a)},decrementSeconds:function(){var a=e.clone().subtract(1,"s");Q(a,"s")&&_(a)},togglePeriod:function(){_(e.clone().add(e.hours()>=12?-12:12,"h"))},togglePicker:function(b){var c,e=a(b.target),f=e.closest("ul"),g=f.find(".in"),h=f.find(".collapse:not(.in)");if(g&&g.length){if(c=g.data("collapse"),c&&c.transitioning)return;g.collapse?(g.collapse("hide"),h.collapse("show")):(g.removeClass("in"),h.addClass("in")),e.is("span")?e.toggleClass(d.icons.time+" "+d.icons.date):e.find("span").toggleClass(d.icons.time+" "+d.icons.date)}},showPicker:function(){o.find(".timepicker > div:not(.timepicker-picker)").hide(),o.find(".timepicker .timepicker-picker").show()},showHours:function(){o.find(".timepicker .timepicker-picker").hide(),o.find(".timepicker .timepicker-hours").show()},showMinutes:function(){o.find(".timepicker .timepicker-picker").hide(),o.find(".timepicker .timepicker-minutes").show()},showSeconds:function(){o.find(".timepicker .timepicker-picker").hide(),o.find(".timepicker .timepicker-seconds").show()},selectHour:function(b){var c=parseInt(a(b.target).text(),10);h||(e.hours()>=12?12!==c&&(c+=12):12===c&&(c=0)),_(e.clone().hours(c)),ca.showPicker.call(l)},selectMinute:function(b){_(e.clone().minutes(parseInt(a(b.target).text(),10))),ca.showPicker.call(l)},selectSecond:function(b){_(e.clone().seconds(parseInt(a(b.target).text(),10))),ca.showPicker.call(l)},clear:ba,today:function(){var a=x();Q(a,"d")&&_(a)},close:aa},da=function(b){return a(b.currentTarget).is(".disabled")?!1:(ca[a(b.currentTarget).data("action")].apply(l,arguments),!1)},ea=function(){var b,c={year:function(a){return a.month(0).date(1).hours(0).seconds(0).minutes(0)},month:function(a){return a.date(1).hours(0).seconds(0).minutes(0)},day:function(a){return a.hours(0).seconds(0).minutes(0)},hour:function(a){return a.seconds(0).minutes(0)},minute:function(a){return a.seconds(0)}};return 
g.prop("disabled")||!d.ignoreReadonly&&g.prop("readonly")||o?l:(void 0!==g.val()&&0!==g.val().trim().length?_(ga(g.val().trim())):d.useCurrent&&m&&(g.is("input")&&0===g.val().trim().length||d.inline)&&(b=x(),"string"==typeof d.useCurrent&&(b=c[d.useCurrent](b)),_(b)),o=F(),L(),R(),o.find(".timepicker-hours").hide(),o.find(".timepicker-minutes").hide(),o.find(".timepicker-seconds").hide(),$(),K(),a(window).on("resize",H),o.on("click","[data-action]",da),o.on("mousedown",!1),n&&n.hasClass("btn")&&n.toggleClass("active"),o.show(),H(),d.focusOnShow&&!g.is(":focus")&&g.focus(),I({type:"dp.show"}),l)},fa=function(){return o?aa():ea()},ga=function(a){return a=void 0===d.parseInputDate?b.isMoment(a)||a instanceof Date?b(a):x(a):d.parseInputDate(a),a.locale(d.locale),a},ha=function(a){var b,c,e,f,g=null,h=[],i={},j=a.which,k="p";w[j]=k;for(b in w)w.hasOwnProperty(b)&&w[b]===k&&(h.push(b),parseInt(b,10)!==j&&(i[b]=!0));for(b in d.keyBinds)if(d.keyBinds.hasOwnProperty(b)&&"function"==typeof d.keyBinds[b]&&(e=b.split(" "),e.length===h.length&&v[j]===e[e.length-1])){for(f=!0,c=e.length-2;c>=0;c--)if(!(v[e[c]]in i)){f=!1;break}if(f){g=d.keyBinds[b];break}}g&&(g.call(l,o),a.stopPropagation(),a.preventDefault())},ia=function(a){w[a.which]="r",a.stopPropagation(),a.preventDefault()},ja=function(b){var c=a(b.target).val().trim(),d=c?ga(c):null;return _(d),b.stopImmediatePropagation(),!1},ka=function(){g.on({change:ja,blur:d.debug?"":aa,keydown:ha,keyup:ia,focus:d.allowInputToggle?ea:""}),c.is("input")?g.on({focus:ea}):n&&(n.on("click",fa),n.on("mousedown",!1))},la=function(){g.off({change:ja,blur:blur,keydown:ha,keyup:ia,focus:d.allowInputToggle?aa:""}),c.is("input")?g.off({focus:ea}):n&&(n.off("click",fa),n.off("mousedown",!1))},ma=function(b){var c={};return a.each(b,function(){var a=ga(this);a.isValid()&&(c[a.format("YYYY-MM-DD")]=!0)}),Object.keys(c).length?c:!1},na=function(b){var c={};return a.each(b,function(){c[this]=!0}),Object.keys(c).length?c:!1},oa=function(){var 
a=d.format||"L LT";i=a.replace(/(\[[^\[]*\])|(\\)?(LTS|LT|LL?L?L?|l{1,4})/g,function(a){var b=e.localeData().longDateFormat(a)||a;return b.replace(/(\[[^\[]*\])|(\\)?(LTS|LT|LL?L?L?|l{1,4})/g,function(a){return e.localeData().longDateFormat(a)||a})}),j=d.extraFormats?d.extraFormats.slice():[],j.indexOf(a)<0&&j.indexOf(i)<0&&j.push(i),h=i.toLowerCase().indexOf("a")<1&&i.replace(/\[.*?\]/g,"").indexOf("h")<1,y("y")&&(p=2),y("M")&&(p=1),y("d")&&(p=0),k=Math.max(p,k),m||_(e)};if(l.destroy=function(){aa(),la(),c.removeData("DateTimePicker"),c.removeData("date")},l.toggle=fa,l.show=ea,l.hide=aa,l.disable=function(){return aa(),n&&n.hasClass("btn")&&n.addClass("disabled"),g.prop("disabled",!0),l},l.enable=function(){return n&&n.hasClass("btn")&&n.removeClass("disabled"),g.prop("disabled",!1),l},l.ignoreReadonly=function(a){if(0===arguments.length)return d.ignoreReadonly;if("boolean"!=typeof a)throw new TypeError("ignoreReadonly () expects a boolean parameter");return d.ignoreReadonly=a,l},l.options=function(b){if(0===arguments.length)return a.extend(!0,{},d);if(!(b instanceof Object))throw new TypeError("options() options parameter should be an object");return a.extend(!0,d,b),a.each(d,function(a,b){if(void 0===l[a])throw new TypeError("option "+a+" is not recognized!");l[a](b)}),l},l.date=function(a){if(0===arguments.length)return m?null:e.clone();if(!(null===a||"string"==typeof a||b.isMoment(a)||a instanceof Date))throw new TypeError("date() parameter must be one of [null, string, moment or Date]");return _(null===a?null:ga(a)),l},l.format=function(a){if(0===arguments.length)return d.format;if("string"!=typeof a&&("boolean"!=typeof a||a!==!1))throw new TypeError("format() expects a sting or boolean:false parameter "+a);return d.format=a,i&&oa(),l},l.timeZone=function(a){return 0===arguments.length?d.timeZone:(d.timeZone=a,l)},l.dayViewHeaderFormat=function(a){if(0===arguments.length)return d.dayViewHeaderFormat;if("string"!=typeof a)throw new 
TypeError("dayViewHeaderFormat() expects a string parameter");return d.dayViewHeaderFormat=a,l},l.extraFormats=function(a){if(0===arguments.length)return d.extraFormats;if(a!==!1&&!(a instanceof Array))throw new TypeError("extraFormats() expects an array or false parameter");return d.extraFormats=a,j&&oa(),l},l.disabledDates=function(b){if(0===arguments.length)return d.disabledDates?a.extend({},d.disabledDates):d.disabledDates;if(!b)return d.disabledDates=!1,$(),l;if(!(b instanceof Array))throw new TypeError("disabledDates() expects an array parameter");return d.disabledDates=ma(b),d.enabledDates=!1,$(),l},l.enabledDates=function(b){if(0===arguments.length)return d.enabledDates?a.extend({},d.enabledDates):d.enabledDates;if(!b)return d.enabledDates=!1,$(),l;if(!(b instanceof Array))throw new TypeError("enabledDates() expects an array parameter");return d.enabledDates=ma(b),d.disabledDates=!1,$(),l},l.daysOfWeekDisabled=function(a){if(0===arguments.length)return d.daysOfWeekDisabled.splice(0);if("boolean"==typeof a&&!a)return d.daysOfWeekDisabled=!1,$(),l;if(!(a instanceof Array))throw new TypeError("daysOfWeekDisabled() expects an array parameter");if(d.daysOfWeekDisabled=a.reduce(function(a,b){return b=parseInt(b,10),b>6||0>b||isNaN(b)?a:(-1===a.indexOf(b)&&a.push(b),a)},[]).sort(),d.useCurrent&&!d.keepInvalid){for(var b=0;!Q(e,"d");){if(e.add(1,"d"),7===b)throw"Tried 7 times to find a valid date";b++}_(e)}return $(),l},l.maxDate=function(a){if(0===arguments.length)return d.maxDate?d.maxDate.clone():d.maxDate;if("boolean"==typeof a&&a===!1)return d.maxDate=!1,$(),l;"string"==typeof a&&("now"===a||"moment"===a)&&(a=x());var b=ga(a);if(!b.isValid())throw new TypeError("maxDate() Could not parse date parameter: "+a);if(d.minDate&&b.isBefore(d.minDate))throw new TypeError("maxDate() date parameter is before options.minDate: "+b.format(i));return 
d.maxDate=b,d.useCurrent&&!d.keepInvalid&&e.isAfter(a)&&_(d.maxDate),f.isAfter(b)&&(f=b.clone().subtract(d.stepping,"m")),$(),l},l.minDate=function(a){if(0===arguments.length)return d.minDate?d.minDate.clone():d.minDate;if("boolean"==typeof a&&a===!1)return d.minDate=!1,$(),l;"string"==typeof a&&("now"===a||"moment"===a)&&(a=x());var b=ga(a);if(!b.isValid())throw new TypeError("minDate() Could not parse date parameter: "+a);if(d.maxDate&&b.isAfter(d.maxDate))throw new TypeError("minDate() date parameter is after options.maxDate: "+b.format(i));return d.minDate=b,d.useCurrent&&!d.keepInvalid&&e.isBefore(a)&&_(d.minDate),f.isBefore(b)&&(f=b.clone().add(d.stepping,"m")),$(),l},l.defaultDate=function(a){if(0===arguments.length)return d.defaultDate?d.defaultDate.clone():d.defaultDate;if(!a)return d.defaultDate=!1,l;"string"==typeof a&&("now"===a||"moment"===a)&&(a=x());var b=ga(a);if(!b.isValid())throw new TypeError("defaultDate() Could not parse date parameter: "+a);if(!Q(b))throw new TypeError("defaultDate() date passed is invalid according to component setup validations");return d.defaultDate=b,(d.defaultDate&&d.inline||""===g.val().trim())&&_(d.defaultDate),l},l.locale=function(a){if(0===arguments.length)return d.locale;if(!b.localeData(a))throw new TypeError("locale() locale "+a+" is not loaded from moment locales!");return d.locale=a,e.locale(d.locale),f.locale(d.locale),i&&oa(),o&&(aa(),ea()),l},l.stepping=function(a){return 0===arguments.length?d.stepping:(a=parseInt(a,10),(isNaN(a)||1>a)&&(a=1),d.stepping=a,l)},l.useCurrent=function(a){var b=["year","month","day","hour","minute"];if(0===arguments.length)return d.useCurrent;if("boolean"!=typeof a&&"string"!=typeof a)throw new TypeError("useCurrent() expects a boolean or string parameter");if("string"==typeof a&&-1===b.indexOf(a.toLowerCase()))throw new TypeError("useCurrent() expects a string parameter of "+b.join(", "));return d.useCurrent=a,l},l.collapse=function(a){if(0===arguments.length)return 
d.collapse;if("boolean"!=typeof a)throw new TypeError("collapse() expects a boolean parameter");return d.collapse===a?l:(d.collapse=a,o&&(aa(),ea()),l)},l.icons=function(b){if(0===arguments.length)return a.extend({},d.icons);if(!(b instanceof Object))throw new TypeError("icons() expects parameter to be an Object");return a.extend(d.icons,b),o&&(aa(),ea()),l},l.tooltips=function(b){if(0===arguments.length)return a.extend({},d.tooltips);if(!(b instanceof Object))throw new TypeError("tooltips() expects parameter to be an Object");return a.extend(d.tooltips,b),o&&(aa(),ea()),l},l.useStrict=function(a){if(0===arguments.length)return d.useStrict;if("boolean"!=typeof a)throw new TypeError("useStrict() expects a boolean parameter");return d.useStrict=a,l},l.sideBySide=function(a){if(0===arguments.length)return d.sideBySide;if("boolean"!=typeof a)throw new TypeError("sideBySide() expects a boolean parameter");return d.sideBySide=a,o&&(aa(),ea()),l},l.viewMode=function(a){if(0===arguments.length)return d.viewMode;if("string"!=typeof a)throw new TypeError("viewMode() expects a string parameter");if(-1===r.indexOf(a))throw new TypeError("viewMode() parameter must be one of ("+r.join(", ")+") value");return d.viewMode=a,k=Math.max(r.indexOf(a),p),K(),l},l.toolbarPlacement=function(a){if(0===arguments.length)return d.toolbarPlacement;if("string"!=typeof a)throw new TypeError("toolbarPlacement() expects a string parameter");if(-1===u.indexOf(a))throw new TypeError("toolbarPlacement() parameter must be one of ("+u.join(", ")+") value");return d.toolbarPlacement=a,o&&(aa(),ea()),l},l.widgetPositioning=function(b){if(0===arguments.length)return a.extend({},d.widgetPositioning);if("[object Object]"!=={}.toString.call(b))throw new TypeError("widgetPositioning() expects an object variable");if(b.horizontal){if("string"!=typeof b.horizontal)throw new TypeError("widgetPositioning() horizontal variable must be a 
string");if(b.horizontal=b.horizontal.toLowerCase(),-1===t.indexOf(b.horizontal))throw new TypeError("widgetPositioning() expects horizontal parameter to be one of ("+t.join(", ")+")");d.widgetPositioning.horizontal=b.horizontal}if(b.vertical){if("string"!=typeof b.vertical)throw new TypeError("widgetPositioning() vertical variable must be a string");if(b.vertical=b.vertical.toLowerCase(),-1===s.indexOf(b.vertical))throw new TypeError("widgetPositioning() expects vertical parameter to be one of ("+s.join(", ")+")");d.widgetPositioning.vertical=b.vertical}return $(),l},l.calendarWeeks=function(a){if(0===arguments.length)return d.calendarWeeks;if("boolean"!=typeof a)throw new TypeError("calendarWeeks() expects parameter to be a boolean value");return d.calendarWeeks=a,$(),l},l.showTodayButton=function(a){if(0===arguments.length)return d.showTodayButton;if("boolean"!=typeof a)throw new TypeError("showTodayButton() expects a boolean parameter");return d.showTodayButton=a,o&&(aa(),ea()),l},l.showClear=function(a){if(0===arguments.length)return d.showClear;if("boolean"!=typeof a)throw new TypeError("showClear() expects a boolean parameter");return d.showClear=a,o&&(aa(),ea()),l},l.widgetParent=function(b){if(0===arguments.length)return d.widgetParent;if("string"==typeof b&&(b=a(b)),null!==b&&"string"!=typeof b&&!(b instanceof a))throw new TypeError("widgetParent() expects a string or a jQuery object parameter");return d.widgetParent=b,o&&(aa(),ea()),l},l.keepOpen=function(a){if(0===arguments.length)return d.keepOpen;if("boolean"!=typeof a)throw new TypeError("keepOpen() expects a boolean parameter");return d.keepOpen=a,l},l.focusOnShow=function(a){if(0===arguments.length)return d.focusOnShow;if("boolean"!=typeof a)throw new TypeError("focusOnShow() expects a boolean parameter");return d.focusOnShow=a,l},l.inline=function(a){if(0===arguments.length)return d.inline;if("boolean"!=typeof a)throw new TypeError("inline() expects a boolean parameter");return 
d.inline=a,l},l.clear=function(){return ba(),l},l.keyBinds=function(a){return d.keyBinds=a,l},l.getMoment=function(a){return x(a)},l.debug=function(a){if("boolean"!=typeof a)throw new TypeError("debug() expects a boolean parameter");return d.debug=a,l},l.allowInputToggle=function(a){if(0===arguments.length)return d.allowInputToggle;if("boolean"!=typeof a)throw new TypeError("allowInputToggle() expects a boolean parameter");return d.allowInputToggle=a,l},l.showClose=function(a){if(0===arguments.length)return d.showClose;if("boolean"!=typeof a)throw new TypeError("showClose() expects a boolean parameter");return d.showClose=a,l},l.keepInvalid=function(a){if(0===arguments.length)return d.keepInvalid;if("boolean"!=typeof a)throw new TypeError("keepInvalid() expects a boolean parameter");return d.keepInvalid=a,l},l.datepickerInput=function(a){if(0===arguments.length)return d.datepickerInput;if("string"!=typeof a)throw new TypeError("datepickerInput() expects a string parameter");return d.datepickerInput=a,l},l.parseInputDate=function(a){if(0===arguments.length)return d.parseInputDate; +if("function"!=typeof a)throw new TypeError("parseInputDate() sholud be as function");return d.parseInputDate=a,l},l.disabledTimeIntervals=function(b){if(0===arguments.length)return d.disabledTimeIntervals?a.extend({},d.disabledTimeIntervals):d.disabledTimeIntervals;if(!b)return d.disabledTimeIntervals=!1,$(),l;if(!(b instanceof Array))throw new TypeError("disabledTimeIntervals() expects an array parameter");return d.disabledTimeIntervals=b,$(),l},l.disabledHours=function(b){if(0===arguments.length)return d.disabledHours?a.extend({},d.disabledHours):d.disabledHours;if(!b)return d.disabledHours=!1,$(),l;if(!(b instanceof Array))throw new TypeError("disabledHours() expects an array parameter");if(d.disabledHours=na(b),d.enabledHours=!1,d.useCurrent&&!d.keepInvalid){for(var c=0;!Q(e,"h");){if(e.add(1,"h"),24===c)throw"Tried 24 times to find a valid date";c++}_(e)}return 
$(),l},l.enabledHours=function(b){if(0===arguments.length)return d.enabledHours?a.extend({},d.enabledHours):d.enabledHours;if(!b)return d.enabledHours=!1,$(),l;if(!(b instanceof Array))throw new TypeError("enabledHours() expects an array parameter");if(d.enabledHours=na(b),d.disabledHours=!1,d.useCurrent&&!d.keepInvalid){for(var c=0;!Q(e,"h");){if(e.add(1,"h"),24===c)throw"Tried 24 times to find a valid date";c++}_(e)}return $(),l},l.viewDate=function(a){if(0===arguments.length)return f.clone();if(!a)return f=e.clone(),l;if(!("string"==typeof a||b.isMoment(a)||a instanceof Date))throw new TypeError("viewDate() parameter must be one of [string, moment or Date]");return f=ga(a),J(),l},c.is("input"))g=c;else if(g=c.find(d.datepickerInput),0===g.size())g=c.find("input");else if(!g.is("input"))throw new Error('CSS class "'+d.datepickerInput+'" cannot be applied to non input element');if(c.hasClass("input-group")&&(n=0===c.find(".datepickerbutton").size()?c.find(".input-group-addon"):c.find(".datepickerbutton")),!d.inline&&!g.is("input"))throw new Error("Could not initialize DateTimePicker without an input element");return e=x(),f=e.clone(),a.extend(!0,d,G()),l.options(d),oa(),ka(),g.prop("disabled")&&l.disable(),g.is("input")&&0!==g.val().trim().length?_(ga(g.val().trim())):d.defaultDate&&void 0===g.attr("placeholder")&&_(d.defaultDate),d.inline&&ea(),l};a.fn.datetimepicker=function(b){return this.each(function(){var d=a(this);d.data("DateTimePicker")||(b=a.extend(!0,{},a.fn.datetimepicker.defaults,b),d.data("DateTimePicker",c(d,b)))})},a.fn.datetimepicker.defaults={timeZone:"Etc/UTC",format:!1,dayViewHeaderFormat:"MMMM YYYY",extraFormats:!1,stepping:1,minDate:!1,maxDate:!1,useCurrent:!0,collapse:!0,locale:b.locale(),defaultDate:!1,disabledDates:!1,enabledDates:!1,icons:{time:"glyphicon glyphicon-time",date:"glyphicon glyphicon-calendar",up:"glyphicon glyphicon-chevron-up",down:"glyphicon glyphicon-chevron-down",previous:"glyphicon 
glyphicon-chevron-left",next:"glyphicon glyphicon-chevron-right",today:"glyphicon glyphicon-screenshot",clear:"glyphicon glyphicon-trash",close:"glyphicon glyphicon-remove"},tooltips:{today:"Go to today",clear:"Clear selection",close:"Close the picker",selectMonth:"Select Month",prevMonth:"Previous Month",nextMonth:"Next Month",selectYear:"Select Year",prevYear:"Previous Year",nextYear:"Next Year",selectDecade:"Select Decade",prevDecade:"Previous Decade",nextDecade:"Next Decade",prevCentury:"Previous Century",nextCentury:"Next Century",pickHour:"Pick Hour",incrementHour:"Increment Hour",decrementHour:"Decrement Hour",pickMinute:"Pick Minute",incrementMinute:"Increment Minute",decrementMinute:"Decrement Minute",pickSecond:"Pick Second",incrementSecond:"Increment Second",decrementSecond:"Decrement Second",togglePeriod:"Toggle Period",selectTime:"Select Time"},useStrict:!1,sideBySide:!1,daysOfWeekDisabled:!1,calendarWeeks:!1,viewMode:"days",toolbarPlacement:"default",showTodayButton:!1,showClear:!1,showClose:!1,widgetPositioning:{horizontal:"auto",vertical:"auto"},widgetParent:null,ignoreReadonly:!1,keepOpen:!1,focusOnShow:!0,inline:!1,keepInvalid:!1,datepickerInput:".datepickerinput",keyBinds:{up:function(a){if(a){var b=this.date()||this.getMoment();a.find(".datepicker").is(":visible")?this.date(b.clone().subtract(7,"d")):this.date(b.clone().add(this.stepping(),"m"))}},down:function(a){if(!a)return void this.show();var b=this.date()||this.getMoment();a.find(".datepicker").is(":visible")?this.date(b.clone().add(7,"d")):this.date(b.clone().subtract(this.stepping(),"m"))},"control up":function(a){if(a){var b=this.date()||this.getMoment();a.find(".datepicker").is(":visible")?this.date(b.clone().subtract(1,"y")):this.date(b.clone().add(1,"h"))}},"control down":function(a){if(a){var b=this.date()||this.getMoment();a.find(".datepicker").is(":visible")?this.date(b.clone().add(1,"y")):this.date(b.clone().subtract(1,"h"))}},left:function(a){if(a){var 
b=this.date()||this.getMoment();a.find(".datepicker").is(":visible")&&this.date(b.clone().subtract(1,"d"))}},right:function(a){if(a){var b=this.date()||this.getMoment();a.find(".datepicker").is(":visible")&&this.date(b.clone().add(1,"d"))}},pageUp:function(a){if(a){var b=this.date()||this.getMoment();a.find(".datepicker").is(":visible")&&this.date(b.clone().subtract(1,"M"))}},pageDown:function(a){if(a){var b=this.date()||this.getMoment();a.find(".datepicker").is(":visible")&&this.date(b.clone().add(1,"M"))}},enter:function(){this.hide()},escape:function(){this.hide()},"control space":function(a){a.find(".timepicker").is(":visible")&&a.find('.btn[data-action="togglePeriod"]').click()},t:function(){this.date(this.getMoment())},"delete":function(){this.clear()}},debug:!1,allowInputToggle:!1,disabledTimeIntervals:!1,disabledHours:!1,enabledHours:!1,viewDate:!1}}); \ No newline at end of file diff --git a/static/vendors/moment/min/moment-with-locales.min.js b/static/vendors/moment/min/moment-with-locales.min.js new file mode 100644 index 000000000..edaaeb8a6 --- /dev/null +++ b/static/vendors/moment/min/moment-with-locales.min.js @@ -0,0 +1,74 @@ +!function(a,b){"object"==typeof exports&&"undefined"!=typeof module?module.exports=b():"function"==typeof define&&define.amd?define(b):a.moment=b()}(this,function(){"use strict";function a(){return Rd.apply(null,arguments)}function b(a){Rd=a}function c(a){return"[object Array]"===Object.prototype.toString.call(a)}function d(a){return a instanceof Date||"[object Date]"===Object.prototype.toString.call(a)}function e(a,b){var c,d=[];for(c=0;c0)for(c in Td)d=Td[c],e=b[d],m(e)||(a[d]=e);return a}function o(b){n(this,b),this._d=new Date(null!=b._d?b._d.getTime():NaN),Ud===!1&&(Ud=!0,a.updateOffset(this),Ud=!1)}function p(a){return a instanceof o||null!=a&&null!=a._isAMomentObject}function q(a){return 0>a?Math.ceil(a):Math.floor(a)}function r(a){var b=+a,c=0;return 0!==b&&isFinite(b)&&(c=q(b)),c}function s(a,b,c){var 
d,e=Math.min(a.length,b.length),f=Math.abs(a.length-b.length),g=0;for(d=0;e>d;d++)(c&&a[d]!==b[d]||!c&&r(a[d])!==r(b[d]))&&g++;return g+f}function t(){}function u(a){return a?a.toLowerCase().replace("_","-"):a}function v(a){for(var b,c,d,e,f=0;f0;){if(d=w(e.slice(0,b).join("-")))return d;if(c&&c.length>=b&&s(e,c,!0)>=b-1)break;b--}f++}return null}function w(a){var b=null;if(!Vd[a]&&"undefined"!=typeof module&&module&&module.exports)try{b=Sd._abbr,require("./locale/"+a),x(b)}catch(c){}return Vd[a]}function x(a,b){var c;return a&&(c=m(b)?z(a):y(a,b),c&&(Sd=c)),Sd._abbr}function y(a,b){return null!==b?(b.abbr=a,Vd[a]=Vd[a]||new t,Vd[a].set(b),x(a),Vd[a]):(delete Vd[a],null)}function z(a){var b;if(a&&a._locale&&a._locale._abbr&&(a=a._locale._abbr),!a)return Sd;if(!c(a)){if(b=w(a))return b;a=[a]}return v(a)}function A(a,b){var c=a.toLowerCase();Wd[c]=Wd[c+"s"]=Wd[b]=a}function B(a){return"string"==typeof a?Wd[a]||Wd[a.toLowerCase()]:void 0}function C(a){var b,c,d={};for(c in a)f(a,c)&&(b=B(c),b&&(d[b]=a[c]));return d}function D(a){return a instanceof Function||"[object Function]"===Object.prototype.toString.call(a)}function E(b,c){return function(d){return null!=d?(G(this,b,d),a.updateOffset(this,c),this):F(this,b)}}function F(a,b){return a.isValid()?a._d["get"+(a._isUTC?"UTC":"")+b]():NaN}function G(a,b,c){a.isValid()&&a._d["set"+(a._isUTC?"UTC":"")+b](c)}function H(a,b){var c;if("object"==typeof a)for(c in a)this.set(c,a[c]);else if(a=B(a),D(this[a]))return this[a](b);return this}function I(a,b,c){var d=""+Math.abs(a),e=b-d.length,f=a>=0;return(f?c?"+":"":"-")+Math.pow(10,Math.max(0,e)).toString().substr(1)+d}function J(a,b,c,d){var e=d;"string"==typeof d&&(e=function(){return this[d]()}),a&&($d[a]=e),b&&($d[b[0]]=function(){return I(e.apply(this,arguments),b[1],b[2])}),c&&($d[c]=function(){return this.localeData().ordinal(e.apply(this,arguments),a)})}function K(a){return a.match(/\[[\s\S]/)?a.replace(/^\[|\]$/g,""):a.replace(/\\/g,"")}function L(a){var 
b,c,d=a.match(Xd);for(b=0,c=d.length;c>b;b++)$d[d[b]]?d[b]=$d[d[b]]:d[b]=K(d[b]);return function(e){var f="";for(b=0;c>b;b++)f+=d[b]instanceof Function?d[b].call(e,a):d[b];return f}}function M(a,b){return a.isValid()?(b=N(b,a.localeData()),Zd[b]=Zd[b]||L(b),Zd[b](a)):a.localeData().invalidDate()}function N(a,b){function c(a){return b.longDateFormat(a)||a}var d=5;for(Yd.lastIndex=0;d>=0&&Yd.test(a);)a=a.replace(Yd,c),Yd.lastIndex=0,d-=1;return a}function O(a,b,c){qe[a]=D(b)?b:function(a,d){return a&&c?c:b}}function P(a,b){return f(qe,a)?qe[a](b._strict,b._locale):new RegExp(Q(a))}function Q(a){return R(a.replace("\\","").replace(/\\(\[)|\\(\])|\[([^\]\[]*)\]|\\(.)/g,function(a,b,c,d,e){return b||c||d||e}))}function R(a){return a.replace(/[-\/\\^$*+?.()|[\]{}]/g,"\\$&")}function S(a,b){var c,d=b;for("string"==typeof a&&(a=[a]),"number"==typeof b&&(d=function(a,c){c[b]=r(a)}),c=0;cd;d++){if(e=h([2e3,d]),c&&!this._longMonthsParse[d]&&(this._longMonthsParse[d]=new RegExp("^"+this.months(e,"").replace(".","")+"$","i"),this._shortMonthsParse[d]=new RegExp("^"+this.monthsShort(e,"").replace(".","")+"$","i")),c||this._monthsParse[d]||(f="^"+this.months(e,"")+"|^"+this.monthsShort(e,""),this._monthsParse[d]=new RegExp(f.replace(".",""),"i")),c&&"MMMM"===b&&this._longMonthsParse[d].test(a))return d;if(c&&"MMM"===b&&this._shortMonthsParse[d].test(a))return d;if(!c&&this._monthsParse[d].test(a))return d}}function Z(a,b){var c;return a.isValid()?"string"==typeof b&&(b=a.localeData().monthsParse(b),"number"!=typeof b)?a:(c=Math.min(a.date(),V(a.year(),b)),a._d["set"+(a._isUTC?"UTC":"")+"Month"](b,c),a):a}function $(b){return null!=b?(Z(this,b),a.updateOffset(this,!0),this):F(this,"Month")}function _(){return V(this.year(),this.month())}function aa(a){return this._monthsParseExact?(f(this,"_monthsRegex")||ca.call(this),a?this._monthsShortStrictRegex:this._monthsShortRegex):this._monthsShortStrictRegex&&a?this._monthsShortStrictRegex:this._monthsShortRegex}function ba(a){return 
this._monthsParseExact?(f(this,"_monthsRegex")||ca.call(this),a?this._monthsStrictRegex:this._monthsRegex):this._monthsStrictRegex&&a?this._monthsStrictRegex:this._monthsRegex}function ca(){function a(a,b){return b.length-a.length}var b,c,d=[],e=[],f=[];for(b=0;12>b;b++)c=h([2e3,b]),d.push(this.monthsShort(c,"")),e.push(this.months(c,"")),f.push(this.months(c,"")),f.push(this.monthsShort(c,""));for(d.sort(a),e.sort(a),f.sort(a),b=0;12>b;b++)d[b]=R(d[b]),e[b]=R(e[b]),f[b]=R(f[b]);this._monthsRegex=new RegExp("^("+f.join("|")+")","i"),this._monthsShortRegex=this._monthsRegex,this._monthsStrictRegex=new RegExp("^("+e.join("|")+")$","i"),this._monthsShortStrictRegex=new RegExp("^("+d.join("|")+")$","i")}function da(a){var b,c=a._a;return c&&-2===j(a).overflow&&(b=c[te]<0||c[te]>11?te:c[ue]<1||c[ue]>V(c[se],c[te])?ue:c[ve]<0||c[ve]>24||24===c[ve]&&(0!==c[we]||0!==c[xe]||0!==c[ye])?ve:c[we]<0||c[we]>59?we:c[xe]<0||c[xe]>59?xe:c[ye]<0||c[ye]>999?ye:-1,j(a)._overflowDayOfYear&&(se>b||b>ue)&&(b=ue),j(a)._overflowWeeks&&-1===b&&(b=ze),j(a)._overflowWeekday&&-1===b&&(b=Ae),j(a).overflow=b),a}function ea(b){a.suppressDeprecationWarnings===!1&&"undefined"!=typeof console&&console.warn&&console.warn("Deprecation warning: "+b)}function fa(a,b){var c=!0;return g(function(){return c&&(ea(a+"\nArguments: "+Array.prototype.slice.call(arguments).join(", ")+"\n"+(new Error).stack),c=!1),b.apply(this,arguments)},b)}function ga(a,b){Ge[a]||(ea(b),Ge[a]=!0)}function ha(a){var b,c,d,e,f,g,h=a._i,i=He.exec(h)||Ie.exec(h);if(i){for(j(a).iso=!0,b=0,c=Ke.length;c>b;b++)if(Ke[b][1].exec(i[1])){e=Ke[b][0],d=Ke[b][2]!==!1;break}if(null==e)return void(a._isValid=!1);if(i[3]){for(b=0,c=Le.length;c>b;b++)if(Le[b][1].exec(i[3])){f=(i[2]||" ")+Le[b][0];break}if(null==f)return void(a._isValid=!1)}if(!d&&null!=f)return void(a._isValid=!1);if(i[4]){if(!Je.exec(i[4]))return void(a._isValid=!1);g="Z"}a._f=e+(f||"")+(g||""),wa(a)}else a._isValid=!1}function ia(b){var c=Me.exec(b._i);return 
null!==c?void(b._d=new Date(+c[1])):(ha(b),void(b._isValid===!1&&(delete b._isValid,a.createFromInputFallback(b))))}function ja(a,b,c,d,e,f,g){var h=new Date(a,b,c,d,e,f,g);return 100>a&&a>=0&&isFinite(h.getFullYear())&&h.setFullYear(a),h}function ka(a){var b=new Date(Date.UTC.apply(null,arguments));return 100>a&&a>=0&&isFinite(b.getUTCFullYear())&&b.setUTCFullYear(a),b}function la(a){return ma(a)?366:365}function ma(a){return a%4===0&&a%100!==0||a%400===0}function na(){return ma(this.year())}function oa(a,b,c){var d=7+b-c,e=(7+ka(a,0,d).getUTCDay()-b)%7;return-e+d-1}function pa(a,b,c,d,e){var f,g,h=(7+c-d)%7,i=oa(a,d,e),j=1+7*(b-1)+h+i;return 0>=j?(f=a-1,g=la(f)+j):j>la(a)?(f=a+1,g=j-la(a)):(f=a,g=j),{year:f,dayOfYear:g}}function qa(a,b,c){var d,e,f=oa(a.year(),b,c),g=Math.floor((a.dayOfYear()-f-1)/7)+1;return 1>g?(e=a.year()-1,d=g+ra(e,b,c)):g>ra(a.year(),b,c)?(d=g-ra(a.year(),b,c),e=a.year()+1):(e=a.year(),d=g),{week:d,year:e}}function ra(a,b,c){var d=oa(a,b,c),e=oa(a+1,b,c);return(la(a)-d+e)/7}function sa(a,b,c){return null!=a?a:null!=b?b:c}function ta(b){var c=new Date(a.now());return b._useUTC?[c.getUTCFullYear(),c.getUTCMonth(),c.getUTCDate()]:[c.getFullYear(),c.getMonth(),c.getDate()]}function ua(a){var b,c,d,e,f=[];if(!a._d){for(d=ta(a),a._w&&null==a._a[ue]&&null==a._a[te]&&va(a),a._dayOfYear&&(e=sa(a._a[se],d[se]),a._dayOfYear>la(e)&&(j(a)._overflowDayOfYear=!0),c=ka(e,0,a._dayOfYear),a._a[te]=c.getUTCMonth(),a._a[ue]=c.getUTCDate()),b=0;3>b&&null==a._a[b];++b)a._a[b]=f[b]=d[b];for(;7>b;b++)a._a[b]=f[b]=null==a._a[b]?2===b?1:0:a._a[b];24===a._a[ve]&&0===a._a[we]&&0===a._a[xe]&&0===a._a[ye]&&(a._nextDay=!0,a._a[ve]=0),a._d=(a._useUTC?ka:ja).apply(null,f),null!=a._tzm&&a._d.setUTCMinutes(a._d.getUTCMinutes()-a._tzm),a._nextDay&&(a._a[ve]=24)}}function va(a){var 
b,c,d,e,f,g,h,i;b=a._w,null!=b.GG||null!=b.W||null!=b.E?(f=1,g=4,c=sa(b.GG,a._a[se],qa(Ea(),1,4).year),d=sa(b.W,1),e=sa(b.E,1),(1>e||e>7)&&(i=!0)):(f=a._locale._week.dow,g=a._locale._week.doy,c=sa(b.gg,a._a[se],qa(Ea(),f,g).year),d=sa(b.w,1),null!=b.d?(e=b.d,(0>e||e>6)&&(i=!0)):null!=b.e?(e=b.e+f,(b.e<0||b.e>6)&&(i=!0)):e=f),1>d||d>ra(c,f,g)?j(a)._overflowWeeks=!0:null!=i?j(a)._overflowWeekday=!0:(h=pa(c,d,e,f,g),a._a[se]=h.year,a._dayOfYear=h.dayOfYear)}function wa(b){if(b._f===a.ISO_8601)return void ha(b);b._a=[],j(b).empty=!0;var c,d,e,f,g,h=""+b._i,i=h.length,k=0;for(e=N(b._f,b._locale).match(Xd)||[],c=0;c0&&j(b).unusedInput.push(g),h=h.slice(h.indexOf(d)+d.length),k+=d.length),$d[f]?(d?j(b).empty=!1:j(b).unusedTokens.push(f),U(f,d,b)):b._strict&&!d&&j(b).unusedTokens.push(f);j(b).charsLeftOver=i-k,h.length>0&&j(b).unusedInput.push(h),j(b).bigHour===!0&&b._a[ve]<=12&&b._a[ve]>0&&(j(b).bigHour=void 0),b._a[ve]=xa(b._locale,b._a[ve],b._meridiem),ua(b),da(b)}function xa(a,b,c){var d;return null==c?b:null!=a.meridiemHour?a.meridiemHour(b,c):null!=a.isPM?(d=a.isPM(c),d&&12>b&&(b+=12),d||12!==b||(b=0),b):b}function ya(a){var b,c,d,e,f;if(0===a._f.length)return j(a).invalidFormat=!0,void(a._d=new Date(NaN));for(e=0;ef)&&(d=f,c=b));g(a,c||b)}function za(a){if(!a._d){var b=C(a._i);a._a=e([b.year,b.month,b.day||b.date,b.hour,b.minute,b.second,b.millisecond],function(a){return a&&parseInt(a,10)}),ua(a)}}function Aa(a){var b=new o(da(Ba(a)));return b._nextDay&&(b.add(1,"d"),b._nextDay=void 0),b}function Ba(a){var b=a._i,e=a._f;return a._locale=a._locale||z(a._l),null===b||void 0===e&&""===b?l({nullInput:!0}):("string"==typeof b&&(a._i=b=a._locale.preparse(b)),p(b)?new o(da(b)):(c(e)?ya(a):e?wa(a):d(b)?a._d=b:Ca(a),k(a)||(a._d=null),a))}function Ca(b){var f=b._i;void 0===f?b._d=new Date(a.now()):d(f)?b._d=new Date(+f):"string"==typeof f?ia(b):c(f)?(b._a=e(f.slice(0),function(a){return parseInt(a,10)}),ua(b)):"object"==typeof f?za(b):"number"==typeof f?b._d=new 
Date(f):a.createFromInputFallback(b)}function Da(a,b,c,d,e){var f={};return"boolean"==typeof c&&(d=c,c=void 0),f._isAMomentObject=!0,f._useUTC=f._isUTC=e,f._l=c,f._i=a,f._f=b,f._strict=d,Aa(f)}function Ea(a,b,c,d){return Da(a,b,c,d,!1)}function Fa(a,b){var d,e;if(1===b.length&&c(b[0])&&(b=b[0]),!b.length)return Ea();for(d=b[0],e=1;ea&&(a=-a,c="-"),c+I(~~(a/60),2)+b+I(~~a%60,2)})}function La(a,b){var c=(b||"").match(a)||[],d=c[c.length-1]||[],e=(d+"").match(Re)||["-",0,0],f=+(60*e[1])+r(e[2]);return"+"===e[0]?f:-f}function Ma(b,c){var e,f;return c._isUTC?(e=c.clone(),f=(p(b)||d(b)?+b:+Ea(b))-+e,e._d.setTime(+e._d+f),a.updateOffset(e,!1),e):Ea(b).local()}function Na(a){return 15*-Math.round(a._d.getTimezoneOffset()/15)}function Oa(b,c){var d,e=this._offset||0;return this.isValid()?null!=b?("string"==typeof b?b=La(ne,b):Math.abs(b)<16&&(b=60*b),!this._isUTC&&c&&(d=Na(this)),this._offset=b,this._isUTC=!0,null!=d&&this.add(d,"m"),e!==b&&(!c||this._changeInProgress?cb(this,Za(b-e,"m"),1,!1):this._changeInProgress||(this._changeInProgress=!0,a.updateOffset(this,!0),this._changeInProgress=null)),this):this._isUTC?e:Na(this):null!=b?this:NaN}function Pa(a,b){return null!=a?("string"!=typeof a&&(a=-a),this.utcOffset(a,b),this):-this.utcOffset()}function Qa(a){return this.utcOffset(0,a)}function Ra(a){return this._isUTC&&(this.utcOffset(0,a),this._isUTC=!1,a&&this.subtract(Na(this),"m")),this}function Sa(){return this._tzm?this.utcOffset(this._tzm):"string"==typeof this._i&&this.utcOffset(La(me,this._i)),this}function Ta(a){return this.isValid()?(a=a?Ea(a).utcOffset():0,(this.utcOffset()-a)%60===0):!1}function Ua(){return this.utcOffset()>this.clone().month(0).utcOffset()||this.utcOffset()>this.clone().month(5).utcOffset()}function Va(){if(!m(this._isDSTShifted))return this._isDSTShifted;var a={};if(n(a,this),a=Ba(a),a._a){var b=a._isUTC?h(a._a):Ea(a._a);this._isDSTShifted=this.isValid()&&s(a._a,b.toArray())>0}else this._isDSTShifted=!1;return this._isDSTShifted}function 
Wa(){return this.isValid()?!this._isUTC:!1}function Xa(){return this.isValid()?this._isUTC:!1}function Ya(){return this.isValid()?this._isUTC&&0===this._offset:!1}function Za(a,b){var c,d,e,g=a,h=null;return Ja(a)?g={ms:a._milliseconds,d:a._days,M:a._months}:"number"==typeof a?(g={},b?g[b]=a:g.milliseconds=a):(h=Se.exec(a))?(c="-"===h[1]?-1:1,g={y:0,d:r(h[ue])*c,h:r(h[ve])*c,m:r(h[we])*c,s:r(h[xe])*c,ms:r(h[ye])*c}):(h=Te.exec(a))?(c="-"===h[1]?-1:1,g={y:$a(h[2],c),M:$a(h[3],c),d:$a(h[4],c),h:$a(h[5],c),m:$a(h[6],c),s:$a(h[7],c),w:$a(h[8],c)}):null==g?g={}:"object"==typeof g&&("from"in g||"to"in g)&&(e=ab(Ea(g.from),Ea(g.to)),g={},g.ms=e.milliseconds,g.M=e.months),d=new Ia(g),Ja(a)&&f(a,"_locale")&&(d._locale=a._locale),d}function $a(a,b){var c=a&&parseFloat(a.replace(",","."));return(isNaN(c)?0:c)*b}function _a(a,b){var c={milliseconds:0,months:0};return c.months=b.month()-a.month()+12*(b.year()-a.year()),a.clone().add(c.months,"M").isAfter(b)&&--c.months,c.milliseconds=+b-+a.clone().add(c.months,"M"),c}function ab(a,b){var c;return a.isValid()&&b.isValid()?(b=Ma(b,a),a.isBefore(b)?c=_a(a,b):(c=_a(b,a),c.milliseconds=-c.milliseconds,c.months=-c.months),c):{milliseconds:0,months:0}}function bb(a,b){return function(c,d){var e,f;return null===d||isNaN(+d)||(ga(b,"moment()."+b+"(period, number) is deprecated. 
Please use moment()."+b+"(number, period)."),f=c,c=d,d=f),c="string"==typeof c?+c:c,e=Za(c,d),cb(this,e,a),this}}function cb(b,c,d,e){var f=c._milliseconds,g=c._days,h=c._months;b.isValid()&&(e=null==e?!0:e,f&&b._d.setTime(+b._d+f*d),g&&G(b,"Date",F(b,"Date")+g*d),h&&Z(b,F(b,"Month")+h*d),e&&a.updateOffset(b,g||h))}function db(a,b){var c=a||Ea(),d=Ma(c,this).startOf("day"),e=this.diff(d,"days",!0),f=-6>e?"sameElse":-1>e?"lastWeek":0>e?"lastDay":1>e?"sameDay":2>e?"nextDay":7>e?"nextWeek":"sameElse",g=b&&(D(b[f])?b[f]():b[f]);return this.format(g||this.localeData().calendar(f,this,Ea(c)))}function eb(){return new o(this)}function fb(a,b){var c=p(a)?a:Ea(a);return this.isValid()&&c.isValid()?(b=B(m(b)?"millisecond":b),"millisecond"===b?+this>+c:+c<+this.clone().startOf(b)):!1}function gb(a,b){var c=p(a)?a:Ea(a);return this.isValid()&&c.isValid()?(b=B(m(b)?"millisecond":b),"millisecond"===b?+c>+this:+this.clone().endOf(b)<+c):!1}function hb(a,b,c){return this.isAfter(a,c)&&this.isBefore(b,c)}function ib(a,b){var c,d=p(a)?a:Ea(a);return this.isValid()&&d.isValid()?(b=B(b||"millisecond"),"millisecond"===b?+this===+d:(c=+d,+this.clone().startOf(b)<=c&&c<=+this.clone().endOf(b))):!1}function jb(a,b){return this.isSame(a,b)||this.isAfter(a,b)}function kb(a,b){return this.isSame(a,b)||this.isBefore(a,b)}function lb(a,b,c){var d,e,f,g;return this.isValid()?(d=Ma(a,this),d.isValid()?(e=6e4*(d.utcOffset()-this.utcOffset()),b=B(b),"year"===b||"month"===b||"quarter"===b?(g=mb(this,d),"quarter"===b?g/=3:"year"===b&&(g/=12)):(f=this-d,g="second"===b?f/1e3:"minute"===b?f/6e4:"hour"===b?f/36e5:"day"===b?(f-e)/864e5:"week"===b?(f-e)/6048e5:f),c?g:q(g)):NaN):NaN}function mb(a,b){var c,d,e=12*(b.year()-a.year())+(b.month()-a.month()),f=a.clone().add(e,"months");return 0>b-f?(c=a.clone().add(e-1,"months"),d=(b-f)/(f-c)):(c=a.clone().add(e+1,"months"),d=(b-f)/(c-f)),-(e+d)}function nb(){return this.clone().locale("en").format("ddd MMM DD YYYY HH:mm:ss [GMT]ZZ")}function ob(){var 
a=this.clone().utc();return 0f&&(b=f),Ob.call(this,a,b,c,d,e))}function Ob(a,b,c,d,e){var f=pa(a,b,c,d,e),g=ka(f.year,0,f.dayOfYear);return this.year(g.getUTCFullYear()),this.month(g.getUTCMonth()),this.date(g.getUTCDate()),this}function Pb(a){return null==a?Math.ceil((this.month()+1)/3):this.month(3*(a-1)+this.month()%3)}function Qb(a){return qa(a,this._week.dow,this._week.doy).week}function Rb(){return this._week.dow}function Sb(){return this._week.doy}function Tb(a){var b=this.localeData().week(this);return null==a?b:this.add(7*(a-b),"d")}function Ub(a){var b=qa(this,1,4).week;return null==a?b:this.add(7*(a-b),"d")}function Vb(a,b){return"string"!=typeof a?a:isNaN(a)?(a=b.weekdaysParse(a),"number"==typeof a?a:null):parseInt(a,10)}function Wb(a,b){return c(this._weekdays)?this._weekdays[a.day()]:this._weekdays[this._weekdays.isFormat.test(b)?"format":"standalone"][a.day()]}function Xb(a){return this._weekdaysShort[a.day()]}function Yb(a){return this._weekdaysMin[a.day()]}function Zb(a,b,c){var d,e,f;for(this._weekdaysParse||(this._weekdaysParse=[],this._minWeekdaysParse=[],this._shortWeekdaysParse=[],this._fullWeekdaysParse=[]),d=0;7>d;d++){if(e=Ea([2e3,1]).day(d),c&&!this._fullWeekdaysParse[d]&&(this._fullWeekdaysParse[d]=new RegExp("^"+this.weekdays(e,"").replace(".",".?")+"$","i"),this._shortWeekdaysParse[d]=new RegExp("^"+this.weekdaysShort(e,"").replace(".",".?")+"$","i"),this._minWeekdaysParse[d]=new RegExp("^"+this.weekdaysMin(e,"").replace(".",".?")+"$","i")),this._weekdaysParse[d]||(f="^"+this.weekdays(e,"")+"|^"+this.weekdaysShort(e,"")+"|^"+this.weekdaysMin(e,""),this._weekdaysParse[d]=new RegExp(f.replace(".",""),"i")),c&&"dddd"===b&&this._fullWeekdaysParse[d].test(a))return d;if(c&&"ddd"===b&&this._shortWeekdaysParse[d].test(a))return d;if(c&&"dd"===b&&this._minWeekdaysParse[d].test(a))return d;if(!c&&this._weekdaysParse[d].test(a))return d}}function $b(a){if(!this.isValid())return null!=a?this:NaN;var 
b=this._isUTC?this._d.getUTCDay():this._d.getDay();return null!=a?(a=Vb(a,this.localeData()),this.add(a-b,"d")):b}function _b(a){if(!this.isValid())return null!=a?this:NaN;var b=(this.day()+7-this.localeData()._week.dow)%7;return null==a?b:this.add(a-b,"d")}function ac(a){return this.isValid()?null==a?this.day()||7:this.day(this.day()%7?a:a-7):null!=a?this:NaN}function bc(a){var b=Math.round((this.clone().startOf("day")-this.clone().startOf("year"))/864e5)+1;return null==a?b:this.add(a-b,"d")}function cc(){return this.hours()%12||12}function dc(a,b){J(a,0,0,function(){return this.localeData().meridiem(this.hours(),this.minutes(),b)})}function ec(a,b){return b._meridiemParse}function fc(a){return"p"===(a+"").toLowerCase().charAt(0)}function gc(a,b,c){return a>11?c?"pm":"PM":c?"am":"AM"}function hc(a,b){b[ye]=r(1e3*("0."+a))}function ic(){return this._isUTC?"UTC":""}function jc(){return this._isUTC?"Coordinated Universal Time":""}function kc(a){return Ea(1e3*a)}function lc(){return Ea.apply(null,arguments).parseZone()}function mc(a,b,c){var d=this._calendar[a];return D(d)?d.call(b,c):d}function nc(a){var b=this._longDateFormat[a],c=this._longDateFormat[a.toUpperCase()];return b||!c?b:(this._longDateFormat[a]=c.replace(/MMMM|MM|DD|dddd/g,function(a){return a.slice(1)}),this._longDateFormat[a])}function oc(){return this._invalidDate}function pc(a){return this._ordinal.replace("%d",a)}function qc(a){return a}function rc(a,b,c,d){var e=this._relativeTime[c];return D(e)?e(a,b,c,d):e.replace(/%d/i,a)}function sc(a,b){var c=this._relativeTime[a>0?"future":"past"];return D(c)?c(b):c.replace(/%s/i,b)}function tc(a){var b,c;for(c in a)b=a[c],D(b)?this[c]=b:this["_"+c]=b;this._ordinalParseLenient=new RegExp(this._ordinalParse.source+"|"+/\d{1,2}/.source)}function uc(a,b,c,d){var e=z(),f=h().set(d,b);return e[c](f,a)}function vc(a,b,c,d,e){if("number"==typeof a&&(b=a,a=void 0),a=a||"",null!=b)return uc(a,b,c,e);var f,g=[];for(f=0;d>f;f++)g[f]=uc(a,f,c,e);return g}function 
wc(a,b){return vc(a,b,"months",12,"month")}function xc(a,b){return vc(a,b,"monthsShort",12,"month")}function yc(a,b){return vc(a,b,"weekdays",7,"day")}function zc(a,b){return vc(a,b,"weekdaysShort",7,"day")}function Ac(a,b){return vc(a,b,"weekdaysMin",7,"day")}function Bc(){var a=this._data;return this._milliseconds=qf(this._milliseconds),this._days=qf(this._days),this._months=qf(this._months),a.milliseconds=qf(a.milliseconds),a.seconds=qf(a.seconds),a.minutes=qf(a.minutes),a.hours=qf(a.hours),a.months=qf(a.months),a.years=qf(a.years),this}function Cc(a,b,c,d){var e=Za(b,c);return a._milliseconds+=d*e._milliseconds,a._days+=d*e._days,a._months+=d*e._months,a._bubble()}function Dc(a,b){return Cc(this,a,b,1)}function Ec(a,b){return Cc(this,a,b,-1)}function Fc(a){return 0>a?Math.floor(a):Math.ceil(a)}function Gc(){var a,b,c,d,e,f=this._milliseconds,g=this._days,h=this._months,i=this._data;return f>=0&&g>=0&&h>=0||0>=f&&0>=g&&0>=h||(f+=864e5*Fc(Ic(h)+g),g=0,h=0),i.milliseconds=f%1e3,a=q(f/1e3),i.seconds=a%60,b=q(a/60),i.minutes=b%60,c=q(b/60),i.hours=c%24,g+=q(c/24),e=q(Hc(g)),h+=e,g-=Fc(Ic(e)),d=q(h/12),h%=12,i.days=g,i.months=h,i.years=d,this}function Hc(a){return 4800*a/146097}function Ic(a){return 146097*a/4800}function Jc(a){var b,c,d=this._milliseconds;if(a=B(a),"month"===a||"year"===a)return b=this._days+d/864e5,c=this._months+Hc(b),"month"===a?c:c/12;switch(b=this._days+Math.round(Ic(this._months)),a){case"week":return b/7+d/6048e5;case"day":return b+d/864e5;case"hour":return 24*b+d/36e5;case"minute":return 1440*b+d/6e4;case"second":return 86400*b+d/1e3;case"millisecond":return Math.floor(864e5*b)+d;default:throw new Error("Unknown unit "+a)}}function Kc(){return this._milliseconds+864e5*this._days+this._months%12*2592e6+31536e6*r(this._months/12)}function Lc(a){return function(){return this.as(a)}}function Mc(a){return a=B(a),this[a+"s"]()}function Nc(a){return function(){return this._data[a]}}function Oc(){return q(this.days()/7)}function Pc(a,b,c,d,e){return 
e.relativeTime(b||1,!!c,a,d)}function Qc(a,b,c){var d=Za(a).abs(),e=Gf(d.as("s")),f=Gf(d.as("m")),g=Gf(d.as("h")),h=Gf(d.as("d")),i=Gf(d.as("M")),j=Gf(d.as("y")),k=e=f&&["m"]||f=g&&["h"]||g=h&&["d"]||h=i&&["M"]||i=j&&["y"]||["yy",j];return k[2]=b,k[3]=+a>0,k[4]=c,Pc.apply(null,k)}function Rc(a,b){return void 0===Hf[a]?!1:void 0===b?Hf[a]:(Hf[a]=b,!0)}function Sc(a){var b=this.localeData(),c=Qc(this,!a,b);return a&&(c=b.pastFuture(+this,c)),b.postformat(c)}function Tc(){var a,b,c,d=If(this._milliseconds)/1e3,e=If(this._days),f=If(this._months);a=q(d/60),b=q(a/60),d%=60,a%=60,c=q(f/12),f%=12;var g=c,h=f,i=e,j=b,k=a,l=d,m=this.asSeconds();return m?(0>m?"-":"")+"P"+(g?g+"Y":"")+(h?h+"M":"")+(i?i+"D":"")+(j||k||l?"T":"")+(j?j+"H":"")+(k?k+"M":"")+(l?l+"S":""):"P0D"} +//! moment.js locale configuration +//! locale : belarusian (be) +//! author : Dmitry Demidov : https://github.com/demidov91 +//! author: Praleska: http://praleska.pro/ +//! Author : Menelion Elensúle : https://github.com/Oire +function Uc(a,b){var c=a.split("_");return b%10===1&&b%100!==11?c[0]:b%10>=2&&4>=b%10&&(10>b%100||b%100>=20)?c[1]:c[2]}function Vc(a,b,c){var d={mm:b?"хвіліна_хвіліны_хвілін":"хвіліну_хвіліны_хвілін",hh:b?"гадзіна_гадзіны_гадзін":"гадзіну_гадзіны_гадзін",dd:"дзень_дні_дзён",MM:"месяц_месяцы_месяцаў",yy:"год_гады_гадоў"};return"m"===c?b?"хвіліна":"хвіліну":"h"===c?b?"гадзіна":"гадзіну":a+" "+Uc(d[c],+a)} +//! moment.js locale configuration +//! locale : breton (br) +//! author : Jean-Baptiste Le Duigou : https://github.com/jbleduigou +function Wc(a,b,c){var d={mm:"munutenn",MM:"miz",dd:"devezh"};return a+" "+Zc(d[c],a)}function Xc(a){switch(Yc(a)){case 1:case 3:case 4:case 5:case 9:return a+" bloaz";default:return a+" vloaz"}}function Yc(a){return a>9?Yc(a%10):a}function Zc(a,b){return 2===b?$c(a):a}function $c(a){var b={m:"v",b:"v",d:"z"};return void 0===b[a.charAt(0)]?a:b[a.charAt(0)]+a.substring(1)} +//! moment.js locale configuration +//! locale : bosnian (bs) +//! 
author : Nedim Cholich : https://github.com/frontyard +//! based on (hr) translation by Bojan Marković +function _c(a,b,c){var d=a+" ";switch(c){case"m":return b?"jedna minuta":"jedne minute";case"mm":return d+=1===a?"minuta":2===a||3===a||4===a?"minute":"minuta";case"h":return b?"jedan sat":"jednog sata";case"hh":return d+=1===a?"sat":2===a||3===a||4===a?"sata":"sati";case"dd":return d+=1===a?"dan":"dana";case"MM":return d+=1===a?"mjesec":2===a||3===a||4===a?"mjeseca":"mjeseci";case"yy":return d+=1===a?"godina":2===a||3===a||4===a?"godine":"godina"}}function ad(a){return a>1&&5>a&&1!==~~(a/10)}function bd(a,b,c,d){var e=a+" ";switch(c){case"s":return b||d?"pár sekund":"pár sekundami";case"m":return b?"minuta":d?"minutu":"minutou";case"mm":return b||d?e+(ad(a)?"minuty":"minut"):e+"minutami";break;case"h":return b?"hodina":d?"hodinu":"hodinou";case"hh":return b||d?e+(ad(a)?"hodiny":"hodin"):e+"hodinami";break;case"d":return b||d?"den":"dnem";case"dd":return b||d?e+(ad(a)?"dny":"dní"):e+"dny";break;case"M":return b||d?"měsíc":"měsícem";case"MM":return b||d?e+(ad(a)?"měsíce":"měsíců"):e+"měsíci";break;case"y":return b||d?"rok":"rokem";case"yy":return b||d?e+(ad(a)?"roky":"let"):e+"lety"}} +//! moment.js locale configuration +//! locale : austrian german (de-at) +//! author : lluchs : https://github.com/lluchs +//! author: Menelion Elensúle: https://github.com/Oire +//! author : Martin Groller : https://github.com/MadMG +//! author : Mikolaj Dadela : https://github.com/mik01aj +function cd(a,b,c,d){var e={m:["eine Minute","einer Minute"],h:["eine Stunde","einer Stunde"],d:["ein Tag","einem Tag"],dd:[a+" Tage",a+" Tagen"],M:["ein Monat","einem Monat"],MM:[a+" Monate",a+" Monaten"],y:["ein Jahr","einem Jahr"],yy:[a+" Jahre",a+" Jahren"]};return b?e[c][0]:e[c][1]} +//! moment.js locale configuration +//! locale : german (de) +//! author : lluchs : https://github.com/lluchs +//! author: Menelion Elensúle: https://github.com/Oire +//! 
author : Mikolaj Dadela : https://github.com/mik01aj +function dd(a,b,c,d){var e={m:["eine Minute","einer Minute"],h:["eine Stunde","einer Stunde"],d:["ein Tag","einem Tag"],dd:[a+" Tage",a+" Tagen"],M:["ein Monat","einem Monat"],MM:[a+" Monate",a+" Monaten"],y:["ein Jahr","einem Jahr"],yy:[a+" Jahre",a+" Jahren"]};return b?e[c][0]:e[c][1]} +//! moment.js locale configuration +//! locale : estonian (et) +//! author : Henry Kehlmann : https://github.com/madhenry +//! improvements : Illimar Tambek : https://github.com/ragulka +function ed(a,b,c,d){var e={s:["mõne sekundi","mõni sekund","paar sekundit"],m:["ühe minuti","üks minut"],mm:[a+" minuti",a+" minutit"],h:["ühe tunni","tund aega","üks tund"],hh:[a+" tunni",a+" tundi"],d:["ühe päeva","üks päev"],M:["kuu aja","kuu aega","üks kuu"],MM:[a+" kuu",a+" kuud"],y:["ühe aasta","aasta","üks aasta"],yy:[a+" aasta",a+" aastat"]};return b?e[c][2]?e[c][2]:e[c][1]:d?e[c][0]:e[c][1]}function fd(a,b,c,d){var e="";switch(c){case"s":return d?"muutaman sekunnin":"muutama sekunti";case"m":return d?"minuutin":"minuutti";case"mm":e=d?"minuutin":"minuuttia";break;case"h":return d?"tunnin":"tunti";case"hh":e=d?"tunnin":"tuntia";break;case"d":return d?"päivän":"päivä";case"dd":e=d?"päivän":"päivää";break;case"M":return d?"kuukauden":"kuukausi";case"MM":e=d?"kuukauden":"kuukautta";break;case"y":return d?"vuoden":"vuosi";case"yy":e=d?"vuoden":"vuotta"}return e=gd(a,d)+" "+e}function gd(a,b){return 10>a?b?fg[a]:eg[a]:a} +//! moment.js locale configuration +//! locale : hrvatski (hr) +//! 
author : Bojan Marković : https://github.com/bmarkovic +function hd(a,b,c){var d=a+" ";switch(c){case"m":return b?"jedna minuta":"jedne minute";case"mm":return d+=1===a?"minuta":2===a||3===a||4===a?"minute":"minuta";case"h":return b?"jedan sat":"jednog sata";case"hh":return d+=1===a?"sat":2===a||3===a||4===a?"sata":"sati";case"dd":return d+=1===a?"dan":"dana";case"MM":return d+=1===a?"mjesec":2===a||3===a||4===a?"mjeseca":"mjeseci";case"yy":return d+=1===a?"godina":2===a||3===a||4===a?"godine":"godina"}}function id(a,b,c,d){var e=a;switch(c){case"s":return d||b?"néhány másodperc":"néhány másodperce";case"m":return"egy"+(d||b?" perc":" perce");case"mm":return e+(d||b?" perc":" perce");case"h":return"egy"+(d||b?" óra":" órája");case"hh":return e+(d||b?" óra":" órája");case"d":return"egy"+(d||b?" nap":" napja");case"dd":return e+(d||b?" nap":" napja");case"M":return"egy"+(d||b?" hónap":" hónapja");case"MM":return e+(d||b?" hónap":" hónapja");case"y":return"egy"+(d||b?" év":" éve");case"yy":return e+(d||b?" év":" éve")}return""}function jd(a){return(a?"":"[múlt] ")+"["+pg[this.day()]+"] LT[-kor]"} +//! moment.js locale configuration +//! locale : icelandic (is) +//! author : Hinrik Örn Sigurðsson : https://github.com/hinrik +function kd(a){return a%100===11?!0:a%10===1?!1:!0}function ld(a,b,c,d){var e=a+" ";switch(c){case"s":return b||d?"nokkrar sekúndur":"nokkrum sekúndum";case"m":return b?"mínúta":"mínútu";case"mm":return kd(a)?e+(b||d?"mínútur":"mínútum"):b?e+"mínúta":e+"mínútu";case"hh":return kd(a)?e+(b||d?"klukkustundir":"klukkustundum"):e+"klukkustund";case"d":return b?"dagur":d?"dag":"degi";case"dd":return kd(a)?b?e+"dagar":e+(d?"daga":"dögum"):b?e+"dagur":e+(d?"dag":"degi");case"M":return b?"mánuður":d?"mánuð":"mánuði";case"MM":return kd(a)?b?e+"mánuðir":e+(d?"mánuði":"mánuðum"):b?e+"mánuður":e+(d?"mánuð":"mánuði");case"y":return b||d?"ár":"ári";case"yy":return kd(a)?e+(b||d?"ár":"árum"):e+(b||d?"ár":"ári")}} +//! moment.js locale configuration +//! 
locale : Luxembourgish (lb) +//! author : mweimerskirch : https://github.com/mweimerskirch, David Raison : https://github.com/kwisatz +function md(a,b,c,d){var e={m:["eng Minutt","enger Minutt"],h:["eng Stonn","enger Stonn"],d:["een Dag","engem Dag"],M:["ee Mount","engem Mount"],y:["ee Joer","engem Joer"]};return b?e[c][0]:e[c][1]}function nd(a){var b=a.substr(0,a.indexOf(" "));return pd(b)?"a "+a:"an "+a}function od(a){var b=a.substr(0,a.indexOf(" "));return pd(b)?"viru "+a:"virun "+a}function pd(a){if(a=parseInt(a,10),isNaN(a))return!1;if(0>a)return!0;if(10>a)return a>=4&&7>=a?!0:!1;if(100>a){var b=a%10,c=a/10;return pd(0===b?c:b)}if(1e4>a){for(;a>=10;)a/=10;return pd(a)}return a/=1e3,pd(a)}function qd(a,b,c,d){return b?"kelios sekundės":d?"kelių sekundžių":"kelias sekundes"}function rd(a,b,c,d){return b?td(c)[0]:d?td(c)[1]:td(c)[2]}function sd(a){return a%10===0||a>10&&20>a}function td(a){return rg[a].split("_")}function ud(a,b,c,d){var e=a+" ";return 1===a?e+rd(a,b,c[0],d):b?e+(sd(a)?td(c)[1]:td(c)[0]):d?e+td(c)[1]:e+(sd(a)?td(c)[1]:td(c)[2])}function vd(a,b,c){return c?b%10===1&&11!==b?a[2]:a[3]:b%10===1&&11!==b?a[0]:a[1]}function wd(a,b,c){return a+" "+vd(sg[c],a,b)}function xd(a,b,c){return vd(sg[c],a,b)}function yd(a,b){return b?"dažas sekundes":"dažām sekundēm"}function zd(a,b,c,d){var e="";if(b)switch(c){case"s":e="काही सेकंद";break;case"m":e="एक मिनिट";break;case"mm":e="%d मिनिटे";break;case"h":e="एक तास";break;case"hh":e="%d तास";break;case"d":e="एक दिवस";break;case"dd":e="%d दिवस";break;case"M":e="एक महिना";break;case"MM":e="%d महिने";break;case"y":e="एक वर्ष";break;case"yy":e="%d वर्षे"}else switch(c){case"s":e="काही सेकंदां";break;case"m":e="एका मिनिटा";break;case"mm":e="%d मिनिटां";break;case"h":e="एका तासा";break;case"hh":e="%d तासां";break;case"d":e="एका दिवसा";break;case"dd":e="%d दिवसां";break;case"M":e="एका महिन्या";break;case"MM":e="%d महिन्यां";break;case"y":e="एका वर्षा";break;case"yy":e="%d वर्षां"}return e.replace(/%d/i,a)}function 
Ad(a){return 5>a%10&&a%10>1&&~~(a/10)%10!==1}function Bd(a,b,c){var d=a+" ";switch(c){case"m":return b?"minuta":"minutę";case"mm":return d+(Ad(a)?"minuty":"minut");case"h":return b?"godzina":"godzinę";case"hh":return d+(Ad(a)?"godziny":"godzin");case"MM":return d+(Ad(a)?"miesiące":"miesięcy");case"yy":return d+(Ad(a)?"lata":"lat")}} +//! moment.js locale configuration +//! locale : romanian (ro) +//! author : Vlad Gurdiga : https://github.com/gurdiga +//! author : Valentin Agachi : https://github.com/avaly +function Cd(a,b,c){var d={mm:"minute",hh:"ore",dd:"zile",MM:"luni",yy:"ani"},e=" ";return(a%100>=20||a>=100&&a%100===0)&&(e=" de "),a+e+d[c]} +//! moment.js locale configuration +//! locale : russian (ru) +//! author : Viktorminator : https://github.com/Viktorminator +//! Author : Menelion Elensúle : https://github.com/Oire +function Dd(a,b){var c=a.split("_");return b%10===1&&b%100!==11?c[0]:b%10>=2&&4>=b%10&&(10>b%100||b%100>=20)?c[1]:c[2]}function Ed(a,b,c){var d={mm:b?"минута_минуты_минут":"минуту_минуты_минут",hh:"час_часа_часов",dd:"день_дня_дней",MM:"месяц_месяца_месяцев",yy:"год_года_лет"};return"m"===c?b?"минута":"минуту":a+" "+Dd(d[c],+a)}function Fd(a){return a>1&&5>a}function Gd(a,b,c,d){var e=a+" ";switch(c){case"s":return b||d?"pár sekúnd":"pár sekundami";case"m":return b?"minúta":d?"minútu":"minútou";case"mm":return b||d?e+(Fd(a)?"minúty":"minút"):e+"minútami";break;case"h":return b?"hodina":d?"hodinu":"hodinou";case"hh":return b||d?e+(Fd(a)?"hodiny":"hodín"):e+"hodinami";break;case"d":return b||d?"deň":"dňom";case"dd":return b||d?e+(Fd(a)?"dni":"dní"):e+"dňami";break;case"M":return b||d?"mesiac":"mesiacom";case"MM":return b||d?e+(Fd(a)?"mesiace":"mesiacov"):e+"mesiacmi";break;case"y":return b||d?"rok":"rokom";case"yy":return b||d?e+(Fd(a)?"roky":"rokov"):e+"rokmi"}} +//! moment.js locale configuration +//! locale : slovenian (sl) +//! 
author : Robert Sedovšek : https://github.com/sedovsek +function Hd(a,b,c,d){var e=a+" ";switch(c){case"s":return b||d?"nekaj sekund":"nekaj sekundami";case"m":return b?"ena minuta":"eno minuto";case"mm":return e+=1===a?b?"minuta":"minuto":2===a?b||d?"minuti":"minutama":5>a?b||d?"minute":"minutami":b||d?"minut":"minutami";case"h":return b?"ena ura":"eno uro";case"hh":return e+=1===a?b?"ura":"uro":2===a?b||d?"uri":"urama":5>a?b||d?"ure":"urami":b||d?"ur":"urami";case"d":return b||d?"en dan":"enim dnem";case"dd":return e+=1===a?b||d?"dan":"dnem":2===a?b||d?"dni":"dnevoma":b||d?"dni":"dnevi";case"M":return b||d?"en mesec":"enim mesecem";case"MM":return e+=1===a?b||d?"mesec":"mesecem":2===a?b||d?"meseca":"mesecema":5>a?b||d?"mesece":"meseci":b||d?"mesecev":"meseci";case"y":return b||d?"eno leto":"enim letom";case"yy":return e+=1===a?b||d?"leto":"letom":2===a?b||d?"leti":"letoma":5>a?b||d?"leta":"leti":b||d?"let":"leti"}}function Id(a){var b=a;return b=-1!==a.indexOf("jaj")?b.slice(0,-3)+"leS":-1!==a.indexOf("jar")?b.slice(0,-3)+"waQ":-1!==a.indexOf("DIS")?b.slice(0,-3)+"nem":b+" pIq"}function Jd(a){var b=a;return b=-1!==a.indexOf("jaj")?b.slice(0,-3)+"Hu’":-1!==a.indexOf("jar")?b.slice(0,-3)+"wen":-1!==a.indexOf("DIS")?b.slice(0,-3)+"ben":b+" ret"}function Kd(a,b,c,d){var e=Ld(a);switch(c){case"mm":return e+" tup";case"hh":return e+" rep";case"dd":return e+" jaj";case"MM":return e+" jar";case"yy":return e+" DIS"}}function Ld(a){var b=Math.floor(a%1e3/100),c=Math.floor(a%100/10),d=a%10,e="";return b>0&&(e+=Lg[b]+"vatlh"),c>0&&(e+=(""!==e?" ":"")+Lg[c]+"maH"),d>0&&(e+=(""!==e?" 
":"")+Lg[d]),""===e?"pagh":e}function Md(a,b,c,d){var e={s:["viensas secunds","'iensas secunds"],m:["'n míut","'iens míut"],mm:[a+" míuts",""+a+" míuts"],h:["'n þora","'iensa þora"],hh:[a+" þoras",""+a+" þoras"],d:["'n ziua","'iensa ziua"],dd:[a+" ziuas",""+a+" ziuas"],M:["'n mes","'iens mes"],MM:[a+" mesen",""+a+" mesen"],y:["'n ar","'iens ar"],yy:[a+" ars",""+a+" ars"]};return d?e[c][0]:b?e[c][0]:e[c][1]} +//! moment.js locale configuration +//! locale : ukrainian (uk) +//! author : zemlanin : https://github.com/zemlanin +//! Author : Menelion Elensúle : https://github.com/Oire +function Nd(a,b){var c=a.split("_");return b%10===1&&b%100!==11?c[0]:b%10>=2&&4>=b%10&&(10>b%100||b%100>=20)?c[1]:c[2]}function Od(a,b,c){var d={mm:b?"хвилина_хвилини_хвилин":"хвилину_хвилини_хвилин",hh:b?"година_години_годин":"годину_години_годин",dd:"день_дні_днів",MM:"місяць_місяці_місяців",yy:"рік_роки_років"};return"m"===c?b?"хвилина":"хвилину":"h"===c?b?"година":"годину":a+" "+Nd(d[c],+a)}function Pd(a,b){var c={nominative:"неділя_понеділок_вівторок_середа_четвер_п’ятниця_субота".split("_"),accusative:"неділю_понеділок_вівторок_середу_четвер_п’ятницю_суботу".split("_"),genitive:"неділі_понеділка_вівторка_середи_четверга_п’ятниці_суботи".split("_")},d=/(\[[ВвУу]\]) ?dddd/.test(b)?"accusative":/\[?(?:минулої|наступної)? 
?\] ?dddd/.test(b)?"genitive":"nominative";return c[d][a.day()]}function Qd(a){return function(){return a+"о"+(11===this.hours()?"б":"")+"] LT"}}var Rd,Sd,Td=a.momentProperties=[],Ud=!1,Vd={},Wd={},Xd=/(\[[^\[]*\])|(\\)?([Hh]mm(ss)?|Mo|MM?M?M?|Do|DDDo|DD?D?D?|ddd?d?|do?|w[o|w]?|W[o|W]?|Qo?|YYYYYY|YYYYY|YYYY|YY|gg(ggg?)?|GG(GGG?)?|e|E|a|A|hh?|HH?|mm?|ss?|S{1,9}|x|X|zz?|ZZ?|.)/g,Yd=/(\[[^\[]*\])|(\\)?(LTS|LT|LL?L?L?|l{1,4})/g,Zd={},$d={},_d=/\d/,ae=/\d\d/,be=/\d{3}/,ce=/\d{4}/,de=/[+-]?\d{6}/,ee=/\d\d?/,fe=/\d\d\d\d?/,ge=/\d\d\d\d\d\d?/,he=/\d{1,3}/,ie=/\d{1,4}/,je=/[+-]?\d{1,6}/,ke=/\d+/,le=/[+-]?\d+/,me=/Z|[+-]\d\d:?\d\d/gi,ne=/Z|[+-]\d\d(?::?\d\d)?/gi,oe=/[+-]?\d+(\.\d{1,3})?/,pe=/[0-9]*['a-z\u00A0-\u05FF\u0700-\uD7FF\uF900-\uFDCF\uFDF0-\uFFEF]+|[\u0600-\u06FF\/]+(\s*?[\u0600-\u06FF]+){1,2}/i,qe={},re={},se=0,te=1,ue=2,ve=3,we=4,xe=5,ye=6,ze=7,Ae=8;J("M",["MM",2],"Mo",function(){return this.month()+1}),J("MMM",0,0,function(a){return this.localeData().monthsShort(this,a)}),J("MMMM",0,0,function(a){return this.localeData().months(this,a)}),A("month","M"),O("M",ee),O("MM",ee,ae),O("MMM",function(a,b){return b.monthsShortRegex(a)}),O("MMMM",function(a,b){return b.monthsRegex(a)}),S(["M","MM"],function(a,b){b[te]=r(a)-1}),S(["MMM","MMMM"],function(a,b,c,d){var e=c._locale.monthsParse(a,d,c._strict);null!=e?b[te]=e:j(c).invalidMonth=a});var Be=/D[oD]?(\[[^\[\]]*\]|\s+)+MMMM?/,Ce="January_February_March_April_May_June_July_August_September_October_November_December".split("_"),De="Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),Ee=pe,Fe=pe,Ge={};a.suppressDeprecationWarnings=!1;var He=/^\s*((?:[+-]\d{6}|\d{4})-(?:\d\d-\d\d|W\d\d-\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?::\d\d(?::\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?/,Ie=/^\s*((?:[+-]\d{6}|\d{4})(?:\d\d\d\d|W\d\d\d|W\d\d|\d\d\d|\d\d))(?:(T| 
)(\d\d(?:\d\d(?:\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?/,Je=/Z|[+-]\d\d(?::?\d\d)?/,Ke=[["YYYYYY-MM-DD",/[+-]\d{6}-\d\d-\d\d/],["YYYY-MM-DD",/\d{4}-\d\d-\d\d/],["GGGG-[W]WW-E",/\d{4}-W\d\d-\d/],["GGGG-[W]WW",/\d{4}-W\d\d/,!1],["YYYY-DDD",/\d{4}-\d{3}/],["YYYY-MM",/\d{4}-\d\d/,!1],["YYYYYYMMDD",/[+-]\d{10}/],["YYYYMMDD",/\d{8}/],["GGGG[W]WWE",/\d{4}W\d{3}/],["GGGG[W]WW",/\d{4}W\d{2}/,!1],["YYYYDDD",/\d{7}/]],Le=[["HH:mm:ss.SSSS",/\d\d:\d\d:\d\d\.\d+/],["HH:mm:ss,SSSS",/\d\d:\d\d:\d\d,\d+/],["HH:mm:ss",/\d\d:\d\d:\d\d/],["HH:mm",/\d\d:\d\d/],["HHmmss.SSSS",/\d\d\d\d\d\d\.\d+/],["HHmmss,SSSS",/\d\d\d\d\d\d,\d+/],["HHmmss",/\d\d\d\d\d\d/],["HHmm",/\d\d\d\d/],["HH",/\d\d/]],Me=/^\/?Date\((\-?\d+)/i;a.createFromInputFallback=fa("moment construction falls back to js Date. This is discouraged and will be removed in upcoming major release. Please refer to https://github.com/moment/moment/issues/1407 for more info.",function(a){a._d=new Date(a._i+(a._useUTC?" UTC":""))}),J("Y",0,0,function(){var a=this.year();return 9999>=a?""+a:"+"+a}),J(0,["YY",2],0,function(){return this.year()%100}),J(0,["YYYY",4],0,"year"),J(0,["YYYYY",5],0,"year"),J(0,["YYYYYY",6,!0],0,"year"),A("year","y"),O("Y",le),O("YY",ee,ae),O("YYYY",ie,ce),O("YYYYY",je,de),O("YYYYYY",je,de),S(["YYYYY","YYYYYY"],se),S("YYYY",function(b,c){c[se]=2===b.length?a.parseTwoDigitYear(b):r(b)}),S("YY",function(b,c){c[se]=a.parseTwoDigitYear(b)}),S("Y",function(a,b){b[se]=parseInt(a,10)}),a.parseTwoDigitYear=function(a){return r(a)+(r(a)>68?1900:2e3)};var Ne=E("FullYear",!1);a.ISO_8601=function(){};var Oe=fa("moment().min is deprecated, use moment.min instead. https://github.com/moment/moment/issues/1548",function(){var a=Ea.apply(null,arguments);return this.isValid()&&a.isValid()?this>a?this:a:l()}),Pe=fa("moment().max is deprecated, use moment.max instead. 
https://github.com/moment/moment/issues/1548",function(){var a=Ea.apply(null,arguments);return this.isValid()&&a.isValid()?a>this?this:a:l()}),Qe=function(){return Date.now?Date.now():+new Date};Ka("Z",":"),Ka("ZZ",""),O("Z",ne),O("ZZ",ne),S(["Z","ZZ"],function(a,b,c){c._useUTC=!0,c._tzm=La(ne,a)});var Re=/([\+\-]|\d\d)/gi;a.updateOffset=function(){};var Se=/(\-)?(?:(\d*)[. ])?(\d+)\:(\d+)(?:\:(\d+)\.?(\d{3})?)?/,Te=/^(-)?P(?:(?:([0-9,.]*)Y)?(?:([0-9,.]*)M)?(?:([0-9,.]*)D)?(?:T(?:([0-9,.]*)H)?(?:([0-9,.]*)M)?(?:([0-9,.]*)S)?)?|([0-9,.]*)W)$/;Za.fn=Ia.prototype;var Ue=bb(1,"add"),Ve=bb(-1,"subtract");a.defaultFormat="YYYY-MM-DDTHH:mm:ssZ";var We=fa("moment().lang() is deprecated. Instead, use moment().localeData() to get the language configuration. Use moment().locale() to change languages.",function(a){return void 0===a?this.localeData():this.locale(a)});J(0,["gg",2],0,function(){return this.weekYear()%100}),J(0,["GG",2],0,function(){return this.isoWeekYear()%100}),Ib("gggg","weekYear"),Ib("ggggg","weekYear"),Ib("GGGG","isoWeekYear"),Ib("GGGGG","isoWeekYear"),A("weekYear","gg"),A("isoWeekYear","GG"),O("G",le),O("g",le),O("GG",ee,ae),O("gg",ee,ae),O("GGGG",ie,ce),O("gggg",ie,ce),O("GGGGG",je,de),O("ggggg",je,de),T(["gggg","ggggg","GGGG","GGGGG"],function(a,b,c,d){b[d.substr(0,2)]=r(a)}),T(["gg","GG"],function(b,c,d,e){c[e]=a.parseTwoDigitYear(b)}),J("Q",0,"Qo","quarter"),A("quarter","Q"),O("Q",_d),S("Q",function(a,b){b[te]=3*(r(a)-1)}),J("w",["ww",2],"wo","week"),J("W",["WW",2],"Wo","isoWeek"),A("week","w"),A("isoWeek","W"),O("w",ee),O("ww",ee,ae),O("W",ee),O("WW",ee,ae),T(["w","ww","W","WW"],function(a,b,c,d){b[d.substr(0,1)]=r(a)});var Xe={dow:0,doy:6};J("D",["DD",2],"Do","date"),A("date","D"),O("D",ee),O("DD",ee,ae),O("Do",function(a,b){return a?b._ordinalParse:b._ordinalParseLenient}),S(["D","DD"],ue),S("Do",function(a,b){b[ue]=r(a.match(ee)[0],10)});var Ye=E("Date",!0);J("d",0,"do","day"),J("dd",0,0,function(a){return 
this.localeData().weekdaysMin(this,a)}),J("ddd",0,0,function(a){return this.localeData().weekdaysShort(this,a)}),J("dddd",0,0,function(a){return this.localeData().weekdays(this,a)}),J("e",0,0,"weekday"),J("E",0,0,"isoWeekday"),A("day","d"),A("weekday","e"),A("isoWeekday","E"),O("d",ee),O("e",ee),O("E",ee),O("dd",pe),O("ddd",pe),O("dddd",pe),T(["dd","ddd","dddd"],function(a,b,c,d){var e=c._locale.weekdaysParse(a,d,c._strict);null!=e?b.d=e:j(c).invalidWeekday=a}),T(["d","e","E"],function(a,b,c,d){b[d]=r(a)});var Ze="Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),$e="Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),_e="Su_Mo_Tu_We_Th_Fr_Sa".split("_");J("DDD",["DDDD",3],"DDDo","dayOfYear"),A("dayOfYear","DDD"),O("DDD",he),O("DDDD",be),S(["DDD","DDDD"],function(a,b,c){c._dayOfYear=r(a)}),J("H",["HH",2],0,"hour"),J("h",["hh",2],0,cc),J("hmm",0,0,function(){return""+cc.apply(this)+I(this.minutes(),2)}),J("hmmss",0,0,function(){return""+cc.apply(this)+I(this.minutes(),2)+I(this.seconds(),2)}),J("Hmm",0,0,function(){return""+this.hours()+I(this.minutes(),2)}),J("Hmmss",0,0,function(){return""+this.hours()+I(this.minutes(),2)+I(this.seconds(),2)}),dc("a",!0),dc("A",!1),A("hour","h"),O("a",ec),O("A",ec),O("H",ee),O("h",ee),O("HH",ee,ae),O("hh",ee,ae),O("hmm",fe),O("hmmss",ge),O("Hmm",fe),O("Hmmss",ge),S(["H","HH"],ve),S(["a","A"],function(a,b,c){c._isPm=c._locale.isPM(a),c._meridiem=a}),S(["h","hh"],function(a,b,c){b[ve]=r(a),j(c).bigHour=!0}),S("hmm",function(a,b,c){var d=a.length-2;b[ve]=r(a.substr(0,d)),b[we]=r(a.substr(d)),j(c).bigHour=!0}),S("hmmss",function(a,b,c){var d=a.length-4,e=a.length-2;b[ve]=r(a.substr(0,d)),b[we]=r(a.substr(d,2)),b[xe]=r(a.substr(e)),j(c).bigHour=!0}),S("Hmm",function(a,b,c){var d=a.length-2;b[ve]=r(a.substr(0,d)),b[we]=r(a.substr(d))}),S("Hmmss",function(a,b,c){var d=a.length-4,e=a.length-2;b[ve]=r(a.substr(0,d)),b[we]=r(a.substr(d,2)),b[xe]=r(a.substr(e))});var 
af=/[ap]\.?m?\.?/i,bf=E("Hours",!0);J("m",["mm",2],0,"minute"),A("minute","m"),O("m",ee),O("mm",ee,ae),S(["m","mm"],we);var cf=E("Minutes",!1);J("s",["ss",2],0,"second"),A("second","s"),O("s",ee),O("ss",ee,ae),S(["s","ss"],xe);var df=E("Seconds",!1);J("S",0,0,function(){return~~(this.millisecond()/100)}),J(0,["SS",2],0,function(){return~~(this.millisecond()/10)}),J(0,["SSS",3],0,"millisecond"),J(0,["SSSS",4],0,function(){return 10*this.millisecond()}),J(0,["SSSSS",5],0,function(){return 100*this.millisecond()}),J(0,["SSSSSS",6],0,function(){return 1e3*this.millisecond()}),J(0,["SSSSSSS",7],0,function(){return 1e4*this.millisecond()}),J(0,["SSSSSSSS",8],0,function(){return 1e5*this.millisecond()}),J(0,["SSSSSSSSS",9],0,function(){return 1e6*this.millisecond()}),A("millisecond","ms"),O("S",he,_d),O("SS",he,ae),O("SSS",he,be);var ef;for(ef="SSSS";ef.length<=9;ef+="S")O(ef,ke);for(ef="S";ef.length<=9;ef+="S")S(ef,hc);var ff=E("Milliseconds",!1);J("z",0,0,"zoneAbbr"),J("zz",0,0,"zoneName");var 
gf=o.prototype;gf.add=Ue,gf.calendar=db,gf.clone=eb,gf.diff=lb,gf.endOf=xb,gf.format=pb,gf.from=qb,gf.fromNow=rb,gf.to=sb,gf.toNow=tb,gf.get=H,gf.invalidAt=Gb,gf.isAfter=fb,gf.isBefore=gb,gf.isBetween=hb,gf.isSame=ib,gf.isSameOrAfter=jb,gf.isSameOrBefore=kb,gf.isValid=Eb,gf.lang=We,gf.locale=ub,gf.localeData=vb,gf.max=Pe,gf.min=Oe,gf.parsingFlags=Fb,gf.set=H,gf.startOf=wb,gf.subtract=Ve,gf.toArray=Bb,gf.toObject=Cb,gf.toDate=Ab,gf.toISOString=ob,gf.toJSON=Db,gf.toString=nb,gf.unix=zb,gf.valueOf=yb,gf.creationData=Hb,gf.year=Ne,gf.isLeapYear=na,gf.weekYear=Jb,gf.isoWeekYear=Kb,gf.quarter=gf.quarters=Pb,gf.month=$,gf.daysInMonth=_,gf.week=gf.weeks=Tb,gf.isoWeek=gf.isoWeeks=Ub,gf.weeksInYear=Mb,gf.isoWeeksInYear=Lb,gf.date=Ye,gf.day=gf.days=$b,gf.weekday=_b,gf.isoWeekday=ac,gf.dayOfYear=bc,gf.hour=gf.hours=bf,gf.minute=gf.minutes=cf,gf.second=gf.seconds=df,gf.millisecond=gf.milliseconds=ff,gf.utcOffset=Oa,gf.utc=Qa,gf.local=Ra,gf.parseZone=Sa,gf.hasAlignedHourOffset=Ta,gf.isDST=Ua,gf.isDSTShifted=Va,gf.isLocal=Wa,gf.isUtcOffset=Xa,gf.isUtc=Ya,gf.isUTC=Ya,gf.zoneAbbr=ic,gf.zoneName=jc,gf.dates=fa("dates accessor is deprecated. Use date instead.",Ye),gf.months=fa("months accessor is deprecated. Use month instead",$),gf.years=fa("years accessor is deprecated. Use year instead",Ne),gf.zone=fa("moment().zone is deprecated, use moment().utcOffset instead. 
https://github.com/moment/moment/issues/1779",Pa);var hf=gf,jf={sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},kf={LTS:"h:mm:ss A",LT:"h:mm A",L:"MM/DD/YYYY",LL:"MMMM D, YYYY",LLL:"MMMM D, YYYY h:mm A",LLLL:"dddd, MMMM D, YYYY h:mm A"},lf="Invalid date",mf="%d",nf=/\d{1,2}/,of={future:"in %s",past:"%s ago",s:"a few seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},pf=t.prototype;pf._calendar=jf,pf.calendar=mc,pf._longDateFormat=kf,pf.longDateFormat=nc,pf._invalidDate=lf,pf.invalidDate=oc,pf._ordinal=mf,pf.ordinal=pc,pf._ordinalParse=nf,pf.preparse=qc,pf.postformat=qc,pf._relativeTime=of,pf.relativeTime=rc,pf.pastFuture=sc,pf.set=tc,pf.months=W,pf._months=Ce,pf.monthsShort=X,pf._monthsShort=De,pf.monthsParse=Y,pf._monthsRegex=Fe,pf.monthsRegex=ba,pf._monthsShortRegex=Ee,pf.monthsShortRegex=aa,pf.week=Qb,pf._week=Xe,pf.firstDayOfYear=Sb,pf.firstDayOfWeek=Rb,pf.weekdays=Wb,pf._weekdays=Ze,pf.weekdaysMin=Yb,pf._weekdaysMin=_e,pf.weekdaysShort=Xb,pf._weekdaysShort=$e,pf.weekdaysParse=Zb,pf.isPM=fc,pf._meridiemParse=af,pf.meridiem=gc,x("en",{ordinalParse:/\d{1,2}(th|st|nd|rd)/,ordinal:function(a){var b=a%10,c=1===r(a%100/10)?"th":1===b?"st":2===b?"nd":3===b?"rd":"th";return a+c}}),a.lang=fa("moment.lang is deprecated. Use moment.locale instead.",x),a.langData=fa("moment.langData is deprecated. 
Use moment.localeData instead.",z);var qf=Math.abs,rf=Lc("ms"),sf=Lc("s"),tf=Lc("m"),uf=Lc("h"),vf=Lc("d"),wf=Lc("w"),xf=Lc("M"),yf=Lc("y"),zf=Nc("milliseconds"),Af=Nc("seconds"),Bf=Nc("minutes"),Cf=Nc("hours"),Df=Nc("days"),Ef=Nc("months"),Ff=Nc("years"),Gf=Math.round,Hf={s:45,m:45,h:22,d:26,M:11},If=Math.abs,Jf=Ia.prototype;Jf.abs=Bc,Jf.add=Dc,Jf.subtract=Ec,Jf.as=Jc,Jf.asMilliseconds=rf,Jf.asSeconds=sf,Jf.asMinutes=tf,Jf.asHours=uf,Jf.asDays=vf,Jf.asWeeks=wf,Jf.asMonths=xf,Jf.asYears=yf,Jf.valueOf=Kc,Jf._bubble=Gc,Jf.get=Mc,Jf.milliseconds=zf,Jf.seconds=Af,Jf.minutes=Bf,Jf.hours=Cf,Jf.days=Df,Jf.weeks=Oc,Jf.months=Ef,Jf.years=Ff,Jf.humanize=Sc,Jf.toISOString=Tc,Jf.toString=Tc,Jf.toJSON=Tc,Jf.locale=ub,Jf.localeData=vb,Jf.toIsoString=fa("toIsoString() is deprecated. Please use toISOString() instead (notice the capitals)",Tc),Jf.lang=We,J("X",0,0,"unix"),J("x",0,0,"valueOf"),O("x",le),O("X",oe),S("X",function(a,b,c){c._d=new Date(1e3*parseFloat(a,10))}),S("x",function(a,b,c){c._d=new Date(r(a))}), +//! moment.js +//! version : 2.11.1 +//! authors : Tim Wood, Iskren Chernev, Moment.js contributors +//! license : MIT +//! 
momentjs.com +a.version="2.11.1",b(Ea),a.fn=hf,a.min=Ga,a.max=Ha,a.now=Qe,a.utc=h,a.unix=kc,a.months=wc,a.isDate=d,a.locale=x,a.invalid=l,a.duration=Za,a.isMoment=p,a.weekdays=yc,a.parseZone=lc,a.localeData=z,a.isDuration=Ja,a.monthsShort=xc,a.weekdaysMin=Ac,a.defineLocale=y,a.weekdaysShort=zc,a.normalizeUnits=B,a.relativeTimeThreshold=Rc,a.prototype=hf;var Kf=a,Lf=(Kf.defineLocale("af",{months:"Januarie_Februarie_Maart_April_Mei_Junie_Julie_Augustus_September_Oktober_November_Desember".split("_"),monthsShort:"Jan_Feb_Mar_Apr_Mei_Jun_Jul_Aug_Sep_Okt_Nov_Des".split("_"),weekdays:"Sondag_Maandag_Dinsdag_Woensdag_Donderdag_Vrydag_Saterdag".split("_"),weekdaysShort:"Son_Maa_Din_Woe_Don_Vry_Sat".split("_"),weekdaysMin:"So_Ma_Di_Wo_Do_Vr_Sa".split("_"),meridiemParse:/vm|nm/i,isPM:function(a){return/^nm$/i.test(a)},meridiem:function(a,b,c){return 12>a?c?"vm":"VM":c?"nm":"NM"},longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Vandag om] LT",nextDay:"[Môre om] LT",nextWeek:"dddd [om] LT",lastDay:"[Gister om] LT",lastWeek:"[Laas] dddd [om] LT",sameElse:"L"},relativeTime:{future:"oor %s",past:"%s gelede",s:"'n paar sekondes",m:"'n minuut",mm:"%d minute",h:"'n uur",hh:"%d ure",d:"'n dag",dd:"%d dae",M:"'n maand",MM:"%d maande",y:"'n jaar",yy:"%d jaar"},ordinalParse:/\d{1,2}(ste|de)/,ordinal:function(a){return a+(1===a||8===a||a>=20?"ste":"de")},week:{dow:1,doy:4}}),Kf.defineLocale("ar-ma",{months:"يناير_فبراير_مارس_أبريل_ماي_يونيو_يوليوز_غشت_شتنبر_أكتوبر_نونبر_دجنبر".split("_"),monthsShort:"يناير_فبراير_مارس_أبريل_ماي_يونيو_يوليوز_غشت_شتنبر_أكتوبر_نونبر_دجنبر".split("_"),weekdays:"الأحد_الإتنين_الثلاثاء_الأربعاء_الخميس_الجمعة_السبت".split("_"),weekdaysShort:"احد_اتنين_ثلاثاء_اربعاء_خميس_جمعة_سبت".split("_"),weekdaysMin:"ح_ن_ث_ر_خ_ج_س".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY 
HH:mm"},calendar:{sameDay:"[اليوم على الساعة] LT",nextDay:"[غدا على الساعة] LT",nextWeek:"dddd [على الساعة] LT",lastDay:"[أمس على الساعة] LT",lastWeek:"dddd [على الساعة] LT",sameElse:"L"},relativeTime:{future:"في %s",past:"منذ %s",s:"ثوان",m:"دقيقة",mm:"%d دقائق",h:"ساعة",hh:"%d ساعات",d:"يوم",dd:"%d أيام",M:"شهر",MM:"%d أشهر",y:"سنة",yy:"%d سنوات"},week:{dow:6,doy:12}}),{1:"١",2:"٢",3:"٣",4:"٤",5:"٥",6:"٦",7:"٧",8:"٨",9:"٩",0:"٠"}),Mf={"١":"1","٢":"2","٣":"3","٤":"4","٥":"5","٦":"6","٧":"7","٨":"8","٩":"9","٠":"0"},Nf=(Kf.defineLocale("ar-sa",{months:"يناير_فبراير_مارس_أبريل_مايو_يونيو_يوليو_أغسطس_سبتمبر_أكتوبر_نوفمبر_ديسمبر".split("_"),monthsShort:"يناير_فبراير_مارس_أبريل_مايو_يونيو_يوليو_أغسطس_سبتمبر_أكتوبر_نوفمبر_ديسمبر".split("_"),weekdays:"الأحد_الإثنين_الثلاثاء_الأربعاء_الخميس_الجمعة_السبت".split("_"),weekdaysShort:"أحد_إثنين_ثلاثاء_أربعاء_خميس_جمعة_سبت".split("_"),weekdaysMin:"ح_ن_ث_ر_خ_ج_س".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},meridiemParse:/ص|م/,isPM:function(a){return"م"===a},meridiem:function(a,b,c){return 12>a?"ص":"م"},calendar:{sameDay:"[اليوم على الساعة] LT",nextDay:"[غدا على الساعة] LT",nextWeek:"dddd [على الساعة] LT",lastDay:"[أمس على الساعة] LT",lastWeek:"dddd [على الساعة] LT",sameElse:"L"},relativeTime:{future:"في %s",past:"منذ %s",s:"ثوان",m:"دقيقة",mm:"%d دقائق",h:"ساعة",hh:"%d ساعات",d:"يوم",dd:"%d أيام",M:"شهر",MM:"%d أشهر",y:"سنة",yy:"%d سنوات"},preparse:function(a){return a.replace(/[١٢٣٤٥٦٧٨٩٠]/g,function(a){return Mf[a]}).replace(/،/g,",")},postformat:function(a){return a.replace(/\d/g,function(a){return 
Lf[a]}).replace(/,/g,"،")},week:{dow:6,doy:12}}),Kf.defineLocale("ar-tn",{months:"جانفي_فيفري_مارس_أفريل_ماي_جوان_جويلية_أوت_سبتمبر_أكتوبر_نوفمبر_ديسمبر".split("_"),monthsShort:"جانفي_فيفري_مارس_أفريل_ماي_جوان_جويلية_أوت_سبتمبر_أكتوبر_نوفمبر_ديسمبر".split("_"),weekdays:"الأحد_الإثنين_الثلاثاء_الأربعاء_الخميس_الجمعة_السبت".split("_"),weekdaysShort:"أحد_إثنين_ثلاثاء_أربعاء_خميس_جمعة_سبت".split("_"),weekdaysMin:"ح_ن_ث_ر_خ_ج_س".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[اليوم على الساعة] LT",nextDay:"[غدا على الساعة] LT",nextWeek:"dddd [على الساعة] LT",lastDay:"[أمس على الساعة] LT",lastWeek:"dddd [على الساعة] LT",sameElse:"L"},relativeTime:{future:"في %s",past:"منذ %s",s:"ثوان",m:"دقيقة",mm:"%d دقائق",h:"ساعة",hh:"%d ساعات",d:"يوم",dd:"%d أيام",M:"شهر",MM:"%d أشهر",y:"سنة",yy:"%d سنوات"},week:{dow:1,doy:4}}),{1:"١",2:"٢",3:"٣",4:"٤",5:"٥",6:"٦",7:"٧",8:"٨",9:"٩",0:"٠"}),Of={"١":"1","٢":"2","٣":"3","٤":"4","٥":"5","٦":"6","٧":"7","٨":"8","٩":"9","٠":"0"},Pf=function(a){return 0===a?0:1===a?1:2===a?2:a%100>=3&&10>=a%100?3:a%100>=11?4:5},Qf={s:["أقل من ثانية","ثانية واحدة",["ثانيتان","ثانيتين"],"%d ثوان","%d ثانية","%d ثانية"],m:["أقل من دقيقة","دقيقة واحدة",["دقيقتان","دقيقتين"],"%d دقائق","%d دقيقة","%d دقيقة"],h:["أقل من ساعة","ساعة واحدة",["ساعتان","ساعتين"],"%d ساعات","%d ساعة","%d ساعة"],d:["أقل من يوم","يوم واحد",["يومان","يومين"],"%d أيام","%d يومًا","%d يوم"],M:["أقل من شهر","شهر واحد",["شهران","شهرين"],"%d أشهر","%d شهرا","%d شهر"],y:["أقل من عام","عام واحد",["عامان","عامين"],"%d أعوام","%d عامًا","%d عام"]},Rf=function(a){return function(b,c,d,e){var f=Pf(b),g=Qf[a][Pf(b)];return 2===f&&(g=g[c?0:1]),g.replace(/%d/i,b)}},Sf=["كانون الثاني يناير","شباط فبراير","آذار مارس","نيسان أبريل","أيار مايو","حزيران يونيو","تموز يوليو","آب أغسطس","أيلول سبتمبر","تشرين الأول أكتوبر","تشرين الثاني نوفمبر","كانون الأول 
ديسمبر"],Tf=(Kf.defineLocale("ar",{months:Sf,monthsShort:Sf,weekdays:"الأحد_الإثنين_الثلاثاء_الأربعاء_الخميس_الجمعة_السبت".split("_"),weekdaysShort:"أحد_إثنين_ثلاثاء_أربعاء_خميس_جمعة_سبت".split("_"),weekdaysMin:"ح_ن_ث_ر_خ_ج_س".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"D/‏M/‏YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},meridiemParse:/ص|م/,isPM:function(a){return"م"===a},meridiem:function(a,b,c){return 12>a?"ص":"م"},calendar:{sameDay:"[اليوم عند الساعة] LT",nextDay:"[غدًا عند الساعة] LT",nextWeek:"dddd [عند الساعة] LT",lastDay:"[أمس عند الساعة] LT",lastWeek:"dddd [عند الساعة] LT",sameElse:"L"},relativeTime:{future:"بعد %s",past:"منذ %s",s:Rf("s"),m:Rf("m"),mm:Rf("m"),h:Rf("h"),hh:Rf("h"),d:Rf("d"),dd:Rf("d"),M:Rf("M"),MM:Rf("M"),y:Rf("y"),yy:Rf("y")},preparse:function(a){return a.replace(/\u200f/g,"").replace(/[١٢٣٤٥٦٧٨٩٠]/g,function(a){return Of[a]}).replace(/،/g,",")},postformat:function(a){return a.replace(/\d/g,function(a){return Nf[a]}).replace(/,/g,"،")},week:{dow:6,doy:12}}),{1:"-inci",5:"-inci",8:"-inci",70:"-inci",80:"-inci",2:"-nci",7:"-nci",20:"-nci",50:"-nci",3:"-üncü",4:"-üncü",100:"-üncü",6:"-ncı",9:"-uncu",10:"-uncu",30:"-uncu",60:"-ıncı",90:"-ıncı"}),Uf=(Kf.defineLocale("az",{months:"yanvar_fevral_mart_aprel_may_iyun_iyul_avqust_sentyabr_oktyabr_noyabr_dekabr".split("_"),monthsShort:"yan_fev_mar_apr_may_iyn_iyl_avq_sen_okt_noy_dek".split("_"),weekdays:"Bazar_Bazar ertəsi_Çərşənbə axşamı_Çərşənbə_Cümə axşamı_Cümə_Şənbə".split("_"),weekdaysShort:"Baz_BzE_ÇAx_Çər_CAx_Cüm_Şən".split("_"),weekdaysMin:"Bz_BE_ÇA_Çə_CA_Cü_Şə".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[bugün saat] LT",nextDay:"[sabah saat] LT",nextWeek:"[gələn həftə] dddd [saat] LT",lastDay:"[dünən] LT",lastWeek:"[keçən həftə] dddd [saat] LT",sameElse:"L"},relativeTime:{future:"%s sonra",past:"%s əvvəl",s:"birneçə saniyyə",m:"bir 
dəqiqə",mm:"%d dəqiqə",h:"bir saat",hh:"%d saat",d:"bir gün",dd:"%d gün",M:"bir ay",MM:"%d ay",y:"bir il",yy:"%d il"},meridiemParse:/gecə|səhər|gündüz|axşam/,isPM:function(a){return/^(gündüz|axşam)$/.test(a)},meridiem:function(a,b,c){return 4>a?"gecə":12>a?"səhər":17>a?"gündüz":"axşam"},ordinalParse:/\d{1,2}-(ıncı|inci|nci|üncü|ncı|uncu)/,ordinal:function(a){if(0===a)return a+"-ıncı";var b=a%10,c=a%100-b,d=a>=100?100:null;return a+(Tf[b]||Tf[c]||Tf[d])},week:{dow:1,doy:7}}),Kf.defineLocale("be",{months:{format:"студзеня_лютага_сакавіка_красавіка_траўня_чэрвеня_ліпеня_жніўня_верасня_кастрычніка_лістапада_снежня".split("_"),standalone:"студзень_люты_сакавік_красавік_травень_чэрвень_ліпень_жнівень_верасень_кастрычнік_лістапад_снежань".split("_")},monthsShort:"студ_лют_сак_крас_трав_чэрв_ліп_жнів_вер_каст_ліст_снеж".split("_"),weekdays:{format:"нядзелю_панядзелак_аўторак_сераду_чацвер_пятніцу_суботу".split("_"),standalone:"нядзеля_панядзелак_аўторак_серада_чацвер_пятніца_субота".split("_"),isFormat:/\[ ?[Вв] ?(?:мінулую|наступную)? 
?\] ?dddd/},weekdaysShort:"нд_пн_ат_ср_чц_пт_сб".split("_"),weekdaysMin:"нд_пн_ат_ср_чц_пт_сб".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY г.",LLL:"D MMMM YYYY г., HH:mm",LLLL:"dddd, D MMMM YYYY г., HH:mm"},calendar:{sameDay:"[Сёння ў] LT",nextDay:"[Заўтра ў] LT",lastDay:"[Учора ў] LT",nextWeek:function(){return"[У] dddd [ў] LT"},lastWeek:function(){switch(this.day()){case 0:case 3:case 5:case 6:return"[У мінулую] dddd [ў] LT";case 1:case 2:case 4:return"[У мінулы] dddd [ў] LT"}},sameElse:"L"},relativeTime:{future:"праз %s",past:"%s таму",s:"некалькі секунд",m:Vc,mm:Vc,h:Vc,hh:Vc,d:"дзень",dd:Vc,M:"месяц",MM:Vc,y:"год",yy:Vc},meridiemParse:/ночы|раніцы|дня|вечара/,isPM:function(a){return/^(дня|вечара)$/.test(a)},meridiem:function(a,b,c){return 4>a?"ночы":12>a?"раніцы":17>a?"дня":"вечара"},ordinalParse:/\d{1,2}-(і|ы|га)/,ordinal:function(a,b){switch(b){case"M":case"d":case"DDD":case"w":case"W":return a%10!==2&&a%10!==3||a%100===12||a%100===13?a+"-ы":a+"-і";case"D":return a+"-га";default:return a}},week:{dow:1,doy:7}}),Kf.defineLocale("bg",{months:"януари_февруари_март_април_май_юни_юли_август_септември_октомври_ноември_декември".split("_"),monthsShort:"янр_фев_мар_апр_май_юни_юли_авг_сеп_окт_ное_дек".split("_"),weekdays:"неделя_понеделник_вторник_сряда_четвъртък_петък_събота".split("_"),weekdaysShort:"нед_пон_вто_сря_чет_пет_съб".split("_"),weekdaysMin:"нд_пн_вт_ср_чт_пт_сб".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"D.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY H:mm",LLLL:"dddd, D MMMM YYYY H:mm"},calendar:{sameDay:"[Днес в] LT",nextDay:"[Утре в] LT",nextWeek:"dddd [в] LT",lastDay:"[Вчера в] LT",lastWeek:function(){switch(this.day()){case 0:case 3:case 6:return"[В изминалата] dddd [в] LT";case 1:case 2:case 4:case 5:return"[В изминалия] dddd [в] LT"}},sameElse:"L"},relativeTime:{future:"след %s",past:"преди %s",s:"няколко секунди",m:"минута",mm:"%d минути",h:"час",hh:"%d часа",d:"ден",dd:"%d дни",M:"месец",MM:"%d 
месеца",y:"година",yy:"%d години"},ordinalParse:/\d{1,2}-(ев|ен|ти|ви|ри|ми)/,ordinal:function(a){var b=a%10,c=a%100;return 0===a?a+"-ев":0===c?a+"-ен":c>10&&20>c?a+"-ти":1===b?a+"-ви":2===b?a+"-ри":7===b||8===b?a+"-ми":a+"-ти"},week:{dow:1,doy:7}}),{1:"১",2:"২",3:"৩",4:"৪",5:"৫",6:"৬",7:"৭",8:"৮",9:"৯",0:"০"}),Vf={"১":"1","২":"2","৩":"3","৪":"4","৫":"5","৬":"6","৭":"7","৮":"8","৯":"9","০":"0"},Wf=(Kf.defineLocale("bn",{months:"জানুয়ারী_ফেবুয়ারী_মার্চ_এপ্রিল_মে_জুন_জুলাই_অগাস্ট_সেপ্টেম্বর_অক্টোবর_নভেম্বর_ডিসেম্বর".split("_"),monthsShort:"জানু_ফেব_মার্চ_এপর_মে_জুন_জুল_অগ_সেপ্ট_অক্টো_নভ_ডিসেম্".split("_"),weekdays:"রবিবার_সোমবার_মঙ্গলবার_বুধবার_বৃহস্পত্তিবার_শুক্রবার_শনিবার".split("_"),weekdaysShort:"রবি_সোম_মঙ্গল_বুধ_বৃহস্পত্তি_শুক্র_শনি".split("_"),weekdaysMin:"রব_সম_মঙ্গ_বু_ব্রিহ_শু_শনি".split("_"),longDateFormat:{LT:"A h:mm সময়",LTS:"A h:mm:ss সময়",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm সময়",LLLL:"dddd, D MMMM YYYY, A h:mm সময়"},calendar:{sameDay:"[আজ] LT",nextDay:"[আগামীকাল] LT",nextWeek:"dddd, LT",lastDay:"[গতকাল] LT",lastWeek:"[গত] dddd, LT",sameElse:"L"},relativeTime:{future:"%s পরে",past:"%s আগে",s:"কয়েক সেকেন্ড",m:"এক মিনিট",mm:"%d মিনিট",h:"এক ঘন্টা",hh:"%d ঘন্টা",d:"এক দিন",dd:"%d দিন",M:"এক মাস",MM:"%d মাস",y:"এক বছর",yy:"%d বছর"},preparse:function(a){return a.replace(/[১২৩৪৫৬৭৮৯০]/g,function(a){return Vf[a]})},postformat:function(a){return a.replace(/\d/g,function(a){return Uf[a]})},meridiemParse:/রাত|সকাল|দুপুর|বিকাল|রাত/,isPM:function(a){return/^(দুপুর|বিকাল|রাত)$/.test(a)},meridiem:function(a,b,c){return 
4>a?"রাত":10>a?"সকাল":17>a?"দুপুর":20>a?"বিকাল":"রাত"},week:{dow:0,doy:6}}),{1:"༡",2:"༢",3:"༣",4:"༤",5:"༥",6:"༦",7:"༧",8:"༨",9:"༩",0:"༠"}),Xf={"༡":"1","༢":"2","༣":"3","༤":"4","༥":"5","༦":"6","༧":"7","༨":"8","༩":"9","༠":"0"},Yf=(Kf.defineLocale("bo",{months:"ཟླ་བ་དང་པོ_ཟླ་བ་གཉིས་པ_ཟླ་བ་གསུམ་པ_ཟླ་བ་བཞི་པ_ཟླ་བ་ལྔ་པ_ཟླ་བ་དྲུག་པ_ཟླ་བ་བདུན་པ_ཟླ་བ་བརྒྱད་པ_ཟླ་བ་དགུ་པ_ཟླ་བ་བཅུ་པ_ཟླ་བ་བཅུ་གཅིག་པ_ཟླ་བ་བཅུ་གཉིས་པ".split("_"),monthsShort:"ཟླ་བ་དང་པོ_ཟླ་བ་གཉིས་པ_ཟླ་བ་གསུམ་པ_ཟླ་བ་བཞི་པ_ཟླ་བ་ལྔ་པ_ཟླ་བ་དྲུག་པ_ཟླ་བ་བདུན་པ_ཟླ་བ་བརྒྱད་པ_ཟླ་བ་དགུ་པ_ཟླ་བ་བཅུ་པ_ཟླ་བ་བཅུ་གཅིག་པ_ཟླ་བ་བཅུ་གཉིས་པ".split("_"),weekdays:"གཟའ་ཉི་མ་_གཟའ་ཟླ་བ་_གཟའ་མིག་དམར་_གཟའ་ལྷག་པ་_གཟའ་ཕུར་བུ_གཟའ་པ་སངས་_གཟའ་སྤེན་པ་".split("_"),weekdaysShort:"ཉི་མ་_ཟླ་བ་_མིག་དམར་_ལྷག་པ་_ཕུར་བུ_པ་སངས་_སྤེན་པ་".split("_"),weekdaysMin:"ཉི་མ་_ཟླ་བ་_མིག་དམར་_ལྷག་པ་_ཕུར་བུ_པ་སངས་_སྤེན་པ་".split("_"),longDateFormat:{LT:"A h:mm",LTS:"A h:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm",LLLL:"dddd, D MMMM YYYY, A h:mm"},calendar:{sameDay:"[དི་རིང] LT",nextDay:"[སང་ཉིན] LT",nextWeek:"[བདུན་ཕྲག་རྗེས་མ], LT",lastDay:"[ཁ་སང] LT",lastWeek:"[བདུན་ཕྲག་མཐའ་མ] dddd, LT",sameElse:"L"},relativeTime:{future:"%s ལ་",past:"%s སྔན་ལ",s:"ལམ་སང",m:"སྐར་མ་གཅིག",mm:"%d སྐར་མ",h:"ཆུ་ཚོད་གཅིག",hh:"%d ཆུ་ཚོད",d:"ཉིན་གཅིག",dd:"%d ཉིན་",M:"ཟླ་བ་གཅིག",MM:"%d ཟླ་བ",y:"ལོ་གཅིག",yy:"%d ལོ"},preparse:function(a){return a.replace(/[༡༢༣༤༥༦༧༨༩༠]/g,function(a){return Xf[a]})},postformat:function(a){return a.replace(/\d/g,function(a){return Wf[a]})},meridiemParse:/མཚན་མོ|ཞོགས་ཀས|ཉིན་གུང|དགོང་དག|མཚན་མོ/,isPM:function(a){return/^(ཉིན་གུང|དགོང་དག|མཚན་མོ)$/.test(a)},meridiem:function(a,b,c){return 
4>a?"མཚན་མོ":10>a?"ཞོགས་ཀས":17>a?"ཉིན་གུང":20>a?"དགོང་དག":"མཚན་མོ"},week:{dow:0,doy:6}}),Kf.defineLocale("br",{months:"Genver_C'hwevrer_Meurzh_Ebrel_Mae_Mezheven_Gouere_Eost_Gwengolo_Here_Du_Kerzu".split("_"),monthsShort:"Gen_C'hwe_Meu_Ebr_Mae_Eve_Gou_Eos_Gwe_Her_Du_Ker".split("_"),weekdays:"Sul_Lun_Meurzh_Merc'her_Yaou_Gwener_Sadorn".split("_"),weekdaysShort:"Sul_Lun_Meu_Mer_Yao_Gwe_Sad".split("_"),weekdaysMin:"Su_Lu_Me_Mer_Ya_Gw_Sa".split("_"),longDateFormat:{LT:"h[e]mm A",LTS:"h[e]mm:ss A",L:"DD/MM/YYYY",LL:"D [a viz] MMMM YYYY",LLL:"D [a viz] MMMM YYYY h[e]mm A",LLLL:"dddd, D [a viz] MMMM YYYY h[e]mm A"},calendar:{sameDay:"[Hiziv da] LT",nextDay:"[Warc'hoazh da] LT",nextWeek:"dddd [da] LT",lastDay:"[Dec'h da] LT",lastWeek:"dddd [paset da] LT",sameElse:"L"},relativeTime:{future:"a-benn %s",past:"%s 'zo",s:"un nebeud segondennoù",m:"ur vunutenn",mm:Wc,h:"un eur",hh:"%d eur",d:"un devezh",dd:Wc,M:"ur miz",MM:Wc,y:"ur bloaz",yy:Xc},ordinalParse:/\d{1,2}(añ|vet)/,ordinal:function(a){var b=1===a?"añ":"vet";return a+b},week:{dow:1,doy:4}}),Kf.defineLocale("bs",{months:"januar_februar_mart_april_maj_juni_juli_august_septembar_oktobar_novembar_decembar".split("_"),monthsShort:"jan._feb._mar._apr._maj._jun._jul._aug._sep._okt._nov._dec.".split("_"),weekdays:"nedjelja_ponedjeljak_utorak_srijeda_četvrtak_petak_subota".split("_"),weekdaysShort:"ned._pon._uto._sri._čet._pet._sub.".split("_"),weekdaysMin:"ne_po_ut_sr_če_pe_su".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD. MM. YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd, D. 
MMMM YYYY H:mm"},calendar:{sameDay:"[danas u] LT",nextDay:"[sutra u] LT",nextWeek:function(){switch(this.day()){case 0:return"[u] [nedjelju] [u] LT";case 3:return"[u] [srijedu] [u] LT";case 6:return"[u] [subotu] [u] LT";case 1:case 2:case 4:case 5:return"[u] dddd [u] LT"}},lastDay:"[jučer u] LT",lastWeek:function(){switch(this.day()){case 0:case 3:return"[prošlu] dddd [u] LT";case 6:return"[prošle] [subote] [u] LT";case 1:case 2:case 4:case 5:return"[prošli] dddd [u] LT"}},sameElse:"L"},relativeTime:{future:"za %s",past:"prije %s",s:"par sekundi",m:_c,mm:_c,h:_c,hh:_c,d:"dan",dd:_c,M:"mjesec",MM:_c,y:"godinu",yy:_c},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}}),Kf.defineLocale("ca",{months:"gener_febrer_març_abril_maig_juny_juliol_agost_setembre_octubre_novembre_desembre".split("_"),monthsShort:"gen._febr._mar._abr._mai._jun._jul._ag._set._oct._nov._des.".split("_"),weekdays:"diumenge_dilluns_dimarts_dimecres_dijous_divendres_dissabte".split("_"),weekdaysShort:"dg._dl._dt._dc._dj._dv._ds.".split("_"),weekdaysMin:"Dg_Dl_Dt_Dc_Dj_Dv_Ds".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY H:mm",LLLL:"dddd D MMMM YYYY H:mm"},calendar:{sameDay:function(){return"[avui a "+(1!==this.hours()?"les":"la")+"] LT"},nextDay:function(){return"[demà a "+(1!==this.hours()?"les":"la")+"] LT"},nextWeek:function(){return"dddd [a "+(1!==this.hours()?"les":"la")+"] LT"},lastDay:function(){return"[ahir a "+(1!==this.hours()?"les":"la")+"] LT"},lastWeek:function(){return"[el] dddd [passat a "+(1!==this.hours()?"les":"la")+"] LT"},sameElse:"L"},relativeTime:{future:"en %s",past:"fa %s",s:"uns segons",m:"un minut",mm:"%d minuts",h:"una hora",hh:"%d hores",d:"un dia",dd:"%d dies",M:"un mes",MM:"%d mesos",y:"un any",yy:"%d anys"},ordinalParse:/\d{1,2}(r|n|t|è|a)/,ordinal:function(a,b){var 
c=1===a?"r":2===a?"n":3===a?"r":4===a?"t":"è";return("w"===b||"W"===b)&&(c="a"),a+c},week:{dow:1,doy:4}}),"leden_únor_březen_duben_květen_červen_červenec_srpen_září_říjen_listopad_prosinec".split("_")),Zf="led_úno_bře_dub_kvě_čvn_čvc_srp_zář_říj_lis_pro".split("_"),$f=(Kf.defineLocale("cs",{months:Yf,monthsShort:Zf,monthsParse:function(a,b){var c,d=[];for(c=0;12>c;c++)d[c]=new RegExp("^"+a[c]+"$|^"+b[c]+"$","i");return d}(Yf,Zf),shortMonthsParse:function(a){var b,c=[];for(b=0;12>b;b++)c[b]=new RegExp("^"+a[b]+"$","i");return c}(Zf),longMonthsParse:function(a){var b,c=[];for(b=0;12>b;b++)c[b]=new RegExp("^"+a[b]+"$","i");return c}(Yf),weekdays:"neděle_pondělí_úterý_středa_čtvrtek_pátek_sobota".split("_"),weekdaysShort:"ne_po_út_st_čt_pá_so".split("_"),weekdaysMin:"ne_po_út_st_čt_pá_so".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd D. MMMM YYYY H:mm"},calendar:{sameDay:"[dnes v] LT",nextDay:"[zítra v] LT",nextWeek:function(){switch(this.day()){case 0:return"[v neděli v] LT";case 1:case 2:return"[v] dddd [v] LT";case 3:return"[ve středu v] LT";case 4:return"[ve čtvrtek v] LT";case 5:return"[v pátek v] LT";case 6:return"[v sobotu v] LT"}},lastDay:"[včera v] LT",lastWeek:function(){switch(this.day()){case 0:return"[minulou neděli v] LT";case 1:case 2:return"[minulé] dddd [v] LT";case 3:return"[minulou středu v] LT";case 4:case 5:return"[minulý] dddd [v] LT";case 6:return"[minulou sobotu v] LT"}},sameElse:"L"},relativeTime:{future:"za %s",past:"před 
%s",s:bd,m:bd,mm:bd,h:bd,hh:bd,d:bd,dd:bd,M:bd,MM:bd,y:bd,yy:bd},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),Kf.defineLocale("cv",{months:"кӑрлач_нарӑс_пуш_ака_май_ҫӗртме_утӑ_ҫурла_авӑн_юпа_чӳк_раштав".split("_"),monthsShort:"кӑр_нар_пуш_ака_май_ҫӗр_утӑ_ҫур_авн_юпа_чӳк_раш".split("_"),weekdays:"вырсарникун_тунтикун_ытларикун_юнкун_кӗҫнерникун_эрнекун_шӑматкун".split("_"),weekdaysShort:"выр_тун_ытл_юн_кӗҫ_эрн_шӑм".split("_"),weekdaysMin:"вр_тн_ыт_юн_кҫ_эр_шм".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD-MM-YYYY",LL:"YYYY [ҫулхи] MMMM [уйӑхӗн] D[-мӗшӗ]",LLL:"YYYY [ҫулхи] MMMM [уйӑхӗн] D[-мӗшӗ], HH:mm",LLLL:"dddd, YYYY [ҫулхи] MMMM [уйӑхӗн] D[-мӗшӗ], HH:mm"},calendar:{sameDay:"[Паян] LT [сехетре]",nextDay:"[Ыран] LT [сехетре]",lastDay:"[Ӗнер] LT [сехетре]",nextWeek:"[Ҫитес] dddd LT [сехетре]",lastWeek:"[Иртнӗ] dddd LT [сехетре]",sameElse:"L"},relativeTime:{future:function(a){var b=/сехет$/i.exec(a)?"рен":/ҫул$/i.exec(a)?"тан":"ран";return a+b},past:"%s каялла",s:"пӗр-ик ҫеккунт",m:"пӗр минут",mm:"%d минут",h:"пӗр сехет",hh:"%d сехет",d:"пӗр кун",dd:"%d кун",M:"пӗр уйӑх",MM:"%d уйӑх",y:"пӗр ҫул",yy:"%d ҫул"},ordinalParse:/\d{1,2}-мӗш/,ordinal:"%d-мӗш",week:{dow:1,doy:7}}),Kf.defineLocale("cy",{months:"Ionawr_Chwefror_Mawrth_Ebrill_Mai_Mehefin_Gorffennaf_Awst_Medi_Hydref_Tachwedd_Rhagfyr".split("_"),monthsShort:"Ion_Chwe_Maw_Ebr_Mai_Meh_Gor_Aws_Med_Hyd_Tach_Rhag".split("_"),weekdays:"Dydd Sul_Dydd Llun_Dydd Mawrth_Dydd Mercher_Dydd Iau_Dydd Gwener_Dydd Sadwrn".split("_"),weekdaysShort:"Sul_Llun_Maw_Mer_Iau_Gwe_Sad".split("_"),weekdaysMin:"Su_Ll_Ma_Me_Ia_Gw_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Heddiw am] LT",nextDay:"[Yfory am] LT",nextWeek:"dddd [am] LT",lastDay:"[Ddoe am] LT",lastWeek:"dddd [diwethaf am] LT",sameElse:"L"},relativeTime:{future:"mewn %s",past:"%s yn ôl",s:"ychydig eiliadau",m:"munud",mm:"%d 
munud",h:"awr",hh:"%d awr",d:"diwrnod",dd:"%d diwrnod",M:"mis",MM:"%d mis",y:"blwyddyn",yy:"%d flynedd"},ordinalParse:/\d{1,2}(fed|ain|af|il|ydd|ed|eg)/,ordinal:function(a){var b=a,c="",d=["","af","il","ydd","ydd","ed","ed","ed","fed","fed","fed","eg","fed","eg","eg","fed","eg","eg","fed","eg","fed"];return b>20?c=40===b||50===b||60===b||80===b||100===b?"fed":"ain":b>0&&(c=d[b]),a+c},week:{dow:1,doy:4}}),Kf.defineLocale("da",{months:"januar_februar_marts_april_maj_juni_juli_august_september_oktober_november_december".split("_"),monthsShort:"jan_feb_mar_apr_maj_jun_jul_aug_sep_okt_nov_dec".split("_"),weekdays:"søndag_mandag_tirsdag_onsdag_torsdag_fredag_lørdag".split("_"),weekdaysShort:"søn_man_tir_ons_tor_fre_lør".split("_"),weekdaysMin:"sø_ma_ti_on_to_fr_lø".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY HH:mm",LLLL:"dddd [d.] D. MMMM YYYY HH:mm"},calendar:{sameDay:"[I dag kl.] LT",nextDay:"[I morgen kl.] LT",nextWeek:"dddd [kl.] LT",lastDay:"[I går kl.] LT",lastWeek:"[sidste] dddd [kl] LT",sameElse:"L"},relativeTime:{future:"om %s",past:"%s siden",s:"få sekunder",m:"et minut",mm:"%d minutter",h:"en time",hh:"%d timer",d:"en dag",dd:"%d dage",M:"en måned",MM:"%d måneder",y:"et år",yy:"%d år"},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),Kf.defineLocale("de-at",{months:"Jänner_Februar_März_April_Mai_Juni_Juli_August_September_Oktober_November_Dezember".split("_"),monthsShort:"Jän._Febr._Mrz._Apr._Mai_Jun._Jul._Aug._Sept._Okt._Nov._Dez.".split("_"),weekdays:"Sonntag_Montag_Dienstag_Mittwoch_Donnerstag_Freitag_Samstag".split("_"),weekdaysShort:"So._Mo._Di._Mi._Do._Fr._Sa.".split("_"),weekdaysMin:"So_Mo_Di_Mi_Do_Fr_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY HH:mm",LLLL:"dddd, D. 
MMMM YYYY HH:mm"},calendar:{sameDay:"[heute um] LT [Uhr]",sameElse:"L",nextDay:"[morgen um] LT [Uhr]",nextWeek:"dddd [um] LT [Uhr]",lastDay:"[gestern um] LT [Uhr]",lastWeek:"[letzten] dddd [um] LT [Uhr]"},relativeTime:{future:"in %s",past:"vor %s",s:"ein paar Sekunden",m:cd,mm:"%d Minuten",h:cd,hh:"%d Stunden",d:cd,dd:cd,M:cd,MM:cd,y:cd,yy:cd},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),Kf.defineLocale("de",{months:"Januar_Februar_März_April_Mai_Juni_Juli_August_September_Oktober_November_Dezember".split("_"),monthsShort:"Jan._Febr._Mrz._Apr._Mai_Jun._Jul._Aug._Sept._Okt._Nov._Dez.".split("_"),weekdays:"Sonntag_Montag_Dienstag_Mittwoch_Donnerstag_Freitag_Samstag".split("_"),weekdaysShort:"So._Mo._Di._Mi._Do._Fr._Sa.".split("_"),weekdaysMin:"So_Mo_Di_Mi_Do_Fr_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY HH:mm",LLLL:"dddd, D. MMMM YYYY HH:mm"},calendar:{sameDay:"[heute um] LT [Uhr]",sameElse:"L",nextDay:"[morgen um] LT [Uhr]",nextWeek:"dddd [um] LT [Uhr]",lastDay:"[gestern um] LT [Uhr]",lastWeek:"[letzten] dddd [um] LT [Uhr]"},relativeTime:{future:"in %s",past:"vor %s",s:"ein paar Sekunden",m:dd,mm:"%d Minuten",h:dd,hh:"%d Stunden",d:dd,dd:dd,M:dd,MM:dd,y:dd,yy:dd},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),["ޖެނުއަރީ","ފެބްރުއަރީ","މާރިޗު","އޭޕްރީލު","މޭ","ޖޫން","ޖުލައި","އޯގަސްޓު","ސެޕްޓެމްބަރު","އޮކްޓޯބަރު","ނޮވެމްބަރު","ޑިސެމްބަރު"]),_f=["އާދިއްތަ","ހޯމަ","އަންގާރަ","ބުދަ","ބުރާސްފަތި","ހުކުރު","ހޮނިހިރު"],ag=(Kf.defineLocale("dv",{months:$f,monthsShort:$f,weekdays:_f,weekdaysShort:_f,weekdaysMin:"އާދި_ހޯމަ_އަން_ބުދަ_ބުރާ_ހުކު_ހޮނި".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"D/M/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},meridiemParse:/މކ|މފ/,isPM:function(a){return""===a},meridiem:function(a,b,c){return 12>a?"މކ":"މފ"},calendar:{sameDay:"[މިއަދު] LT",nextDay:"[މާދަމާ] LT",nextWeek:"dddd LT",lastDay:"[އިއްޔެ] 
LT",lastWeek:"[ފާއިތުވި] dddd LT",sameElse:"L"},relativeTime:{future:"ތެރޭގައި %s",past:"ކުރިން %s",s:"ސިކުންތުކޮޅެއް",m:"މިނިޓެއް",mm:"މިނިޓު %d",h:"ގަޑިއިރެއް",hh:"ގަޑިއިރު %d",d:"ދުވަހެއް",dd:"ދުވަސް %d",M:"މަހެއް",MM:"މަސް %d",y:"އަހަރެއް",yy:"އަހަރު %d"},preparse:function(a){return a.replace(/،/g,",")},postformat:function(a){return a.replace(/,/g,"،")},week:{dow:7,doy:12}}),Kf.defineLocale("el",{monthsNominativeEl:"Ιανουάριος_Φεβρουάριος_Μάρτιος_Απρίλιος_Μάιος_Ιούνιος_Ιούλιος_Αύγουστος_Σεπτέμβριος_Οκτώβριος_Νοέμβριος_Δεκέμβριος".split("_"),monthsGenitiveEl:"Ιανουαρίου_Φεβρουαρίου_Μαρτίου_Απριλίου_Μαΐου_Ιουνίου_Ιουλίου_Αυγούστου_Σεπτεμβρίου_Οκτωβρίου_Νοεμβρίου_Δεκεμβρίου".split("_"),months:function(a,b){return/D/.test(b.substring(0,b.indexOf("MMMM")))?this._monthsGenitiveEl[a.month()]:this._monthsNominativeEl[a.month()]},monthsShort:"Ιαν_Φεβ_Μαρ_Απρ_Μαϊ_Ιουν_Ιουλ_Αυγ_Σεπ_Οκτ_Νοε_Δεκ".split("_"),weekdays:"Κυριακή_Δευτέρα_Τρίτη_Τετάρτη_Πέμπτη_Παρασκευή_Σάββατο".split("_"),weekdaysShort:"Κυρ_Δευ_Τρι_Τετ_Πεμ_Παρ_Σαβ".split("_"),weekdaysMin:"Κυ_Δε_Τρ_Τε_Πε_Πα_Σα".split("_"),meridiem:function(a,b,c){return a>11?c?"μμ":"ΜΜ":c?"πμ":"ΠΜ"},isPM:function(a){return"μ"===(a+"").toLowerCase()[0]},meridiemParse:/[ΠΜ]\.?Μ?\.?/i,longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY h:mm A",LLLL:"dddd, D MMMM YYYY h:mm A"},calendarEl:{sameDay:"[Σήμερα {}] LT",nextDay:"[Αύριο {}] LT",nextWeek:"dddd [{}] LT",lastDay:"[Χθες {}] LT",lastWeek:function(){switch(this.day()){case 6:return"[το προηγούμενο] dddd [{}] LT";default:return"[την προηγούμενη] dddd [{}] LT"}},sameElse:"L"},calendar:function(a,b){var c=this._calendarEl[a],d=b&&b.hours();return D(c)&&(c=c.apply(b)),c.replace("{}",d%12===1?"στη":"στις")},relativeTime:{future:"σε %s",past:"%s πριν",s:"λίγα δευτερόλεπτα",m:"ένα λεπτό",mm:"%d λεπτά",h:"μία ώρα",hh:"%d ώρες",d:"μία μέρα",dd:"%d μέρες",M:"ένας μήνας",MM:"%d μήνες",y:"ένας χρόνος",yy:"%d 
χρόνια"},ordinalParse:/\d{1,2}η/,ordinal:"%dη",week:{dow:1,doy:4}}),Kf.defineLocale("en-au",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY h:mm A",LLLL:"dddd, D MMMM YYYY h:mm A"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},ordinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(a){var b=a%10,c=1===~~(a%100/10)?"th":1===b?"st":2===b?"nd":3===b?"rd":"th";return a+c},week:{dow:1,doy:4}}),Kf.defineLocale("en-ca",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"YYYY-MM-DD",LL:"D MMMM, YYYY",LLL:"D MMMM, YYYY h:mm A",LLLL:"dddd, D MMMM, YYYY h:mm A"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d 
years"},ordinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(a){var b=a%10,c=1===~~(a%100/10)?"th":1===b?"st":2===b?"nd":3===b?"rd":"th";return a+c}}),Kf.defineLocale("en-gb",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},ordinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(a){var b=a%10,c=1===~~(a%100/10)?"th":1===b?"st":2===b?"nd":3===b?"rd":"th";return a+c},week:{dow:1,doy:4}}),Kf.defineLocale("en-ie",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD-MM-YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a 
month",MM:"%d months",y:"a year",yy:"%d years"},ordinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(a){var b=a%10,c=1===~~(a%100/10)?"th":1===b?"st":2===b?"nd":3===b?"rd":"th";return a+c},week:{dow:1,doy:4}}),Kf.defineLocale("en-nz",{months:"January_February_March_April_May_June_July_August_September_October_November_December".split("_"),monthsShort:"Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec".split("_"),weekdays:"Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday".split("_"),weekdaysShort:"Sun_Mon_Tue_Wed_Thu_Fri_Sat".split("_"),weekdaysMin:"Su_Mo_Tu_We_Th_Fr_Sa".split("_"),longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY h:mm A",LLLL:"dddd, D MMMM YYYY h:mm A"},calendar:{sameDay:"[Today at] LT",nextDay:"[Tomorrow at] LT",nextWeek:"dddd [at] LT",lastDay:"[Yesterday at] LT",lastWeek:"[Last] dddd [at] LT",sameElse:"L"},relativeTime:{future:"in %s",past:"%s ago",s:"a few seconds",m:"a minute",mm:"%d minutes",h:"an hour",hh:"%d hours",d:"a day",dd:"%d days",M:"a month",MM:"%d months",y:"a year",yy:"%d years"},ordinalParse:/\d{1,2}(st|nd|rd|th)/,ordinal:function(a){var b=a%10,c=1===~~(a%100/10)?"th":1===b?"st":2===b?"nd":3===b?"rd":"th";return a+c},week:{dow:1,doy:4}}),Kf.defineLocale("eo",{months:"januaro_februaro_marto_aprilo_majo_junio_julio_aŭgusto_septembro_oktobro_novembro_decembro".split("_"),monthsShort:"jan_feb_mar_apr_maj_jun_jul_aŭg_sep_okt_nov_dec".split("_"),weekdays:"Dimanĉo_Lundo_Mardo_Merkredo_Ĵaŭdo_Vendredo_Sabato".split("_"), +weekdaysShort:"Dim_Lun_Mard_Merk_Ĵaŭ_Ven_Sab".split("_"),weekdaysMin:"Di_Lu_Ma_Me_Ĵa_Ve_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY-MM-DD",LL:"D[-an de] MMMM, YYYY",LLL:"D[-an de] MMMM, YYYY HH:mm",LLLL:"dddd, [la] D[-an de] MMMM, YYYY HH:mm"},meridiemParse:/[ap]\.t\.m/i,isPM:function(a){return"p"===a.charAt(0).toLowerCase()},meridiem:function(a,b,c){return a>11?c?"p.t.m.":"P.T.M.":c?"a.t.m.":"A.T.M."},calendar:{sameDay:"[Hodiaŭ je] LT",nextDay:"[Morgaŭ 
je] LT",nextWeek:"dddd [je] LT",lastDay:"[Hieraŭ je] LT",lastWeek:"[pasinta] dddd [je] LT",sameElse:"L"},relativeTime:{future:"je %s",past:"antaŭ %s",s:"sekundoj",m:"minuto",mm:"%d minutoj",h:"horo",hh:"%d horoj",d:"tago",dd:"%d tagoj",M:"monato",MM:"%d monatoj",y:"jaro",yy:"%d jaroj"},ordinalParse:/\d{1,2}a/,ordinal:"%da",week:{dow:1,doy:7}}),"ene._feb._mar._abr._may._jun._jul._ago._sep._oct._nov._dic.".split("_")),bg="ene_feb_mar_abr_may_jun_jul_ago_sep_oct_nov_dic".split("_"),cg=(Kf.defineLocale("es",{months:"enero_febrero_marzo_abril_mayo_junio_julio_agosto_septiembre_octubre_noviembre_diciembre".split("_"),monthsShort:function(a,b){return/-MMM-/.test(b)?bg[a.month()]:ag[a.month()]},weekdays:"domingo_lunes_martes_miércoles_jueves_viernes_sábado".split("_"),weekdaysShort:"dom._lun._mar._mié._jue._vie._sáb.".split("_"),weekdaysMin:"do_lu_ma_mi_ju_vi_sá".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD/MM/YYYY",LL:"D [de] MMMM [de] YYYY",LLL:"D [de] MMMM [de] YYYY H:mm",LLLL:"dddd, D [de] MMMM [de] YYYY H:mm"},calendar:{sameDay:function(){return"[hoy a la"+(1!==this.hours()?"s":"")+"] LT"},nextDay:function(){return"[mañana a la"+(1!==this.hours()?"s":"")+"] LT"},nextWeek:function(){return"dddd [a la"+(1!==this.hours()?"s":"")+"] LT"},lastDay:function(){return"[ayer a la"+(1!==this.hours()?"s":"")+"] LT"},lastWeek:function(){return"[el] dddd [pasado a la"+(1!==this.hours()?"s":"")+"] LT"},sameElse:"L"},relativeTime:{future:"en %s",past:"hace %s",s:"unos segundos",m:"un minuto",mm:"%d minutos",h:"una hora",hh:"%d horas",d:"un día",dd:"%d días",M:"un mes",MM:"%d meses",y:"un año",yy:"%d 
años"},ordinalParse:/\d{1,2}º/,ordinal:"%dº",week:{dow:1,doy:4}}),Kf.defineLocale("et",{months:"jaanuar_veebruar_märts_aprill_mai_juuni_juuli_august_september_oktoober_november_detsember".split("_"),monthsShort:"jaan_veebr_märts_apr_mai_juuni_juuli_aug_sept_okt_nov_dets".split("_"),weekdays:"pühapäev_esmaspäev_teisipäev_kolmapäev_neljapäev_reede_laupäev".split("_"),weekdaysShort:"P_E_T_K_N_R_L".split("_"),weekdaysMin:"P_E_T_K_N_R_L".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd, D. MMMM YYYY H:mm"},calendar:{sameDay:"[Täna,] LT",nextDay:"[Homme,] LT",nextWeek:"[Järgmine] dddd LT",lastDay:"[Eile,] LT",lastWeek:"[Eelmine] dddd LT",sameElse:"L"},relativeTime:{future:"%s pärast",past:"%s tagasi",s:ed,m:ed,mm:ed,h:ed,hh:ed,d:ed,dd:"%d päeva",M:ed,MM:ed,y:ed,yy:ed},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),Kf.defineLocale("eu",{months:"urtarrila_otsaila_martxoa_apirila_maiatza_ekaina_uztaila_abuztua_iraila_urria_azaroa_abendua".split("_"),monthsShort:"urt._ots._mar._api._mai._eka._uzt._abu._ira._urr._aza._abe.".split("_"),weekdays:"igandea_astelehena_asteartea_asteazkena_osteguna_ostirala_larunbata".split("_"),weekdaysShort:"ig._al._ar._az._og._ol._lr.".split("_"),weekdaysMin:"ig_al_ar_az_og_ol_lr".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY-MM-DD",LL:"YYYY[ko] MMMM[ren] D[a]",LLL:"YYYY[ko] MMMM[ren] D[a] HH:mm",LLLL:"dddd, YYYY[ko] MMMM[ren] D[a] HH:mm",l:"YYYY-M-D",ll:"YYYY[ko] MMM D[a]",lll:"YYYY[ko] MMM D[a] HH:mm",llll:"ddd, YYYY[ko] MMM D[a] HH:mm"},calendar:{sameDay:"[gaur] LT[etan]",nextDay:"[bihar] LT[etan]",nextWeek:"dddd LT[etan]",lastDay:"[atzo] LT[etan]",lastWeek:"[aurreko] dddd LT[etan]",sameElse:"L"},relativeTime:{future:"%s barru",past:"duela %s",s:"segundo batzuk",m:"minutu bat",mm:"%d minutu",h:"ordu bat",hh:"%d ordu",d:"egun bat",dd:"%d egun",M:"hilabete bat",MM:"%d hilabete",y:"urte bat",yy:"%d 
urte"},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}}),{1:"۱",2:"۲",3:"۳",4:"۴",5:"۵",6:"۶",7:"۷",8:"۸",9:"۹",0:"۰"}),dg={"۱":"1","۲":"2","۳":"3","۴":"4","۵":"5","۶":"6","۷":"7","۸":"8","۹":"9","۰":"0"},eg=(Kf.defineLocale("fa",{months:"ژانویه_فوریه_مارس_آوریل_مه_ژوئن_ژوئیه_اوت_سپتامبر_اکتبر_نوامبر_دسامبر".split("_"),monthsShort:"ژانویه_فوریه_مارس_آوریل_مه_ژوئن_ژوئیه_اوت_سپتامبر_اکتبر_نوامبر_دسامبر".split("_"),weekdays:"یک‌شنبه_دوشنبه_سه‌شنبه_چهارشنبه_پنج‌شنبه_جمعه_شنبه".split("_"),weekdaysShort:"یک‌شنبه_دوشنبه_سه‌شنبه_چهارشنبه_پنج‌شنبه_جمعه_شنبه".split("_"),weekdaysMin:"ی_د_س_چ_پ_ج_ش".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},meridiemParse:/قبل از ظهر|بعد از ظهر/,isPM:function(a){return/بعد از ظهر/.test(a)},meridiem:function(a,b,c){return 12>a?"قبل از ظهر":"بعد از ظهر"},calendar:{sameDay:"[امروز ساعت] LT",nextDay:"[فردا ساعت] LT",nextWeek:"dddd [ساعت] LT",lastDay:"[دیروز ساعت] LT",lastWeek:"dddd [پیش] [ساعت] LT",sameElse:"L"},relativeTime:{future:"در %s",past:"%s پیش",s:"چندین ثانیه",m:"یک دقیقه",mm:"%d دقیقه",h:"یک ساعت",hh:"%d ساعت",d:"یک روز",dd:"%d روز",M:"یک ماه",MM:"%d ماه",y:"یک سال",yy:"%d سال"},preparse:function(a){return a.replace(/[۰-۹]/g,function(a){return dg[a]}).replace(/،/g,",")},postformat:function(a){return a.replace(/\d/g,function(a){return cg[a]}).replace(/,/g,"،")},ordinalParse:/\d{1,2}م/,ordinal:"%dم",week:{dow:6,doy:12}}),"nolla yksi kaksi kolme neljä viisi kuusi seitsemän kahdeksan yhdeksän".split(" 
")),fg=["nolla","yhden","kahden","kolmen","neljän","viiden","kuuden",eg[7],eg[8],eg[9]],gg=(Kf.defineLocale("fi",{months:"tammikuu_helmikuu_maaliskuu_huhtikuu_toukokuu_kesäkuu_heinäkuu_elokuu_syyskuu_lokakuu_marraskuu_joulukuu".split("_"),monthsShort:"tammi_helmi_maalis_huhti_touko_kesä_heinä_elo_syys_loka_marras_joulu".split("_"),weekdays:"sunnuntai_maanantai_tiistai_keskiviikko_torstai_perjantai_lauantai".split("_"),weekdaysShort:"su_ma_ti_ke_to_pe_la".split("_"),weekdaysMin:"su_ma_ti_ke_to_pe_la".split("_"),longDateFormat:{LT:"HH.mm",LTS:"HH.mm.ss",L:"DD.MM.YYYY",LL:"Do MMMM[ta] YYYY",LLL:"Do MMMM[ta] YYYY, [klo] HH.mm",LLLL:"dddd, Do MMMM[ta] YYYY, [klo] HH.mm",l:"D.M.YYYY",ll:"Do MMM YYYY",lll:"Do MMM YYYY, [klo] HH.mm",llll:"ddd, Do MMM YYYY, [klo] HH.mm"},calendar:{sameDay:"[tänään] [klo] LT",nextDay:"[huomenna] [klo] LT",nextWeek:"dddd [klo] LT",lastDay:"[eilen] [klo] LT",lastWeek:"[viime] dddd[na] [klo] LT",sameElse:"L"},relativeTime:{future:"%s päästä",past:"%s sitten",s:fd,m:fd,mm:fd,h:fd,hh:fd,d:fd,dd:fd,M:fd,MM:fd,y:fd,yy:fd},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),Kf.defineLocale("fo",{months:"januar_februar_mars_apríl_mai_juni_juli_august_september_oktober_november_desember".split("_"),monthsShort:"jan_feb_mar_apr_mai_jun_jul_aug_sep_okt_nov_des".split("_"),weekdays:"sunnudagur_mánadagur_týsdagur_mikudagur_hósdagur_fríggjadagur_leygardagur".split("_"),weekdaysShort:"sun_mán_týs_mik_hós_frí_ley".split("_"),weekdaysMin:"su_má_tý_mi_hó_fr_le".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D. MMMM, YYYY HH:mm"},calendar:{sameDay:"[Í dag kl.] LT",nextDay:"[Í morgin kl.] LT",nextWeek:"dddd [kl.] LT",lastDay:"[Í gjár kl.] 
LT",lastWeek:"[síðstu] dddd [kl] LT",sameElse:"L"},relativeTime:{future:"um %s",past:"%s síðani",s:"fá sekund",m:"ein minutt",mm:"%d minuttir",h:"ein tími",hh:"%d tímar",d:"ein dagur",dd:"%d dagar",M:"ein mánaði",MM:"%d mánaðir",y:"eitt ár",yy:"%d ár"},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),Kf.defineLocale("fr-ca",{months:"janvier_février_mars_avril_mai_juin_juillet_août_septembre_octobre_novembre_décembre".split("_"),monthsShort:"janv._févr._mars_avr._mai_juin_juil._août_sept._oct._nov._déc.".split("_"),weekdays:"dimanche_lundi_mardi_mercredi_jeudi_vendredi_samedi".split("_"),weekdaysShort:"dim._lun._mar._mer._jeu._ven._sam.".split("_"),weekdaysMin:"Di_Lu_Ma_Me_Je_Ve_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY-MM-DD",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[Aujourd'hui à] LT",nextDay:"[Demain à] LT",nextWeek:"dddd [à] LT",lastDay:"[Hier à] LT",lastWeek:"dddd [dernier à] LT",sameElse:"L"},relativeTime:{future:"dans %s",past:"il y a %s",s:"quelques secondes",m:"une minute",mm:"%d minutes",h:"une heure",hh:"%d heures",d:"un jour",dd:"%d jours",M:"un mois",MM:"%d mois",y:"un an",yy:"%d ans"},ordinalParse:/\d{1,2}(er|e)/,ordinal:function(a){return a+(1===a?"er":"e")}}),Kf.defineLocale("fr-ch",{months:"janvier_février_mars_avril_mai_juin_juillet_août_septembre_octobre_novembre_décembre".split("_"),monthsShort:"janv._févr._mars_avr._mai_juin_juil._août_sept._oct._nov._déc.".split("_"),weekdays:"dimanche_lundi_mardi_mercredi_jeudi_vendredi_samedi".split("_"),weekdaysShort:"dim._lun._mar._mer._jeu._ven._sam.".split("_"),weekdaysMin:"Di_Lu_Ma_Me_Je_Ve_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[Aujourd'hui à] LT",nextDay:"[Demain à] LT",nextWeek:"dddd [à] LT",lastDay:"[Hier à] LT",lastWeek:"dddd [dernier à] LT",sameElse:"L"},relativeTime:{future:"dans %s",past:"il 
y a %s",s:"quelques secondes",m:"une minute",mm:"%d minutes",h:"une heure",hh:"%d heures",d:"un jour",dd:"%d jours",M:"un mois",MM:"%d mois",y:"un an",yy:"%d ans"},ordinalParse:/\d{1,2}(er|e)/,ordinal:function(a){return a+(1===a?"er":"e")},week:{dow:1,doy:4}}),Kf.defineLocale("fr",{months:"janvier_février_mars_avril_mai_juin_juillet_août_septembre_octobre_novembre_décembre".split("_"),monthsShort:"janv._févr._mars_avr._mai_juin_juil._août_sept._oct._nov._déc.".split("_"),weekdays:"dimanche_lundi_mardi_mercredi_jeudi_vendredi_samedi".split("_"),weekdaysShort:"dim._lun._mar._mer._jeu._ven._sam.".split("_"),weekdaysMin:"Di_Lu_Ma_Me_Je_Ve_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[Aujourd'hui à] LT",nextDay:"[Demain à] LT",nextWeek:"dddd [à] LT",lastDay:"[Hier à] LT",lastWeek:"dddd [dernier à] LT",sameElse:"L"},relativeTime:{future:"dans %s",past:"il y a %s",s:"quelques secondes",m:"une minute",mm:"%d minutes",h:"une heure",hh:"%d heures",d:"un jour",dd:"%d jours",M:"un mois",MM:"%d mois",y:"un an",yy:"%d ans"},ordinalParse:/\d{1,2}(er|)/,ordinal:function(a){return a+(1===a?"er":"")},week:{dow:1,doy:4}}),"jan._feb._mrt._apr._mai_jun._jul._aug._sep._okt._nov._des.".split("_")),hg="jan_feb_mrt_apr_mai_jun_jul_aug_sep_okt_nov_des".split("_"),ig=(Kf.defineLocale("fy",{months:"jannewaris_febrewaris_maart_april_maaie_juny_july_augustus_septimber_oktober_novimber_desimber".split("_"),monthsShort:function(a,b){return/-MMM-/.test(b)?hg[a.month()]:gg[a.month()]},weekdays:"snein_moandei_tiisdei_woansdei_tongersdei_freed_sneon".split("_"),weekdaysShort:"si._mo._ti._wo._to._fr._so.".split("_"),weekdaysMin:"Si_Mo_Ti_Wo_To_Fr_So".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD-MM-YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[hjoed om] LT",nextDay:"[moarn om] LT",nextWeek:"dddd [om] LT",lastDay:"[juster 
om] LT",lastWeek:"[ôfrûne] dddd [om] LT",sameElse:"L"},relativeTime:{future:"oer %s",past:"%s lyn",s:"in pear sekonden",m:"ien minút",mm:"%d minuten",h:"ien oere",hh:"%d oeren",d:"ien dei",dd:"%d dagen",M:"ien moanne",MM:"%d moannen",y:"ien jier",yy:"%d jierren"},ordinalParse:/\d{1,2}(ste|de)/,ordinal:function(a){return a+(1===a||8===a||a>=20?"ste":"de")},week:{dow:1,doy:4}}),["Am Faoilleach","An Gearran","Am Màrt","An Giblean","An Cèitean","An t-Ògmhios","An t-Iuchar","An Lùnastal","An t-Sultain","An Dàmhair","An t-Samhain","An Dùbhlachd"]),jg=["Faoi","Gear","Màrt","Gibl","Cèit","Ògmh","Iuch","Lùn","Sult","Dàmh","Samh","Dùbh"],kg=["Didòmhnaich","Diluain","Dimàirt","Diciadain","Diardaoin","Dihaoine","Disathairne"],lg=["Did","Dil","Dim","Dic","Dia","Dih","Dis"],mg=["Dò","Lu","Mà","Ci","Ar","Ha","Sa"],ng=(Kf.defineLocale("gd",{months:ig,monthsShort:jg,monthsParseExact:!0,weekdays:kg,weekdaysShort:lg,weekdaysMin:mg,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[An-diugh aig] LT",nextDay:"[A-màireach aig] LT",nextWeek:"dddd [aig] LT",lastDay:"[An-dè aig] LT",lastWeek:"dddd [seo chaidh] [aig] LT",sameElse:"L"},relativeTime:{future:"ann an %s",past:"bho chionn %s",s:"beagan diogan",m:"mionaid",mm:"%d mionaidean",h:"uair",hh:"%d uairean",d:"latha",dd:"%d latha",M:"mìos",MM:"%d mìosan",y:"bliadhna",yy:"%d bliadhna"},ordinalParse:/\d{1,2}(d|na|mh)/,ordinal:function(a){var b=1===a?"d":a%10===2?"na":"mh";return 
a+b},week:{dow:1,doy:4}}),Kf.defineLocale("gl",{months:"Xaneiro_Febreiro_Marzo_Abril_Maio_Xuño_Xullo_Agosto_Setembro_Outubro_Novembro_Decembro".split("_"),monthsShort:"Xan._Feb._Mar._Abr._Mai._Xuñ._Xul._Ago._Set._Out._Nov._Dec.".split("_"),weekdays:"Domingo_Luns_Martes_Mércores_Xoves_Venres_Sábado".split("_"),weekdaysShort:"Dom._Lun._Mar._Mér._Xov._Ven._Sáb.".split("_"),weekdaysMin:"Do_Lu_Ma_Mé_Xo_Ve_Sá".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY H:mm",LLLL:"dddd D MMMM YYYY H:mm"},calendar:{sameDay:function(){return"[hoxe "+(1!==this.hours()?"ás":"á")+"] LT"},nextDay:function(){return"[mañá "+(1!==this.hours()?"ás":"á")+"] LT"},nextWeek:function(){return"dddd ["+(1!==this.hours()?"ás":"a")+"] LT"},lastDay:function(){return"[onte "+(1!==this.hours()?"á":"a")+"] LT"},lastWeek:function(){return"[o] dddd [pasado "+(1!==this.hours()?"ás":"a")+"] LT"},sameElse:"L"},relativeTime:{future:function(a){return"uns segundos"===a?"nuns segundos":"en "+a},past:"hai %s",s:"uns segundos",m:"un minuto",mm:"%d minutos",h:"unha hora",hh:"%d horas",d:"un día",dd:"%d días",M:"un mes",MM:"%d meses",y:"un ano",yy:"%d anos"},ordinalParse:/\d{1,2}º/,ordinal:"%dº",week:{dow:1,doy:7}}),Kf.defineLocale("he",{months:"ינואר_פברואר_מרץ_אפריל_מאי_יוני_יולי_אוגוסט_ספטמבר_אוקטובר_נובמבר_דצמבר".split("_"),monthsShort:"ינו׳_פבר׳_מרץ_אפר׳_מאי_יוני_יולי_אוג׳_ספט׳_אוק׳_נוב׳_דצמ׳".split("_"),weekdays:"ראשון_שני_שלישי_רביעי_חמישי_שישי_שבת".split("_"),weekdaysShort:"א׳_ב׳_ג׳_ד׳_ה׳_ו׳_ש׳".split("_"),weekdaysMin:"א_ב_ג_ד_ה_ו_ש".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D [ב]MMMM YYYY",LLL:"D [ב]MMMM YYYY HH:mm",LLLL:"dddd, D [ב]MMMM YYYY HH:mm",l:"D/M/YYYY",ll:"D MMM YYYY",lll:"D MMM YYYY HH:mm",llll:"ddd, D MMM YYYY HH:mm"},calendar:{sameDay:"[היום ב־]LT",nextDay:"[מחר ב־]LT",nextWeek:"dddd [בשעה] LT",lastDay:"[אתמול ב־]LT",lastWeek:"[ביום] dddd [האחרון בשעה] LT",sameElse:"L"},relativeTime:{future:"בעוד %s",past:"לפני 
%s",s:"מספר שניות",m:"דקה",mm:"%d דקות",h:"שעה",hh:function(a){return 2===a?"שעתיים":a+" שעות"},d:"יום",dd:function(a){return 2===a?"יומיים":a+" ימים"},M:"חודש",MM:function(a){return 2===a?"חודשיים":a+" חודשים"},y:"שנה",yy:function(a){return 2===a?"שנתיים":a%10===0&&10!==a?a+" שנה":a+" שנים"}}}),{1:"१",2:"२",3:"३",4:"४",5:"५",6:"६",7:"७",8:"८",9:"९",0:"०"}),og={"१":"1","२":"2","३":"3","४":"4","५":"5","६":"6","७":"7","८":"8","९":"9","०":"0"},pg=(Kf.defineLocale("hi",{months:"जनवरी_फ़रवरी_मार्च_अप्रैल_मई_जून_जुलाई_अगस्त_सितम्बर_अक्टूबर_नवम्बर_दिसम्बर".split("_"),monthsShort:"जन._फ़र._मार्च_अप्रै._मई_जून_जुल._अग._सित._अक्टू._नव._दिस.".split("_"),weekdays:"रविवार_सोमवार_मंगलवार_बुधवार_गुरूवार_शुक्रवार_शनिवार".split("_"),weekdaysShort:"रवि_सोम_मंगल_बुध_गुरू_शुक्र_शनि".split("_"),weekdaysMin:"र_सो_मं_बु_गु_शु_श".split("_"),longDateFormat:{LT:"A h:mm बजे",LTS:"A h:mm:ss बजे",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm बजे",LLLL:"dddd, D MMMM YYYY, A h:mm बजे"},calendar:{sameDay:"[आज] LT",nextDay:"[कल] LT",nextWeek:"dddd, LT",lastDay:"[कल] LT",lastWeek:"[पिछले] dddd, LT",sameElse:"L"},relativeTime:{future:"%s में",past:"%s पहले",s:"कुछ ही क्षण",m:"एक मिनट",mm:"%d मिनट",h:"एक घंटा",hh:"%d घंटे",d:"एक दिन",dd:"%d दिन",M:"एक महीने",MM:"%d महीने",y:"एक वर्ष",yy:"%d वर्ष"},preparse:function(a){return a.replace(/[१२३४५६७८९०]/g,function(a){return og[a]})},postformat:function(a){return a.replace(/\d/g,function(a){return ng[a]})},meridiemParse:/रात|सुबह|दोपहर|शाम/,meridiemHour:function(a,b){return 12===a&&(a=0),"रात"===b?4>a?a:a+12:"सुबह"===b?a:"दोपहर"===b?a>=10?a:a+12:"शाम"===b?a+12:void 0},meridiem:function(a,b,c){return 
4>a?"रात":10>a?"सुबह":17>a?"दोपहर":20>a?"शाम":"रात"},week:{dow:0,doy:6}}),Kf.defineLocale("hr",{months:{format:"siječnja_veljače_ožujka_travnja_svibnja_lipnja_srpnja_kolovoza_rujna_listopada_studenoga_prosinca".split("_"),standalone:"siječanj_veljača_ožujak_travanj_svibanj_lipanj_srpanj_kolovoz_rujan_listopad_studeni_prosinac".split("_")},monthsShort:"sij._velj._ožu._tra._svi._lip._srp._kol._ruj._lis._stu._pro.".split("_"),weekdays:"nedjelja_ponedjeljak_utorak_srijeda_četvrtak_petak_subota".split("_"),weekdaysShort:"ned._pon._uto._sri._čet._pet._sub.".split("_"),weekdaysMin:"ne_po_ut_sr_če_pe_su".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD. MM. YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd, D. MMMM YYYY H:mm"},calendar:{sameDay:"[danas u] LT",nextDay:"[sutra u] LT",nextWeek:function(){switch(this.day()){case 0:return"[u] [nedjelju] [u] LT";case 3:return"[u] [srijedu] [u] LT";case 6:return"[u] [subotu] [u] LT";case 1:case 2:case 4:case 5:return"[u] dddd [u] LT"}},lastDay:"[jučer u] LT",lastWeek:function(){switch(this.day()){case 0:case 3:return"[prošlu] dddd [u] LT";case 6:return"[prošle] [subote] [u] LT";case 1:case 2:case 4:case 5:return"[prošli] dddd [u] LT"}},sameElse:"L"},relativeTime:{future:"za %s",past:"prije %s",s:"par sekundi",m:hd,mm:hd,h:hd,hh:hd,d:"dan",dd:hd,M:"mjesec",MM:hd,y:"godinu",yy:hd},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}}),"vasárnap hétfőn kedden szerdán csütörtökön pénteken szombaton".split(" ")),qg=(Kf.defineLocale("hu",{months:"január_február_március_április_május_június_július_augusztus_szeptember_október_november_december".split("_"),monthsShort:"jan_feb_márc_ápr_máj_jún_júl_aug_szept_okt_nov_dec".split("_"),weekdays:"vasárnap_hétfő_kedd_szerda_csütörtök_péntek_szombat".split("_"),weekdaysShort:"vas_hét_kedd_sze_csüt_pén_szo".split("_"),weekdaysMin:"v_h_k_sze_cs_p_szo".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"YYYY.MM.DD.",LL:"YYYY. MMMM D.",LLL:"YYYY. MMMM D. H:mm",LLLL:"YYYY. 
MMMM D., dddd H:mm"},meridiemParse:/de|du/i,isPM:function(a){return"u"===a.charAt(1).toLowerCase()},meridiem:function(a,b,c){return 12>a?c===!0?"de":"DE":c===!0?"du":"DU"},calendar:{sameDay:"[ma] LT[-kor]",nextDay:"[holnap] LT[-kor]",nextWeek:function(){return jd.call(this,!0)},lastDay:"[tegnap] LT[-kor]",lastWeek:function(){return jd.call(this,!1)},sameElse:"L"},relativeTime:{future:"%s múlva",past:"%s",s:id,m:id,mm:id,h:id,hh:id,d:id,dd:id,M:id,MM:id,y:id,yy:id},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}}),Kf.defineLocale("hy-am",{months:{format:"հունվարի_փետրվարի_մարտի_ապրիլի_մայիսի_հունիսի_հուլիսի_օգոստոսի_սեպտեմբերի_հոկտեմբերի_նոյեմբերի_դեկտեմբերի".split("_"),standalone:"հունվար_փետրվար_մարտ_ապրիլ_մայիս_հունիս_հուլիս_օգոստոս_սեպտեմբեր_հոկտեմբեր_նոյեմբեր_դեկտեմբեր".split("_")},monthsShort:"հնվ_փտր_մրտ_ապր_մյս_հնս_հլս_օգս_սպտ_հկտ_նմբ_դկտ".split("_"),weekdays:"կիրակի_երկուշաբթի_երեքշաբթի_չորեքշաբթի_հինգշաբթի_ուրբաթ_շաբաթ".split("_"),weekdaysShort:"կրկ_երկ_երք_չրք_հնգ_ուրբ_շբթ".split("_"),weekdaysMin:"կրկ_երկ_երք_չրք_հնգ_ուրբ_շբթ".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY թ.",LLL:"D MMMM YYYY թ., HH:mm",LLLL:"dddd, D MMMM YYYY թ., HH:mm"},calendar:{sameDay:"[այսօր] LT",nextDay:"[վաղը] LT",lastDay:"[երեկ] LT",nextWeek:function(){return"dddd [օրը ժամը] LT"},lastWeek:function(){return"[անցած] dddd [օրը ժամը] LT"},sameElse:"L"},relativeTime:{future:"%s հետո",past:"%s առաջ",s:"մի քանի վայրկյան",m:"րոպե",mm:"%d րոպե",h:"ժամ",hh:"%d ժամ",d:"օր",dd:"%d օր",M:"ամիս",MM:"%d ամիս",y:"տարի",yy:"%d տարի"},meridiemParse:/գիշերվա|առավոտվա|ցերեկվա|երեկոյան/,isPM:function(a){return/^(ցերեկվա|երեկոյան)$/.test(a)},meridiem:function(a){return 4>a?"գիշերվա":12>a?"առավոտվա":17>a?"ցերեկվա":"երեկոյան"},ordinalParse:/\d{1,2}|\d{1,2}-(ին|րդ)/,ordinal:function(a,b){switch(b){case"DDD":case"w":case"W":case"DDDo":return 1===a?a+"-ին":a+"-րդ";default:return 
a}},week:{dow:1,doy:7}}),Kf.defineLocale("id",{months:"Januari_Februari_Maret_April_Mei_Juni_Juli_Agustus_September_Oktober_November_Desember".split("_"),monthsShort:"Jan_Feb_Mar_Apr_Mei_Jun_Jul_Ags_Sep_Okt_Nov_Des".split("_"),weekdays:"Minggu_Senin_Selasa_Rabu_Kamis_Jumat_Sabtu".split("_"),weekdaysShort:"Min_Sen_Sel_Rab_Kam_Jum_Sab".split("_"),weekdaysMin:"Mg_Sn_Sl_Rb_Km_Jm_Sb".split("_"),longDateFormat:{LT:"HH.mm",LTS:"HH.mm.ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY [pukul] HH.mm",LLLL:"dddd, D MMMM YYYY [pukul] HH.mm"},meridiemParse:/pagi|siang|sore|malam/,meridiemHour:function(a,b){return 12===a&&(a=0),"pagi"===b?a:"siang"===b?a>=11?a:a+12:"sore"===b||"malam"===b?a+12:void 0},meridiem:function(a,b,c){return 11>a?"pagi":15>a?"siang":19>a?"sore":"malam"},calendar:{sameDay:"[Hari ini pukul] LT",nextDay:"[Besok pukul] LT",nextWeek:"dddd [pukul] LT",lastDay:"[Kemarin pukul] LT",lastWeek:"dddd [lalu pukul] LT",sameElse:"L"},relativeTime:{future:"dalam %s",past:"%s yang lalu",s:"beberapa detik",m:"semenit",mm:"%d menit",h:"sejam",hh:"%d jam",d:"sehari",dd:"%d hari",M:"sebulan",MM:"%d bulan",y:"setahun",yy:"%d tahun"},week:{dow:1,doy:7}}),Kf.defineLocale("is",{months:"janúar_febrúar_mars_apríl_maí_júní_júlí_ágúst_september_október_nóvember_desember".split("_"),monthsShort:"jan_feb_mar_apr_maí_jún_júl_ágú_sep_okt_nóv_des".split("_"),weekdays:"sunnudagur_mánudagur_þriðjudagur_miðvikudagur_fimmtudagur_föstudagur_laugardagur".split("_"),weekdaysShort:"sun_mán_þri_mið_fim_fös_lau".split("_"),weekdaysMin:"Su_Má_Þr_Mi_Fi_Fö_La".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD/MM/YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY [kl.] H:mm",LLLL:"dddd, D. MMMM YYYY [kl.] H:mm"},calendar:{sameDay:"[í dag kl.] LT",nextDay:"[á morgun kl.] LT",nextWeek:"dddd [kl.] LT",lastDay:"[í gær kl.] LT",lastWeek:"[síðasta] dddd [kl.] 
LT",sameElse:"L"},relativeTime:{future:"eftir %s",past:"fyrir %s síðan",s:ld,m:ld,mm:ld,h:"klukkustund",hh:ld,d:ld,dd:ld,M:ld,MM:ld,y:ld,yy:ld},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),Kf.defineLocale("it",{months:"gennaio_febbraio_marzo_aprile_maggio_giugno_luglio_agosto_settembre_ottobre_novembre_dicembre".split("_"),monthsShort:"gen_feb_mar_apr_mag_giu_lug_ago_set_ott_nov_dic".split("_"),weekdays:"Domenica_Lunedì_Martedì_Mercoledì_Giovedì_Venerdì_Sabato".split("_"),weekdaysShort:"Dom_Lun_Mar_Mer_Gio_Ven_Sab".split("_"),weekdaysMin:"Do_Lu_Ma_Me_Gi_Ve_Sa".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Oggi alle] LT",nextDay:"[Domani alle] LT",nextWeek:"dddd [alle] LT",lastDay:"[Ieri alle] LT",lastWeek:function(){switch(this.day()){case 0:return"[la scorsa] dddd [alle] LT";default:return"[lo scorso] dddd [alle] LT"}},sameElse:"L"},relativeTime:{future:function(a){return(/^[0-9].+$/.test(a)?"tra":"in")+" "+a},past:"%s fa",s:"alcuni secondi",m:"un minuto",mm:"%d minuti",h:"un'ora",hh:"%d ore",d:"un giorno",dd:"%d giorni",M:"un mese",MM:"%d mesi",y:"un anno",yy:"%d anni"},ordinalParse:/\d{1,2}º/,ordinal:"%dº",week:{dow:1,doy:4}}),Kf.defineLocale("ja",{months:"1月_2月_3月_4月_5月_6月_7月_8月_9月_10月_11月_12月".split("_"),monthsShort:"1月_2月_3月_4月_5月_6月_7月_8月_9月_10月_11月_12月".split("_"),weekdays:"日曜日_月曜日_火曜日_水曜日_木曜日_金曜日_土曜日".split("_"),weekdaysShort:"日_月_火_水_木_金_土".split("_"),weekdaysMin:"日_月_火_水_木_金_土".split("_"),longDateFormat:{LT:"Ah時m分",LTS:"Ah時m分s秒",L:"YYYY/MM/DD",LL:"YYYY年M月D日",LLL:"YYYY年M月D日Ah時m分",LLLL:"YYYY年M月D日Ah時m分 dddd"},meridiemParse:/午前|午後/i,isPM:function(a){return"午後"===a},meridiem:function(a,b,c){return 12>a?"午前":"午後"},calendar:{sameDay:"[今日] LT",nextDay:"[明日] LT",nextWeek:"[来週]dddd LT",lastDay:"[昨日] LT",lastWeek:"[前週]dddd 
LT",sameElse:"L"},relativeTime:{future:"%s後",past:"%s前",s:"数秒",m:"1分",mm:"%d分",h:"1時間",hh:"%d時間",d:"1日",dd:"%d日",M:"1ヶ月",MM:"%dヶ月",y:"1年",yy:"%d年"}}),Kf.defineLocale("jv",{months:"Januari_Februari_Maret_April_Mei_Juni_Juli_Agustus_September_Oktober_Nopember_Desember".split("_"),monthsShort:"Jan_Feb_Mar_Apr_Mei_Jun_Jul_Ags_Sep_Okt_Nop_Des".split("_"),weekdays:"Minggu_Senen_Seloso_Rebu_Kemis_Jemuwah_Septu".split("_"),weekdaysShort:"Min_Sen_Sel_Reb_Kem_Jem_Sep".split("_"),weekdaysMin:"Mg_Sn_Sl_Rb_Km_Jm_Sp".split("_"),longDateFormat:{LT:"HH.mm",LTS:"HH.mm.ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY [pukul] HH.mm",LLLL:"dddd, D MMMM YYYY [pukul] HH.mm"},meridiemParse:/enjing|siyang|sonten|ndalu/,meridiemHour:function(a,b){return 12===a&&(a=0),"enjing"===b?a:"siyang"===b?a>=11?a:a+12:"sonten"===b||"ndalu"===b?a+12:void 0},meridiem:function(a,b,c){return 11>a?"enjing":15>a?"siyang":19>a?"sonten":"ndalu"},calendar:{sameDay:"[Dinten puniko pukul] LT",nextDay:"[Mbenjang pukul] LT",nextWeek:"dddd [pukul] LT",lastDay:"[Kala wingi pukul] LT",lastWeek:"dddd [kepengker pukul] LT",sameElse:"L"},relativeTime:{future:"wonten ing %s",past:"%s ingkang kepengker",s:"sawetawis detik",m:"setunggal menit",mm:"%d menit",h:"setunggal jam",hh:"%d jam",d:"sedinten",dd:"%d dinten",M:"sewulan",MM:"%d wulan",y:"setaun",yy:"%d 
taun"},week:{dow:1,doy:7}}),Kf.defineLocale("ka",{months:{standalone:"იანვარი_თებერვალი_მარტი_აპრილი_მაისი_ივნისი_ივლისი_აგვისტო_სექტემბერი_ოქტომბერი_ნოემბერი_დეკემბერი".split("_"),format:"იანვარს_თებერვალს_მარტს_აპრილის_მაისს_ივნისს_ივლისს_აგვისტს_სექტემბერს_ოქტომბერს_ნოემბერს_დეკემბერს".split("_")},monthsShort:"იან_თებ_მარ_აპრ_მაი_ივნ_ივლ_აგვ_სექ_ოქტ_ნოე_დეკ".split("_"),weekdays:{standalone:"კვირა_ორშაბათი_სამშაბათი_ოთხშაბათი_ხუთშაბათი_პარასკევი_შაბათი".split("_"),format:"კვირას_ორშაბათს_სამშაბათს_ოთხშაბათს_ხუთშაბათს_პარასკევს_შაბათს".split("_"),isFormat:/(წინა|შემდეგ)/},weekdaysShort:"კვი_ორშ_სამ_ოთხ_ხუთ_პარ_შაბ".split("_"),weekdaysMin:"კვ_ორ_სა_ოთ_ხუ_პა_შა".split("_"),longDateFormat:{LT:"h:mm A",LTS:"h:mm:ss A",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY h:mm A",LLLL:"dddd, D MMMM YYYY h:mm A"},calendar:{sameDay:"[დღეს] LT[-ზე]",nextDay:"[ხვალ] LT[-ზე]",lastDay:"[გუშინ] LT[-ზე]",nextWeek:"[შემდეგ] dddd LT[-ზე]",lastWeek:"[წინა] dddd LT-ზე",sameElse:"L"},relativeTime:{future:function(a){return/(წამი|წუთი|საათი|წელი)/.test(a)?a.replace(/ი$/,"ში"):a+"ში"},past:function(a){return/(წამი|წუთი|საათი|დღე|თვე)/.test(a)?a.replace(/(ი|ე)$/,"ის წინ"):/წელი/.test(a)?a.replace(/წელი$/,"წლის წინ"):void 0},s:"რამდენიმე წამი",m:"წუთი",mm:"%d წუთი",h:"საათი",hh:"%d საათი",d:"დღე",dd:"%d დღე",M:"თვე",MM:"%d თვე",y:"წელი",yy:"%d წელი"},ordinalParse:/0|1-ლი|მე-\d{1,2}|\d{1,2}-ე/,ordinal:function(a){return 
0===a?a:1===a?a+"-ლი":20>a||100>=a&&a%20===0||a%100===0?"მე-"+a:a+"-ე"},week:{dow:1,doy:7}}),{0:"-ші",1:"-ші",2:"-ші",3:"-ші",4:"-ші",5:"-ші",6:"-шы",7:"-ші",8:"-ші",9:"-шы",10:"-шы",20:"-шы",30:"-шы",40:"-шы",50:"-ші",60:"-шы",70:"-ші",80:"-ші",90:"-шы",100:"-ші"}),rg=(Kf.defineLocale("kk",{months:"Қаңтар_Ақпан_Наурыз_Сәуір_Мамыр_Маусым_Шілде_Тамыз_Қыркүйек_Қазан_Қараша_Желтоқсан".split("_"),monthsShort:"Қаң_Ақп_Нау_Сәу_Мам_Мау_Шіл_Там_Қыр_Қаз_Қар_Жел".split("_"),weekdays:"Жексенбі_Дүйсенбі_Сейсенбі_Сәрсенбі_Бейсенбі_Жұма_Сенбі".split("_"),weekdaysShort:"Жек_Дүй_Сей_Сәр_Бей_Жұм_Сен".split("_"),weekdaysMin:"Жк_Дй_Сй_Ср_Бй_Жм_Сн".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Бүгін сағат] LT",nextDay:"[Ертең сағат] LT",nextWeek:"dddd [сағат] LT",lastDay:"[Кеше сағат] LT",lastWeek:"[Өткен аптаның] dddd [сағат] LT",sameElse:"L"},relativeTime:{future:"%s ішінде",past:"%s бұрын",s:"бірнеше секунд",m:"бір минут",mm:"%d минут",h:"бір сағат",hh:"%d сағат",d:"бір күн",dd:"%d күн",M:"бір ай",MM:"%d ай",y:"бір жыл",yy:"%d жыл"},ordinalParse:/\d{1,2}-(ші|шы)/,ordinal:function(a){var b=a%10,c=a>=100?100:null;return a+(qg[a]||qg[b]||qg[c])},week:{dow:1,doy:7}}),Kf.defineLocale("km",{months:"មករា_កុម្ភៈ_មិនា_មេសា_ឧសភា_មិថុនា_កក្កដា_សីហា_កញ្ញា_តុលា_វិច្ឆិកា_ធ្នូ".split("_"),monthsShort:"មករា_កុម្ភៈ_មិនា_មេសា_ឧសភា_មិថុនា_កក្កដា_សីហា_កញ្ញា_តុលា_វិច្ឆិកា_ធ្នូ".split("_"),weekdays:"អាទិត្យ_ច័ន្ទ_អង្គារ_ពុធ_ព្រហស្បតិ៍_សុក្រ_សៅរ៍".split("_"),weekdaysShort:"អាទិត្យ_ច័ន្ទ_អង្គារ_ពុធ_ព្រហស្បតិ៍_សុក្រ_សៅរ៍".split("_"),weekdaysMin:"អាទិត្យ_ច័ន្ទ_អង្គារ_ពុធ_ព្រហស្បតិ៍_សុក្រ_សៅរ៍".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[ថ្ងៃនេះ ម៉ោង] LT",nextDay:"[ស្អែក ម៉ោង] LT",nextWeek:"dddd [ម៉ោង] LT",lastDay:"[ម្សិលមិញ ម៉ោង] LT",lastWeek:"dddd [សប្តាហ៍មុន] [ម៉ោង] 
LT",sameElse:"L"},relativeTime:{future:"%sទៀត",past:"%sមុន",s:"ប៉ុន្មានវិនាទី",m:"មួយនាទី",mm:"%d នាទី",h:"មួយម៉ោង",hh:"%d ម៉ោង",d:"មួយថ្ងៃ",dd:"%d ថ្ងៃ",M:"មួយខែ",MM:"%d ខែ",y:"មួយឆ្នាំ",yy:"%d ឆ្នាំ"},week:{dow:1,doy:4}}),Kf.defineLocale("ko",{months:"1월_2월_3월_4월_5월_6월_7월_8월_9월_10월_11월_12월".split("_"),monthsShort:"1월_2월_3월_4월_5월_6월_7월_8월_9월_10월_11월_12월".split("_"),weekdays:"일요일_월요일_화요일_수요일_목요일_금요일_토요일".split("_"),weekdaysShort:"일_월_화_수_목_금_토".split("_"),weekdaysMin:"일_월_화_수_목_금_토".split("_"),longDateFormat:{LT:"A h시 m분",LTS:"A h시 m분 s초",L:"YYYY.MM.DD",LL:"YYYY년 MMMM D일",LLL:"YYYY년 MMMM D일 A h시 m분",LLLL:"YYYY년 MMMM D일 dddd A h시 m분"},calendar:{sameDay:"오늘 LT",nextDay:"내일 LT",nextWeek:"dddd LT",lastDay:"어제 LT",lastWeek:"지난주 dddd LT",sameElse:"L"},relativeTime:{future:"%s 후",past:"%s 전",s:"몇초",ss:"%d초",m:"일분",mm:"%d분",h:"한시간",hh:"%d시간",d:"하루",dd:"%d일",M:"한달",MM:"%d달",y:"일년",yy:"%d년"},ordinalParse:/\d{1,2}일/,ordinal:"%d일",meridiemParse:/오전|오후/,isPM:function(a){return"오후"===a},meridiem:function(a,b,c){return 12>a?"오전":"오후"}}),Kf.defineLocale("lb",{months:"Januar_Februar_Mäerz_Abrëll_Mee_Juni_Juli_August_September_Oktober_November_Dezember".split("_"),monthsShort:"Jan._Febr._Mrz._Abr._Mee_Jun._Jul._Aug._Sept._Okt._Nov._Dez.".split("_"),weekdays:"Sonndeg_Méindeg_Dënschdeg_Mëttwoch_Donneschdeg_Freideg_Samschdeg".split("_"),weekdaysShort:"So._Mé._Dë._Më._Do._Fr._Sa.".split("_"),weekdaysMin:"So_Mé_Dë_Më_Do_Fr_Sa".split("_"),longDateFormat:{LT:"H:mm [Auer]",LTS:"H:mm:ss [Auer]",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm [Auer]",LLLL:"dddd, D. 
MMMM YYYY H:mm [Auer]"},calendar:{sameDay:"[Haut um] LT",sameElse:"L",nextDay:"[Muer um] LT",nextWeek:"dddd [um] LT",lastDay:"[Gëschter um] LT",lastWeek:function(){switch(this.day()){case 2:case 4:return"[Leschten] dddd [um] LT";default:return"[Leschte] dddd [um] LT"}}},relativeTime:{future:nd,past:od,s:"e puer Sekonnen",m:md,mm:"%d Minutten",h:md,hh:"%d Stonnen",d:md,dd:"%d Deeg",M:md,MM:"%d Méint",y:md,yy:"%d Joer"},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),Kf.defineLocale("lo",{months:"ມັງກອນ_ກຸມພາ_ມີນາ_ເມສາ_ພຶດສະພາ_ມິຖຸນາ_ກໍລະກົດ_ສິງຫາ_ກັນຍາ_ຕຸລາ_ພະຈິກ_ທັນວາ".split("_"),monthsShort:"ມັງກອນ_ກຸມພາ_ມີນາ_ເມສາ_ພຶດສະພາ_ມິຖຸນາ_ກໍລະກົດ_ສິງຫາ_ກັນຍາ_ຕຸລາ_ພະຈິກ_ທັນວາ".split("_"),weekdays:"ອາທິດ_ຈັນ_ອັງຄານ_ພຸດ_ພະຫັດ_ສຸກ_ເສົາ".split("_"),weekdaysShort:"ທິດ_ຈັນ_ອັງຄານ_ພຸດ_ພະຫັດ_ສຸກ_ເສົາ".split("_"),weekdaysMin:"ທ_ຈ_ອຄ_ພ_ພຫ_ສກ_ສ".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"ວັນdddd D MMMM YYYY HH:mm"},meridiemParse:/ຕອນເຊົ້າ|ຕອນແລງ/,isPM:function(a){return"ຕອນແລງ"===a},meridiem:function(a,b,c){return 12>a?"ຕອນເຊົ້າ":"ຕອນແລງ"},calendar:{sameDay:"[ມື້ນີ້ເວລາ] LT",nextDay:"[ມື້ອື່ນເວລາ] LT",nextWeek:"[ວັນ]dddd[ໜ້າເວລາ] LT",lastDay:"[ມື້ວານນີ້ເວລາ] LT",lastWeek:"[ວັນ]dddd[ແລ້ວນີ້ເວລາ] LT",sameElse:"L"},relativeTime:{future:"ອີກ %s",past:"%sຜ່ານມາ",s:"ບໍ່ເທົ່າໃດວິນາທີ",m:"1 ນາທີ",mm:"%d ນາທີ",h:"1 ຊົ່ວໂມງ",hh:"%d ຊົ່ວໂມງ",d:"1 ມື້",dd:"%d ມື້",M:"1 ເດືອນ",MM:"%d ເດືອນ",y:"1 ປີ",yy:"%d ປີ"},ordinalParse:/(ທີ່)\d{1,2}/,ordinal:function(a){ 
+return"ທີ່"+a}}),{m:"minutė_minutės_minutę",mm:"minutės_minučių_minutes",h:"valanda_valandos_valandą",hh:"valandos_valandų_valandas",d:"diena_dienos_dieną",dd:"dienos_dienų_dienas",M:"mėnuo_mėnesio_mėnesį",MM:"mėnesiai_mėnesių_mėnesius",y:"metai_metų_metus",yy:"metai_metų_metus"}),sg=(Kf.defineLocale("lt",{months:{format:"sausio_vasario_kovo_balandžio_gegužės_birželio_liepos_rugpjūčio_rugsėjo_spalio_lapkričio_gruodžio".split("_"),standalone:"sausis_vasaris_kovas_balandis_gegužė_birželis_liepa_rugpjūtis_rugsėjis_spalis_lapkritis_gruodis".split("_")},monthsShort:"sau_vas_kov_bal_geg_bir_lie_rgp_rgs_spa_lap_grd".split("_"),weekdays:{format:"sekmadienį_pirmadienį_antradienį_trečiadienį_ketvirtadienį_penktadienį_šeštadienį".split("_"),standalone:"sekmadienis_pirmadienis_antradienis_trečiadienis_ketvirtadienis_penktadienis_šeštadienis".split("_"),isFormat:/dddd HH:mm/},weekdaysShort:"Sek_Pir_Ant_Tre_Ket_Pen_Šeš".split("_"),weekdaysMin:"S_P_A_T_K_Pn_Š".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY-MM-DD",LL:"YYYY [m.] MMMM D [d.]",LLL:"YYYY [m.] MMMM D [d.], HH:mm [val.]",LLLL:"YYYY [m.] MMMM D [d.], dddd, HH:mm [val.]",l:"YYYY-MM-DD",ll:"YYYY [m.] MMMM D [d.]",lll:"YYYY [m.] MMMM D [d.], HH:mm [val.]",llll:"YYYY [m.] 
MMMM D [d.], ddd, HH:mm [val.]"},calendar:{sameDay:"[Šiandien] LT",nextDay:"[Rytoj] LT",nextWeek:"dddd LT",lastDay:"[Vakar] LT",lastWeek:"[Praėjusį] dddd LT",sameElse:"L"},relativeTime:{future:"po %s",past:"prieš %s",s:qd,m:rd,mm:ud,h:rd,hh:ud,d:rd,dd:ud,M:rd,MM:ud,y:rd,yy:ud},ordinalParse:/\d{1,2}-oji/,ordinal:function(a){return a+"-oji"},week:{dow:1,doy:4}}),{m:"minūtes_minūtēm_minūte_minūtes".split("_"),mm:"minūtes_minūtēm_minūte_minūtes".split("_"),h:"stundas_stundām_stunda_stundas".split("_"),hh:"stundas_stundām_stunda_stundas".split("_"),d:"dienas_dienām_diena_dienas".split("_"),dd:"dienas_dienām_diena_dienas".split("_"),M:"mēneša_mēnešiem_mēnesis_mēneši".split("_"),MM:"mēneša_mēnešiem_mēnesis_mēneši".split("_"),y:"gada_gadiem_gads_gadi".split("_"),yy:"gada_gadiem_gads_gadi".split("_")}),tg=(Kf.defineLocale("lv",{months:"janvāris_februāris_marts_aprīlis_maijs_jūnijs_jūlijs_augusts_septembris_oktobris_novembris_decembris".split("_"),monthsShort:"jan_feb_mar_apr_mai_jūn_jūl_aug_sep_okt_nov_dec".split("_"),weekdays:"svētdiena_pirmdiena_otrdiena_trešdiena_ceturtdiena_piektdiena_sestdiena".split("_"),weekdaysShort:"Sv_P_O_T_C_Pk_S".split("_"),weekdaysMin:"Sv_P_O_T_C_Pk_S".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY.",LL:"YYYY. [gada] D. MMMM",LLL:"YYYY. [gada] D. MMMM, HH:mm",LLLL:"YYYY. [gada] D. 
MMMM, dddd, HH:mm"},calendar:{sameDay:"[Šodien pulksten] LT",nextDay:"[Rīt pulksten] LT",nextWeek:"dddd [pulksten] LT",lastDay:"[Vakar pulksten] LT",lastWeek:"[Pagājušā] dddd [pulksten] LT",sameElse:"L"},relativeTime:{future:"pēc %s",past:"pirms %s",s:yd,m:xd,mm:wd,h:xd,hh:wd,d:xd,dd:wd,M:xd,MM:wd,y:xd,yy:wd},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),{words:{m:["jedan minut","jednog minuta"],mm:["minut","minuta","minuta"],h:["jedan sat","jednog sata"],hh:["sat","sata","sati"],dd:["dan","dana","dana"],MM:["mjesec","mjeseca","mjeseci"],yy:["godina","godine","godina"]},correctGrammaticalCase:function(a,b){return 1===a?b[0]:a>=2&&4>=a?b[1]:b[2]},translate:function(a,b,c){var d=tg.words[c];return 1===c.length?b?d[0]:d[1]:a+" "+tg.correctGrammaticalCase(a,d)}}),ug=(Kf.defineLocale("me",{months:["januar","februar","mart","april","maj","jun","jul","avgust","septembar","oktobar","novembar","decembar"],monthsShort:["jan.","feb.","mar.","apr.","maj","jun","jul","avg.","sep.","okt.","nov.","dec."],weekdays:["nedjelja","ponedjeljak","utorak","srijeda","četvrtak","petak","subota"],weekdaysShort:["ned.","pon.","uto.","sri.","čet.","pet.","sub."],weekdaysMin:["ne","po","ut","sr","če","pe","su"],longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD. MM. YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd, D. 
MMMM YYYY H:mm"},calendar:{sameDay:"[danas u] LT",nextDay:"[sjutra u] LT",nextWeek:function(){switch(this.day()){case 0:return"[u] [nedjelju] [u] LT";case 3:return"[u] [srijedu] [u] LT";case 6:return"[u] [subotu] [u] LT";case 1:case 2:case 4:case 5:return"[u] dddd [u] LT"}},lastDay:"[juče u] LT",lastWeek:function(){var a=["[prošle] [nedjelje] [u] LT","[prošlog] [ponedjeljka] [u] LT","[prošlog] [utorka] [u] LT","[prošle] [srijede] [u] LT","[prošlog] [četvrtka] [u] LT","[prošlog] [petka] [u] LT","[prošle] [subote] [u] LT"];return a[this.day()]},sameElse:"L"},relativeTime:{future:"za %s",past:"prije %s",s:"nekoliko sekundi",m:tg.translate,mm:tg.translate,h:tg.translate,hh:tg.translate,d:"dan",dd:tg.translate,M:"mjesec",MM:tg.translate,y:"godinu",yy:tg.translate},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}}),Kf.defineLocale("mk",{months:"јануари_февруари_март_април_мај_јуни_јули_август_септември_октомври_ноември_декември".split("_"),monthsShort:"јан_фев_мар_апр_мај_јун_јул_авг_сеп_окт_ное_дек".split("_"),weekdays:"недела_понеделник_вторник_среда_четврток_петок_сабота".split("_"),weekdaysShort:"нед_пон_вто_сре_чет_пет_саб".split("_"),weekdaysMin:"нe_пo_вт_ср_че_пе_сa".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"D.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY H:mm",LLLL:"dddd, D MMMM YYYY H:mm"},calendar:{sameDay:"[Денес во] LT",nextDay:"[Утре во] LT",nextWeek:"[Во] dddd [во] LT",lastDay:"[Вчера во] LT",lastWeek:function(){switch(this.day()){case 0:case 3:case 6:return"[Изминатата] dddd [во] LT";case 1:case 2:case 4:case 5:return"[Изминатиот] dddd [во] LT"}},sameElse:"L"},relativeTime:{future:"после %s",past:"пред %s",s:"неколку секунди",m:"минута",mm:"%d минути",h:"час",hh:"%d часа",d:"ден",dd:"%d дена",M:"месец",MM:"%d месеци",y:"година",yy:"%d години"},ordinalParse:/\d{1,2}-(ев|ен|ти|ви|ри|ми)/,ordinal:function(a){var b=a%10,c=a%100;return 
0===a?a+"-ев":0===c?a+"-ен":c>10&&20>c?a+"-ти":1===b?a+"-ви":2===b?a+"-ри":7===b||8===b?a+"-ми":a+"-ти"},week:{dow:1,doy:7}}),Kf.defineLocale("ml",{months:"ജനുവരി_ഫെബ്രുവരി_മാർച്ച്_ഏപ്രിൽ_മേയ്_ജൂൺ_ജൂലൈ_ഓഗസ്റ്റ്_സെപ്റ്റംബർ_ഒക്ടോബർ_നവംബർ_ഡിസംബർ".split("_"),monthsShort:"ജനു._ഫെബ്രു._മാർ._ഏപ്രി._മേയ്_ജൂൺ_ജൂലൈ._ഓഗ._സെപ്റ്റ._ഒക്ടോ._നവം._ഡിസം.".split("_"),weekdays:"ഞായറാഴ്ച_തിങ്കളാഴ്ച_ചൊവ്വാഴ്ച_ബുധനാഴ്ച_വ്യാഴാഴ്ച_വെള്ളിയാഴ്ച_ശനിയാഴ്ച".split("_"),weekdaysShort:"ഞായർ_തിങ്കൾ_ചൊവ്വ_ബുധൻ_വ്യാഴം_വെള്ളി_ശനി".split("_"),weekdaysMin:"ഞാ_തി_ചൊ_ബു_വ്യാ_വെ_ശ".split("_"),longDateFormat:{LT:"A h:mm -നു",LTS:"A h:mm:ss -നു",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm -നു",LLLL:"dddd, D MMMM YYYY, A h:mm -നു"},calendar:{sameDay:"[ഇന്ന്] LT",nextDay:"[നാളെ] LT",nextWeek:"dddd, LT",lastDay:"[ഇന്നലെ] LT",lastWeek:"[കഴിഞ്ഞ] dddd, LT",sameElse:"L"},relativeTime:{future:"%s കഴിഞ്ഞ്",past:"%s മുൻപ്",s:"അൽപ നിമിഷങ്ങൾ",m:"ഒരു മിനിറ്റ്",mm:"%d മിനിറ്റ്",h:"ഒരു മണിക്കൂർ",hh:"%d മണിക്കൂർ",d:"ഒരു ദിവസം",dd:"%d ദിവസം",M:"ഒരു മാസം",MM:"%d മാസം",y:"ഒരു വർഷം",yy:"%d വർഷം"},meridiemParse:/രാത്രി|രാവിലെ|ഉച്ച കഴിഞ്ഞ്|വൈകുന്നേരം|രാത്രി/i,isPM:function(a){return/^(ഉച്ച കഴിഞ്ഞ്|വൈകുന്നേരം|രാത്രി)$/.test(a)},meridiem:function(a,b,c){return 4>a?"രാത്രി":12>a?"രാവിലെ":17>a?"ഉച്ച കഴിഞ്ഞ്":20>a?"വൈകുന്നേരം":"രാത്രി"}}),{1:"१",2:"२",3:"३",4:"४",5:"५",6:"६",7:"७",8:"८",9:"९",0:"०"}),vg={"१":"1","२":"2","३":"3","४":"4","५":"5","६":"6","७":"7","८":"8","९":"9","०":"0"},wg=(Kf.defineLocale("mr",{months:"जानेवारी_फेब्रुवारी_मार्च_एप्रिल_मे_जून_जुलै_ऑगस्ट_सप्टेंबर_ऑक्टोबर_नोव्हेंबर_डिसेंबर".split("_"),monthsShort:"जाने._फेब्रु._मार्च._एप्रि._मे._जून._जुलै._ऑग._सप्टें._ऑक्टो._नोव्हें._डिसें.".split("_"),weekdays:"रविवार_सोमवार_मंगळवार_बुधवार_गुरूवार_शुक्रवार_शनिवार".split("_"),weekdaysShort:"रवि_सोम_मंगळ_बुध_गुरू_शुक्र_शनि".split("_"),weekdaysMin:"र_सो_मं_बु_गु_शु_श".split("_"),longDateFormat:{LT:"A h:mm वाजता",LTS:"A h:mm:ss वाजता",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm वाजता",LLLL:"dddd, D 
MMMM YYYY, A h:mm वाजता"},calendar:{sameDay:"[आज] LT",nextDay:"[उद्या] LT",nextWeek:"dddd, LT",lastDay:"[काल] LT",lastWeek:"[मागील] dddd, LT",sameElse:"L"},relativeTime:{future:"%sमध्ये",past:"%sपूर्वी",s:zd,m:zd,mm:zd,h:zd,hh:zd,d:zd,dd:zd,M:zd,MM:zd,y:zd,yy:zd},preparse:function(a){return a.replace(/[१२३४५६७८९०]/g,function(a){return vg[a]})},postformat:function(a){return a.replace(/\d/g,function(a){return ug[a]})},meridiemParse:/रात्री|सकाळी|दुपारी|सायंकाळी/,meridiemHour:function(a,b){return 12===a&&(a=0),"रात्री"===b?4>a?a:a+12:"सकाळी"===b?a:"दुपारी"===b?a>=10?a:a+12:"सायंकाळी"===b?a+12:void 0},meridiem:function(a,b,c){return 4>a?"रात्री":10>a?"सकाळी":17>a?"दुपारी":20>a?"सायंकाळी":"रात्री"},week:{dow:0,doy:6}}),Kf.defineLocale("ms-my",{months:"Januari_Februari_Mac_April_Mei_Jun_Julai_Ogos_September_Oktober_November_Disember".split("_"),monthsShort:"Jan_Feb_Mac_Apr_Mei_Jun_Jul_Ogs_Sep_Okt_Nov_Dis".split("_"),weekdays:"Ahad_Isnin_Selasa_Rabu_Khamis_Jumaat_Sabtu".split("_"),weekdaysShort:"Ahd_Isn_Sel_Rab_Kha_Jum_Sab".split("_"),weekdaysMin:"Ah_Is_Sl_Rb_Km_Jm_Sb".split("_"),longDateFormat:{LT:"HH.mm",LTS:"HH.mm.ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY [pukul] HH.mm",LLLL:"dddd, D MMMM YYYY [pukul] HH.mm"},meridiemParse:/pagi|tengahari|petang|malam/,meridiemHour:function(a,b){return 12===a&&(a=0),"pagi"===b?a:"tengahari"===b?a>=11?a:a+12:"petang"===b||"malam"===b?a+12:void 0},meridiem:function(a,b,c){return 11>a?"pagi":15>a?"tengahari":19>a?"petang":"malam"},calendar:{sameDay:"[Hari ini pukul] LT",nextDay:"[Esok pukul] LT",nextWeek:"dddd [pukul] LT",lastDay:"[Kelmarin pukul] LT",lastWeek:"dddd [lepas pukul] LT",sameElse:"L"},relativeTime:{future:"dalam %s",past:"%s yang lepas",s:"beberapa saat",m:"seminit",mm:"%d minit",h:"sejam",hh:"%d jam",d:"sehari",dd:"%d hari",M:"sebulan",MM:"%d bulan",y:"setahun",yy:"%d 
tahun"},week:{dow:1,doy:7}}),Kf.defineLocale("ms",{months:"Januari_Februari_Mac_April_Mei_Jun_Julai_Ogos_September_Oktober_November_Disember".split("_"),monthsShort:"Jan_Feb_Mac_Apr_Mei_Jun_Jul_Ogs_Sep_Okt_Nov_Dis".split("_"),weekdays:"Ahad_Isnin_Selasa_Rabu_Khamis_Jumaat_Sabtu".split("_"),weekdaysShort:"Ahd_Isn_Sel_Rab_Kha_Jum_Sab".split("_"),weekdaysMin:"Ah_Is_Sl_Rb_Km_Jm_Sb".split("_"),longDateFormat:{LT:"HH.mm",LTS:"HH.mm.ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY [pukul] HH.mm",LLLL:"dddd, D MMMM YYYY [pukul] HH.mm"},meridiemParse:/pagi|tengahari|petang|malam/,meridiemHour:function(a,b){return 12===a&&(a=0),"pagi"===b?a:"tengahari"===b?a>=11?a:a+12:"petang"===b||"malam"===b?a+12:void 0},meridiem:function(a,b,c){return 11>a?"pagi":15>a?"tengahari":19>a?"petang":"malam"},calendar:{sameDay:"[Hari ini pukul] LT",nextDay:"[Esok pukul] LT",nextWeek:"dddd [pukul] LT",lastDay:"[Kelmarin pukul] LT",lastWeek:"dddd [lepas pukul] LT",sameElse:"L"},relativeTime:{future:"dalam %s",past:"%s yang lepas",s:"beberapa saat",m:"seminit",mm:"%d minit",h:"sejam",hh:"%d jam",d:"sehari",dd:"%d hari",M:"sebulan",MM:"%d bulan",y:"setahun",yy:"%d tahun"},week:{dow:1,doy:7}}),{1:"၁",2:"၂",3:"၃",4:"၄",5:"၅",6:"၆",7:"၇",8:"၈",9:"၉",0:"၀"}),xg={"၁":"1","၂":"2","၃":"3","၄":"4","၅":"5","၆":"6","၇":"7","၈":"8","၉":"9","၀":"0"},yg=(Kf.defineLocale("my",{months:"ဇန်နဝါရီ_ဖေဖော်ဝါရီ_မတ်_ဧပြီ_မေ_ဇွန်_ဇူလိုင်_သြဂုတ်_စက်တင်ဘာ_အောက်တိုဘာ_နိုဝင်ဘာ_ဒီဇင်ဘာ".split("_"),monthsShort:"ဇန်_ဖေ_မတ်_ပြီ_မေ_ဇွန်_လိုင်_သြ_စက်_အောက်_နို_ဒီ".split("_"),weekdays:"တနင်္ဂနွေ_တနင်္လာ_အင်္ဂါ_ဗုဒ္ဓဟူး_ကြာသပတေး_သောကြာ_စနေ".split("_"),weekdaysShort:"နွေ_လာ_ဂါ_ဟူး_ကြာ_သော_နေ".split("_"),weekdaysMin:"နွေ_လာ_ဂါ_ဟူး_ကြာ_သော_နေ".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[ယနေ.] 
LT [မှာ]",nextDay:"[မနက်ဖြန်] LT [မှာ]",nextWeek:"dddd LT [မှာ]",lastDay:"[မနေ.က] LT [မှာ]",lastWeek:"[ပြီးခဲ့သော] dddd LT [မှာ]",sameElse:"L"},relativeTime:{future:"လာမည့် %s မှာ",past:"လွန်ခဲ့သော %s က",s:"စက္ကန်.အနည်းငယ်",m:"တစ်မိနစ်",mm:"%d မိနစ်",h:"တစ်နာရီ",hh:"%d နာရီ",d:"တစ်ရက်",dd:"%d ရက်",M:"တစ်လ",MM:"%d လ",y:"တစ်နှစ်",yy:"%d နှစ်"},preparse:function(a){return a.replace(/[၁၂၃၄၅၆၇၈၉၀]/g,function(a){return xg[a]})},postformat:function(a){return a.replace(/\d/g,function(a){return wg[a]})},week:{dow:1,doy:4}}),Kf.defineLocale("nb",{months:"januar_februar_mars_april_mai_juni_juli_august_september_oktober_november_desember".split("_"),monthsShort:"jan._feb._mars_april_mai_juni_juli_aug._sep._okt._nov._des.".split("_"),weekdays:"søndag_mandag_tirsdag_onsdag_torsdag_fredag_lørdag".split("_"),weekdaysShort:"sø._ma._ti._on._to._fr._lø.".split("_"),weekdaysMin:"sø_ma_ti_on_to_fr_lø".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY [kl.] HH:mm",LLLL:"dddd D. MMMM YYYY [kl.] HH:mm"},calendar:{sameDay:"[i dag kl.] LT",nextDay:"[i morgen kl.] LT",nextWeek:"dddd [kl.] LT",lastDay:"[i går kl.] LT",lastWeek:"[forrige] dddd [kl.] 
LT",sameElse:"L"},relativeTime:{future:"om %s",past:"for %s siden",s:"noen sekunder",m:"ett minutt",mm:"%d minutter",h:"en time",hh:"%d timer",d:"en dag",dd:"%d dager",M:"en måned",MM:"%d måneder",y:"ett år",yy:"%d år"},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),{1:"१",2:"२",3:"३",4:"४",5:"५",6:"६",7:"७",8:"८",9:"९",0:"०"}),zg={"१":"1","२":"2","३":"3","४":"4","५":"5","६":"6","७":"7","८":"8","९":"9","०":"0"},Ag=(Kf.defineLocale("ne",{months:"जनवरी_फेब्रुवरी_मार्च_अप्रिल_मई_जुन_जुलाई_अगष्ट_सेप्टेम्बर_अक्टोबर_नोभेम्बर_डिसेम्बर".split("_"),monthsShort:"जन._फेब्रु._मार्च_अप्रि._मई_जुन_जुलाई._अग._सेप्ट._अक्टो._नोभे._डिसे.".split("_"),weekdays:"आइतबार_सोमबार_मङ्गलबार_बुधबार_बिहिबार_शुक्रबार_शनिबार".split("_"),weekdaysShort:"आइत._सोम._मङ्गल._बुध._बिहि._शुक्र._शनि.".split("_"),weekdaysMin:"आ._सो._मं._बु._बि._शु._श.".split("_"),longDateFormat:{LT:"Aको h:mm बजे",LTS:"Aको h:mm:ss बजे",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, Aको h:mm बजे",LLLL:"dddd, D MMMM YYYY, Aको h:mm बजे"},preparse:function(a){return a.replace(/[१२३४५६७८९०]/g,function(a){return zg[a]})},postformat:function(a){return a.replace(/\d/g,function(a){return yg[a]})},meridiemParse:/राति|बिहान|दिउँसो|साँझ/,meridiemHour:function(a,b){return 12===a&&(a=0),"राति"===b?4>a?a:a+12:"बिहान"===b?a:"दिउँसो"===b?a>=10?a:a+12:"साँझ"===b?a+12:void 0},meridiem:function(a,b,c){return 3>a?"राति":12>a?"बिहान":16>a?"दिउँसो":20>a?"साँझ":"राति"},calendar:{sameDay:"[आज] LT",nextDay:"[भोलि] LT",nextWeek:"[आउँदो] dddd[,] LT",lastDay:"[हिजो] LT",lastWeek:"[गएको] dddd[,] LT",sameElse:"L"},relativeTime:{future:"%sमा",past:"%s अगाडि",s:"केही क्षण",m:"एक मिनेट",mm:"%d मिनेट",h:"एक घण्टा",hh:"%d घण्टा",d:"एक दिन",dd:"%d दिन",M:"एक महिना",MM:"%d महिना",y:"एक बर्ष",yy:"%d 
बर्ष"},week:{dow:0,doy:6}}),"jan._feb._mrt._apr._mei_jun._jul._aug._sep._okt._nov._dec.".split("_")),Bg="jan_feb_mrt_apr_mei_jun_jul_aug_sep_okt_nov_dec".split("_"),Cg=(Kf.defineLocale("nl",{months:"januari_februari_maart_april_mei_juni_juli_augustus_september_oktober_november_december".split("_"),monthsShort:function(a,b){return/-MMM-/.test(b)?Bg[a.month()]:Ag[a.month()]},weekdays:"zondag_maandag_dinsdag_woensdag_donderdag_vrijdag_zaterdag".split("_"),weekdaysShort:"zo._ma._di._wo._do._vr._za.".split("_"),weekdaysMin:"Zo_Ma_Di_Wo_Do_Vr_Za".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD-MM-YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[vandaag om] LT",nextDay:"[morgen om] LT",nextWeek:"dddd [om] LT",lastDay:"[gisteren om] LT",lastWeek:"[afgelopen] dddd [om] LT",sameElse:"L"},relativeTime:{future:"over %s",past:"%s geleden",s:"een paar seconden",m:"één minuut",mm:"%d minuten",h:"één uur",hh:"%d uur",d:"één dag",dd:"%d dagen",M:"één maand",MM:"%d maanden",y:"één jaar",yy:"%d jaar"},ordinalParse:/\d{1,2}(ste|de)/,ordinal:function(a){return a+(1===a||8===a||a>=20?"ste":"de")},week:{dow:1,doy:4}}),Kf.defineLocale("nn",{months:"januar_februar_mars_april_mai_juni_juli_august_september_oktober_november_desember".split("_"),monthsShort:"jan_feb_mar_apr_mai_jun_jul_aug_sep_okt_nov_des".split("_"),weekdays:"sundag_måndag_tysdag_onsdag_torsdag_fredag_laurdag".split("_"),weekdaysShort:"sun_mån_tys_ons_tor_fre_lau".split("_"),weekdaysMin:"su_må_ty_on_to_fr_lø".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY [kl.] H:mm",LLLL:"dddd D. MMMM YYYY [kl.] 
HH:mm"},calendar:{sameDay:"[I dag klokka] LT",nextDay:"[I morgon klokka] LT",nextWeek:"dddd [klokka] LT",lastDay:"[I går klokka] LT",lastWeek:"[Føregåande] dddd [klokka] LT",sameElse:"L"},relativeTime:{future:"om %s",past:"for %s sidan",s:"nokre sekund",m:"eit minutt",mm:"%d minutt",h:"ein time",hh:"%d timar",d:"ein dag",dd:"%d dagar",M:"ein månad",MM:"%d månader",y:"eit år",yy:"%d år"},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),"styczeń_luty_marzec_kwiecień_maj_czerwiec_lipiec_sierpień_wrzesień_październik_listopad_grudzień".split("_")),Dg="stycznia_lutego_marca_kwietnia_maja_czerwca_lipca_sierpnia_września_października_listopada_grudnia".split("_"),Eg=(Kf.defineLocale("pl",{months:function(a,b){return""===b?"("+Dg[a.month()]+"|"+Cg[a.month()]+")":/D MMMM/.test(b)?Dg[a.month()]:Cg[a.month()]},monthsShort:"sty_lut_mar_kwi_maj_cze_lip_sie_wrz_paź_lis_gru".split("_"),weekdays:"niedziela_poniedziałek_wtorek_środa_czwartek_piątek_sobota".split("_"),weekdaysShort:"nie_pon_wt_śr_czw_pt_sb".split("_"),weekdaysMin:"Nd_Pn_Wt_Śr_Cz_Pt_So".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Dziś o] LT",nextDay:"[Jutro o] LT",nextWeek:"[W] dddd [o] LT",lastDay:"[Wczoraj o] LT",lastWeek:function(){switch(this.day()){case 0:return"[W zeszłą niedzielę o] LT";case 3:return"[W zeszłą środę o] LT";case 6:return"[W zeszłą sobotę o] LT";default:return"[W zeszły] dddd [o] LT"}},sameElse:"L"},relativeTime:{future:"za %s",past:"%s temu",s:"kilka sekund",m:Bd,mm:Bd,h:Bd,hh:Bd,d:"1 dzień",dd:"%d 
dni",M:"miesiąc",MM:Bd,y:"rok",yy:Bd},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),Kf.defineLocale("pt-br",{months:"Janeiro_Fevereiro_Março_Abril_Maio_Junho_Julho_Agosto_Setembro_Outubro_Novembro_Dezembro".split("_"),monthsShort:"Jan_Fev_Mar_Abr_Mai_Jun_Jul_Ago_Set_Out_Nov_Dez".split("_"),weekdays:"Domingo_Segunda-Feira_Terça-Feira_Quarta-Feira_Quinta-Feira_Sexta-Feira_Sábado".split("_"),weekdaysShort:"Dom_Seg_Ter_Qua_Qui_Sex_Sáb".split("_"),weekdaysMin:"Dom_2ª_3ª_4ª_5ª_6ª_Sáb".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D [de] MMMM [de] YYYY",LLL:"D [de] MMMM [de] YYYY [às] HH:mm",LLLL:"dddd, D [de] MMMM [de] YYYY [às] HH:mm"},calendar:{sameDay:"[Hoje às] LT",nextDay:"[Amanhã às] LT",nextWeek:"dddd [às] LT",lastDay:"[Ontem às] LT",lastWeek:function(){return 0===this.day()||6===this.day()?"[Último] dddd [às] LT":"[Última] dddd [às] LT"},sameElse:"L"},relativeTime:{future:"em %s",past:"%s atrás",s:"poucos segundos",m:"um minuto",mm:"%d minutos",h:"uma hora",hh:"%d horas",d:"um dia",dd:"%d dias",M:"um mês",MM:"%d meses",y:"um ano",yy:"%d anos"},ordinalParse:/\d{1,2}º/,ordinal:"%dº"}),Kf.defineLocale("pt",{months:"Janeiro_Fevereiro_Março_Abril_Maio_Junho_Julho_Agosto_Setembro_Outubro_Novembro_Dezembro".split("_"),monthsShort:"Jan_Fev_Mar_Abr_Mai_Jun_Jul_Ago_Set_Out_Nov_Dez".split("_"),weekdays:"Domingo_Segunda-Feira_Terça-Feira_Quarta-Feira_Quinta-Feira_Sexta-Feira_Sábado".split("_"),weekdaysShort:"Dom_Seg_Ter_Qua_Qui_Sex_Sáb".split("_"),weekdaysMin:"Dom_2ª_3ª_4ª_5ª_6ª_Sáb".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D [de] MMMM [de] YYYY",LLL:"D [de] MMMM [de] YYYY HH:mm",LLLL:"dddd, D [de] MMMM [de] YYYY HH:mm"},calendar:{sameDay:"[Hoje às] LT",nextDay:"[Amanhã às] LT",nextWeek:"dddd [às] LT",lastDay:"[Ontem às] LT",lastWeek:function(){return 0===this.day()||6===this.day()?"[Último] dddd [às] LT":"[Última] dddd [às] LT"},sameElse:"L"},relativeTime:{future:"em %s",past:"há %s",s:"segundos",m:"um 
minuto",mm:"%d minutos",h:"uma hora",hh:"%d horas",d:"um dia",dd:"%d dias",M:"um mês",MM:"%d meses",y:"um ano",yy:"%d anos"},ordinalParse:/\d{1,2}º/,ordinal:"%dº",week:{dow:1,doy:4}}),Kf.defineLocale("ro",{months:"ianuarie_februarie_martie_aprilie_mai_iunie_iulie_august_septembrie_octombrie_noiembrie_decembrie".split("_"),monthsShort:"ian._febr._mart._apr._mai_iun._iul._aug._sept._oct._nov._dec.".split("_"),weekdays:"duminică_luni_marți_miercuri_joi_vineri_sâmbătă".split("_"),weekdaysShort:"Dum_Lun_Mar_Mie_Joi_Vin_Sâm".split("_"),weekdaysMin:"Du_Lu_Ma_Mi_Jo_Vi_Sâ".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY H:mm",LLLL:"dddd, D MMMM YYYY H:mm"},calendar:{sameDay:"[azi la] LT",nextDay:"[mâine la] LT",nextWeek:"dddd [la] LT",lastDay:"[ieri la] LT",lastWeek:"[fosta] dddd [la] LT",sameElse:"L"},relativeTime:{future:"peste %s",past:"%s în urmă",s:"câteva secunde",m:"un minut",mm:Cd,h:"o oră",hh:Cd,d:"o zi",dd:Cd,M:"o lună",MM:Cd,y:"un an",yy:Cd},week:{dow:1,doy:7}}),[/^янв/i,/^фев/i,/^мар/i,/^апр/i,/^ма[й|я]/i,/^июн/i,/^июл/i,/^авг/i,/^сен/i,/^окт/i,/^ноя/i,/^дек/i]),Fg=(Kf.defineLocale("ru",{months:{format:"Января_Февраля_Марта_Апреля_Мая_Июня_Июля_Августа_Сентября_Октября_Ноября_Декабря".split("_"),standalone:"Январь_Февраль_Март_Апрель_Май_Июнь_Июль_Август_Сентябрь_Октябрь_Ноябрь_Декабрь".split("_")},monthsShort:{format:"янв_фев_мар_апр_мая_июня_июля_авг_сен_окт_ноя_дек".split("_"),standalone:"янв_фев_март_апр_май_июнь_июль_авг_сен_окт_ноя_дек".split("_")},weekdays:{standalone:"Воскресенье_Понедельник_Вторник_Среда_Четверг_Пятница_Суббота".split("_"),format:"Воскресенье_Понедельник_Вторник_Среду_Четверг_Пятницу_Субботу".split("_"),isFormat:/\[ ?[Вв] ?(?:прошлую|следующую|эту)? 
?\] ?dddd/},weekdaysShort:"Вс_Пн_Вт_Ср_Чт_Пт_Сб".split("_"),weekdaysMin:"Вс_Пн_Вт_Ср_Чт_Пт_Сб".split("_"),monthsParse:Eg,longMonthsParse:Eg,shortMonthsParse:Eg,longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY г.",LLL:"D MMMM YYYY г., HH:mm",LLLL:"dddd, D MMMM YYYY г., HH:mm"},calendar:{sameDay:"[Сегодня в] LT",nextDay:"[Завтра в] LT",lastDay:"[Вчера в] LT",nextWeek:function(a){if(a.week()===this.week())return 2===this.day()?"[Во] dddd [в] LT":"[В] dddd [в] LT";switch(this.day()){case 0:return"[В следующее] dddd [в] LT";case 1:case 2:case 4:return"[В следующий] dddd [в] LT";case 3:case 5:case 6:return"[В следующую] dddd [в] LT"}},lastWeek:function(a){if(a.week()===this.week())return 2===this.day()?"[Во] dddd [в] LT":"[В] dddd [в] LT";switch(this.day()){case 0:return"[В прошлое] dddd [в] LT";case 1:case 2:case 4:return"[В прошлый] dddd [в] LT";case 3:case 5:case 6:return"[В прошлую] dddd [в] LT"}},sameElse:"L"},relativeTime:{future:"через %s",past:"%s назад",s:"несколько секунд",m:Ed,mm:Ed,h:"час",hh:Ed,d:"день",dd:Ed,M:"месяц",MM:Ed,y:"год",yy:Ed},meridiemParse:/ночи|утра|дня|вечера/i,isPM:function(a){return/^(дня|вечера)$/.test(a)},meridiem:function(a,b,c){return 4>a?"ночи":12>a?"утра":17>a?"дня":"вечера"},ordinalParse:/\d{1,2}-(й|го|я)/,ordinal:function(a,b){switch(b){case"M":case"d":case"DDD":return a+"-й";case"D":return a+"-го";case"w":case"W":return a+"-я";default:return a}},week:{dow:1,doy:7}}),Kf.defineLocale("se",{months:"ođđajagemánnu_guovvamánnu_njukčamánnu_cuoŋománnu_miessemánnu_geassemánnu_suoidnemánnu_borgemánnu_čakčamánnu_golggotmánnu_skábmamánnu_juovlamánnu".split("_"),monthsShort:"ođđj_guov_njuk_cuo_mies_geas_suoi_borg_čakč_golg_skáb_juov".split("_"),weekdays:"sotnabeaivi_vuossárga_maŋŋebárga_gaskavahkku_duorastat_bearjadat_lávvardat".split("_"),weekdaysShort:"sotn_vuos_maŋ_gask_duor_bear_láv".split("_"),weekdaysMin:"s_v_m_g_d_b_L".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"MMMM D. [b.] 
YYYY",LLL:"MMMM D. [b.] YYYY [ti.] HH:mm",LLLL:"dddd, MMMM D. [b.] YYYY [ti.] HH:mm"},calendar:{sameDay:"[otne ti] LT",nextDay:"[ihttin ti] LT",nextWeek:"dddd [ti] LT",lastDay:"[ikte ti] LT",lastWeek:"[ovddit] dddd [ti] LT",sameElse:"L"},relativeTime:{future:"%s geažes",past:"maŋit %s",s:"moadde sekunddat",m:"okta minuhta",mm:"%d minuhtat",h:"okta diimmu",hh:"%d diimmut",d:"okta beaivi",dd:"%d beaivvit",M:"okta mánnu",MM:"%d mánut",y:"okta jahki",yy:"%d jagit"},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),Kf.defineLocale("si",{months:"ජනවාරි_පෙබරවාරි_මාර්තු_අප්‍රේල්_මැයි_ජූනි_ජූලි_අගෝස්තු_සැප්තැම්බර්_ඔක්තෝබර්_නොවැම්බර්_දෙසැම්බර්".split("_"),monthsShort:"ජන_පෙබ_මාර්_අප්_මැයි_ජූනි_ජූලි_අගෝ_සැප්_ඔක්_නොවැ_දෙසැ".split("_"),weekdays:"ඉරිදා_සඳුදා_අඟහරුවාදා_බදාදා_බ්‍රහස්පතින්දා_සිකුරාදා_සෙනසුරාදා".split("_"),weekdaysShort:"ඉරි_සඳු_අඟ_බදා_බ්‍රහ_සිකු_සෙන".split("_"),weekdaysMin:"ඉ_ස_අ_බ_බ්‍ර_සි_සෙ".split("_"),longDateFormat:{LT:"a h:mm",LTS:"a h:mm:ss",L:"YYYY/MM/DD",LL:"YYYY MMMM D",LLL:"YYYY MMMM D, a h:mm",LLLL:"YYYY MMMM D [වැනි] dddd, a h:mm:ss"},calendar:{sameDay:"[අද] LT[ට]",nextDay:"[හෙට] LT[ට]",nextWeek:"dddd LT[ට]",lastDay:"[ඊයේ] LT[ට]",lastWeek:"[පසුගිය] dddd LT[ට]",sameElse:"L"},relativeTime:{future:"%sකින්",past:"%sකට පෙර",s:"තත්පර කිහිපය",m:"මිනිත්තුව",mm:"මිනිත්තු %d",h:"පැය",hh:"පැය %d",d:"දිනය",dd:"දින %d",M:"මාසය",MM:"මාස %d",y:"වසර",yy:"වසර %d"},ordinalParse:/\d{1,2} වැනි/,ordinal:function(a){return a+" වැනි"},meridiem:function(a,b,c){return a>11?c?"ප.ව.":"පස් වරු":c?"පෙ.ව.":"පෙර වරු"}}),"január_február_marec_apríl_máj_jún_júl_august_september_október_november_december".split("_")),Gg="jan_feb_mar_apr_máj_jún_júl_aug_sep_okt_nov_dec".split("_"),Hg=(Kf.defineLocale("sk",{months:Fg,monthsShort:Gg,weekdays:"nedeľa_pondelok_utorok_streda_štvrtok_piatok_sobota".split("_"),weekdaysShort:"ne_po_ut_st_št_pi_so".split("_"),weekdaysMin:"ne_po_ut_st_št_pi_so".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD.MM.YYYY",LL:"D. MMMM YYYY",LLL:"D. 
MMMM YYYY H:mm",LLLL:"dddd D. MMMM YYYY H:mm"},calendar:{sameDay:"[dnes o] LT",nextDay:"[zajtra o] LT",nextWeek:function(){switch(this.day()){case 0:return"[v nedeľu o] LT";case 1:case 2:return"[v] dddd [o] LT";case 3:return"[v stredu o] LT";case 4:return"[vo štvrtok o] LT";case 5:return"[v piatok o] LT";case 6:return"[v sobotu o] LT"}},lastDay:"[včera o] LT",lastWeek:function(){switch(this.day()){case 0:return"[minulú nedeľu o] LT";case 1:case 2:return"[minulý] dddd [o] LT";case 3:return"[minulú stredu o] LT";case 4:case 5:return"[minulý] dddd [o] LT";case 6:return"[minulú sobotu o] LT"}},sameElse:"L"},relativeTime:{future:"za %s",past:"pred %s",s:Gd,m:Gd,mm:Gd,h:Gd,hh:Gd,d:Gd,dd:Gd,M:Gd,MM:Gd,y:Gd,yy:Gd},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),Kf.defineLocale("sl",{months:"januar_februar_marec_april_maj_junij_julij_avgust_september_oktober_november_december".split("_"),monthsShort:"jan._feb._mar._apr._maj._jun._jul._avg._sep._okt._nov._dec.".split("_"),weekdays:"nedelja_ponedeljek_torek_sreda_četrtek_petek_sobota".split("_"),weekdaysShort:"ned._pon._tor._sre._čet._pet._sob.".split("_"),weekdaysMin:"ne_po_to_sr_če_pe_so".split("_"),longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD. MM. YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd, D. 
MMMM YYYY H:mm"},calendar:{sameDay:"[danes ob] LT",nextDay:"[jutri ob] LT",nextWeek:function(){switch(this.day()){case 0:return"[v] [nedeljo] [ob] LT";case 3:return"[v] [sredo] [ob] LT";case 6:return"[v] [soboto] [ob] LT";case 1:case 2:case 4:case 5:return"[v] dddd [ob] LT"}},lastDay:"[včeraj ob] LT",lastWeek:function(){switch(this.day()){case 0:return"[prejšnjo] [nedeljo] [ob] LT";case 3:return"[prejšnjo] [sredo] [ob] LT";case 6:return"[prejšnjo] [soboto] [ob] LT";case 1:case 2:case 4:case 5:return"[prejšnji] dddd [ob] LT"}},sameElse:"L"},relativeTime:{future:"čez %s",past:"pred %s",s:Hd,m:Hd,mm:Hd,h:Hd,hh:Hd,d:Hd,dd:Hd,M:Hd,MM:Hd,y:Hd,yy:Hd},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}}),Kf.defineLocale("sq",{months:"Janar_Shkurt_Mars_Prill_Maj_Qershor_Korrik_Gusht_Shtator_Tetor_Nëntor_Dhjetor".split("_"),monthsShort:"Jan_Shk_Mar_Pri_Maj_Qer_Kor_Gus_Sht_Tet_Nën_Dhj".split("_"),weekdays:"E Diel_E Hënë_E Martë_E Mërkurë_E Enjte_E Premte_E Shtunë".split("_"),weekdaysShort:"Die_Hën_Mar_Mër_Enj_Pre_Sht".split("_"),weekdaysMin:"D_H_Ma_Më_E_P_Sh".split("_"),meridiemParse:/PD|MD/,isPM:function(a){return"M"===a.charAt(0)},meridiem:function(a,b,c){return 12>a?"PD":"MD"},longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[Sot në] LT",nextDay:"[Nesër në] LT",nextWeek:"dddd [në] LT",lastDay:"[Dje në] LT",lastWeek:"dddd [e kaluar në] LT",sameElse:"L"},relativeTime:{future:"në %s",past:"%s më parë",s:"disa sekonda",m:"një minutë",mm:"%d minuta",h:"një orë",hh:"%d orë",d:"një ditë",dd:"%d ditë",M:"një muaj",MM:"%d muaj",y:"një vit",yy:"%d vite"},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),{words:{m:["један минут","једне минуте"],mm:["минут","минуте","минута"],h:["један сат","једног сата"],hh:["сат","сата","сати"],dd:["дан","дана","дана"],MM:["месец","месеца","месеци"],yy:["година","године","година"]},correctGrammaticalCase:function(a,b){return 
1===a?b[0]:a>=2&&4>=a?b[1]:b[2]},translate:function(a,b,c){var d=Hg.words[c];return 1===c.length?b?d[0]:d[1]:a+" "+Hg.correctGrammaticalCase(a,d)}}),Ig=(Kf.defineLocale("sr-cyrl",{months:["јануар","фебруар","март","април","мај","јун","јул","август","септембар","октобар","новембар","децембар"],monthsShort:["јан.","феб.","мар.","апр.","мај","јун","јул","авг.","сеп.","окт.","нов.","дец."],weekdays:["недеља","понедељак","уторак","среда","четвртак","петак","субота"],weekdaysShort:["нед.","пон.","уто.","сре.","чет.","пет.","суб."],weekdaysMin:["не","по","ут","ср","че","пе","су"],longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD. MM. YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd, D. MMMM YYYY H:mm"},calendar:{sameDay:"[данас у] LT",nextDay:"[сутра у] LT",nextWeek:function(){switch(this.day()){case 0:return"[у] [недељу] [у] LT";case 3:return"[у] [среду] [у] LT";case 6:return"[у] [суботу] [у] LT";case 1:case 2:case 4:case 5:return"[у] dddd [у] LT"}},lastDay:"[јуче у] LT",lastWeek:function(){var a=["[прошле] [недеље] [у] LT","[прошлог] [понедељка] [у] LT","[прошлог] [уторка] [у] LT","[прошле] [среде] [у] LT","[прошлог] [четвртка] [у] LT","[прошлог] [петка] [у] LT","[прошле] [суботе] [у] LT"];return a[this.day()]},sameElse:"L"},relativeTime:{future:"за %s",past:"пре %s",s:"неколико секунди",m:Hg.translate,mm:Hg.translate,h:Hg.translate,hh:Hg.translate,d:"дан",dd:Hg.translate,M:"месец",MM:Hg.translate,y:"годину",yy:Hg.translate},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}}),{words:{m:["jedan minut","jedne minute"],mm:["minut","minute","minuta"],h:["jedan sat","jednog sata"],hh:["sat","sata","sati"],dd:["dan","dana","dana"],MM:["mesec","meseca","meseci"],yy:["godina","godine","godina"]},correctGrammaticalCase:function(a,b){return 1===a?b[0]:a>=2&&4>=a?b[1]:b[2]},translate:function(a,b,c){var d=Ig.words[c];return 1===c.length?b?d[0]:d[1]:a+" 
"+Ig.correctGrammaticalCase(a,d)}}),Jg=(Kf.defineLocale("sr",{months:["januar","februar","mart","april","maj","jun","jul","avgust","septembar","oktobar","novembar","decembar"],monthsShort:["jan.","feb.","mar.","apr.","maj","jun","jul","avg.","sep.","okt.","nov.","dec."],weekdays:["nedelja","ponedeljak","utorak","sreda","četvrtak","petak","subota"],weekdaysShort:["ned.","pon.","uto.","sre.","čet.","pet.","sub."],weekdaysMin:["ne","po","ut","sr","če","pe","su"],longDateFormat:{LT:"H:mm",LTS:"H:mm:ss",L:"DD. MM. YYYY",LL:"D. MMMM YYYY",LLL:"D. MMMM YYYY H:mm",LLLL:"dddd, D. MMMM YYYY H:mm"},calendar:{sameDay:"[danas u] LT",nextDay:"[sutra u] LT",nextWeek:function(){switch(this.day()){case 0:return"[u] [nedelju] [u] LT";case 3:return"[u] [sredu] [u] LT";case 6:return"[u] [subotu] [u] LT";case 1:case 2:case 4:case 5:return"[u] dddd [u] LT"}},lastDay:"[juče u] LT",lastWeek:function(){var a=["[prošle] [nedelje] [u] LT","[prošlog] [ponedeljka] [u] LT","[prošlog] [utorka] [u] LT","[prošle] [srede] [u] LT","[prošlog] [četvrtka] [u] LT","[prošlog] [petka] [u] LT","[prošle] [subote] [u] LT"];return a[this.day()]},sameElse:"L"},relativeTime:{future:"za %s",past:"pre %s",s:"nekoliko sekundi",m:Ig.translate,mm:Ig.translate,h:Ig.translate,hh:Ig.translate,d:"dan",dd:Ig.translate,M:"mesec",MM:Ig.translate,y:"godinu",yy:Ig.translate +},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:7}}),Kf.defineLocale("sv",{months:"januari_februari_mars_april_maj_juni_juli_augusti_september_oktober_november_december".split("_"),monthsShort:"jan_feb_mar_apr_maj_jun_jul_aug_sep_okt_nov_dec".split("_"),weekdays:"söndag_måndag_tisdag_onsdag_torsdag_fredag_lördag".split("_"),weekdaysShort:"sön_mån_tis_ons_tor_fre_lör".split("_"),weekdaysMin:"sö_må_ti_on_to_fr_lö".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"YYYY-MM-DD",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[Idag] LT",nextDay:"[Imorgon] LT",lastDay:"[Igår] LT",nextWeek:"[På] dddd 
LT",lastWeek:"[I] dddd[s] LT",sameElse:"L"},relativeTime:{future:"om %s",past:"för %s sedan",s:"några sekunder",m:"en minut",mm:"%d minuter",h:"en timme",hh:"%d timmar",d:"en dag",dd:"%d dagar",M:"en månad",MM:"%d månader",y:"ett år",yy:"%d år"},ordinalParse:/\d{1,2}(e|a)/,ordinal:function(a){var b=a%10,c=1===~~(a%100/10)?"e":1===b?"a":2===b?"a":"e";return a+c},week:{dow:1,doy:4}}),Kf.defineLocale("sw",{months:"Januari_Februari_Machi_Aprili_Mei_Juni_Julai_Agosti_Septemba_Oktoba_Novemba_Desemba".split("_"),monthsShort:"Jan_Feb_Mac_Apr_Mei_Jun_Jul_Ago_Sep_Okt_Nov_Des".split("_"),weekdays:"Jumapili_Jumatatu_Jumanne_Jumatano_Alhamisi_Ijumaa_Jumamosi".split("_"),weekdaysShort:"Jpl_Jtat_Jnne_Jtan_Alh_Ijm_Jmos".split("_"),weekdaysMin:"J2_J3_J4_J5_Al_Ij_J1".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[leo saa] LT",nextDay:"[kesho saa] LT",nextWeek:"[wiki ijayo] dddd [saat] LT",lastDay:"[jana] LT",lastWeek:"[wiki iliyopita] dddd [saat] LT",sameElse:"L"},relativeTime:{future:"%s baadaye",past:"tokea %s",s:"hivi punde",m:"dakika moja",mm:"dakika %d",h:"saa limoja",hh:"masaa %d",d:"siku moja",dd:"masiku %d",M:"mwezi mmoja",MM:"miezi %d",y:"mwaka mmoja",yy:"miaka 
%d"},week:{dow:1,doy:7}}),{1:"௧",2:"௨",3:"௩",4:"௪",5:"௫",6:"௬",7:"௭",8:"௮",9:"௯",0:"௦"}),Kg={"௧":"1","௨":"2","௩":"3","௪":"4","௫":"5","௬":"6","௭":"7","௮":"8","௯":"9","௦":"0"},Lg=(Kf.defineLocale("ta",{months:"ஜனவரி_பிப்ரவரி_மார்ச்_ஏப்ரல்_மே_ஜூன்_ஜூலை_ஆகஸ்ட்_செப்டெம்பர்_அக்டோபர்_நவம்பர்_டிசம்பர்".split("_"),monthsShort:"ஜனவரி_பிப்ரவரி_மார்ச்_ஏப்ரல்_மே_ஜூன்_ஜூலை_ஆகஸ்ட்_செப்டெம்பர்_அக்டோபர்_நவம்பர்_டிசம்பர்".split("_"),weekdays:"ஞாயிற்றுக்கிழமை_திங்கட்கிழமை_செவ்வாய்கிழமை_புதன்கிழமை_வியாழக்கிழமை_வெள்ளிக்கிழமை_சனிக்கிழமை".split("_"),weekdaysShort:"ஞாயிறு_திங்கள்_செவ்வாய்_புதன்_வியாழன்_வெள்ளி_சனி".split("_"),weekdaysMin:"ஞா_தி_செ_பு_வி_வெ_ச".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, HH:mm",LLLL:"dddd, D MMMM YYYY, HH:mm"},calendar:{sameDay:"[இன்று] LT",nextDay:"[நாளை] LT",nextWeek:"dddd, LT",lastDay:"[நேற்று] LT",lastWeek:"[கடந்த வாரம்] dddd, LT",sameElse:"L"},relativeTime:{future:"%s இல்",past:"%s முன்",s:"ஒரு சில விநாடிகள்",m:"ஒரு நிமிடம்",mm:"%d நிமிடங்கள்",h:"ஒரு மணி நேரம்",hh:"%d மணி நேரம்",d:"ஒரு நாள்",dd:"%d நாட்கள்",M:"ஒரு மாதம்",MM:"%d மாதங்கள்",y:"ஒரு வருடம்",yy:"%d ஆண்டுகள்"},ordinalParse:/\d{1,2}வது/,ordinal:function(a){return a+"வது"},preparse:function(a){return a.replace(/[௧௨௩௪௫௬௭௮௯௦]/g,function(a){return Kg[a]})},postformat:function(a){return a.replace(/\d/g,function(a){return Jg[a]})},meridiemParse:/யாமம்|வைகறை|காலை|நண்பகல்|எற்பாடு|மாலை/,meridiem:function(a,b,c){return 2>a?" யாமம்":6>a?" வைகறை":10>a?" காலை":14>a?" நண்பகல்":18>a?" எற்பாடு":22>a?" 
மாலை":" யாமம்"},meridiemHour:function(a,b){return 12===a&&(a=0),"யாமம்"===b?2>a?a:a+12:"வைகறை"===b||"காலை"===b?a:"நண்பகல்"===b&&a>=10?a:a+12},week:{dow:0,doy:6}}),Kf.defineLocale("te",{months:"జనవరి_ఫిబ్రవరి_మార్చి_ఏప్రిల్_మే_జూన్_జూలై_ఆగస్టు_సెప్టెంబర్_అక్టోబర్_నవంబర్_డిసెంబర్".split("_"),monthsShort:"జన._ఫిబ్ర._మార్చి_ఏప్రి._మే_జూన్_జూలై_ఆగ._సెప్._అక్టో._నవ._డిసె.".split("_"),weekdays:"ఆదివారం_సోమవారం_మంగళవారం_బుధవారం_గురువారం_శుక్రవారం_శనివారం".split("_"),weekdaysShort:"ఆది_సోమ_మంగళ_బుధ_గురు_శుక్ర_శని".split("_"),weekdaysMin:"ఆ_సో_మం_బు_గు_శు_శ".split("_"),longDateFormat:{LT:"A h:mm",LTS:"A h:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY, A h:mm",LLLL:"dddd, D MMMM YYYY, A h:mm"},calendar:{sameDay:"[నేడు] LT",nextDay:"[రేపు] LT",nextWeek:"dddd, LT",lastDay:"[నిన్న] LT",lastWeek:"[గత] dddd, LT",sameElse:"L"},relativeTime:{future:"%s లో",past:"%s క్రితం",s:"కొన్ని క్షణాలు",m:"ఒక నిమిషం",mm:"%d నిమిషాలు",h:"ఒక గంట",hh:"%d గంటలు",d:"ఒక రోజు",dd:"%d రోజులు",M:"ఒక నెల",MM:"%d నెలలు",y:"ఒక సంవత్సరం",yy:"%d సంవత్సరాలు"},ordinalParse:/\d{1,2}వ/,ordinal:"%dవ",meridiemParse:/రాత్రి|ఉదయం|మధ్యాహ్నం|సాయంత్రం/,meridiemHour:function(a,b){return 12===a&&(a=0),"రాత్రి"===b?4>a?a:a+12:"ఉదయం"===b?a:"మధ్యాహ్నం"===b?a>=10?a:a+12:"సాయంత్రం"===b?a+12:void 0},meridiem:function(a,b,c){return 4>a?"రాత్రి":10>a?"ఉదయం":17>a?"మధ్యాహ్నం":20>a?"సాయంత్రం":"రాత్రి"},week:{dow:0,doy:6}}),Kf.defineLocale("th",{months:"มกราคม_กุมภาพันธ์_มีนาคม_เมษายน_พฤษภาคม_มิถุนายน_กรกฎาคม_สิงหาคม_กันยายน_ตุลาคม_พฤศจิกายน_ธันวาคม".split("_"),monthsShort:"มกรา_กุมภา_มีนา_เมษา_พฤษภา_มิถุนา_กรกฎา_สิงหา_กันยา_ตุลา_พฤศจิกา_ธันวา".split("_"),weekdays:"อาทิตย์_จันทร์_อังคาร_พุธ_พฤหัสบดี_ศุกร์_เสาร์".split("_"),weekdaysShort:"อาทิตย์_จันทร์_อังคาร_พุธ_พฤหัส_ศุกร์_เสาร์".split("_"),weekdaysMin:"อา._จ._อ._พ._พฤ._ศ._ส.".split("_"),longDateFormat:{LT:"H นาฬิกา m นาที",LTS:"H นาฬิกา m นาที s วินาที",L:"YYYY/MM/DD",LL:"D MMMM YYYY",LLL:"D MMMM YYYY เวลา H นาฬิกา m นาที",LLLL:"วันddddที่ D MMMM YYYY เวลา H นาฬิกา m 
นาที"},meridiemParse:/ก่อนเที่ยง|หลังเที่ยง/,isPM:function(a){return"หลังเที่ยง"===a},meridiem:function(a,b,c){return 12>a?"ก่อนเที่ยง":"หลังเที่ยง"},calendar:{sameDay:"[วันนี้ เวลา] LT",nextDay:"[พรุ่งนี้ เวลา] LT",nextWeek:"dddd[หน้า เวลา] LT",lastDay:"[เมื่อวานนี้ เวลา] LT",lastWeek:"[วัน]dddd[ที่แล้ว เวลา] LT",sameElse:"L"},relativeTime:{future:"อีก %s",past:"%sที่แล้ว",s:"ไม่กี่วินาที",m:"1 นาที",mm:"%d นาที",h:"1 ชั่วโมง",hh:"%d ชั่วโมง",d:"1 วัน",dd:"%d วัน",M:"1 เดือน",MM:"%d เดือน",y:"1 ปี",yy:"%d ปี"}}),Kf.defineLocale("tl-ph",{months:"Enero_Pebrero_Marso_Abril_Mayo_Hunyo_Hulyo_Agosto_Setyembre_Oktubre_Nobyembre_Disyembre".split("_"),monthsShort:"Ene_Peb_Mar_Abr_May_Hun_Hul_Ago_Set_Okt_Nob_Dis".split("_"),weekdays:"Linggo_Lunes_Martes_Miyerkules_Huwebes_Biyernes_Sabado".split("_"),weekdaysShort:"Lin_Lun_Mar_Miy_Huw_Biy_Sab".split("_"),weekdaysMin:"Li_Lu_Ma_Mi_Hu_Bi_Sab".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"MM/D/YYYY",LL:"MMMM D, YYYY",LLL:"MMMM D, YYYY HH:mm",LLLL:"dddd, MMMM DD, YYYY HH:mm"},calendar:{sameDay:"[Ngayon sa] LT",nextDay:"[Bukas sa] LT",nextWeek:"dddd [sa] LT",lastDay:"[Kahapon sa] LT",lastWeek:"dddd [huling linggo] LT",sameElse:"L"},relativeTime:{future:"sa loob ng %s",past:"%s ang nakalipas",s:"ilang segundo",m:"isang minuto",mm:"%d minuto",h:"isang oras",hh:"%d oras",d:"isang araw",dd:"%d araw",M:"isang buwan",MM:"%d buwan",y:"isang taon",yy:"%d taon"},ordinalParse:/\d{1,2}/,ordinal:function(a){return a},week:{dow:1,doy:4}}),"pagh_wa’_cha’_wej_loS_vagh_jav_Soch_chorgh_Hut".split("_")),Mg=(Kf.defineLocale("tlh",{months:"tera’ jar wa’_tera’ jar cha’_tera’ jar wej_tera’ jar loS_tera’ jar vagh_tera’ jar jav_tera’ jar Soch_tera’ jar chorgh_tera’ jar Hut_tera’ jar wa’maH_tera’ jar wa’maH wa’_tera’ jar wa’maH cha’".split("_"),monthsShort:"jar wa’_jar cha’_jar wej_jar loS_jar vagh_jar jav_jar Soch_jar chorgh_jar Hut_jar wa’maH_jar wa’maH wa’_jar wa’maH 
cha’".split("_"),weekdays:"lojmItjaj_DaSjaj_povjaj_ghItlhjaj_loghjaj_buqjaj_ghInjaj".split("_"),weekdaysShort:"lojmItjaj_DaSjaj_povjaj_ghItlhjaj_loghjaj_buqjaj_ghInjaj".split("_"),weekdaysMin:"lojmItjaj_DaSjaj_povjaj_ghItlhjaj_loghjaj_buqjaj_ghInjaj".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[DaHjaj] LT",nextDay:"[wa’leS] LT",nextWeek:"LLL",lastDay:"[wa’Hu’] LT",lastWeek:"LLL",sameElse:"L"},relativeTime:{future:Id,past:Jd,s:"puS lup",m:"wa’ tup",mm:Kd,h:"wa’ rep",hh:Kd,d:"wa’ jaj",dd:Kd,M:"wa’ jar",MM:Kd,y:"wa’ DIS",yy:Kd},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),{1:"'inci",5:"'inci",8:"'inci",70:"'inci",80:"'inci",2:"'nci",7:"'nci",20:"'nci",50:"'nci",3:"'üncü",4:"'üncü",100:"'üncü",6:"'ncı",9:"'uncu",10:"'uncu",30:"'uncu",60:"'ıncı",90:"'ıncı"}),Ng=(Kf.defineLocale("tr",{months:"Ocak_Şubat_Mart_Nisan_Mayıs_Haziran_Temmuz_Ağustos_Eylül_Ekim_Kasım_Aralık".split("_"),monthsShort:"Oca_Şub_Mar_Nis_May_Haz_Tem_Ağu_Eyl_Eki_Kas_Ara".split("_"),weekdays:"Pazar_Pazartesi_Salı_Çarşamba_Perşembe_Cuma_Cumartesi".split("_"),weekdaysShort:"Paz_Pts_Sal_Çar_Per_Cum_Cts".split("_"),weekdaysMin:"Pz_Pt_Sa_Ça_Pe_Cu_Ct".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd, D MMMM YYYY HH:mm"},calendar:{sameDay:"[bugün saat] LT",nextDay:"[yarın saat] LT",nextWeek:"[haftaya] dddd [saat] LT",lastDay:"[dün] LT",lastWeek:"[geçen hafta] dddd [saat] LT",sameElse:"L"},relativeTime:{future:"%s sonra",past:"%s önce",s:"birkaç saniye",m:"bir dakika",mm:"%d dakika",h:"bir saat",hh:"%d saat",d:"bir gün",dd:"%d gün",M:"bir ay",MM:"%d ay",y:"bir yıl",yy:"%d yıl"},ordinalParse:/\d{1,2}'(inci|nci|üncü|ncı|uncu|ıncı)/,ordinal:function(a){if(0===a)return a+"'ıncı";var b=a%10,c=a%100-b,d=a>=100?100:null;return 
a+(Mg[b]||Mg[c]||Mg[d])},week:{dow:1,doy:7}}),Kf.defineLocale("tzl",{months:"Januar_Fevraglh_Març_Avrïu_Mai_Gün_Julia_Guscht_Setemvar_Listopäts_Noemvar_Zecemvar".split("_"),monthsShort:"Jan_Fev_Mar_Avr_Mai_Gün_Jul_Gus_Set_Lis_Noe_Zec".split("_"),weekdays:"Súladi_Lúneçi_Maitzi_Márcuri_Xhúadi_Viénerçi_Sáturi".split("_"),weekdaysShort:"Súl_Lún_Mai_Már_Xhú_Vié_Sát".split("_"),weekdaysMin:"Sú_Lú_Ma_Má_Xh_Vi_Sá".split("_"),longDateFormat:{LT:"HH.mm",LTS:"HH.mm.ss",L:"DD.MM.YYYY",LL:"D. MMMM [dallas] YYYY",LLL:"D. MMMM [dallas] YYYY HH.mm",LLLL:"dddd, [li] D. MMMM [dallas] YYYY HH.mm"},meridiem:function(a,b,c){return a>11?c?"d'o":"D'O":c?"d'a":"D'A"},calendar:{sameDay:"[oxhi à] LT",nextDay:"[demà à] LT",nextWeek:"dddd [à] LT",lastDay:"[ieiri à] LT",lastWeek:"[sür el] dddd [lasteu à] LT",sameElse:"L"},relativeTime:{future:"osprei %s",past:"ja%s",s:Md,m:Md,mm:Md,h:Md,hh:Md,d:Md,dd:Md,M:Md,MM:Md,y:Md,yy:Md},ordinalParse:/\d{1,2}\./,ordinal:"%d.",week:{dow:1,doy:4}}),Kf.defineLocale("tzm-latn",{months:"innayr_brˤayrˤ_marˤsˤ_ibrir_mayyw_ywnyw_ywlywz_ɣwšt_šwtanbir_ktˤwbrˤ_nwwanbir_dwjnbir".split("_"),monthsShort:"innayr_brˤayrˤ_marˤsˤ_ibrir_mayyw_ywnyw_ywlywz_ɣwšt_šwtanbir_ktˤwbrˤ_nwwanbir_dwjnbir".split("_"),weekdays:"asamas_aynas_asinas_akras_akwas_asimwas_asiḍyas".split("_"),weekdaysShort:"asamas_aynas_asinas_akras_akwas_asimwas_asiḍyas".split("_"),weekdaysMin:"asamas_aynas_asinas_akras_akwas_asimwas_asiḍyas".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[asdkh g] LT",nextDay:"[aska g] LT",nextWeek:"dddd [g] LT",lastDay:"[assant g] LT",lastWeek:"dddd [g] LT",sameElse:"L"},relativeTime:{future:"dadkh s yan %s",past:"yan %s",s:"imik",m:"minuḍ",mm:"%d minuḍ",h:"saɛa",hh:"%d tassaɛin",d:"ass",dd:"%d ossan",M:"ayowr",MM:"%d iyyirn",y:"asgas",yy:"%d 
isgasn"},week:{dow:6,doy:12}}),Kf.defineLocale("tzm",{months:"ⵉⵏⵏⴰⵢⵔ_ⴱⵕⴰⵢⵕ_ⵎⴰⵕⵚ_ⵉⴱⵔⵉⵔ_ⵎⴰⵢⵢⵓ_ⵢⵓⵏⵢⵓ_ⵢⵓⵍⵢⵓⵣ_ⵖⵓⵛⵜ_ⵛⵓⵜⴰⵏⴱⵉⵔ_ⴽⵟⵓⴱⵕ_ⵏⵓⵡⴰⵏⴱⵉⵔ_ⴷⵓⵊⵏⴱⵉⵔ".split("_"),monthsShort:"ⵉⵏⵏⴰⵢⵔ_ⴱⵕⴰⵢⵕ_ⵎⴰⵕⵚ_ⵉⴱⵔⵉⵔ_ⵎⴰⵢⵢⵓ_ⵢⵓⵏⵢⵓ_ⵢⵓⵍⵢⵓⵣ_ⵖⵓⵛⵜ_ⵛⵓⵜⴰⵏⴱⵉⵔ_ⴽⵟⵓⴱⵕ_ⵏⵓⵡⴰⵏⴱⵉⵔ_ⴷⵓⵊⵏⴱⵉⵔ".split("_"),weekdays:"ⴰⵙⴰⵎⴰⵙ_ⴰⵢⵏⴰⵙ_ⴰⵙⵉⵏⴰⵙ_ⴰⴽⵔⴰⵙ_ⴰⴽⵡⴰⵙ_ⴰⵙⵉⵎⵡⴰⵙ_ⴰⵙⵉⴹⵢⴰⵙ".split("_"),weekdaysShort:"ⴰⵙⴰⵎⴰⵙ_ⴰⵢⵏⴰⵙ_ⴰⵙⵉⵏⴰⵙ_ⴰⴽⵔⴰⵙ_ⴰⴽⵡⴰⵙ_ⴰⵙⵉⵎⵡⴰⵙ_ⴰⵙⵉⴹⵢⴰⵙ".split("_"),weekdaysMin:"ⴰⵙⴰⵎⴰⵙ_ⴰⵢⵏⴰⵙ_ⴰⵙⵉⵏⴰⵙ_ⴰⴽⵔⴰⵙ_ⴰⴽⵡⴰⵙ_ⴰⵙⵉⵎⵡⴰⵙ_ⴰⵙⵉⴹⵢⴰⵙ".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"dddd D MMMM YYYY HH:mm"},calendar:{sameDay:"[ⴰⵙⴷⵅ ⴴ] LT",nextDay:"[ⴰⵙⴽⴰ ⴴ] LT",nextWeek:"dddd [ⴴ] LT",lastDay:"[ⴰⵚⴰⵏⵜ ⴴ] LT",lastWeek:"dddd [ⴴ] LT",sameElse:"L"},relativeTime:{future:"ⴷⴰⴷⵅ ⵙ ⵢⴰⵏ %s",past:"ⵢⴰⵏ %s",s:"ⵉⵎⵉⴽ",m:"ⵎⵉⵏⵓⴺ",mm:"%d ⵎⵉⵏⵓⴺ",h:"ⵙⴰⵄⴰ",hh:"%d ⵜⴰⵙⵙⴰⵄⵉⵏ",d:"ⴰⵙⵙ",dd:"%d oⵙⵙⴰⵏ",M:"ⴰⵢoⵓⵔ",MM:"%d ⵉⵢⵢⵉⵔⵏ",y:"ⴰⵙⴳⴰⵙ",yy:"%d ⵉⵙⴳⴰⵙⵏ"},week:{dow:6,doy:12}}),Kf.defineLocale("uk",{months:{format:"січня_лютого_березня_квітня_травня_червня_липня_серпня_вересня_жовтня_листопада_грудня".split("_"),standalone:"січень_лютий_березень_квітень_травень_червень_липень_серпень_вересень_жовтень_листопад_грудень".split("_")},monthsShort:"січ_лют_бер_квіт_трав_черв_лип_серп_вер_жовт_лист_груд".split("_"),weekdays:Pd,weekdaysShort:"нд_пн_вт_ср_чт_пт_сб".split("_"),weekdaysMin:"нд_пн_вт_ср_чт_пт_сб".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD.MM.YYYY",LL:"D MMMM YYYY р.",LLL:"D MMMM YYYY р., HH:mm",LLLL:"dddd, D MMMM YYYY р., HH:mm"},calendar:{sameDay:Qd("[Сьогодні "),nextDay:Qd("[Завтра "),lastDay:Qd("[Вчора "),nextWeek:Qd("[У] dddd ["),lastWeek:function(){switch(this.day()){case 0:case 3:case 5:case 6:return Qd("[Минулої] dddd [").call(this);case 1:case 2:case 4:return Qd("[Минулого] dddd [").call(this)}},sameElse:"L"},relativeTime:{future:"за %s",past:"%s тому",s:"декілька 
секунд",m:Od,mm:Od,h:"годину",hh:Od,d:"день",dd:Od,M:"місяць",MM:Od,y:"рік",yy:Od},meridiemParse:/ночі|ранку|дня|вечора/,isPM:function(a){return/^(дня|вечора)$/.test(a)},meridiem:function(a,b,c){return 4>a?"ночі":12>a?"ранку":17>a?"дня":"вечора"},ordinalParse:/\d{1,2}-(й|го)/,ordinal:function(a,b){switch(b){case"M":case"d":case"DDD":case"w":case"W":return a+"-й";case"D":return a+"-го";default:return a}},week:{dow:1,doy:7}}),Kf.defineLocale("uz",{months:"январ_феврал_март_апрел_май_июн_июл_август_сентябр_октябр_ноябр_декабр".split("_"),monthsShort:"янв_фев_мар_апр_май_июн_июл_авг_сен_окт_ноя_дек".split("_"),weekdays:"Якшанба_Душанба_Сешанба_Чоршанба_Пайшанба_Жума_Шанба".split("_"),weekdaysShort:"Якш_Душ_Сеш_Чор_Пай_Жум_Шан".split("_"),weekdaysMin:"Як_Ду_Се_Чо_Па_Жу_Ша".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM YYYY",LLL:"D MMMM YYYY HH:mm",LLLL:"D MMMM YYYY, dddd HH:mm"},calendar:{sameDay:"[Бугун соат] LT [да]",nextDay:"[Эртага] LT [да]",nextWeek:"dddd [куни соат] LT [да]",lastDay:"[Кеча соат] LT [да]",lastWeek:"[Утган] dddd [куни соат] LT [да]",sameElse:"L"},relativeTime:{future:"Якин %s ичида",past:"Бир неча %s олдин",s:"фурсат",m:"бир дакика",mm:"%d дакика",h:"бир соат",hh:"%d соат",d:"бир кун",dd:"%d кун",M:"бир ой",MM:"%d ой",y:"бир йил",yy:"%d йил"},week:{dow:1,doy:7}}),Kf.defineLocale("vi",{months:"tháng 1_tháng 2_tháng 3_tháng 4_tháng 5_tháng 6_tháng 7_tháng 8_tháng 9_tháng 10_tháng 11_tháng 12".split("_"),monthsShort:"Th01_Th02_Th03_Th04_Th05_Th06_Th07_Th08_Th09_Th10_Th11_Th12".split("_"),weekdays:"chủ nhật_thứ hai_thứ ba_thứ tư_thứ năm_thứ sáu_thứ bảy".split("_"),weekdaysShort:"CN_T2_T3_T4_T5_T6_T7".split("_"),weekdaysMin:"CN_T2_T3_T4_T5_T6_T7".split("_"),longDateFormat:{LT:"HH:mm",LTS:"HH:mm:ss",L:"DD/MM/YYYY",LL:"D MMMM [năm] YYYY",LLL:"D MMMM [năm] YYYY HH:mm",LLLL:"dddd, D MMMM [năm] YYYY HH:mm",l:"DD/M/YYYY",ll:"D MMM YYYY",lll:"D MMM YYYY HH:mm",llll:"ddd, D MMM YYYY HH:mm"},calendar:{sameDay:"[Hôm nay lúc] 
LT",nextDay:"[Ngày mai lúc] LT",nextWeek:"dddd [tuần tới lúc] LT",lastDay:"[Hôm qua lúc] LT",lastWeek:"dddd [tuần rồi lúc] LT",sameElse:"L"},relativeTime:{future:"%s tới",past:"%s trước",s:"vài giây",m:"một phút",mm:"%d phút",h:"một giờ",hh:"%d giờ",d:"một ngày",dd:"%d ngày",M:"một tháng",MM:"%d tháng",y:"một năm",yy:"%d năm"},ordinalParse:/\d{1,2}/,ordinal:function(a){return a},week:{dow:1,doy:4}}),Kf.defineLocale("zh-cn",{months:"一月_二月_三月_四月_五月_六月_七月_八月_九月_十月_十一月_十二月".split("_"),monthsShort:"1月_2月_3月_4月_5月_6月_7月_8月_9月_10月_11月_12月".split("_"),weekdays:"星期日_星期一_星期二_星期三_星期四_星期五_星期六".split("_"),weekdaysShort:"周日_周一_周二_周三_周四_周五_周六".split("_"),weekdaysMin:"日_一_二_三_四_五_六".split("_"),longDateFormat:{LT:"Ah点mm分",LTS:"Ah点m分s秒",L:"YYYY-MM-DD",LL:"YYYY年MMMD日",LLL:"YYYY年MMMD日Ah点mm分",LLLL:"YYYY年MMMD日ddddAh点mm分",l:"YYYY-MM-DD",ll:"YYYY年MMMD日",lll:"YYYY年MMMD日Ah点mm分",llll:"YYYY年MMMD日ddddAh点mm分"},meridiemParse:/凌晨|早上|上午|中午|下午|晚上/,meridiemHour:function(a,b){return 12===a&&(a=0),"凌晨"===b||"早上"===b||"上午"===b?a:"下午"===b||"晚上"===b?a+12:a>=11?a:a+12},meridiem:function(a,b,c){var d=100*a+b;return 600>d?"凌晨":900>d?"早上":1130>d?"上午":1230>d?"中午":1800>d?"下午":"晚上"},calendar:{sameDay:function(){return 0===this.minutes()?"[今天]Ah[点整]":"[今天]LT"},nextDay:function(){return 0===this.minutes()?"[明天]Ah[点整]":"[明天]LT"},lastDay:function(){return 0===this.minutes()?"[昨天]Ah[点整]":"[昨天]LT"},nextWeek:function(){var a,b;return a=Kf().startOf("week"),b=this.unix()-a.unix()>=604800?"[下]":"[本]",0===this.minutes()?b+"dddAh点整":b+"dddAh点mm"},lastWeek:function(){var a,b;return a=Kf().startOf("week"),b=this.unix()=11?a:a+12:"下午"===b||"晚上"===b?a+12:void 0},meridiem:function(a,b,c){var d=100*a+b;return 900>d?"早上":1130>d?"上午":1230>d?"中午":1800>d?"下午":"晚上"},calendar:{sameDay:"[今天]LT",nextDay:"[明天]LT",nextWeek:"[下]ddddLT",lastDay:"[昨天]LT",lastWeek:"[上]ddddLT",sameElse:"L"},ordinalParse:/\d{1,2}(日|月|週)/,ordinal:function(a,b){switch(b){case"d":case"D":case"DDD":return a+"日";case"M":return a+"月";case"w":case"W":return 
a+"週";default:return a}},relativeTime:{future:"%s內",past:"%s前",s:"幾秒",m:"一分鐘",mm:"%d分鐘",h:"一小時",hh:"%d小時",d:"一天",dd:"%d天",M:"一個月",MM:"%d個月",y:"一年",yy:"%d年"}}),Kf);return Ng.locale("en"),Ng}); \ No newline at end of file diff --git a/static/vendors/spinner.min.js b/static/vendors/spinner.min.js new file mode 100644 index 000000000..bd3ae4f4e --- /dev/null +++ b/static/vendors/spinner.min.js @@ -0,0 +1,2 @@ +// http://spin.js.org/#v2.3.2 +!function(a,b){"object"==typeof module&&module.exports?module.exports=b():"function"==typeof define&&define.amd?define(b):a.Spinner=b()}(this,function(){"use strict";function a(a,b){var c,d=document.createElement(a||"div");for(c in b)d[c]=b[c];return d}function b(a){for(var b=1,c=arguments.length;c>b;b++)a.appendChild(arguments[b]);return a}function c(a,b,c,d){var e=["opacity",b,~~(100*a),c,d].join("-"),f=.01+c/d*100,g=Math.max(1-(1-a)/b*(100-f),a),h=j.substring(0,j.indexOf("Animation")).toLowerCase(),i=h&&"-"+h+"-"||"";return m[e]||(k.insertRule("@"+i+"keyframes "+e+"{0%{opacity:"+g+"}"+f+"%{opacity:"+a+"}"+(f+.01)+"%{opacity:1}"+(f+b)%100+"%{opacity:"+a+"}100%{opacity:"+g+"}}",k.cssRules.length),m[e]=1),e}function d(a,b){var c,d,e=a.style;if(b=b.charAt(0).toUpperCase()+b.slice(1),void 0!==e[b])return b;for(d=0;d',c)}k.addRule(".spin-vml","behavior:url(#default#VML)"),h.prototype.lines=function(a,d){function f(){return e(c("group",{coordsize:k+" "+k,coordorigin:-j+" "+-j}),{width:k,height:k})}function h(a,h,i){b(m,b(e(f(),{rotation:360/d.lines*a+"deg",left:~~h}),b(e(c("roundrect",{arcsize:d.corners}),{width:j,height:d.scale*d.width,left:d.scale*d.radius,top:-d.scale*d.width>>1,filter:i}),c("fill",{color:g(d.color,a),opacity:d.opacity}),c("stroke",{opacity:0}))))}var 
i,j=d.scale*(d.length+d.width),k=2*d.scale*j,l=-(d.width+d.length)*d.scale*2+"px",m=e(f(),{position:"absolute",top:l,left:l});if(d.shadow)for(i=1;i<=d.lines;i++)h(i,-2,"progid:DXImageTransform.Microsoft.Blur(pixelradius=2,makeshadow=1,shadowopacity=.3)");for(i=1;i<=d.lines;i++)h(i);return b(a,m)},h.prototype.opacity=function(a,b,c,d){var e=a.firstChild;d=d.shadow&&d.lines||0,e&&b+d>1)+"px"})}for(var i,k=0,l=(f.lines-1)*(1-f.direction)/2;k +} diff --git a/tests/utils_test.go b/tests/utils_test.go new file mode 100644 index 000000000..22109561a --- /dev/null +++ b/tests/utils_test.go @@ -0,0 +1,15 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package test diff --git a/utils/encrypt.go b/utils/encrypt.go new file mode 100644 index 000000000..f97dc53db --- /dev/null +++ b/utils/encrypt.go @@ -0,0 +1,26 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package utils + +import ( + "crypto/sha1" + "fmt" + + "golang.org/x/crypto/pbkdf2" +) + +func Encrypt(content string, salt string) string { + return fmt.Sprintf("%x", pbkdf2.Key([]byte(content), []byte(salt), 4096, 16, sha1.New)) +} diff --git a/utils/mail.go b/utils/mail.go new file mode 100644 index 000000000..8fc0c02f0 --- /dev/null +++ b/utils/mail.go @@ -0,0 +1,74 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package utils + +import ( + "bytes" + + "net/smtp" + "text/template" + + "github.com/astaxie/beego" +) + +type Mail struct { + From string + To []string + Subject string + Message string +} +type MailConfig struct { + Identity string + Host string + Port string + Username string + Password string +} + +var mc MailConfig + +func (m Mail) SendMail() error { + + if mc.Host == "" { + loadConfig() + } + mailTemplate, err := template.ParseFiles("views/mail.tpl") + if err != nil { + return err + } + mailContent := new(bytes.Buffer) + err = mailTemplate.Execute(mailContent, m) + if err != nil { + return err + } + return smtp. 
+ SendMail(mc.Host+":"+mc.Port, + smtp.PlainAuth(mc.Identity, mc.Username, mc.Password, mc.Host), + m.From, m.To, mailContent.Bytes()) +} + +func loadConfig() { + config, err := beego.AppConfig.GetSection("mail") + if err != nil { + panic(err) + } + mc = MailConfig{ + Identity: "Mail Config", + Host: config["host"], + Port: config["port"], + Username: config["username"], + Password: config["password"], + } +} diff --git a/utils/registry_utils.go b/utils/registry_utils.go new file mode 100644 index 000000000..22e1fe455 --- /dev/null +++ b/utils/registry_utils.go @@ -0,0 +1,145 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ +package utils + +import ( + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + + "github.com/astaxie/beego" +) + +const SESSION_COOKIE = "beegosessionID" + +func BuildRegistryUrl(segments ...string) string { + registryURL := os.Getenv("REGISTRY_URL") + if registryURL == "" { + registryURL = "http://localhost:5000" + } + url := registryURL + "/v2" + for _, s := range segments { + if s == "v2" { + beego.Error("Unnecessary v2 in", segments) + continue + } + url += "/" + s + } + return url +} + +func HttpGet(url, sessionId, username, password string) ([]byte, error) { + response, err := http.Get(url) + if err != nil { + return nil, err + } + result, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, err + } + defer response.Body.Close() + if response.StatusCode == 200 { + return result, nil + } else if response.StatusCode == 401 { + authenticate := response.Header.Get("WWW-Authenticate") + str := strings.Split(authenticate, " ")[1] + beego.Trace("url: " + url) + beego.Trace("Authentication Header: " + str) + var realm string + var service string + var scope string + strs := strings.Split(str, ",") + for _, s := range strs { + if strings.Contains(s, "realm") { + realm = s + } else if strings.Contains(s, "service") { + service = s + } else if strings.Contains(s, "scope") { + strings.HasSuffix(url, "v2/_catalog") + scope = s + } + } + realm = strings.Split(realm, "\"")[1] + service = strings.Split(service, "\"")[1] + scope = strings.Split(scope, "\"")[1] + + authUrl := realm + "?service=" + service + "&scope=" + scope + //skip certificate check if token service is https. 
+ tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + client := &http.Client{Transport: tr} + request, err := http.NewRequest("GET", authUrl, nil) + if err != nil { + return nil, err + } + if len(sessionId) > 0 { + cookie := &http.Cookie{Name: SESSION_COOKIE, Value: sessionId, Path: "/"} + request.AddCookie(cookie) + } else { + request.SetBasicAuth(username, password) + } + response, err = client.Do(request) + if err != nil { + return nil, err + } + result, err = ioutil.ReadAll(response.Body) + + defer response.Body.Close() + if err != nil { + return nil, err + } + if response.StatusCode == 200 { + tt := make(map[string]string) + json.Unmarshal(result, &tt) + request, err = http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + request.Header.Add("Authorization", "Bearer "+tt["token"]) + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return fmt.Errorf("too many redirects") + } + for k, v := range via[0].Header { + if _, ok := req.Header[k]; !ok { + req.Header[k] = v + } + } + return nil + } + response, err = client.Do(request) + if err != nil { + return nil, err + } + result, err = ioutil.ReadAll(response.Body) + if err != nil { + return nil, err + } + defer response.Body.Close() + + return result, nil + } else { + return nil, errors.New(string(result)) + } + } else { + return nil, errors.New(string(result)) + } +} diff --git a/utils/utils.go b/utils/utils.go new file mode 100644 index 000000000..96260311b --- /dev/null +++ b/utils/utils.go @@ -0,0 +1,62 @@ +/* + Copyright (c) 2016 VMware, Inc. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ +package utils + +import ( + "encoding/base64" + "strings" + + "github.com/vmware/harbor/models" + + "github.com/astaxie/beego" +) + +type Repository struct { + Name string +} + +func ParseBasicAuth(authorization []string) (username, password string) { + if authorization == nil || len(authorization) == 0 { + beego.Debug("Authorization header is not set.") + return "", "" + } + auth := strings.SplitN(authorization[0], " ", 2) + payload, _ := base64.StdEncoding.DecodeString(auth[1]) + pair := strings.SplitN(string(payload), ":", 2) + return pair[0], pair[1] +} + +func (r *Repository) GetProject() string { + if !strings.ContainsRune(r.Name, '/') { + return "" + } + return r.Name[0:strings.LastIndex(r.Name, "/")] +} + +type ProjectSorter struct { + Projects []models.Project +} + +func (ps *ProjectSorter) Len() int { + return len(ps.Projects) +} + +func (ps *ProjectSorter) Less(i, j int) bool { + return ps.Projects[i].Name < ps.Projects[j].Name +} + +func (ps *ProjectSorter) Swap(i, j int) { + ps.Projects[i], ps.Projects[j] = ps.Projects[j], ps.Projects[i] +} diff --git a/views/change-password.tpl b/views/change-password.tpl new file mode 100644 index 000000000..cdcbdb9a5 --- /dev/null +++ b/views/change-password.tpl @@ -0,0 +1,50 @@ + +
+
+
+ +
+ +
+ + + +
+
+ + + +
{{i18n .Lang "password_description"}}
+
+
+ + + +
{{i18n .Lang "password_description"}}
+
+
+
+ +
+
+ +
+
+
+ + \ No newline at end of file diff --git a/views/forgot-password.tpl b/views/forgot-password.tpl new file mode 100644 index 000000000..64f8656b7 --- /dev/null +++ b/views/forgot-password.tpl @@ -0,0 +1,40 @@ + +
+
+
+ +
+
+ +
+ + + +
{{i18n .Lang "forgot_password_description"}}
+
+
+
+ +
+
+ +
+
+
+ + \ No newline at end of file diff --git a/views/index.tpl b/views/index.tpl new file mode 100644 index 000000000..51a2651d5 --- /dev/null +++ b/views/index.tpl @@ -0,0 +1,37 @@ + + +
+
+

 

+

Harbor

+

 

+
+
+ +
+ +
+
+

{{i18n .Lang "index_desc"}}

+

{{i18n .Lang "index_desc_0"}}

+

{{i18n .Lang "index_desc_1"}}

+

{{i18n .Lang "index_desc_2"}}

+

{{i18n .Lang "index_desc_3"}}

+

{{i18n .Lang "index_desc_4"}}

+
+
+
+ \ No newline at end of file diff --git a/views/item-detail.tpl b/views/item-detail.tpl new file mode 100644 index 000000000..53c4085d2 --- /dev/null +++ b/views/item-detail.tpl @@ -0,0 +1,223 @@ + +
+ + +
+
+ +
+ + + + + + + + + +
+
+
+
+ +
+
{{i18n .Lang "repo_name"}}:
+ + + + +
+
+ +

+

+ +
+
+
+
+
+
+
+
+ +
+
{{i18n .Lang "username"}}:
+ + + + +
+
+
+ + +

+

+
").addClass("cw").text("#"));c.isBefore(f.clone().endOf("w"));)b.append(a("").addClass("dow").text(c.format("dd"))),c.add(1,"d");o.find(".datepicker-days thead").append(b)},M=function(a){return d.disabledDates[a.format("YYYY-MM-DD")]===!0},N=function(a){return d.enabledDates[a.format("YYYY-MM-DD")]===!0},O=function(a){return d.disabledHours[a.format("H")]===!0},P=function(a){return d.enabledHours[a.format("H")]===!0},Q=function(b,c){if(!b.isValid())return!1;if(d.disabledDates&&"d"===c&&M(b))return!1;if(d.enabledDates&&"d"===c&&!N(b))return!1;if(d.minDate&&b.isBefore(d.minDate,c))return!1;if(d.maxDate&&b.isAfter(d.maxDate,c))return!1;if(d.daysOfWeekDisabled&&"d"===c&&-1!==d.daysOfWeekDisabled.indexOf(b.day()))return!1;if(d.disabledHours&&("h"===c||"m"===c||"s"===c)&&O(b))return!1;if(d.enabledHours&&("h"===c||"m"===c||"s"===c)&&!P(b))return!1;if(d.disabledTimeIntervals&&("h"===c||"m"===c||"s"===c)){var e=!1;if(a.each(d.disabledTimeIntervals,function(){return b.isBetween(this[0],this[1])?(e=!0,!1):void 0}),e)return!1}return!0},R=function(){for(var b=[],c=f.clone().startOf("y").startOf("d");c.isSame(f,"y");)b.push(a("").attr("data-action","selectMonth").addClass("month").text(c.format("MMM"))),c.add(1,"M");o.find(".datepicker-months td").empty().append(b)},S=function(){var b=o.find(".datepicker-months"),c=b.find("th"),g=b.find("tbody").find("span");c.eq(0).find("span").attr("title",d.tooltips.prevYear),c.eq(1).attr("title",d.tooltips.selectYear),c.eq(2).find("span").attr("title",d.tooltips.nextYear),b.find(".disabled").removeClass("disabled"),Q(f.clone().subtract(1,"y"),"y")||c.eq(0).addClass("disabled"),c.eq(1).text(f.year()),Q(f.clone().add(1,"y"),"y")||c.eq(2).addClass("disabled"),g.removeClass("active"),e.isSame(f,"y")&&!m&&g.eq(e.month()).addClass("active"),g.each(function(b){Q(f.clone().month(b),"M")||a(this).addClass("disabled")})},T=function(){var 
a=o.find(".datepicker-years"),b=a.find("th"),c=f.clone().subtract(5,"y"),g=f.clone().add(6,"y"),h="";for(b.eq(0).find("span").attr("title",d.tooltips.prevDecade),b.eq(1).attr("title",d.tooltips.selectDecade),b.eq(2).find("span").attr("title",d.tooltips.nextDecade),a.find(".disabled").removeClass("disabled"),d.minDate&&d.minDate.isAfter(c,"y")&&b.eq(0).addClass("disabled"),b.eq(1).text(c.year()+"-"+g.year()),d.maxDate&&d.maxDate.isBefore(g,"y")&&b.eq(2).addClass("disabled");!c.isAfter(g,"y");)h+=''+c.year()+"",c.add(1,"y");a.find("td").html(h)},U=function(){var a=o.find(".datepicker-decades"),c=a.find("th"),g=b({y:f.year()-f.year()%100-1}),h=g.clone().add(100,"y"),i=g.clone(),j="";for(c.eq(0).find("span").attr("title",d.tooltips.prevCentury),c.eq(2).find("span").attr("title",d.tooltips.nextCentury),a.find(".disabled").removeClass("disabled"),(g.isSame(b({y:1900}))||d.minDate&&d.minDate.isAfter(g,"y"))&&c.eq(0).addClass("disabled"),c.eq(1).text(g.year()+"-"+h.year()),(g.isSame(b({y:2e3}))||d.maxDate&&d.maxDate.isBefore(h,"y"))&&c.eq(2).addClass("disabled");!g.isAfter(h,"y");)j+=''+(g.year()+1)+" - "+(g.year()+12)+"",g.add(12,"y");j+="",a.find("td").html(j),c.eq(1).text(i.year()+1+"-"+g.year())},V=function(){var b,c,g,h,i=o.find(".datepicker-days"),j=i.find("th"),k=[];if(A()){for(j.eq(0).find("span").attr("title",d.tooltips.prevMonth),j.eq(1).attr("title",d.tooltips.selectMonth),j.eq(2).find("span").attr("title",d.tooltips.nextMonth),i.find(".disabled").removeClass("disabled"),j.eq(1).text(f.format(d.dayViewHeaderFormat)),Q(f.clone().subtract(1,"M"),"M")||j.eq(0).addClass("disabled"),Q(f.clone().add(1,"M"),"M")||j.eq(2).addClass("disabled"),b=f.clone().startOf("M").startOf("w").startOf("d"),h=0;42>h;h++)0===b.weekday()&&(c=a("
'+b.week()+"'+b.date()+"
'+c.format(h?"HH":"hh")+"
'+c.format("mm")+"
'+c.format("ss")+"
+ + + + + + + + + +
{{i18n .Lang "username"}}{{i18n .Lang "role_info"}}{{i18n .Lang "operation"}}
+ + +
+
+
+ +
+
{{i18n .Lang "username"}}:
+ + + + +
+
+
+
+ +
+
+ +

+
+ +
+ +
+
{{i18n .Lang "operation"}}:
+ + {{i18n .Lang "all"}} + Create + Pull + Push + Delete + {{i18n .Lang "others"}}: + + +
+
+

+
+ +
+
{{i18n .Lang "start_date"}}:
+
+ + + + +
+
+ +
+
+
+
{{i18n .Lang "end_date"}}:
+
+ + + + +
+
+ +
+ +
+
+ + + + + + + + + + + +
{{i18n .Lang "username"}}{{i18n .Lang "repo_name"}}{{i18n .Lang "operation"}}{{i18n .Lang "timestamp"}}
+
+
+ + + + + + + \ No newline at end of file diff --git a/views/mail.tpl b/views/mail.tpl new file mode 100644 index 000000000..5db1d4293 --- /dev/null +++ b/views/mail.tpl @@ -0,0 +1,7 @@ +From: {{.From}} +To: {{.To}} +Subject: {{.Subject}} +MIME-version: 1.0; +Content-Type: text/html; charset="UTF-8" + +{{.Message}} \ No newline at end of file diff --git a/views/project.tpl b/views/project.tpl new file mode 100644 index 000000000..3808dfd3a --- /dev/null +++ b/views/project.tpl @@ -0,0 +1,115 @@ + +
+ +
+ + +
+
+
+ +
+
{{i18n .Lang "project_name"}}:
+ + + + +
+ +
+
+ + + + + + + + + + +
{{i18n .Lang "project_name"}}{{i18n .Lang "creation_time"}}{{i18n .Lang "publicity"}}
+
+
+ +
+
+
+ + + + diff --git a/views/register.tpl b/views/register.tpl new file mode 100644 index 000000000..88bd9615f --- /dev/null +++ b/views/register.tpl @@ -0,0 +1,73 @@ + +
+
+
+ +
+ +
+ +

*

+ + +
{{i18n .Lang "username_description"}}
+
+
+ +

*

+ + +
{{i18n .Lang "email_description"}}
+
+
+ +

*

+ + +
{{i18n .Lang "full_name_description"}}
+
+
+ +

*

+ + +
{{i18n .Lang "password_description"}}
+
+
+ +

*

+ + +
{{i18n .Lang "password_description"}}
+
+
+ + + +
+
+
+ +
+
+
+
+
+
+ + \ No newline at end of file diff --git a/views/reset-password-mail.tpl b/views/reset-password-mail.tpl new file mode 100644 index 000000000..35b9c6976 --- /dev/null +++ b/views/reset-password-mail.tpl @@ -0,0 +1,21 @@ + + + + +

{{.Hint}}:

+ {{.Url}}/resetPassword?q={{.Uuid}} + + \ No newline at end of file diff --git a/views/reset-password.tpl b/views/reset-password.tpl new file mode 100644 index 000000000..b29c5dc97 --- /dev/null +++ b/views/reset-password.tpl @@ -0,0 +1,46 @@ + +
+ +
+
+ +
+ +
+ + + +
{{i18n .Lang "password_description"}}
+
+
+ + + +
{{i18n .Lang "password_description"}}
+
+
+
+ +
+
+
+
+
+
+ + \ No newline at end of file diff --git a/views/search.tpl b/views/search.tpl new file mode 100644 index 000000000..23ca5c675 --- /dev/null +++ b/views/search.tpl @@ -0,0 +1,32 @@ + + +
+ +
+
{{i18n .Lang "projects"}}
+
+
+
+
+
{{i18n .Lang "repositories"}}
+
+
+
+
+ \ No newline at end of file diff --git a/views/segment/base-layout.tpl b/views/segment/base-layout.tpl new file mode 100644 index 000000000..f7365ef25 --- /dev/null +++ b/views/segment/base-layout.tpl @@ -0,0 +1,28 @@ + + + + + {{.HeaderInc}} + {{.PageTitle}} + + + {{.HeaderContent}} + {{.BodyContent}} + {{.FooterInc}} + {{.ModalDialog}} + {{.FootContent}} + + \ No newline at end of file diff --git a/views/segment/foot-content.tpl b/views/segment/foot-content.tpl new file mode 100644 index 000000000..2b4cc20c6 --- /dev/null +++ b/views/segment/foot-content.tpl @@ -0,0 +1,23 @@ + +
+
+
+
+

{{i18n .Lang "copyright"}} © 2015-2016 VMware, Inc. {{i18n .Lang "all_rights_reserved"}}

+
+
+
+
\ No newline at end of file diff --git a/views/segment/header-content.tpl b/views/segment/header-content.tpl new file mode 100644 index 000000000..6977d6e48 --- /dev/null +++ b/views/segment/header-content.tpl @@ -0,0 +1,75 @@ + + + \ No newline at end of file diff --git a/views/segment/header-include.tpl b/views/segment/header-include.tpl new file mode 100644 index 000000000..529d4e716 --- /dev/null +++ b/views/segment/header-include.tpl @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/views/segment/modal-dialog.tpl b/views/segment/modal-dialog.tpl new file mode 100644 index 000000000..95a220b69 --- /dev/null +++ b/views/segment/modal-dialog.tpl @@ -0,0 +1,38 @@ + + + + \ No newline at end of file diff --git a/views/sign-in.tpl b/views/sign-in.tpl new file mode 100644 index 000000000..8cb173d27 --- /dev/null +++ b/views/sign-in.tpl @@ -0,0 +1,40 @@ + +
+ +
+ + \ No newline at end of file