update go dep version (#15648)

To fix the Dependabot alerts

Signed-off-by: Wang Yan <wangyan@vmware.com>
Wang Yan 2021-09-23 17:37:38 +08:00 committed by GitHub
parent 90747caf87
commit 0a2db1405a
94 changed files with 7058 additions and 1510 deletions

View File

@@ -11,7 +11,7 @@ require (
     github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
     github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190726115642-cd293c93fd97
     github.com/astaxie/beego v1.12.1
-    github.com/aws/aws-sdk-go v1.32.5
+    github.com/aws/aws-sdk-go v1.34.28
     github.com/beego/i18n v0.0.0-20140604031826-e87155e8f0c0
     github.com/bmatcuk/doublestar v1.1.1
     github.com/bugsnag/bugsnag-go v1.5.2 // indirect
@@ -19,6 +19,7 @@ require (
     github.com/casbin/casbin v1.7.0
     github.com/cenkalti/backoff/v4 v4.1.1
     github.com/cloudflare/cfssl v0.0.0-20190510060611-9c027c93ba9e // indirect
+    github.com/containerd/containerd v1.4.8 // indirect
     github.com/coreos/go-oidc/v3 v3.0.0
     github.com/denverdino/aliyungo v0.0.0-20191227032621-df38c6fa730c // indirect
     github.com/dghubble/sling v1.1.0
@@ -57,6 +58,7 @@ require (
     github.com/olekukonko/tablewriter v0.0.4
     github.com/opencontainers/go-digest v1.0.0
     github.com/opencontainers/image-spec v1.0.1
+    github.com/opencontainers/runc v1.0.0-rc95 // indirect
     github.com/opentracing/opentracing-go v1.2.0 // indirect
     github.com/pkg/errors v0.9.1
     github.com/prometheus/client_golang v1.7.1
@@ -67,6 +69,7 @@ require (
     github.com/tencentcloud/tencentcloud-sdk-go v1.0.62
     github.com/theupdateframework/notary v0.6.1
     github.com/vmihailenco/msgpack/v5 v5.0.0-rc.2
+    go.mongodb.org/mongo-driver v1.5.1 // indirect
     go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.22.0
     go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.22.0
     go.opentelemetry.io/otel v1.0.0

View File

@@ -130,8 +130,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
 github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
 github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.32.5 h1:Sz0C7deIoMu5lFGTVkIN92IEZrUz1AWIDDW+9p6n1Rk=
+github.com/aws/aws-sdk-go v1.34.28 h1:sscPpn/Ns3i0F4HPEWAVcwdIRaZZCuL7llJ2/60yPIk=
-github.com/aws/aws-sdk-go v1.32.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
 github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd/go.mod h1:1b+Y/CofkYwXMUU0OhQqGvsY2Bvgr4j6jfT699wyZKQ=
@@ -175,10 +175,12 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
 github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cloudflare/cfssl v0.0.0-20190510060611-9c027c93ba9e h1:ZtyhUG4s94BMUCdgvRZySr/AXYL5CDcjxhIV/83xJog=
@@ -194,10 +196,12 @@ github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE
 github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59 h1:qWj4qVYZ95vLWwqyNJCQg7rDsG5wPdze0UaPolH7DUk=
 github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
 github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.4.4 h1:rtRG4N6Ct7GNssATwgpvMGfnjnwfjnu/Zs9W3Ikzq+M=
 github.com/containerd/containerd v1.4.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.8 h1:H0wkS4AbVKTg9vyvBdCBrxoax8AMObKbNz9Fl2N0i4Y=
+github.com/containerd/containerd v1.4.8/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7 h1:6ejg6Lkk8dskcM7wQ28gONkukbQkM4qpj4RnYbpFzrI=
 github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
@@ -219,6 +223,7 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.1/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/couchbase/go-couchbase v0.0.0-20181122212707-3e9b6e1258bb/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U=
@@ -314,6 +319,7 @@ github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTg
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
 github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -454,6 +460,7 @@ github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhD
 github.com/gocraft/work v0.5.1 h1:3bRjMiOo6N4zcRgZWV3Y7uX7R22SF+A9bPTk4xRXr34=
 github.com/gocraft/work v0.5.1/go.mod h1:pc3n9Pb5FAESPPGfM0nL+7Q1xtgtRnF8rr/azzhQVlM=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godror/godror v0.13.3/go.mod h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg=
 github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
 github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
@@ -516,6 +523,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -648,8 +656,10 @@ github.com/jinzhu/now v1.0.0/go.mod h1:oHTiXerJ20+SfYcrdlBO7rzZRJWGwSTQ0iUY2jI6G
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
-github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
 github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
 github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
@@ -682,6 +692,7 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
 github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
@@ -761,6 +772,7 @@ github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx
 github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
 github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
 github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
 github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -773,6 +785,7 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod
 github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
 github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
 github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
 github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
@@ -823,9 +836,12 @@ github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM
 github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
 github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc95 h1:RMuWVfY3E1ILlVsC3RhIq38n4sJtlOFwU9gfFZSqrd0=
+github.com/opencontainers/runc v1.0.0-rc95/go.mod h1:z+bZxa/+Tz/FmYVWkhUajJdzFeOqjc5vrqskhVyHGUM=
 github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
 github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
 github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -840,8 +856,9 @@ github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIw
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfSeg=
 github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
+github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
 github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
 github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
@@ -914,6 +931,7 @@ github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0
 github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
@@ -984,6 +1002,7 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/goleveldb v0.0.0-20181127023241-353a9fca669c/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
 github.com/tencentcloud/tencentcloud-sdk-go v1.0.62 h1:Vnr3IqaafEuQUciG6D6EaeLJm26Mg8sjAfbI4OoeauM=
 github.com/tencentcloud/tencentcloud-sdk-go v1.0.62/go.mod h1:asUz5BPXxgoPGaRgZaVm1iGcUAuHyYUo1nXqKa83cvI=
@@ -1000,12 +1019,18 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
 github.com/vmihailenco/msgpack/v5 v5.0.0-rc.2 h1:ognci8XPlosGhIHK1OLYSpSpnlhSFeBklfe18zIEwcU=
 github.com/vmihailenco/msgpack/v5 v5.0.0-rc.2/go.mod h1:HVxBVPUK/+fZMonk4bi1islLa8V3cfnBug0+4dykPzo=
 github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc=
 github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
 github.com/wendal/errors v0.0.0-20130201093226-f66c77a7882b/go.mod h1:Q12BUT7DqIlHRmgv3RskH+UCM/4eqVMgI0EMmlSpAXc=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
 github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
 github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
 github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
 github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
@@ -1019,6 +1044,7 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
 github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI=
 github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
@@ -1041,8 +1067,9 @@ go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qL
 go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
 go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
 go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
-go.mongodb.org/mongo-driver v1.3.4 h1:zs/dKNwX0gYUtzwrN9lLiR15hCO0nDwQj5xXx+vjCdE=
 go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
+go.mongodb.org/mongo-driver v1.5.1 h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI=
+go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw=
 go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
 go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -1195,6 +1222,7 @@ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/
 golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210902165921-8d991716f632 h1:900XJE4Rn/iPU+xD5ZznOe4GKKc4AdFK0IO1P6Z3/lQ=
@@ -1240,6 +1268,7 @@ golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1251,6 +1280,7 @@ golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1266,6 +1296,7 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1275,6 +1306,7 @@ golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0 h1:xrCZDmdtoloIiooiA9q0OQb9r8HejIHYoHGhGCe1pGg=
 golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View File

@@ -43,7 +43,7 @@ type Config struct {
     // An optional endpoint URL (hostname only or fully qualified URI)
     // that overrides the default generated endpoint for a client. Set this
-    // to `nil` to use the default generated endpoint.
+    // to `nil` or the value to `""` to use the default generated endpoint.
     //
     // Note: You must still provide a `Region` value when specifying an
     // endpoint for a client.
@@ -138,7 +138,7 @@ type Config struct {
     // `ExpectContinueTimeout` for information on adjusting the continue wait
     // timeout. https://golang.org/pkg/net/http/#Transport
     //
-    // You should use this flag to disble 100-Continue if you experience issues
+    // You should use this flag to disable 100-Continue if you experience issues
     // with proxies or third party S3 compatible services.
     S3Disable100Continue *bool
@@ -183,7 +183,7 @@ type Config struct {
     //
    // Example:
    //   sess := session.Must(session.NewSession(aws.NewConfig()
-   //      .WithEC2MetadataDiableTimeoutOverride(true)))
+   //      .WithEC2MetadataDisableTimeoutOverride(true)))
    //
    //   svc := s3.New(sess)
    //
@@ -194,7 +194,7 @@ type Config struct {
    // both IPv4 and IPv6 addressing.
    //
    // Setting this for a service which does not support dual stack will fail
-   // to make requets. It is not recommended to set this value on the session
+   // to make requests. It is not recommended to set this value on the session
    // as it will apply to all service clients created with the session. Even
    // services which don't support dual stack endpoints.
    //
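The corrected comments above describe flags that are set on aws.Config. For reference, a minimal sketch of how a caller might exercise them, assuming the usual aws-sdk-go config builders; the endpoint and region values are placeholders, not values from this change:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // A custom endpoint still requires an explicit region, and 100-Continue
        // can be disabled for proxies or S3-compatible services that mishandle it.
        cfg := aws.NewConfig().
            WithRegion("us-east-1").
            WithEndpoint("https://s3.example.internal"). // placeholder endpoint
            WithS3Disable100Continue(true)

        sess := session.Must(session.NewSession())
        svc := s3.New(sess, cfg)
        _ = svc
    }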

View File

@@ -225,6 +225,8 @@ var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointH
     if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
         r.Error = aws.ErrMissingRegion
     } else if r.ClientInfo.Endpoint == "" {
+        // Was any endpoint provided by the user, or one was derived by the
+        // SDK's endpoint resolver?
         r.Error = aws.ErrMissingEndpoint
     }
 }}

View File

@@ -52,9 +52,21 @@ type WebIdentityRoleProvider struct {
     credentials.Expiry
     PolicyArns []*sts.PolicyDescriptorType

-    client stsiface.STSAPI
+    // Duration the STS credentials will be valid for. Truncated to seconds.
+    // If unset, the assumed role will use AssumeRoleWithWebIdentity's default
+    // expiry duration. See
+    // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity
+    // for more information.
+    Duration time.Duration
+
+    // The amount of time the credentials will be refreshed before they expire.
+    // This is useful refresh credentials before they expire to reduce risk of
+    // using credentials as they expire. If unset, will default to no expiry
+    // window.
     ExpiryWindow time.Duration

+    client stsiface.STSAPI
+
     tokenFetcher    TokenFetcher
     roleARN         string
     roleSessionName string
@@ -107,11 +119,18 @@ func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (
         // uses unix time in nanoseconds to uniquely identify sessions.
         sessionName = strconv.FormatInt(now().UnixNano(), 10)
     }
+
+    var duration *int64
+    if p.Duration != 0 {
+        duration = aws.Int64(int64(p.Duration / time.Second))
+    }
+
     req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
         PolicyArns:       p.PolicyArns,
         RoleArn:          &p.roleARN,
         RoleSessionName:  &sessionName,
         WebIdentityToken: aws.String(string(b)),
+        DurationSeconds:  duration,
     })
     req.SetContext(ctx)
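The new Duration and ExpiryWindow fields are plain exported fields on the provider. A hedged sketch of setting them follows; the role ARN and token path are placeholders, and the constructor shown is the long-standing stscreds helper rather than anything introduced by this commit:

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        sess := session.Must(session.NewSession())

        p := stscreds.NewWebIdentityRoleProvider(
            sts.New(sess),
            "arn:aws:iam::123456789012:role/example", // placeholder role ARN
            "example-session",
            "/var/run/secrets/token", // placeholder token file path
        )
        p.Duration = 30 * time.Minute    // sent as DurationSeconds on the STS call
        p.ExpiryWindow = 5 * time.Minute // refresh shortly before expiry

        creds := credentials.NewCredentials(p)
        _ = creds // pass via aws.Config{Credentials: creds} to service clients
    }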

View File

@@ -20,7 +20,7 @@ func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOu
     op := &request.Operation{
         Name:       "GetToken",
         HTTPMethod: "PUT",
-        HTTPPath:   "/api/token",
+        HTTPPath:   "/latest/api/token",
     }

     var output tokenOutput
@@ -62,7 +62,7 @@ func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string,
     op := &request.Operation{
         Name:       "GetMetadata",
         HTTPMethod: "GET",
-        HTTPPath:   sdkuri.PathJoin("/meta-data", p),
+        HTTPPath:   sdkuri.PathJoin("/latest/meta-data", p),
     }

     output := &metadataOutput{}
@@ -88,7 +88,7 @@ func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) {
     op := &request.Operation{
         Name:       "GetUserData",
         HTTPMethod: "GET",
-        HTTPPath:   "/user-data",
+        HTTPPath:   "/latest/user-data",
     }

     output := &metadataOutput{}
@@ -113,7 +113,7 @@ func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (stri
     op := &request.Operation{
         Name:       "GetDynamicData",
         HTTPMethod: "GET",
-        HTTPPath:   sdkuri.PathJoin("/dynamic", p),
+        HTTPPath:   sdkuri.PathJoin("/latest/dynamic", p),
     }

     output := &metadataOutput{}
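The "/latest" prefix now lives in the operation definitions rather than the endpoint, so callers keep passing relative paths. A small sketch of the unchanged calling convention:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())
        meta := ec2metadata.New(sess)

        // The path is still relative; the client now prepends "/latest" itself.
        id, err := meta.GetMetadata("instance-id")
        if err != nil {
            fmt.Println("metadata lookup failed:", err)
            return
        }
        fmt.Println("instance id:", id)
    }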

View File

@@ -5,6 +5,10 @@
 // variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
 // true instructs the SDK to disable the EC2 Metadata client. The client cannot
 // be used while the environment variable is set to true, (case insensitive).
+//
+// The endpoint of the EC2 IMDS client can be configured via the environment
+// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
+// Session. See aws/session#Options.EC2IMDSEndpoint for more details.
 package ec2metadata

 import (
@@ -12,6 +16,7 @@ import (
     "errors"
     "io"
     "net/http"
+    "net/url"
     "os"
     "strconv"
     "strings"
@@ -41,7 +46,7 @@ const (
     enableTokenProviderHandlerName = "enableTokenProviderHandler"

     // TTL constants
     defaultTTL          = 21600 * time.Second
     ttlExpirationWindow = 30 * time.Second
 )
@@ -69,6 +74,9 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
 // a client when not using a session. Generally using just New with a session
 // is preferred.
 //
+// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS
+// client is able to communicate with the EC2 IMDS API.
+//
 // If an unmodified HTTP client is provided from the stdlib default, or no client
 // the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened.
 // To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default.
@@ -86,6 +94,15 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
         cfg.MaxRetries = aws.Int(2)
     }

+    if u, err := url.Parse(endpoint); err == nil {
+        // Remove path from the endpoint since it will be added by requests.
+        // This is an artifact of the SDK adding `/latest` to the endpoint for
+        // EC2 IMDS, but this is now moved to the operation definition.
+        u.Path = ""
+        u.RawPath = ""
+        endpoint = u.String()
+    }
+
     svc := &EC2Metadata{
         Client: client.New(
             cfg,

File diff suppressed because it is too large

View File

@@ -9,7 +9,8 @@ func isErrConnectionReset(err error) bool {
         return false
     }

-    if strings.Contains(err.Error(), "connection reset") ||
+    if strings.Contains(err.Error(), "use of closed network connection") ||
+        strings.Contains(err.Error(), "connection reset") ||
         strings.Contains(err.Error(), "broken pipe") {
         return true
     }

View File

@@ -241,5 +241,22 @@ over the AWS_CA_BUNDLE environment variable, and will be used if both are set.

 Setting a custom HTTPClient in the aws.Config options will override this setting.
 To use this option and custom HTTP client, the HTTP client needs to be provided
 when creating the session. Not the service client.
+
+The endpoint of the EC2 IMDS client can be configured via the environment
+variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
+Session. See Options.EC2IMDSEndpoint for more details.
+
+    AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
+
+If using an URL with an IPv6 address literal, the IPv6 address
+component must be enclosed in square brackets.
+
+    AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+
+The custom EC2 IMDS endpoint can also be specified via the Session options.
+
+    sess, err := session.NewSessionWithOptions(session.Options{
+        EC2IMDSEndpoint: "http://[::1]",
+    })
 */
 package session

View File

@@ -148,6 +148,11 @@ type envConfig struct {
     //
     //  AWS_S3_USE_ARN_REGION=true
     S3UseARNRegion bool
+
+    // Specifies the alternative endpoint to use for EC2 IMDS.
+    //
+    //  AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+    EC2IMDSEndpoint string
 }

 var (
@@ -211,6 +216,9 @@ var (
     s3UseARNRegionEnvKey = []string{
         "AWS_S3_USE_ARN_REGION",
     }
+    ec2IMDSEndpointEnvKey = []string{
+        "AWS_EC2_METADATA_SERVICE_ENDPOINT",
+    }
 )

 // loadEnvConfig retrieves the SDK's environment configuration.
@@ -332,6 +340,8 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
         }
     }

+    setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey)
+
     return cfg, nil
 }

View File

@@ -48,6 +48,8 @@ var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credenti
 type Session struct {
     Config   *aws.Config
     Handlers request.Handlers
+
+    options Options
 }

 // New creates a new instance of the handlers merging in the provided configs
@@ -99,7 +101,7 @@ func New(cfgs ...*aws.Config) *Session {
             return s
         }

-        s := deprecatedNewSession(cfgs...)
+        s := deprecatedNewSession(envCfg, cfgs...)
         if envErr != nil {
             msg := "failed to load env config"
             s.logDeprecatedNewSessionError(msg, envErr, cfgs)
@@ -243,6 +245,23 @@ type Options struct {
     // function to initialize this value before changing the handlers to be
     // used by the SDK.
     Handlers request.Handlers
+
+    // Allows specifying a custom endpoint to be used by the EC2 IMDS client
+    // when making requests to the EC2 IMDS API. The must endpoint value must
+    // include protocol prefix.
+    //
+    // If unset, will the EC2 IMDS client will use its default endpoint.
+    //
+    // Can also be specified via the environment variable,
+    // AWS_EC2_METADATA_SERVICE_ENDPOINT.
+    //
+    //   AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
+    //
+    // If using an URL with an IPv6 address literal, the IPv6 address
+    // component must be enclosed in square brackets.
+    //
+    //   AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+    EC2IMDSEndpoint string
 }

 // NewSessionWithOptions returns a new Session created from SDK defaults, config files,
@@ -329,7 +348,25 @@ func Must(sess *Session, err error) *Session {
     return sess
 }

-func deprecatedNewSession(cfgs ...*aws.Config) *Session {
+// Wraps the endpoint resolver with a resolver that will return a custom
+// endpoint for EC2 IMDS.
+func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string) endpoints.Resolver {
+    return endpoints.ResolverFunc(
+        func(service, region string, opts ...func(*endpoints.Options)) (
+            endpoints.ResolvedEndpoint, error,
+        ) {
+            if service == ec2MetadataServiceID {
+                return endpoints.ResolvedEndpoint{
+                    URL:           endpoint,
+                    SigningName:   ec2MetadataServiceID,
+                    SigningRegion: region,
+                }, nil
+            }
+            return resolver.EndpointFor(service, region)
+        })
+}
+
+func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session {
     cfg := defaults.Config()
     handlers := defaults.Handlers()
@@ -341,6 +378,11 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
         // endpoints for service client configurations.
         cfg.EndpointResolver = endpoints.DefaultResolver()
     }

+    if len(envCfg.EC2IMDSEndpoint) != 0 {
+        cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint)
+    }
+
     cfg.Credentials = defaults.CredChain(cfg, handlers)

     // Reapply any passed in configs to override credentials if set
@@ -349,6 +391,9 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
     s := &Session{
         Config:   cfg,
         Handlers: handlers,
+        options: Options{
+            EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint,
+        },
     }

     initHandlers(s)
@@ -418,6 +463,7 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
     s := &Session{
         Config:   cfg,
         Handlers: handlers,
+        options:  opts,
     }

     initHandlers(s)
@@ -570,6 +616,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
             endpoints.LegacyS3UsEast1Endpoint,
         })

+    ec2IMDSEndpoint := sessOpts.EC2IMDSEndpoint
+    if len(ec2IMDSEndpoint) == 0 {
+        ec2IMDSEndpoint = envCfg.EC2IMDSEndpoint
+    }
+    if len(ec2IMDSEndpoint) != 0 {
+        cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint)
+    }
+
     // Configure credentials if not already set by the user when creating the
     // Session.
     if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
@@ -627,6 +681,7 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session {
     newSession := &Session{
         Config:   s.Config.Copy(cfgs...),
         Handlers: s.Handlers.Copy(),
+        options:  s.options,
     }

     initHandlers(newSession)
@@ -665,6 +720,8 @@ func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Confi
     }
 }

+const ec2MetadataServiceID = "ec2metadata"
+
 func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) {
     if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 {

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go" const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK // SDKVersion is the version of this SDK
const SDKVersion = "1.32.5" const SDKVersion = "1.34.28"

View File

@ -69,10 +69,23 @@ func (r *EventReader) ReadEvent() (event interface{}, err error) {
case ErrorMessageType: case ErrorMessageType:
return nil, r.unmarshalErrorMessage(msg) return nil, r.unmarshalErrorMessage(msg)
default: default:
return nil, fmt.Errorf("unknown eventstream message type, %v", typ) return nil, &UnknownMessageTypeError{
Type: typ, Message: msg.Clone(),
}
} }
} }
// UnknownMessageTypeError provides an error when a message is received from
// the stream, but the reader is unable to determine what kind of message it is.
type UnknownMessageTypeError struct {
Type string
Message eventstream.Message
}
func (e *UnknownMessageTypeError) Error() string {
return "unknown eventstream message type, " + e.Type
}
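A hedged sketch of how a stream consumer might handle the new error type; the EventReader value and the decision to skip unknown messages are assumptions, not part of this change.

package eventconsumer

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi"
)

// readOne shows how a consumer could tolerate unknown message types now that
// ReadEvent returns *UnknownMessageTypeError instead of a bare error.
func readOne(r *eventstreamapi.EventReader) (interface{}, error) {
	event, err := r.ReadEvent()
	if err != nil {
		var unknown *eventstreamapi.UnknownMessageTypeError
		if errors.As(err, &unknown) {
			// The offending message is retained (deep-copied via Clone), so it
			// can be logged and reading can continue.
			log.Printf("skipping unknown eventstream message type %q", unknown.Type)
			return nil, nil
		}
		return nil, err
	}
	return event, nil
}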
func (r *EventReader) unmarshalEventMessage( func (r *EventReader) unmarshalEventMessage(
msg eventstream.Message, msg eventstream.Message,
) (event interface{}, err error) { ) (event interface{}, err error) {

View File

@ -52,6 +52,15 @@ func (hs *Headers) Del(name string) {
} }
} }
// Clone returns a deep copy of the headers
func (hs Headers) Clone() Headers {
o := make(Headers, 0, len(hs))
for _, h := range hs {
o.Set(h.Name, h.Value)
}
return o
}
func decodeHeaders(r io.Reader) (Headers, error) { func decodeHeaders(r io.Reader) (Headers, error) {
hs := Headers{} hs := Headers{}

View File

@ -57,6 +57,20 @@ func (m *Message) rawMessage() (rawMessage, error) {
return raw, nil return raw, nil
} }
// Clone returns a deep copy of the message.
func (m Message) Clone() Message {
var payload []byte
if m.Payload != nil {
payload = make([]byte, len(m.Payload))
copy(payload, m.Payload)
}
return Message{
Headers: m.Headers.Clone(),
Payload: payload,
}
}
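A small sketch (assuming the eventstream package layout above) showing that Clone produces an independent copy, which is what lets UnknownMessageTypeError hold onto the raw message safely.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
)

func main() {
	orig := eventstream.Message{
		Headers: eventstream.Headers{},
		Payload: []byte("hello"),
	}
	orig.Headers.Set(":event-type", eventstream.StringValue("example"))

	cp := orig.Clone()

	// Mutating the copy must not affect the original: Clone copies both the
	// header slice and the payload bytes.
	cp.Payload[0] = 'H'
	cp.Headers.Set(":event-type", eventstream.StringValue("changed"))

	fmt.Println(string(orig.Payload)) // still "hello"
}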
type messagePrelude struct { type messagePrelude struct {
Length uint32 Length uint32
HeadersLen uint32 HeadersLen uint32

View File

@ -6,6 +6,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"math/big"
"reflect" "reflect"
"strings" "strings"
"time" "time"
@ -15,6 +16,8 @@ import (
"github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol"
) )
var millisecondsFloat = new(big.Float).SetInt64(1e3)
// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in // UnmarshalJSONError unmarshal's the reader's JSON document into the passed in
// type. The value to unmarshal the json document into must be a pointer to the // type. The value to unmarshal the json document into must be a pointer to the
// type. // type.
@ -39,7 +42,9 @@ func UnmarshalJSONError(v interface{}, stream io.Reader) error {
func UnmarshalJSON(v interface{}, stream io.Reader) error { func UnmarshalJSON(v interface{}, stream io.Reader) error {
var out interface{} var out interface{}
err := json.NewDecoder(stream).Decode(&out) decoder := json.NewDecoder(stream)
decoder.UseNumber()
err := decoder.Decode(&out)
if err == io.EOF { if err == io.EOF {
return nil return nil
} else if err != nil { } else if err != nil {
@ -54,7 +59,9 @@ func UnmarshalJSON(v interface{}, stream io.Reader) error {
func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error {
var out interface{} var out interface{}
err := json.NewDecoder(stream).Decode(&out) decoder := json.NewDecoder(stream)
decoder.UseNumber()
err := decoder.Decode(&out)
if err == io.EOF { if err == io.EOF {
return nil return nil
} else if err != nil { } else if err != nil {
@ -254,16 +261,31 @@ func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag
default: default:
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
} }
case float64: case json.Number:
switch value.Interface().(type) { switch value.Interface().(type) {
case *int64: case *int64:
di := int64(d) // Retain the old behavior where we would just truncate the float64
// calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt
f, err := d.Float64()
if err != nil {
return err
}
di := int64(f)
value.Set(reflect.ValueOf(&di)) value.Set(reflect.ValueOf(&di))
case *float64: case *float64:
value.Set(reflect.ValueOf(&d)) f, err := d.Float64()
if err != nil {
return err
}
value.Set(reflect.ValueOf(&f))
case *time.Time: case *time.Time:
// Time unmarshaled from a float64 can only be epoch seconds float, ok := new(big.Float).SetString(d.String())
t := time.Unix(int64(d), 0).UTC() if !ok {
return fmt.Errorf("unsupported float time representation: %v", d.String())
}
float = float.Mul(float, millisecondsFloat)
ms, _ := float.Int64()
t := time.Unix(0, ms*1e6).UTC()
value.Set(reflect.ValueOf(&t)) value.Set(reflect.ValueOf(&t))
default: default:
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())

View File

@ -27,8 +27,8 @@ const (
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
// This format is used for output time without seconds precision // This format is used for output time with fractional second precision up to milliseconds
ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z"
) )
// IsKnownTimestampFormat returns if the timestamp format name // IsKnownTimestampFormat returns if the timestamp format name
@ -48,7 +48,7 @@ func IsKnownTimestampFormat(name string) bool {
// FormatTime returns a string value of the time. // FormatTime returns a string value of the time.
func FormatTime(name string, t time.Time) string { func FormatTime(name string, t time.Time) string {
t = t.UTC() t = t.UTC().Truncate(time.Millisecond)
switch name { switch name {
case RFC822TimeFormatName: case RFC822TimeFormatName:
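A brief sketch of the observable effect, assuming the exported protocol.FormatTime and ISO8601TimeFormatName identifiers: times are now truncated to millisecond precision and the fractional part is kept in the ISO8601 output.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	t := time.Date(2020, 8, 10, 21, 29, 2, 123456789, time.UTC)
	// Nanoseconds are truncated to milliseconds before formatting.
	fmt.Println(protocol.FormatTime(protocol.ISO8601TimeFormatName, t))
	// 2020-08-10T21:29:02.123Z
}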

View File

@ -179,7 +179,7 @@ func cookieURLScheme(u string) (string, error) {
// //
// // Or get Signed cookies for a resource that will expire in 1 hour // // Or get Signed cookies for a resource that will expire in 1 hour
// // and set path and domain of cookies // // and set path and domain of cookies
// cookies, err := s.Sign(policy, func(o *sign.CookieOptions) { // cookies, err := s.SignWithPolicy(policy, func(o *sign.CookieOptions) {
// o.Path = "/" // o.Path = "/"
// o.Domain = ".example.com" // o.Domain = ".example.com"
// }) // })

View File

@ -369,7 +369,7 @@ func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req *
// repository and ensure that you are performing operations on the correct registry. // repository and ensure that you are performing operations on the correct registry.
// //
// * UploadNotFoundException // * UploadNotFoundException
// The upload could not be found, or the specified upload id is not valid for // The upload could not be found, or the specified upload ID is not valid for
// this repository. // this repository.
// //
// * InvalidLayerException // * InvalidLayerException
@ -385,6 +385,9 @@ func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req *
// * EmptyUploadException // * EmptyUploadException
// The specified layer upload does not contain any layer parts. // The specified layer upload does not contain any layer parts.
// //
// * KmsException
// The operation failed due to a KMS exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload
func (c *ECR) CompleteLayerUpload(input *CompleteLayerUploadInput) (*CompleteLayerUploadOutput, error) { func (c *ECR) CompleteLayerUpload(input *CompleteLayerUploadInput) (*CompleteLayerUploadOutput, error) {
req, out := c.CompleteLayerUploadRequest(input) req, out := c.CompleteLayerUploadRequest(input)
@ -486,6 +489,9 @@ func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *reques
// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) // for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html)
// in the Amazon Elastic Container Registry User Guide. // in the Amazon Elastic Container Registry User Guide.
// //
// * KmsException
// The operation failed due to a KMS exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository
func (c *ECR) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) { func (c *ECR) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) {
req, out := c.CreateRepositoryRequest(input) req, out := c.CreateRepositoryRequest(input)
@ -669,6 +675,9 @@ func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *reques
// The specified repository contains images. To delete a repository that contains // The specified repository contains images. To delete a repository that contains
// images, you must force the deletion with the force parameter. // images, you must force the deletion with the force parameter.
// //
// * KmsException
// The operation failed due to a KMS exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository
func (c *ECR) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) { func (c *ECR) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) {
req, out := c.DeleteRepositoryRequest(input) req, out := c.DeleteRepositoryRequest(input)
@ -1830,6 +1839,9 @@ func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req *
// The specified repository could not be found. Check the spelling of the specified // The specified repository could not be found. Check the spelling of the specified
// repository and ensure that you are performing operations on the correct registry. // repository and ensure that you are performing operations on the correct registry.
// //
// * KmsException
// The operation failed due to a KMS exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload
func (c *ECR) InitiateLayerUpload(input *InitiateLayerUploadInput) (*InitiateLayerUploadOutput, error) { func (c *ECR) InitiateLayerUpload(input *InitiateLayerUploadInput) (*InitiateLayerUploadOutput, error) {
req, out := c.InitiateLayerUploadRequest(input) req, out := c.InitiateLayerUploadRequest(input)
@ -2184,6 +2196,13 @@ func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, outpu
// The specified image is tagged with a tag that already exists. The repository // The specified image is tagged with a tag that already exists. The repository
// is configured for tag immutability. // is configured for tag immutability.
// //
// * ImageDigestDoesNotMatchException
// The specified image digest does not match the digest that Amazon ECR calculated
// for the image.
//
// * KmsException
// The operation failed due to a KMS exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage
func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) { func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) {
req, out := c.PutImageRequest(input) req, out := c.PutImageRequest(input)
@ -2731,8 +2750,8 @@ func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPrev
// The lifecycle policy could not be found, and no policy is set to the repository. // The lifecycle policy could not be found, and no policy is set to the repository.
// //
// * LifecyclePolicyPreviewInProgressException // * LifecyclePolicyPreviewInProgressException
// The previous lifecycle policy preview request has not completed. Please try // The previous lifecycle policy preview request has not completed. Wait and
// again later. // try again.
// //
// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview
func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) {
@ -3029,7 +3048,7 @@ func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.
// repository and ensure that you are performing operations on the correct registry. // repository and ensure that you are performing operations on the correct registry.
// //
// * UploadNotFoundException // * UploadNotFoundException
// The upload could not be found, or the specified upload id is not valid for // The upload could not be found, or the specified upload ID is not valid for
// this repository. // this repository.
// //
// * LimitExceededException // * LimitExceededException
@ -3037,6 +3056,9 @@ func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.
// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) // for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html)
// in the Amazon Elastic Container Registry User Guide. // in the Amazon Elastic Container Registry User Guide.
// //
// * KmsException
// The operation failed due to a KMS exception.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart
func (c *ECR) UploadLayerPart(input *UploadLayerPartInput) (*UploadLayerPartOutput, error) { func (c *ECR) UploadLayerPart(input *UploadLayerPartInput) (*UploadLayerPartOutput, error) {
req, out := c.UploadLayerPartRequest(input) req, out := c.UploadLayerPartRequest(input)
@ -3617,9 +3639,12 @@ func (s *CompleteLayerUploadOutput) SetUploadId(v string) *CompleteLayerUploadOu
type CreateRepositoryInput struct { type CreateRepositoryInput struct {
_ struct{} `type:"structure"` _ struct{} `type:"structure"`
// The image scanning configuration for the repository. This setting determines // The encryption configuration for the repository. This determines how the
// whether images are scanned for known vulnerabilities after being pushed to // contents of your repository are encrypted at rest.
// the repository. EncryptionConfiguration *EncryptionConfiguration `locationName:"encryptionConfiguration" type:"structure"`
// The image scanning configuration for the repository. This determines whether
// images are scanned for known vulnerabilities after being pushed to the repository.
ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"`
// The tag mutability setting for the repository. If this parameter is omitted, // The tag mutability setting for the repository. If this parameter is omitted,
@ -3661,6 +3686,11 @@ func (s *CreateRepositoryInput) Validate() error {
if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { if s.RepositoryName != nil && len(*s.RepositoryName) < 2 {
invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2))
} }
if s.EncryptionConfiguration != nil {
if err := s.EncryptionConfiguration.Validate(); err != nil {
invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams))
}
}
if invalidParams.Len() > 0 { if invalidParams.Len() > 0 {
return invalidParams return invalidParams
@ -3668,6 +3698,12 @@ func (s *CreateRepositoryInput) Validate() error {
return nil return nil
} }
// SetEncryptionConfiguration sets the EncryptionConfiguration field's value.
func (s *CreateRepositoryInput) SetEncryptionConfiguration(v *EncryptionConfiguration) *CreateRepositoryInput {
s.EncryptionConfiguration = v
return s
}
// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. // SetImageScanningConfiguration sets the ImageScanningConfiguration field's value.
func (s *CreateRepositoryInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *CreateRepositoryInput { func (s *CreateRepositoryInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *CreateRepositoryInput {
s.ImageScanningConfiguration = v s.ImageScanningConfiguration = v
@ -4518,6 +4554,87 @@ func (s *EmptyUploadException) RequestID() string {
return s.RespMetadata.RequestID return s.RespMetadata.RequestID
} }
// The encryption configuration for the repository. This determines how the
// contents of your repository are encrypted at rest.
//
// By default, when no encryption configuration is set or the AES256 encryption
// type is used, Amazon ECR uses server-side encryption with Amazon S3-managed
// encryption keys which encrypts your data at rest using an AES-256 encryption
// algorithm. This does not require any action on your part.
//
// For more control over the encryption of the contents of your repository,
// you can use server-side encryption with customer master keys (CMKs) stored
// in AWS Key Management Service (AWS KMS) to encrypt your images. For more
// information, see Amazon ECR encryption at rest (https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html)
// in the Amazon Elastic Container Registry User Guide.
type EncryptionConfiguration struct {
_ struct{} `type:"structure"`
// The encryption type to use.
//
// If you use the KMS encryption type, the contents of the repository will be
// encrypted using server-side encryption with customer master keys (CMKs) stored
// in AWS KMS. When you use AWS KMS to encrypt your data, you can either use
// the default AWS managed CMK for Amazon ECR, or specify your own CMK, which
// you already created. For more information, see Protecting Data Using Server-Side
// Encryption with CMKs Stored in AWS Key Management Service (SSE-KMS) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html)
// in the Amazon Simple Storage Service Console Developer Guide..
//
// If you use the AES256 encryption type, Amazon ECR uses server-side encryption
// with Amazon S3-managed encryption keys which encrypts the images in the repository
// using an AES-256 encryption algorithm. For more information, see Protecting
// Data Using Server-Side Encryption with Amazon S3-Managed Encryption Keys
// (SSE-S3) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
// in the Amazon Simple Storage Service Console Developer Guide..
//
// EncryptionType is a required field
EncryptionType *string `locationName:"encryptionType" type:"string" required:"true" enum:"EncryptionType"`
// If you use the KMS encryption type, specify the CMK to use for encryption.
// The alias, key ID, or full ARN of the CMK can be specified. The key must
// exist in the same Region as the repository. If no key is specified, the default
// AWS managed CMK for Amazon ECR will be used.
KmsKey *string `locationName:"kmsKey" min:"1" type:"string"`
}
// String returns the string representation
func (s EncryptionConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s EncryptionConfiguration) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *EncryptionConfiguration) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "EncryptionConfiguration"}
if s.EncryptionType == nil {
invalidParams.Add(request.NewErrParamRequired("EncryptionType"))
}
if s.KmsKey != nil && len(*s.KmsKey) < 1 {
invalidParams.Add(request.NewErrParamMinLen("KmsKey", 1))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetEncryptionType sets the EncryptionType field's value.
func (s *EncryptionConfiguration) SetEncryptionType(v string) *EncryptionConfiguration {
s.EncryptionType = &v
return s
}
// SetKmsKey sets the KmsKey field's value.
func (s *EncryptionConfiguration) SetKmsKey(v string) *EncryptionConfiguration {
s.KmsKey = &v
return s
}
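A hedged usage sketch of the new field through the v1 ECR client; the repository name and KMS key alias are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := ecr.New(sess)

	// Create a repository encrypted with a customer-managed KMS key.
	out, err := svc.CreateRepository(&ecr.CreateRepositoryInput{
		RepositoryName: aws.String("my-repo"),
		EncryptionConfiguration: &ecr.EncryptionConfiguration{
			EncryptionType: aws.String(ecr.EncryptionTypeKms),
			KmsKey:         aws.String("alias/my-ecr-key"),
		},
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("repository URI:", aws.StringValue(out.Repository.RepositoryUri))
}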
type GetAuthorizationTokenInput struct { type GetAuthorizationTokenInput struct {
_ struct{} `type:"structure"` _ struct{} `type:"structure"`
@ -5076,7 +5193,7 @@ type Image struct {
// The image manifest associated with the image. // The image manifest associated with the image.
ImageManifest *string `locationName:"imageManifest" min:"1" type:"string"` ImageManifest *string `locationName:"imageManifest" min:"1" type:"string"`
// The media type associated with the image manifest. // The manifest media type of the image.
ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"` ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"`
// The AWS account ID associated with the registry containing the image. // The AWS account ID associated with the registry containing the image.
@ -5188,9 +5305,15 @@ func (s *ImageAlreadyExistsException) RequestID() string {
type ImageDetail struct { type ImageDetail struct {
_ struct{} `type:"structure"` _ struct{} `type:"structure"`
// The artifact media type of the image.
ArtifactMediaType *string `locationName:"artifactMediaType" type:"string"`
// The sha256 digest of the image manifest. // The sha256 digest of the image manifest.
ImageDigest *string `locationName:"imageDigest" type:"string"` ImageDigest *string `locationName:"imageDigest" type:"string"`
// The media type of the image manifest.
ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"`
// The date and time, expressed in standard JavaScript date format, at which // The date and time, expressed in standard JavaScript date format, at which
// the current image was pushed to the repository. // the current image was pushed to the repository.
ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp"` ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp"`
@ -5232,12 +5355,24 @@ func (s ImageDetail) GoString() string {
return s.String() return s.String()
} }
// SetArtifactMediaType sets the ArtifactMediaType field's value.
func (s *ImageDetail) SetArtifactMediaType(v string) *ImageDetail {
s.ArtifactMediaType = &v
return s
}
// SetImageDigest sets the ImageDigest field's value. // SetImageDigest sets the ImageDigest field's value.
func (s *ImageDetail) SetImageDigest(v string) *ImageDetail { func (s *ImageDetail) SetImageDigest(v string) *ImageDetail {
s.ImageDigest = &v s.ImageDigest = &v
return s return s
} }
// SetImageManifestMediaType sets the ImageManifestMediaType field's value.
func (s *ImageDetail) SetImageManifestMediaType(v string) *ImageDetail {
s.ImageManifestMediaType = &v
return s
}
// SetImagePushedAt sets the ImagePushedAt field's value. // SetImagePushedAt sets the ImagePushedAt field's value.
func (s *ImageDetail) SetImagePushedAt(v time.Time) *ImageDetail { func (s *ImageDetail) SetImagePushedAt(v time.Time) *ImageDetail {
s.ImagePushedAt = &v s.ImagePushedAt = &v
@ -5280,6 +5415,63 @@ func (s *ImageDetail) SetRepositoryName(v string) *ImageDetail {
return s return s
} }
// The specified image digest does not match the digest that Amazon ECR calculated
// for the image.
type ImageDigestDoesNotMatchException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s ImageDigestDoesNotMatchException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ImageDigestDoesNotMatchException) GoString() string {
return s.String()
}
func newErrorImageDigestDoesNotMatchException(v protocol.ResponseMetadata) error {
return &ImageDigestDoesNotMatchException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *ImageDigestDoesNotMatchException) Code() string {
return "ImageDigestDoesNotMatchException"
}
// Message returns the exception's message.
func (s *ImageDigestDoesNotMatchException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *ImageDigestDoesNotMatchException) OrigErr() error {
return nil
}
func (s *ImageDigestDoesNotMatchException) Error() string {
return fmt.Sprintf("%s: %s", s.Code(), s.Message())
}
// Status code returns the HTTP status code for the request's response error.
func (s *ImageDigestDoesNotMatchException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for request.
func (s *ImageDigestDoesNotMatchException) RequestID() string {
return s.RespMetadata.RequestID
}
// An object representing an Amazon ECR image failure. // An object representing an Amazon ECR image failure.
type ImageFailure struct { type ImageFailure struct {
_ struct{} `type:"structure"` _ struct{} `type:"structure"`
@ -6025,6 +6217,65 @@ func (s *InvalidTagParameterException) RequestID() string {
return s.RespMetadata.RequestID return s.RespMetadata.RequestID
} }
// The operation failed due to a KMS exception.
type KmsException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
// The error code returned by AWS KMS.
KmsError *string `locationName:"kmsError" type:"string"`
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
func (s KmsException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s KmsException) GoString() string {
return s.String()
}
func newErrorKmsException(v protocol.ResponseMetadata) error {
return &KmsException{
RespMetadata: v,
}
}
// Code returns the exception type name.
func (s *KmsException) Code() string {
return "KmsException"
}
// Message returns the exception's message.
func (s *KmsException) Message() string {
if s.Message_ != nil {
return *s.Message_
}
return ""
}
// OrigErr always returns nil, satisfies awserr.Error interface.
func (s *KmsException) OrigErr() error {
return nil
}
func (s *KmsException) Error() string {
return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
}
// Status code returns the HTTP status code for the request's response error.
func (s *KmsException) StatusCode() int {
return s.RespMetadata.StatusCode
}
// RequestID returns the service's response RequestID for request.
func (s *KmsException) RequestID() string {
return s.RespMetadata.RequestID
}
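A sketch of how a caller might surface the KMS details when an ECR call fails; it assumes the modeled exception is returned directly so errors.As can match it.

package ecrhelpers

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecr"
)

// pushImage sketches how the new KMS error details could be logged when an
// ECR PutImage call fails with the modeled KmsException.
func pushImage(svc *ecr.ECR, input *ecr.PutImageInput) error {
	_, err := svc.PutImage(input)
	if err != nil {
		var kmsErr *ecr.KmsException
		if errors.As(err, &kmsErr) {
			// KmsError carries the error code reported by AWS KMS.
			log.Printf("KMS failure: %s (kms error code %s)",
				kmsErr.Message(), aws.StringValue(kmsErr.KmsError))
		}
		return err
	}
	return nil
}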
// An object representing an Amazon ECR image layer. // An object representing an Amazon ECR image layer.
type Layer struct { type Layer struct {
_ struct{} `type:"structure"` _ struct{} `type:"structure"`
@ -6429,8 +6680,8 @@ func (s *LifecyclePolicyPreviewFilter) SetTagStatus(v string) *LifecyclePolicyPr
return s return s
} }
// The previous lifecycle policy preview request has not completed. Please try // The previous lifecycle policy preview request has not completed. Wait and
// again later. // try again.
type LifecyclePolicyPreviewInProgressException struct { type LifecyclePolicyPreviewInProgressException struct {
_ struct{} `type:"structure"` _ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@ -6929,6 +7180,9 @@ func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput
type PutImageInput struct { type PutImageInput struct {
_ struct{} `type:"structure"` _ struct{} `type:"structure"`
// The image digest of the image manifest corresponding to the image.
ImageDigest *string `locationName:"imageDigest" type:"string"`
// The image manifest corresponding to the image to be uploaded. // The image manifest corresponding to the image to be uploaded.
// //
// ImageManifest is a required field // ImageManifest is a required field
@ -6940,7 +7194,8 @@ type PutImageInput struct {
ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"` ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"`
// The tag to associate with the image. This parameter is required for images // The tag to associate with the image. This parameter is required for images
// that use the Docker Image Manifest V2 Schema 2 or OCI formats. // that use the Docker Image Manifest V2 Schema 2 or Open Container Initiative
// (OCI) formats.
ImageTag *string `locationName:"imageTag" min:"1" type:"string"` ImageTag *string `locationName:"imageTag" min:"1" type:"string"`
// The AWS account ID associated with the registry that contains the repository // The AWS account ID associated with the registry that contains the repository
@ -6989,6 +7244,12 @@ func (s *PutImageInput) Validate() error {
return nil return nil
} }
// SetImageDigest sets the ImageDigest field's value.
func (s *PutImageInput) SetImageDigest(v string) *PutImageInput {
s.ImageDigest = &v
return s
}
// SetImageManifest sets the ImageManifest field's value. // SetImageManifest sets the ImageManifest field's value.
func (s *PutImageInput) SetImageManifest(v string) *PutImageInput { func (s *PutImageInput) SetImageManifest(v string) *PutImageInput {
s.ImageManifest = &v s.ImageManifest = &v
@ -7433,6 +7694,10 @@ type Repository struct {
// The date and time, in JavaScript date format, when the repository was created. // The date and time, in JavaScript date format, when the repository was created.
CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`
// The encryption configuration for the repository. This determines how the
// contents of your repository are encrypted at rest.
EncryptionConfiguration *EncryptionConfiguration `locationName:"encryptionConfiguration" type:"structure"`
// The image scanning configuration for a repository. // The image scanning configuration for a repository.
ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"`
@ -7451,8 +7716,8 @@ type Repository struct {
// The name of the repository. // The name of the repository.
RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"`
// The URI for the repository. You can use this URI for Docker push or pull // The URI for the repository. You can use this URI for container image push
// operations. // and pull operations.
RepositoryUri *string `locationName:"repositoryUri" type:"string"` RepositoryUri *string `locationName:"repositoryUri" type:"string"`
} }
@ -7472,6 +7737,12 @@ func (s *Repository) SetCreatedAt(v time.Time) *Repository {
return s return s
} }
// SetEncryptionConfiguration sets the EncryptionConfiguration field's value.
func (s *Repository) SetEncryptionConfiguration(v *EncryptionConfiguration) *Repository {
s.EncryptionConfiguration = v
return s
}
// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. // SetImageScanningConfiguration sets the ImageScanningConfiguration field's value.
func (s *Repository) SetImageScanningConfiguration(v *ImageScanningConfiguration) *Repository { func (s *Repository) SetImageScanningConfiguration(v *ImageScanningConfiguration) *Repository {
s.ImageScanningConfiguration = v s.ImageScanningConfiguration = v
@ -8656,7 +8927,7 @@ func (s *UploadLayerPartOutput) SetUploadId(v string) *UploadLayerPartOutput {
return s return s
} }
// The upload could not be found, or the specified upload id is not valid for // The upload could not be found, or the specified upload ID is not valid for
// this repository. // this repository.
type UploadNotFoundException struct { type UploadNotFoundException struct {
_ struct{} `type:"structure"` _ struct{} `type:"structure"`
@ -8714,6 +8985,22 @@ func (s *UploadNotFoundException) RequestID() string {
return s.RespMetadata.RequestID return s.RespMetadata.RequestID
} }
const (
// EncryptionTypeAes256 is a EncryptionType enum value
EncryptionTypeAes256 = "AES256"
// EncryptionTypeKms is a EncryptionType enum value
EncryptionTypeKms = "KMS"
)
// EncryptionType_Values returns all elements of the EncryptionType enum
func EncryptionType_Values() []string {
return []string{
EncryptionTypeAes256,
EncryptionTypeKms,
}
}
const ( const (
// FindingSeverityInformational is a FindingSeverity enum value // FindingSeverityInformational is a FindingSeverity enum value
FindingSeverityInformational = "INFORMATIONAL" FindingSeverityInformational = "INFORMATIONAL"
@ -8734,11 +9021,30 @@ const (
FindingSeverityUndefined = "UNDEFINED" FindingSeverityUndefined = "UNDEFINED"
) )
// FindingSeverity_Values returns all elements of the FindingSeverity enum
func FindingSeverity_Values() []string {
return []string{
FindingSeverityInformational,
FindingSeverityLow,
FindingSeverityMedium,
FindingSeverityHigh,
FindingSeverityCritical,
FindingSeverityUndefined,
}
}
const ( const (
// ImageActionTypeExpire is a ImageActionType enum value // ImageActionTypeExpire is a ImageActionType enum value
ImageActionTypeExpire = "EXPIRE" ImageActionTypeExpire = "EXPIRE"
) )
// ImageActionType_Values returns all elements of the ImageActionType enum
func ImageActionType_Values() []string {
return []string{
ImageActionTypeExpire,
}
}
const ( const (
// ImageFailureCodeInvalidImageDigest is a ImageFailureCode enum value // ImageFailureCodeInvalidImageDigest is a ImageFailureCode enum value
ImageFailureCodeInvalidImageDigest = "InvalidImageDigest" ImageFailureCodeInvalidImageDigest = "InvalidImageDigest"
@ -8757,8 +9063,24 @@ const (
// ImageFailureCodeImageReferencedByManifestList is a ImageFailureCode enum value // ImageFailureCodeImageReferencedByManifestList is a ImageFailureCode enum value
ImageFailureCodeImageReferencedByManifestList = "ImageReferencedByManifestList" ImageFailureCodeImageReferencedByManifestList = "ImageReferencedByManifestList"
// ImageFailureCodeKmsError is a ImageFailureCode enum value
ImageFailureCodeKmsError = "KmsError"
) )
// ImageFailureCode_Values returns all elements of the ImageFailureCode enum
func ImageFailureCode_Values() []string {
return []string{
ImageFailureCodeInvalidImageDigest,
ImageFailureCodeInvalidImageTag,
ImageFailureCodeImageTagDoesNotMatchDigest,
ImageFailureCodeImageNotFound,
ImageFailureCodeMissingDigestAndTag,
ImageFailureCodeImageReferencedByManifestList,
ImageFailureCodeKmsError,
}
}
const ( const (
// ImageTagMutabilityMutable is a ImageTagMutability enum value // ImageTagMutabilityMutable is a ImageTagMutability enum value
ImageTagMutabilityMutable = "MUTABLE" ImageTagMutabilityMutable = "MUTABLE"
@ -8767,6 +9089,14 @@ const (
ImageTagMutabilityImmutable = "IMMUTABLE" ImageTagMutabilityImmutable = "IMMUTABLE"
) )
// ImageTagMutability_Values returns all elements of the ImageTagMutability enum
func ImageTagMutability_Values() []string {
return []string{
ImageTagMutabilityMutable,
ImageTagMutabilityImmutable,
}
}
const ( const (
// LayerAvailabilityAvailable is a LayerAvailability enum value // LayerAvailabilityAvailable is a LayerAvailability enum value
LayerAvailabilityAvailable = "AVAILABLE" LayerAvailabilityAvailable = "AVAILABLE"
@ -8775,6 +9105,14 @@ const (
LayerAvailabilityUnavailable = "UNAVAILABLE" LayerAvailabilityUnavailable = "UNAVAILABLE"
) )
// LayerAvailability_Values returns all elements of the LayerAvailability enum
func LayerAvailability_Values() []string {
return []string{
LayerAvailabilityAvailable,
LayerAvailabilityUnavailable,
}
}
const ( const (
// LayerFailureCodeInvalidLayerDigest is a LayerFailureCode enum value // LayerFailureCodeInvalidLayerDigest is a LayerFailureCode enum value
LayerFailureCodeInvalidLayerDigest = "InvalidLayerDigest" LayerFailureCodeInvalidLayerDigest = "InvalidLayerDigest"
@ -8783,6 +9121,14 @@ const (
LayerFailureCodeMissingLayerDigest = "MissingLayerDigest" LayerFailureCodeMissingLayerDigest = "MissingLayerDigest"
) )
// LayerFailureCode_Values returns all elements of the LayerFailureCode enum
func LayerFailureCode_Values() []string {
return []string{
LayerFailureCodeInvalidLayerDigest,
LayerFailureCodeMissingLayerDigest,
}
}
const ( const (
// LifecyclePolicyPreviewStatusInProgress is a LifecyclePolicyPreviewStatus enum value // LifecyclePolicyPreviewStatusInProgress is a LifecyclePolicyPreviewStatus enum value
LifecyclePolicyPreviewStatusInProgress = "IN_PROGRESS" LifecyclePolicyPreviewStatusInProgress = "IN_PROGRESS"
@ -8797,6 +9143,16 @@ const (
LifecyclePolicyPreviewStatusFailed = "FAILED" LifecyclePolicyPreviewStatusFailed = "FAILED"
) )
// LifecyclePolicyPreviewStatus_Values returns all elements of the LifecyclePolicyPreviewStatus enum
func LifecyclePolicyPreviewStatus_Values() []string {
return []string{
LifecyclePolicyPreviewStatusInProgress,
LifecyclePolicyPreviewStatusComplete,
LifecyclePolicyPreviewStatusExpired,
LifecyclePolicyPreviewStatusFailed,
}
}
const ( const (
// ScanStatusInProgress is a ScanStatus enum value // ScanStatusInProgress is a ScanStatus enum value
ScanStatusInProgress = "IN_PROGRESS" ScanStatusInProgress = "IN_PROGRESS"
@ -8808,6 +9164,15 @@ const (
ScanStatusFailed = "FAILED" ScanStatusFailed = "FAILED"
) )
// ScanStatus_Values returns all elements of the ScanStatus enum
func ScanStatus_Values() []string {
return []string{
ScanStatusInProgress,
ScanStatusComplete,
ScanStatusFailed,
}
}
const ( const (
// TagStatusTagged is a TagStatus enum value // TagStatusTagged is a TagStatus enum value
TagStatusTagged = "TAGGED" TagStatusTagged = "TAGGED"
@ -8818,3 +9183,12 @@ const (
// TagStatusAny is a TagStatus enum value // TagStatusAny is a TagStatus enum value
TagStatusAny = "ANY" TagStatusAny = "ANY"
) )
// TagStatus_Values returns all elements of the TagStatus enum
func TagStatus_Values() []string {
return []string{
TagStatusTagged,
TagStatusUntagged,
TagStatusAny,
}
}
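A small sketch of what the generated *_Values helpers are for: validating input against the enum without hard-coding the allowed strings (TagStatus is used as an arbitrary example).

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ecr"
)

// isValidTagStatus checks a user-supplied value against the generated enum
// helper rather than a hand-maintained list.
func isValidTagStatus(v string) bool {
	for _, s := range ecr.TagStatus_Values() {
		if s == v {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isValidTagStatus("TAGGED"), isValidTagStatus("BOGUS")) // true false
}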

View File

@ -3,12 +3,13 @@
// Package ecr provides the client and types for making API // Package ecr provides the client and types for making API
// requests to Amazon EC2 Container Registry. // requests to Amazon EC2 Container Registry.
// //
// Amazon Elastic Container Registry (Amazon ECR) is a managed Docker registry // Amazon Elastic Container Registry (Amazon ECR) is a managed container image
// service. Customers can use the familiar Docker CLI to push, pull, and manage // registry service. Customers can use the familiar Docker CLI, or their preferred
// images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon // client, to push, pull, and manage images. Amazon ECR provides a secure, scalable,
// ECR supports private Docker repositories with resource-based permissions // and reliable registry for your Docker or Open Container Initiative (OCI)
// images. Amazon ECR supports private repositories with resource-based permissions
// using IAM so that specific users or Amazon EC2 instances can access repositories // using IAM so that specific users or Amazon EC2 instances can access repositories
// and images. Developers can use the Docker CLI to author and manage images. // and images.
// //
// See https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21 for more information on this service. // See https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21 for more information on this service.
// //

View File

@ -21,6 +21,13 @@ const (
// the manifest or image tag after the last push. // the manifest or image tag after the last push.
ErrCodeImageAlreadyExistsException = "ImageAlreadyExistsException" ErrCodeImageAlreadyExistsException = "ImageAlreadyExistsException"
// ErrCodeImageDigestDoesNotMatchException for service response error code
// "ImageDigestDoesNotMatchException".
//
// The specified image digest does not match the digest that Amazon ECR calculated
// for the image.
ErrCodeImageDigestDoesNotMatchException = "ImageDigestDoesNotMatchException"
// ErrCodeImageNotFoundException for service response error code // ErrCodeImageNotFoundException for service response error code
// "ImageNotFoundException". // "ImageNotFoundException".
// //
@ -63,6 +70,12 @@ const (
// characters. // characters.
ErrCodeInvalidTagParameterException = "InvalidTagParameterException" ErrCodeInvalidTagParameterException = "InvalidTagParameterException"
// ErrCodeKmsException for service response error code
// "KmsException".
//
// The operation failed due to a KMS exception.
ErrCodeKmsException = "KmsException"
// ErrCodeLayerAlreadyExistsException for service response error code // ErrCodeLayerAlreadyExistsException for service response error code
// "LayerAlreadyExistsException". // "LayerAlreadyExistsException".
// //
@ -98,8 +111,8 @@ const (
// ErrCodeLifecyclePolicyPreviewInProgressException for service response error code // ErrCodeLifecyclePolicyPreviewInProgressException for service response error code
// "LifecyclePolicyPreviewInProgressException". // "LifecyclePolicyPreviewInProgressException".
// //
// The previous lifecycle policy preview request has not completed. Please try // The previous lifecycle policy preview request has not completed. Wait and
// again later. // try again.
ErrCodeLifecyclePolicyPreviewInProgressException = "LifecyclePolicyPreviewInProgressException" ErrCodeLifecyclePolicyPreviewInProgressException = "LifecyclePolicyPreviewInProgressException"
// ErrCodeLifecyclePolicyPreviewNotFoundException for service response error code // ErrCodeLifecyclePolicyPreviewNotFoundException for service response error code
@ -178,7 +191,7 @@ const (
// ErrCodeUploadNotFoundException for service response error code // ErrCodeUploadNotFoundException for service response error code
// "UploadNotFoundException". // "UploadNotFoundException".
// //
// The upload could not be found, or the specified upload id is not valid for // The upload could not be found, or the specified upload ID is not valid for
// this repository. // this repository.
ErrCodeUploadNotFoundException = "UploadNotFoundException" ErrCodeUploadNotFoundException = "UploadNotFoundException"
) )
@ -186,12 +199,14 @@ const (
var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
"EmptyUploadException": newErrorEmptyUploadException, "EmptyUploadException": newErrorEmptyUploadException,
"ImageAlreadyExistsException": newErrorImageAlreadyExistsException, "ImageAlreadyExistsException": newErrorImageAlreadyExistsException,
"ImageDigestDoesNotMatchException": newErrorImageDigestDoesNotMatchException,
"ImageNotFoundException": newErrorImageNotFoundException, "ImageNotFoundException": newErrorImageNotFoundException,
"ImageTagAlreadyExistsException": newErrorImageTagAlreadyExistsException, "ImageTagAlreadyExistsException": newErrorImageTagAlreadyExistsException,
"InvalidLayerException": newErrorInvalidLayerException, "InvalidLayerException": newErrorInvalidLayerException,
"InvalidLayerPartException": newErrorInvalidLayerPartException, "InvalidLayerPartException": newErrorInvalidLayerPartException,
"InvalidParameterException": newErrorInvalidParameterException, "InvalidParameterException": newErrorInvalidParameterException,
"InvalidTagParameterException": newErrorInvalidTagParameterException, "InvalidTagParameterException": newErrorInvalidTagParameterException,
"KmsException": newErrorKmsException,
"LayerAlreadyExistsException": newErrorLayerAlreadyExistsException, "LayerAlreadyExistsException": newErrorLayerAlreadyExistsException,
"LayerInaccessibleException": newErrorLayerInaccessibleException, "LayerInaccessibleException": newErrorLayerInaccessibleException,
"LayerPartTooSmallException": newErrorLayerPartTooSmallException, "LayerPartTooSmallException": newErrorLayerPartTooSmallException,

File diff suppressed because it is too large

View File

@ -104,19 +104,6 @@
// content from S3. The Encryption and Decryption clients can be used concurrently // content from S3. The Encryption and Decryption clients can be used concurrently
// once the client is created. // once the client is created.
// //
// sess := session.Must(session.NewSession())
//
// // Create the decryption client.
// svc := s3crypto.NewDecryptionClient(sess)
//
// // The object will be downloaded from S3 and decrypted locally. By metadata
// // about the object's encryption will instruct the decryption client how
// // decrypt the content of the object. By default KMS is used for keys.
// result, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String(myBucket),
// Key: aws.String(myKey),
// })
//
// See the s3crypto package documentation for more information. // See the s3crypto package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/ // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
// //

View File

@ -69,7 +69,7 @@ func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) {
return return
} }
// In backwards compatiable, the header's value is not base64 encoded, // In backwards compatible, the header's value is not base64 encoded,
// and needs to be encoded and updated by the SDK's customizations. // and needs to be encoded and updated by the SDK's customizations.
b64Key := base64.StdEncoding.EncodeToString([]byte(key)) b64Key := base64.StdEncoding.EncodeToString([]byte(key))
r.Header.Set(keyHeader, b64Key) r.Header.Set(keyHeader, b64Key)

View File

@ -502,7 +502,7 @@ func (s *store) resumeStatus(ref string, total int64, digester digest.Digester)
if ref != status.Ref { if ref != status.Ref {
// NOTE(stevvooe): This is fairly catastrophic. Either we have some // NOTE(stevvooe): This is fairly catastrophic. Either we have some
// layout corruption or a hash collision for the ref key. // layout corruption or a hash collision for the ref key.
return status, errors.Wrapf(err, "ref key does not match: %v != %v", ref, status.Ref) return status, errors.Errorf("ref key does not match: %v != %v", ref, status.Ref)
} }
if total > 0 && status.Total > 0 && total != status.Total { if total > 0 && status.Total > 0 && total != status.Total {

View File

@ -155,6 +155,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
return nil, err return nil, err
} }
} }
defer resp.Body.Close()
switch resp.StatusCode { switch resp.StatusCode {
case http.StatusOK, http.StatusAccepted, http.StatusNoContent: case http.StatusOK, http.StatusAccepted, http.StatusNoContent:
@ -338,6 +339,7 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di
if resp == nil { if resp == nil {
return errors.New("no response") return errors.New("no response")
} }
defer resp.Body.Close()
// 201 is specified return status, some registries return // 201 is specified return status, some registries return
// 200, 202 or 204. // 200, 202 or 204.

View File

@ -295,12 +295,14 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
if lastErr == nil { if lastErr == nil {
lastErr = err lastErr = err
} }
log.G(ctx).WithError(err).Info("trying next host")
continue // try another host continue // try another host
} }
resp.Body.Close() // don't care about body contents. resp.Body.Close() // don't care about body contents.
if resp.StatusCode > 299 { if resp.StatusCode > 299 {
if resp.StatusCode == http.StatusNotFound { if resp.StatusCode == http.StatusNotFound {
log.G(ctx).Info("trying next host - response was http.StatusNotFound")
continue continue
} }
return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status) return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status)

View File

@ -23,7 +23,7 @@ var (
Package = "github.com/containerd/containerd" Package = "github.com/containerd/containerd"
// Version holds the complete version number. Filled in at linking time. // Version holds the complete version number. Filled in at linking time.
Version = "1.4.4+unknown" Version = "1.4.8+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build // Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time. // the program at linking time.

View File

@ -12,6 +12,17 @@ go:
- 1.11.x - 1.11.x
- 1.12.x - 1.12.x
- 1.13.x - 1.13.x
- 1.14.x
- 1.15.x
- tip
install: go get -v -t ./... allow_failures:
script: make test - go: tip
script: make build
matrix:
include:
- language: go
go: 1.15.x
script: make test

View File

@ -1,6 +1,8 @@
CMD = jpgo CMD = jpgo
SRC_PKGS=./ ./cmd/... ./fuzz/...
help: help:
@echo "Please use \`make <target>' where <target> is one of" @echo "Please use \`make <target>' where <target> is one of"
@echo " test to run all the tests" @echo " test to run all the tests"
@ -9,21 +11,22 @@ help:
generate: generate:
go generate ./... go generate ${SRC_PKGS}
build: build:
rm -f $(CMD) rm -f $(CMD)
go build ./... go build ${SRC_PKGS}
rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
mv cmd/$(CMD)/$(CMD) . mv cmd/$(CMD)/$(CMD) .
test: test: test-internal-testify
go test -v ./... echo "making tests ${SRC_PKGS}"
go test -v ${SRC_PKGS}
check: check:
go vet ./... go vet ${SRC_PKGS}
@echo "golint ./..." @echo "golint ${SRC_PKGS}"
@lint=`golint ./...`; \ @lint=`golint ${SRC_PKGS}`; \
lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
echo "$$lint"; \ echo "$$lint"; \
if [ "$$lint" != "" ]; then exit 1; fi if [ "$$lint" != "" ]; then exit 1; fi
@ -42,3 +45,7 @@ bench:
pprof-cpu: pprof-cpu:
go tool pprof ./go-jmespath.test ./cpu.out go tool pprof ./go-jmespath.test ./cpu.out
test-internal-testify:
cd internal/testify && go test ./...

View File

@ -2,4 +2,4 @@ module github.com/jmespath/go-jmespath
go 1.14 go 1.14
require github.com/stretchr/testify v1.5.1 require github.com/jmespath/go-jmespath/internal/testify v1.5.1

View File

@ -1,11 +1,11 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@ -1,2 +0,0 @@
Tianon Gravi <admwiggin@gmail.com> (@tianon)
Aleksa Sarai <cyphar@cyphar.com> (@cyphar)

View File

@ -1,110 +0,0 @@
package user
import (
"errors"
"syscall"
)
var (
// The current operating system does not provide the required data for user lookups.
ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data")
// No matching entries found in file.
ErrNoPasswdEntries = errors.New("no matching entries in passwd file")
ErrNoGroupEntries = errors.New("no matching entries in group file")
)
func lookupUser(filter func(u User) bool) (User, error) {
// Get operating system-specific passwd reader-closer.
passwd, err := GetPasswd()
if err != nil {
return User{}, err
}
defer passwd.Close()
// Get the users.
users, err := ParsePasswdFilter(passwd, filter)
if err != nil {
return User{}, err
}
// No user entries found.
if len(users) == 0 {
return User{}, ErrNoPasswdEntries
}
// Assume the first entry is the "correct" one.
return users[0], nil
}
// CurrentUser looks up the current user by their user id in /etc/passwd. If the
// user cannot be found (or there is no /etc/passwd file on the filesystem),
// then CurrentUser returns an error.
func CurrentUser() (User, error) {
return LookupUid(syscall.Getuid())
}
// LookupUser looks up a user by their username in /etc/passwd. If the user
// cannot be found (or there is no /etc/passwd file on the filesystem), then
// LookupUser returns an error.
func LookupUser(username string) (User, error) {
return lookupUser(func(u User) bool {
return u.Name == username
})
}
// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
// be found (or there is no /etc/passwd file on the filesystem), then LookupId
// returns an error.
func LookupUid(uid int) (User, error) {
return lookupUser(func(u User) bool {
return u.Uid == uid
})
}
func lookupGroup(filter func(g Group) bool) (Group, error) {
// Get operating system-specific group reader-closer.
group, err := GetGroup()
if err != nil {
return Group{}, err
}
defer group.Close()
// Get the users.
groups, err := ParseGroupFilter(group, filter)
if err != nil {
return Group{}, err
}
// No user entries found.
if len(groups) == 0 {
return Group{}, ErrNoGroupEntries
}
// Assume the first entry is the "correct" one.
return groups[0], nil
}
// CurrentGroup looks up the current user's group by their primary group id's
// entry in /etc/passwd. If the group cannot be found (or there is no
// /etc/group file on the filesystem), then CurrentGroup returns an error.
func CurrentGroup() (Group, error) {
return LookupGid(syscall.Getgid())
}
// LookupGroup looks up a group by its name in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGroup
// returns an error.
func LookupGroup(groupname string) (Group, error) {
return lookupGroup(func(g Group) bool {
return g.Name == groupname
})
}
// LookupGid looks up a group by its group id in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGid
// returns an error.
func LookupGid(gid int) (Group, error) {
return lookupGroup(func(g Group) bool {
return g.Gid == gid
})
}

View File

@ -5,6 +5,9 @@ package user
import ( import (
"io" "io"
"os" "os"
"strconv"
"golang.org/x/sys/unix"
) )
// Unix-specific path to the passwd and group formatted files. // Unix-specific path to the passwd and group formatted files.
@ -13,6 +16,88 @@ const (
unixGroupPath = "/etc/group" unixGroupPath = "/etc/group"
) )
// LookupUser looks up a user by their username in /etc/passwd. If the user
// cannot be found (or there is no /etc/passwd file on the filesystem), then
// LookupUser returns an error.
func LookupUser(username string) (User, error) {
return lookupUserFunc(func(u User) bool {
return u.Name == username
})
}
// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
// be found (or there is no /etc/passwd file on the filesystem), then LookupId
// returns an error.
func LookupUid(uid int) (User, error) {
return lookupUserFunc(func(u User) bool {
return u.Uid == uid
})
}
func lookupUserFunc(filter func(u User) bool) (User, error) {
// Get operating system-specific passwd reader-closer.
passwd, err := GetPasswd()
if err != nil {
return User{}, err
}
defer passwd.Close()
// Get the users.
users, err := ParsePasswdFilter(passwd, filter)
if err != nil {
return User{}, err
}
// No user entries found.
if len(users) == 0 {
return User{}, ErrNoPasswdEntries
}
// Assume the first entry is the "correct" one.
return users[0], nil
}
// LookupGroup looks up a group by its name in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGroup
// returns an error.
func LookupGroup(groupname string) (Group, error) {
return lookupGroupFunc(func(g Group) bool {
return g.Name == groupname
})
}
// LookupGid looks up a group by its group id in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGid
// returns an error.
func LookupGid(gid int) (Group, error) {
return lookupGroupFunc(func(g Group) bool {
return g.Gid == gid
})
}
func lookupGroupFunc(filter func(g Group) bool) (Group, error) {
// Get operating system-specific group reader-closer.
group, err := GetGroup()
if err != nil {
return Group{}, err
}
defer group.Close()
// Get the users.
groups, err := ParseGroupFilter(group, filter)
if err != nil {
return Group{}, err
}
// No user entries found.
if len(groups) == 0 {
return Group{}, ErrNoGroupEntries
}
// Assume the first entry is the "correct" one.
return groups[0], nil
}
func GetPasswdPath() (string, error) { func GetPasswdPath() (string, error) {
return unixPasswdPath, nil return unixPasswdPath, nil
} }
@ -28,3 +113,44 @@ func GetGroupPath() (string, error) {
func GetGroup() (io.ReadCloser, error) { func GetGroup() (io.ReadCloser, error) {
return os.Open(unixGroupPath) return os.Open(unixGroupPath)
} }
// CurrentUser looks up the current user by their user id in /etc/passwd. If the
// user cannot be found (or there is no /etc/passwd file on the filesystem),
// then CurrentUser returns an error.
func CurrentUser() (User, error) {
return LookupUid(unix.Getuid())
}
// CurrentGroup looks up the current user's group by their primary group id's
// entry in /etc/passwd. If the group cannot be found (or there is no
// /etc/group file on the filesystem), then CurrentGroup returns an error.
func CurrentGroup() (Group, error) {
return LookupGid(unix.Getgid())
}
func currentUserSubIDs(fileName string) ([]SubID, error) {
u, err := CurrentUser()
if err != nil {
return nil, err
}
filter := func(entry SubID) bool {
return entry.Name == u.Name || entry.Name == strconv.Itoa(u.Uid)
}
return ParseSubIDFileFilter(fileName, filter)
}
func CurrentUserSubUIDs() ([]SubID, error) {
return currentUserSubIDs("/etc/subuid")
}
func CurrentUserSubGIDs() ([]SubID, error) {
return currentUserSubIDs("/etc/subgid")
}
func CurrentProcessUIDMap() ([]IDMap, error) {
return ParseIDMapFile("/proc/self/uid_map")
}
func CurrentProcessGIDMap() ([]IDMap, error) {
return ParseIDMapFile("/proc/self/gid_map")
}
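
The relocated helpers above form a small, self-contained API for resolving the current process's user, group, and subordinate-ID ranges. A minimal sketch of how a caller might use them; the import path is the usual location of this vendored runc package, and the printed output is illustrative only:

```
package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// Resolve the current uid and gid against /etc/passwd and /etc/group.
	u, err := user.CurrentUser()
	if err != nil {
		panic(err)
	}
	g, err := user.CurrentGroup()
	if err != nil {
		panic(err)
	}
	fmt.Printf("uid=%d (%s) gid=%d (%s)\n", u.Uid, u.Name, g.Gid, g.Name)

	// Subordinate uid ranges for the current user, read from /etc/subuid.
	if ranges, err := user.CurrentUserSubUIDs(); err == nil {
		for _, r := range ranges {
			fmt.Printf("subuid: start=%d count=%d\n", r.SubID, r.Count)
		}
	}
}
```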

View File

@ -1,21 +0,0 @@
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
package user
import "io"
func GetPasswdPath() (string, error) {
return "", ErrUnsupported
}
func GetPasswd() (io.ReadCloser, error) {
return nil, ErrUnsupported
}
func GetGroupPath() (string, error) {
return "", ErrUnsupported
}
func GetGroup() (io.ReadCloser, error) {
return nil, ErrUnsupported
}

View File

@ -2,6 +2,7 @@ package user
import ( import (
"bufio" "bufio"
"errors"
"fmt" "fmt"
"io" "io"
"os" "os"
@ -15,6 +16,13 @@ const (
) )
var ( var (
// The current operating system does not provide the required data for user lookups.
ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data")
// No matching entries found in file.
ErrNoPasswdEntries = errors.New("no matching entries in passwd file")
ErrNoGroupEntries = errors.New("no matching entries in group file")
ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minId, maxId) ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minId, maxId)
) )
@ -35,12 +43,29 @@ type Group struct {
List []string List []string
} }
// SubID represents an entry in /etc/sub{u,g}id
type SubID struct {
Name string
SubID int64
Count int64
}
// IDMap represents an entry in /proc/PID/{u,g}id_map
type IDMap struct {
ID int64
ParentID int64
Count int64
}
func parseLine(line string, v ...interface{}) { func parseLine(line string, v ...interface{}) {
if line == "" { parseParts(strings.Split(line, ":"), v...)
}
func parseParts(parts []string, v ...interface{}) {
if len(parts) == 0 {
return return
} }
parts := strings.Split(line, ":")
for i, p := range parts { for i, p := range parts {
// Ignore cases where we don't have enough fields to populate the arguments. // Ignore cases where we don't have enough fields to populate the arguments.
// Some configuration files like to misbehave. // Some configuration files like to misbehave.
@ -56,6 +81,8 @@ func parseLine(line string, v ...interface{}) {
case *int: case *int:
// "numbers", with conversion errors ignored because of some misbehaving configuration files. // "numbers", with conversion errors ignored because of some misbehaving configuration files.
*e, _ = strconv.Atoi(p) *e, _ = strconv.Atoi(p)
case *int64:
*e, _ = strconv.ParseInt(p, 10, 64)
case *[]string: case *[]string:
// Comma-separated lists. // Comma-separated lists.
if p != "" { if p != "" {
@ -65,7 +92,7 @@ func parseLine(line string, v ...interface{}) {
} }
default: default:
// Someone goof'd when writing code using this function. Scream so they can hear us. // Someone goof'd when writing code using this function. Scream so they can hear us.
panic(fmt.Sprintf("parseLine only accepts {*string, *int, *[]string} as arguments! %#v is not a pointer!", e)) panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! %#v is not a pointer!", e))
} }
} }
} }
@ -103,10 +130,6 @@ func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
) )
for s.Scan() { for s.Scan() {
if err := s.Err(); err != nil {
return nil, err
}
line := strings.TrimSpace(s.Text()) line := strings.TrimSpace(s.Text())
if line == "" { if line == "" {
continue continue
@ -124,6 +147,9 @@ func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
out = append(out, p) out = append(out, p)
} }
} }
if err := s.Err(); err != nil {
return nil, err
}
return out, nil return out, nil
} }
@ -162,10 +188,6 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
) )
for s.Scan() { for s.Scan() {
if err := s.Err(); err != nil {
return nil, err
}
text := s.Text() text := s.Text()
if text == "" { if text == "" {
continue continue
@ -183,6 +205,9 @@ func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
out = append(out, p) out = append(out, p)
} }
} }
if err := s.Err(); err != nil {
return nil, err
}
return out, nil return out, nil
} }
@ -199,18 +224,16 @@ type ExecUser struct {
// files cannot be opened for any reason, the error is ignored and a nil // files cannot be opened for any reason, the error is ignored and a nil
// io.Reader is passed instead. // io.Reader is passed instead.
func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
passwd, err := os.Open(passwdPath) var passwd, group io.Reader
if err != nil {
passwd = nil if passwdFile, err := os.Open(passwdPath); err == nil {
} else { passwd = passwdFile
defer passwd.Close() defer passwdFile.Close()
} }
group, err := os.Open(groupPath) if groupFile, err := os.Open(groupPath); err == nil {
if err != nil { group = groupFile
group = nil defer groupFile.Close()
} else {
defer group.Close()
} }
return GetExecUser(userSpec, defaults, passwd, group) return GetExecUser(userSpec, defaults, passwd, group)
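
GetExecUserPath now ignores open errors and simply hands nil readers to GetExecUser, which equally accepts in-memory data. A hedged sketch against made-up passwd and group content; the values in the final comment are what the lookup rules should produce, not captured output:

```
package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	passwd := strings.NewReader("root:x:0:0:root:/root:/bin/bash\nwww:x:33:33::/var/www:/usr/sbin/nologin\n")
	group := strings.NewReader("root:x:0:\nwww-data:x:33:www\n")

	// Resolve a "user:group" spec; the defaults apply when a part is omitted.
	eu, err := user.GetExecUser("www:www-data", &user.ExecUser{Uid: 0, Gid: 0, Home: "/"}, passwd, group)
	if err != nil {
		panic(err)
	}
	fmt.Println(eu.Uid, eu.Gid, eu.Home) // expected: 33 33 /var/www
}
```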
@ -343,7 +366,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (
if len(groups) > 0 { if len(groups) > 0 {
// First match wins, even if there's more than one matching entry. // First match wins, even if there's more than one matching entry.
user.Gid = groups[0].Gid user.Gid = groups[0].Gid
} else if groupArg != "" { } else {
// If we can't find a group with the given name, the only other valid // If we can't find a group with the given name, the only other valid
// option is if it's a numeric group name with no associated entry in group. // option is if it's a numeric group name with no associated entry in group.
@ -411,7 +434,7 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err
// we asked for a group but didn't find it. let's check to see // we asked for a group but didn't find it. let's check to see
// if we wanted a numeric group // if we wanted a numeric group
if !found { if !found {
gid, err := strconv.Atoi(ag) gid, err := strconv.ParseInt(ag, 10, 64)
if err != nil { if err != nil {
return nil, fmt.Errorf("Unable to find group %s", ag) return nil, fmt.Errorf("Unable to find group %s", ag)
} }
@ -419,7 +442,7 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err
if gid < minId || gid > maxId { if gid < minId || gid > maxId {
return nil, ErrRange return nil, ErrRange
} }
gidMap[gid] = struct{}{} gidMap[int(gid)] = struct{}{}
} }
} }
gids := []int{} gids := []int{}
@ -433,9 +456,117 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err
// that opens the groupPath given and gives it as an argument to // that opens the groupPath given and gives it as an argument to
// GetAdditionalGroups. // GetAdditionalGroups.
func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
group, err := os.Open(groupPath) var group io.Reader
if err == nil {
defer group.Close() if groupFile, err := os.Open(groupPath); err == nil {
group = groupFile
defer groupFile.Close()
} }
return GetAdditionalGroups(additionalGroups, group) return GetAdditionalGroups(additionalGroups, group)
} }
func ParseSubIDFile(path string) ([]SubID, error) {
subid, err := os.Open(path)
if err != nil {
return nil, err
}
defer subid.Close()
return ParseSubID(subid)
}
func ParseSubID(subid io.Reader) ([]SubID, error) {
return ParseSubIDFilter(subid, nil)
}
func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) {
subid, err := os.Open(path)
if err != nil {
return nil, err
}
defer subid.Close()
return ParseSubIDFilter(subid, filter)
}
func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
if r == nil {
return nil, fmt.Errorf("nil source for subid-formatted data")
}
var (
s = bufio.NewScanner(r)
out = []SubID{}
)
for s.Scan() {
line := strings.TrimSpace(s.Text())
if line == "" {
continue
}
// see: man 5 subuid
p := SubID{}
parseLine(line, &p.Name, &p.SubID, &p.Count)
if filter == nil || filter(p) {
out = append(out, p)
}
}
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
func ParseIDMapFile(path string) ([]IDMap, error) {
r, err := os.Open(path)
if err != nil {
return nil, err
}
defer r.Close()
return ParseIDMap(r)
}
func ParseIDMap(r io.Reader) ([]IDMap, error) {
return ParseIDMapFilter(r, nil)
}
func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) {
r, err := os.Open(path)
if err != nil {
return nil, err
}
defer r.Close()
return ParseIDMapFilter(r, filter)
}
func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
if r == nil {
return nil, fmt.Errorf("nil source for idmap-formatted data")
}
var (
s = bufio.NewScanner(r)
out = []IDMap{}
)
for s.Scan() {
line := strings.TrimSpace(s.Text())
if line == "" {
continue
}
// see: man 7 user_namespaces
p := IDMap{}
parseParts(strings.Fields(line), &p.ID, &p.ParentID, &p.Count)
if filter == nil || filter(p) {
out = append(out, p)
}
}
if err := s.Err(); err != nil {
return nil, err
}
return out, nil
}
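
Because the new parsers take an io.Reader, they are easy to exercise against in-memory data; the file-path variants such as ParseSubIDFileFilter and ParseIDMapFile simply open the file first. A small sketch with invented entries (names and numbers are illustrative):

```
package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// subuid format: name:start:count (see man 5 subuid).
	subuid := "alice:100000:65536\nbob:165536:65536\n"
	ranges, err := user.ParseSubIDFilter(strings.NewReader(subuid), func(s user.SubID) bool {
		return s.Name == "alice"
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(ranges) // expected: [{alice 100000 65536}]

	// uid_map format: whitespace-separated id, parent id, count (see man 7 user_namespaces).
	idmap := "0 1000 1\n1 100000 65536\n"
	maps, err := user.ParseIDMap(strings.NewReader(idmap))
	if err != nil {
		panic(err)
	}
	fmt.Println(maps) // expected: [{0 1000 1} {1 100000 65536}]
}
```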

View File

@ -0,0 +1,42 @@
// +build gofuzz
package user
import (
"io"
"strings"
)
func IsDivisbleBy(n int, divisibleby int) bool {
return (n % divisibleby) == 0
}
func FuzzUser(data []byte) int {
if len(data) == 0 {
return -1
}
if !IsDivisbleBy(len(data), 5) {
return -1
}
var divided [][]byte
chunkSize := len(data) / 5
for i := 0; i < len(data); i += chunkSize {
end := i + chunkSize
divided = append(divided, data[i:end])
}
_, _ = ParsePasswdFilter(strings.NewReader(string(divided[0])), nil)
var passwd, group io.Reader
group = strings.NewReader(string(divided[1]))
_, _ = GetAdditionalGroups([]string{string(divided[2])}, group)
passwd = strings.NewReader(string(divided[3]))
_, _ = GetExecUser(string(divided[4]), nil, passwd, group)
return 1
}

View File

@ -1,22 +0,0 @@
sudo: false
language: go
go:
- 1.11.x
- 1.12.x
- tip
matrix:
allow_failures:
- go: tip
fast_finish: true
env:
- GO111MODULE=on
script:
- if [ -n "$(go fmt ./...)" ]; then exit 1; fi
- go test github.com/pelletier/go-toml -race -coverprofile=coverage.txt -covermode=atomic
- go test github.com/pelletier/go-toml/cmd/tomljson
- go test github.com/pelletier/go-toml/cmd/tomll
- go test github.com/pelletier/go-toml/query
- ./benchmark.sh $TRAVIS_BRANCH https://github.com/$TRAVIS_REPO_SLUG.git
after_success:
- bash <(curl -s https://codecov.io/bash)

View File

@ -8,3 +8,4 @@ RUN go install ./...
FROM scratch FROM scratch
COPY --from=builder /go/bin/tomll /usr/bin/tomll COPY --from=builder /go/bin/tomll /usr/bin/tomll
COPY --from=builder /go/bin/tomljson /usr/bin/tomljson COPY --from=builder /go/bin/tomljson /usr/bin/tomljson
COPY --from=builder /go/bin/jsontoml /usr/bin/jsontoml

29 src/vendor/github.com/pelletier/go-toml/Makefile generated vendored Normal file
View File

@ -0,0 +1,29 @@
export CGO_ENABLED=0
go := go
go.goos ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f1)
go.goarch ?= $(shell echo `go version`|cut -f4 -d ' '|cut -d '/' -f2)
out.tools := tomll tomljson jsontoml
out.dist := $(out.tools:=_$(go.goos)_$(go.goarch).tar.xz)
sources := $(wildcard **/*.go)
.PHONY:
tools: $(out.tools)
$(out.tools): $(sources)
GOOS=$(go.goos) GOARCH=$(go.goarch) $(go) build ./cmd/$@
.PHONY:
dist: $(out.dist)
$(out.dist):%_$(go.goos)_$(go.goarch).tar.xz: %
if [ "$(go.goos)" = "windows" ]; then \
tar -cJf $@ $^.exe; \
else \
tar -cJf $@ $^; \
fi
.PHONY:
clean:
rm -rf $(out.tools) $(out.dist)

View File

@ -3,12 +3,11 @@
Go library for the [TOML](https://github.com/mojombo/toml) format. Go library for the [TOML](https://github.com/mojombo/toml) format.
This library supports TOML version This library supports TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) [v0.5.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md)
[![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml) [![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml)
[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE) [![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE)
[![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml) [![Build Status](https://dev.azure.com/pelletierthomas/go-toml-ci/_apis/build/status/pelletier.go-toml?branchName=master)](https://dev.azure.com/pelletierthomas/go-toml-ci/_build/latest?definitionId=1&branchName=master)
[![Windows Build status](https://ci.appveyor.com/api/projects/status/4aepwwjori266hkt/branch/master?svg=true)](https://ci.appveyor.com/project/pelletier/go-toml/branch/master)
[![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml) [![codecov](https://codecov.io/gh/pelletier/go-toml/branch/master/graph/badge.svg)](https://codecov.io/gh/pelletier/go-toml)
[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml) [![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fpelletier%2Fgo-toml?ref=badge_shield)
@ -101,6 +100,13 @@ Go-toml provides two handy command line tools:
tomljson --help tomljson --help
``` ```
* `jsontoml`: Reads a JSON file and outputs a TOML representation.
```
go install github.com/pelletier/go-toml/cmd/jsontoml
jsontoml --help
```
### Docker image ### Docker image
Those tools are also available as a Docker image from Those tools are also available as a Docker image from

View File

@ -1,34 +0,0 @@
version: "{build}"
# Source Config
clone_folder: c:\gopath\src\github.com\pelletier\go-toml
# Build host
environment:
GOPATH: c:\gopath
DEPTESTBYPASS501: 1
GOVERSION: 1.12
GO111MODULE: on
init:
- git config --global core.autocrlf input
# Build
install:
# Install the specific Go version.
- rmdir c:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi
- msiexec /i go%GOVERSION%.windows-amd64.msi /q
- choco install bzr
- set Path=c:\go\bin;c:\gopath\bin;C:\Program Files (x86)\Bazaar\;C:\Program Files\Mercurial\%Path%
- go version
- go env
build: false
deploy: false
test_script:
- go test github.com/pelletier/go-toml
- go test github.com/pelletier/go-toml/cmd/tomljson
- go test github.com/pelletier/go-toml/cmd/tomll
- go test github.com/pelletier/go-toml/query

View File

@ -0,0 +1,230 @@
trigger:
- master
stages:
- stage: fuzzit
displayName: "Run Fuzzit"
dependsOn: []
condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master'))
jobs:
- job: submit
displayName: "Submit"
pool:
vmImage: ubuntu-latest
steps:
- task: GoTool@0
displayName: "Install Go 1.14"
inputs:
version: "1.14"
- script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/"
- script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml
- script: cp -R . ${HOME}/go/src/github.com/pelletier/go-toml
- task: Bash@3
inputs:
filePath: './fuzzit.sh'
env:
TYPE: fuzzing
FUZZIT_API_KEY: $(FUZZIT_API_KEY)
- stage: run_checks
displayName: "Check"
dependsOn: []
jobs:
- job: fmt
displayName: "fmt"
pool:
vmImage: ubuntu-latest
steps:
- task: GoTool@0
displayName: "Install Go 1.14"
inputs:
version: "1.14"
- task: Go@0
displayName: "go fmt ./..."
inputs:
command: 'custom'
customCommand: 'fmt'
arguments: './...'
- job: coverage
displayName: "coverage"
pool:
vmImage: ubuntu-latest
steps:
- task: GoTool@0
displayName: "Install Go 1.14"
inputs:
version: "1.14"
- task: Go@0
displayName: "Generate coverage"
inputs:
command: 'test'
arguments: "-race -coverprofile=coverage.txt -covermode=atomic"
- task: Bash@3
inputs:
targetType: 'inline'
script: 'bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN}'
env:
CODECOV_TOKEN: $(CODECOV_TOKEN)
- job: benchmark
displayName: "benchmark"
pool:
vmImage: ubuntu-latest
steps:
- task: GoTool@0
displayName: "Install Go 1.14"
inputs:
version: "1.14"
- script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/"
- task: Bash@3
inputs:
filePath: './benchmark.sh'
arguments: "master $(Build.Repository.Uri)"
- job: fuzzing
displayName: "fuzzing"
pool:
vmImage: ubuntu-latest
steps:
- task: GoTool@0
displayName: "Install Go 1.14"
inputs:
version: "1.14"
- script: echo "##vso[task.setvariable variable=PATH]${PATH}:/home/vsts/go/bin/"
- script: mkdir -p ${HOME}/go/src/github.com/pelletier/go-toml
- script: cp -R . ${HOME}/go/src/github.com/pelletier/go-toml
- task: Bash@3
inputs:
filePath: './fuzzit.sh'
env:
TYPE: local-regression
- job: go_unit_tests
displayName: "unit tests"
strategy:
matrix:
linux 1.14:
goVersion: '1.14'
imageName: 'ubuntu-latest'
mac 1.14:
goVersion: '1.14'
imageName: 'macOS-latest'
windows 1.14:
goVersion: '1.14'
imageName: 'windows-latest'
linux 1.13:
goVersion: '1.13'
imageName: 'ubuntu-latest'
mac 1.13:
goVersion: '1.13'
imageName: 'macOS-latest'
windows 1.13:
goVersion: '1.13'
imageName: 'windows-latest'
pool:
vmImage: $(imageName)
steps:
- task: GoTool@0
displayName: "Install Go $(goVersion)"
inputs:
version: $(goVersion)
- task: Go@0
displayName: "go test ./..."
inputs:
command: 'test'
arguments: './...'
- stage: build_binaries
displayName: "Build binaries"
dependsOn: run_checks
jobs:
- job: build_binary
displayName: "Build binary"
strategy:
matrix:
linux_amd64:
GOOS: linux
GOARCH: amd64
darwin_amd64:
GOOS: darwin
GOARCH: amd64
windows_amd64:
GOOS: windows
GOARCH: amd64
pool:
vmImage: ubuntu-latest
steps:
- task: GoTool@0
displayName: "Install Go"
inputs:
version: 1.14
- task: Bash@3
inputs:
targetType: inline
script: "make dist"
env:
go.goos: $(GOOS)
go.goarch: $(GOARCH)
- task: CopyFiles@2
inputs:
sourceFolder: '$(Build.SourcesDirectory)'
contents: '*.tar.xz'
TargetFolder: '$(Build.ArtifactStagingDirectory)'
- task: PublishBuildArtifacts@1
inputs:
pathtoPublish: '$(Build.ArtifactStagingDirectory)'
artifactName: binaries
- stage: build_binaries_manifest
displayName: "Build binaries manifest"
dependsOn: build_binaries
jobs:
- job: build_manifest
displayName: "Build binaries manifest"
steps:
- task: DownloadBuildArtifacts@0
inputs:
buildType: 'current'
downloadType: 'single'
artifactName: 'binaries'
downloadPath: '$(Build.SourcesDirectory)'
- task: Bash@3
inputs:
targetType: inline
script: "cd binaries && sha256sum --binary *.tar.xz | tee $(Build.ArtifactStagingDirectory)/sha256sums.txt"
- task: PublishBuildArtifacts@1
inputs:
pathtoPublish: '$(Build.ArtifactStagingDirectory)'
artifactName: manifest
- stage: build_docker_image
displayName: "Build Docker image"
dependsOn: run_checks
jobs:
- job: build
displayName: "Build"
pool:
vmImage: ubuntu-latest
steps:
- task: Docker@2
inputs:
command: 'build'
Dockerfile: 'Dockerfile'
buildContext: '.'
addPipelineData: false
- stage: publish_docker_image
displayName: "Publish Docker image"
dependsOn: build_docker_image
condition: and(succeeded(), eq(variables['Build.SourceBranchName'], 'master'))
jobs:
- job: publish
displayName: "Publish"
pool:
vmImage: ubuntu-latest
steps:
- task: Docker@2
inputs:
containerRegistry: 'DockerHub'
repository: 'pelletier/go-toml'
command: 'buildAndPush'
Dockerfile: 'Dockerfile'
buildContext: '.'
tags: 'latest'

View File

@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
set -e set -ex
reference_ref=${1:-master} reference_ref=${1:-master}
reference_git=${2:-.} reference_git=${2:-.}
@ -8,7 +8,6 @@ reference_git=${2:-.}
if ! `hash benchstat 2>/dev/null`; then if ! `hash benchstat 2>/dev/null`; then
echo "Installing benchstat" echo "Installing benchstat"
go get golang.org/x/perf/cmd/benchstat go get golang.org/x/perf/cmd/benchstat
go install golang.org/x/perf/cmd/benchstat
fi fi
tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX` tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX`

View File

@ -1,7 +1,7 @@
// Package toml is a TOML parser and manipulation library. // Package toml is a TOML parser and manipulation library.
// //
// This version supports the specification as described in // This version supports the specification as described in
// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md // https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.5.0.md
// //
// Marshaling // Marshaling
// //

26 src/vendor/github.com/pelletier/go-toml/fuzzit.sh generated vendored Normal file
View File

@ -0,0 +1,26 @@
#!/bin/bash
set -xe
# go-fuzz doesn't support modules yet, so ensure we do everything
# in the old style GOPATH way
export GO111MODULE="off"
# install go-fuzz
go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
# target name can only contain lower-case letters (a-z), digits (0-9) and a dash (-)
# to add another target, make sure to create it with `fuzzit create target`
# before using `fuzzit create job`
TARGET=toml-fuzzer
go-fuzz-build -libfuzzer -o ${TARGET}.a github.com/pelletier/go-toml
clang -fsanitize=fuzzer ${TARGET}.a -o ${TARGET}
# install fuzzit for talking to fuzzit.dev service
# or latest version:
# https://github.com/fuzzitdev/fuzzit/releases/latest/download/fuzzit_Linux_x86_64
wget -q -O fuzzit https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.52/fuzzit_Linux_x86_64
chmod a+x fuzzit
# TODO: change kkowalczyk to go-toml and create toml-fuzzer target there
./fuzzit create job --type $TYPE go-toml/${TARGET} ${TARGET}

View File

@ -5,5 +5,5 @@ go 1.12
require ( require (
github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml v0.3.1
github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew v1.1.1
gopkg.in/yaml.v2 v2.2.2 gopkg.in/yaml.v2 v2.2.8
) )

View File

@ -5,3 +5,13 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@ -223,9 +223,12 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
} }
possibleDate := l.peekString(35) possibleDate := l.peekString(35)
dateMatch := dateRegexp.FindString(possibleDate) dateSubmatches := dateRegexp.FindStringSubmatch(possibleDate)
if dateMatch != "" { if dateSubmatches != nil && dateSubmatches[0] != "" {
l.fastForward(len(dateMatch)) l.fastForward(len(dateSubmatches[0]))
if dateSubmatches[2] == "" { // no timezone information => local date
return l.lexLocalDate
}
return l.lexDate return l.lexDate
} }
@ -247,7 +250,7 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn { func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn {
l.next() l.next()
l.emit(tokenLeftCurlyBrace) l.emit(tokenLeftCurlyBrace)
return l.lexRvalue return l.lexVoid
} }
func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn { func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn {
@ -261,6 +264,11 @@ func (l *tomlLexer) lexDate() tomlLexStateFn {
return l.lexRvalue return l.lexRvalue
} }
func (l *tomlLexer) lexLocalDate() tomlLexStateFn {
l.emit(tokenLocalDate)
return l.lexRvalue
}
func (l *tomlLexer) lexTrue() tomlLexStateFn { func (l *tomlLexer) lexTrue() tomlLexStateFn {
l.fastForward(4) l.fastForward(4)
l.emit(tokenTrue) l.emit(tokenTrue)
@ -733,7 +741,27 @@ func (l *tomlLexer) run() {
} }
func init() { func init() {
dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`) // Regexp for all date/time formats supported by TOML.
// Group 1: nano precision
// Group 2: timezone
//
// /!\ also matches the empty string
//
// Example matches:
//1979-05-27T07:32:00Z
//1979-05-27T00:32:00-07:00
//1979-05-27T00:32:00.999999-07:00
//1979-05-27 07:32:00Z
//1979-05-27 00:32:00-07:00
//1979-05-27 00:32:00.999999-07:00
//1979-05-27T07:32:00
//1979-05-27T00:32:00.999999
//1979-05-27 07:32:00
//1979-05-27 00:32:00.999999
//1979-05-27
//07:32:00
//00:32:00.999999
dateRegexp = regexp.MustCompile(`^(?:\d{1,4}-\d{2}-\d{2})?(?:[T ]?\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})?)?`)
} }
// Entry point // Entry point
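
The widened dateRegexp deliberately matches the empty string, which is why lexRvalue above checks the first submatch before fast-forwarding, and why an empty timezone group routes to lexLocalDate. A standalone probe of the pattern (illustrative; the comments restate what the lexer does with each match):

```
package main

import (
	"fmt"
	"regexp"
)

func main() {
	dateRegexp := regexp.MustCompile(`^(?:\d{1,4}-\d{2}-\d{2})?(?:[T ]?\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})?)?`)

	inputs := []string{
		"1979-05-27T07:32:00Z", // offset date-time: timezone group set -> lexDate
		"1979-05-27",           // local date: timezone group empty -> lexLocalDate
		"07:32:00.999999",      // local time: also routed to lexLocalDate
		"not-a-date",           // whole match is "", so the lexer falls through
	}
	for _, s := range inputs {
		m := dateRegexp.FindStringSubmatch(s)
		fmt.Printf("%q -> match=%q tz=%q\n", s, m[0], m[2])
	}
}
```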

281 src/vendor/github.com/pelletier/go-toml/localtime.go generated vendored Normal file
View File

@ -0,0 +1,281 @@
// Implementation of TOML's local date/time.
// Copied over from https://github.com/googleapis/google-cloud-go/blob/master/civil/civil.go
// to avoid pulling all the Google dependencies.
//
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package civil implements types for civil time, a time-zone-independent
// representation of time that follows the rules of the proleptic
// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second
// minutes.
//
// Because they lack location information, these types do not represent unique
// moments or intervals of time. Use time.Time for that purpose.
package toml
import (
"fmt"
"time"
)
// A LocalDate represents a date (year, month, day).
//
// This type does not include location information, and therefore does not
// describe a unique 24-hour timespan.
type LocalDate struct {
Year int // Year (e.g., 2014).
Month time.Month // Month of the year (January = 1, ...).
Day int // Day of the month, starting at 1.
}
// LocalDateOf returns the LocalDate in which a time occurs in that time's location.
func LocalDateOf(t time.Time) LocalDate {
var d LocalDate
d.Year, d.Month, d.Day = t.Date()
return d
}
// ParseLocalDate parses a string in RFC3339 full-date format and returns the date value it represents.
func ParseLocalDate(s string) (LocalDate, error) {
t, err := time.Parse("2006-01-02", s)
if err != nil {
return LocalDate{}, err
}
return LocalDateOf(t), nil
}
// String returns the date in RFC3339 full-date format.
func (d LocalDate) String() string {
return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
}
// IsValid reports whether the date is valid.
func (d LocalDate) IsValid() bool {
return LocalDateOf(d.In(time.UTC)) == d
}
// In returns the time corresponding to time 00:00:00 of the date in the location.
//
// In is always consistent with time.LocalDate, even when time.LocalDate returns a time
// on a different day. For example, if loc is America/Indiana/Vincennes, then both
// time.LocalDate(1955, time.May, 1, 0, 0, 0, 0, loc)
// and
// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}.In(loc)
// return 23:00:00 on April 30, 1955.
//
// In panics if loc is nil.
func (d LocalDate) In(loc *time.Location) time.Time {
return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
}
// AddDays returns the date that is n days in the future.
// n can also be negative to go into the past.
func (d LocalDate) AddDays(n int) LocalDate {
return LocalDateOf(d.In(time.UTC).AddDate(0, 0, n))
}
// DaysSince returns the signed number of days between the date and s, not including the end day.
// This is the inverse operation to AddDays.
func (d LocalDate) DaysSince(s LocalDate) (days int) {
// We convert to Unix time so we do not have to worry about leap seconds:
// Unix time increases by exactly 86400 seconds per day.
deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix()
return int(deltaUnix / 86400)
}
// Before reports whether d1 occurs before d2.
func (d1 LocalDate) Before(d2 LocalDate) bool {
if d1.Year != d2.Year {
return d1.Year < d2.Year
}
if d1.Month != d2.Month {
return d1.Month < d2.Month
}
return d1.Day < d2.Day
}
// After reports whether d1 occurs after d2.
func (d1 LocalDate) After(d2 LocalDate) bool {
return d2.Before(d1)
}
// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of d.String().
func (d LocalDate) MarshalText() ([]byte, error) {
return []byte(d.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The date is expected to be a string in a format accepted by ParseLocalDate.
func (d *LocalDate) UnmarshalText(data []byte) error {
var err error
*d, err = ParseLocalDate(string(data))
return err
}
// A LocalTime represents a time with nanosecond precision.
//
// This type does not include location information, and therefore does not
// describe a unique moment in time.
//
// This type exists to represent the TIME type in storage-based APIs like BigQuery.
// Most operations on Times are unlikely to be meaningful. Prefer the LocalDateTime type.
type LocalTime struct {
Hour int // The hour of the day in 24-hour format; range [0-23]
Minute int // The minute of the hour; range [0-59]
Second int // The second of the minute; range [0-59]
Nanosecond int // The nanosecond of the second; range [0-999999999]
}
// LocalTimeOf returns the LocalTime representing the time of day in which a time occurs
// in that time's location. It ignores the date.
func LocalTimeOf(t time.Time) LocalTime {
var tm LocalTime
tm.Hour, tm.Minute, tm.Second = t.Clock()
tm.Nanosecond = t.Nanosecond()
return tm
}
// ParseLocalTime parses a string and returns the time value it represents.
// ParseLocalTime accepts an extended form of the RFC3339 partial-time format. After
// the HH:MM:SS part of the string, an optional fractional part may appear,
// consisting of a decimal point followed by one to nine decimal digits.
// (RFC3339 admits only one digit after the decimal point).
func ParseLocalTime(s string) (LocalTime, error) {
t, err := time.Parse("15:04:05.999999999", s)
if err != nil {
return LocalTime{}, err
}
return LocalTimeOf(t), nil
}
// String returns the time in the format described in ParseLocalTime. If Nanosecond
// is zero, no fractional part will be generated. Otherwise, the result will
// end with a fractional part consisting of a decimal point and nine digits.
func (t LocalTime) String() string {
s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second)
if t.Nanosecond == 0 {
return s
}
return s + fmt.Sprintf(".%09d", t.Nanosecond)
}
// IsValid reports whether the time is valid.
func (t LocalTime) IsValid() bool {
// Construct a non-zero time.
tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC)
return LocalTimeOf(tm) == t
}
// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of t.String().
func (t LocalTime) MarshalText() ([]byte, error) {
return []byte(t.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The time is expected to be a string in a format accepted by ParseLocalTime.
func (t *LocalTime) UnmarshalText(data []byte) error {
var err error
*t, err = ParseLocalTime(string(data))
return err
}
// A LocalDateTime represents a date and time.
//
// This type does not include location information, and therefore does not
// describe a unique moment in time.
type LocalDateTime struct {
Date LocalDate
Time LocalTime
}
// Note: We deliberately do not embed LocalDate into LocalDateTime, to avoid promoting AddDays and Sub.
// LocalDateTimeOf returns the LocalDateTime in which a time occurs in that time's location.
func LocalDateTimeOf(t time.Time) LocalDateTime {
return LocalDateTime{
Date: LocalDateOf(t),
Time: LocalTimeOf(t),
}
}
// ParseLocalDateTime parses a string and returns the LocalDateTime it represents.
// ParseLocalDateTime accepts a variant of the RFC3339 date-time format that omits
// the time offset but includes an optional fractional time, as described in
// ParseLocalTime. Informally, the accepted format is
// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
// where the 'T' may be a lower-case 't'.
func ParseLocalDateTime(s string) (LocalDateTime, error) {
t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
if err != nil {
t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
if err != nil {
return LocalDateTime{}, err
}
}
return LocalDateTimeOf(t), nil
}
// String returns the date-time in the format described in ParseLocalDateTime.
func (dt LocalDateTime) String() string {
return dt.Date.String() + "T" + dt.Time.String()
}
// IsValid reports whether the datetime is valid.
func (dt LocalDateTime) IsValid() bool {
return dt.Date.IsValid() && dt.Time.IsValid()
}
// In returns the time corresponding to the LocalDateTime in the given location.
//
// If the time is missing or ambiguous at the location, In returns the same
// result as time.LocalDate. For example, if loc is America/Indiana/Vincennes, then
// both
// time.LocalDate(1955, time.May, 1, 0, 30, 0, 0, loc)
// and
// civil.LocalDateTime{
// civil.LocalDate{Year: 1955, Month: time.May, Day: 1}},
// civil.LocalTime{Minute: 30}}.In(loc)
// return 23:30:00 on April 30, 1955.
//
// In panics if loc is nil.
func (dt LocalDateTime) In(loc *time.Location) time.Time {
return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
}
// Before reports whether dt1 occurs before dt2.
func (dt1 LocalDateTime) Before(dt2 LocalDateTime) bool {
return dt1.In(time.UTC).Before(dt2.In(time.UTC))
}
// After reports whether dt1 occurs after dt2.
func (dt1 LocalDateTime) After(dt2 LocalDateTime) bool {
return dt2.Before(dt1)
}
// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of dt.String().
func (dt LocalDateTime) MarshalText() ([]byte, error) {
return []byte(dt.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The datetime is expected to be a string in a format accepted by ParseLocalDateTime
func (dt *LocalDateTime) UnmarshalText(data []byte) error {
var err error
*dt, err = ParseLocalDateTime(string(data))
return err
}
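
A short sketch of the vendored civil-time types in use; the values in the comments follow from the String and In definitions above rather than from a captured run:

```
package main

import (
	"fmt"
	"time"

	"github.com/pelletier/go-toml"
)

func main() {
	d, _ := toml.ParseLocalDate("1979-05-27")
	t, _ := toml.ParseLocalTime("07:32:00.999999")
	dt := toml.LocalDateTime{Date: d, Time: t}

	fmt.Println(d)  // 1979-05-27
	fmt.Println(t)  // 07:32:00.999999000 (nine digits once a fraction is present)
	fmt.Println(dt) // 1979-05-27T07:32:00.999999000

	// Pin the wall-clock value to a concrete location when a real instant is needed.
	fmt.Println(dt.In(time.UTC).Format(time.RFC3339Nano)) // 1979-05-27T07:32:00.999999Z
}
```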

View File

@ -68,6 +68,9 @@ const (
var timeType = reflect.TypeOf(time.Time{}) var timeType = reflect.TypeOf(time.Time{})
var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
var localDateType = reflect.TypeOf(LocalDate{})
var localTimeType = reflect.TypeOf(LocalTime{})
var localDateTimeType = reflect.TypeOf(LocalDateTime{})
// Check if the given marshal type maps to a Tree primitive // Check if the given marshal type maps to a Tree primitive
func isPrimitive(mtype reflect.Type) bool { func isPrimitive(mtype reflect.Type) bool {
@ -85,29 +88,31 @@ func isPrimitive(mtype reflect.Type) bool {
case reflect.String: case reflect.String:
return true return true
case reflect.Struct: case reflect.Struct:
return mtype == timeType || isCustomMarshaler(mtype) return mtype == timeType || mtype == localDateType || mtype == localDateTimeType || mtype == localTimeType || isCustomMarshaler(mtype)
default: default:
return false return false
} }
} }
// Check if the given marshal type maps to a Tree slice // Check if the given marshal type maps to a Tree slice or array
func isTreeSlice(mtype reflect.Type) bool { func isTreeSequence(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Slice:
return !isOtherSlice(mtype)
default:
return false
}
}
// Check if the given marshal type maps to a non-Tree slice
func isOtherSlice(mtype reflect.Type) bool {
switch mtype.Kind() { switch mtype.Kind() {
case reflect.Ptr: case reflect.Ptr:
return isOtherSlice(mtype.Elem()) return isTreeSequence(mtype.Elem())
case reflect.Slice: case reflect.Slice, reflect.Array:
return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem()) return isTree(mtype.Elem())
default:
return false
}
}
// Check if the given marshal type maps to a non-Tree slice or array
func isOtherSequence(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Ptr:
return isOtherSequence(mtype.Elem())
case reflect.Slice, reflect.Array:
return !isTreeSequence(mtype)
default: default:
return false return false
} }
@ -116,6 +121,8 @@ func isOtherSlice(mtype reflect.Type) bool {
// Check if the given marshal type maps to a Tree // Check if the given marshal type maps to a Tree
func isTree(mtype reflect.Type) bool { func isTree(mtype reflect.Type) bool {
switch mtype.Kind() { switch mtype.Kind() {
case reflect.Ptr:
return isTree(mtype.Elem())
case reflect.Map: case reflect.Map:
return true return true
case reflect.Struct: case reflect.Struct:
@ -170,7 +177,7 @@ Tree primitive types and corresponding marshal types:
float64 float32, float64, pointers to same float64 float32, float64, pointers to same
string string, pointers to same string string, pointers to same
bool bool, pointers to same bool bool, pointers to same
time.Time time.Time{}, pointers to same time.LocalTime time.LocalTime{}, pointers to same
For additional flexibility, use the Encoder API. For additional flexibility, use the Encoder API.
*/ */
@ -295,7 +302,7 @@ func (e *Encoder) marshal(v interface{}) ([]byte, error) {
} }
var buf bytes.Buffer var buf bytes.Buffer
_, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order) _, err = t.writeToOrdered(&buf, "", "", 0, e.arraysOneElementPerLine, e.order, false)
return buf.Bytes(), err return buf.Bytes(), err
} }
@ -313,20 +320,25 @@ func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, er
tval := e.nextTree() tval := e.nextTree()
switch mtype.Kind() { switch mtype.Kind() {
case reflect.Struct: case reflect.Struct:
for i := 0; i < mtype.NumField(); i++ { switch mval.Interface().(type) {
mtypef, mvalf := mtype.Field(i), mval.Field(i) case Tree:
opts := tomlOptions(mtypef, e.annotation) reflect.ValueOf(tval).Elem().Set(mval)
if opts.include && (!opts.omitempty || !isZero(mvalf)) { default:
val, err := e.valueToToml(mtypef.Type, mvalf) for i := 0; i < mtype.NumField(); i++ {
if err != nil { mtypef, mvalf := mtype.Field(i), mval.Field(i)
return nil, err opts := tomlOptions(mtypef, e.annotation)
} if opts.include && ((mtypef.Type.Kind() != reflect.Interface && !opts.omitempty) || !isZero(mvalf)) {
val, err := e.valueToToml(mtypef.Type, mvalf)
if err != nil {
return nil, err
}
tval.SetWithOptions(opts.name, SetOptions{ tval.SetWithOptions(opts.name, SetOptions{
Comment: opts.comment, Comment: opts.comment,
Commented: opts.commented, Commented: opts.commented,
Multiline: opts.multiline, Multiline: opts.multiline,
}, val) }, val)
}
} }
} }
case reflect.Map: case reflect.Map:
@ -351,12 +363,15 @@ func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, er
} }
for _, key := range keys { for _, key := range keys {
mvalf := mval.MapIndex(key) mvalf := mval.MapIndex(key)
if (mtype.Elem().Kind() == reflect.Ptr || mtype.Elem().Kind() == reflect.Interface) && mvalf.IsNil() {
continue
}
val, err := e.valueToToml(mtype.Elem(), mvalf) val, err := e.valueToToml(mtype.Elem(), mvalf)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if e.quoteMapKeys { if e.quoteMapKeys {
keyStr, err := tomlValueStringRepresentation(key.String(), "", e.arraysOneElementPerLine) keyStr, err := tomlValueStringRepresentation(key.String(), "", "", e.arraysOneElementPerLine)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -384,6 +399,9 @@ func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*T
// Convert given marshal slice to slice of toml values // Convert given marshal slice to slice of toml values
func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
if mtype.Elem().Kind() == reflect.Interface {
return nil, fmt.Errorf("marshal can't handle []interface{}")
}
tval := make([]interface{}, mval.Len(), mval.Len()) tval := make([]interface{}, mval.Len(), mval.Len())
for i := 0; i < mval.Len(); i++ { for i := 0; i < mval.Len(); i++ {
val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) val, err := e.valueToToml(mtype.Elem(), mval.Index(i))
@ -401,14 +419,17 @@ func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface
if mtype.Kind() == reflect.Ptr { if mtype.Kind() == reflect.Ptr {
return e.valueToToml(mtype.Elem(), mval.Elem()) return e.valueToToml(mtype.Elem(), mval.Elem())
} }
if mtype.Kind() == reflect.Interface {
return e.valueToToml(mval.Elem().Type(), mval.Elem())
}
switch { switch {
case isCustomMarshaler(mtype): case isCustomMarshaler(mtype):
return callCustomMarshaler(mval) return callCustomMarshaler(mval)
case isTree(mtype): case isTree(mtype):
return e.valueToTree(mtype, mval) return e.valueToTree(mtype, mval)
case isTreeSlice(mtype): case isTreeSequence(mtype):
return e.valueToTreeSlice(mtype, mval) return e.valueToTreeSlice(mtype, mval)
case isOtherSlice(mtype): case isOtherSequence(mtype):
return e.valueToOtherSlice(mtype, mval) return e.valueToOtherSlice(mtype, mval)
default: default:
switch mtype.Kind() { switch mtype.Kind() {
@ -426,7 +447,7 @@ func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface
case reflect.String: case reflect.String:
return mval.String(), nil return mval.String(), nil
case reflect.Struct: case reflect.Struct:
return mval.Interface().(time.Time), nil return mval.Interface(), nil
default: default:
return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind()) return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind())
} }
@ -445,8 +466,11 @@ func (t *Tree) Unmarshal(v interface{}) error {
// See Marshal() documentation for types mapping table. // See Marshal() documentation for types mapping table.
func (t *Tree) Marshal() ([]byte, error) { func (t *Tree) Marshal() ([]byte, error) {
var buf bytes.Buffer var buf bytes.Buffer
err := NewEncoder(&buf).Encode(t) _, err := t.WriteTo(&buf)
return buf.Bytes(), err if err != nil {
return nil, err
}
return buf.Bytes(), nil
} }
// Unmarshal parses the TOML-encoded data and stores the result in the value // Unmarshal parses the TOML-encoded data and stores the result in the value
@ -526,7 +550,9 @@ func (d *Decoder) unmarshal(v interface{}) error {
return errors.New("only a pointer to struct or map can be unmarshaled from TOML") return errors.New("only a pointer to struct or map can be unmarshaled from TOML")
} }
sval, err := d.valueFromTree(elem, d.tval) vv := reflect.ValueOf(v).Elem()
sval, err := d.valueFromTree(elem, d.tval, &vv)
if err != nil { if err != nil {
return err return err
} }
@ -534,20 +560,32 @@ func (d *Decoder) unmarshal(v interface{}) error {
return nil return nil
} }
// Convert toml tree to marshal struct or map, using marshal type // Convert toml tree to marshal struct or map, using marshal type. When mval1
func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { // is non-nil, merge fields into the given value instead of allocating a new one.
func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree, mval1 *reflect.Value) (reflect.Value, error) {
if mtype.Kind() == reflect.Ptr { if mtype.Kind() == reflect.Ptr {
return d.unwrapPointer(mtype, tval) return d.unwrapPointer(mtype, tval, mval1)
} }
var mval reflect.Value var mval reflect.Value
switch mtype.Kind() { switch mtype.Kind() {
case reflect.Struct: case reflect.Struct:
mval = reflect.New(mtype).Elem() if mval1 != nil {
for i := 0; i < mtype.NumField(); i++ { mval = *mval1
mtypef := mtype.Field(i) } else {
an := annotation{tag: d.tagName} mval = reflect.New(mtype).Elem()
opts := tomlOptions(mtypef, an) }
if opts.include {
switch mval.Interface().(type) {
case Tree:
mval.Set(reflect.ValueOf(tval).Elem())
default:
for i := 0; i < mtype.NumField(); i++ {
mtypef := mtype.Field(i)
an := annotation{tag: d.tagName}
opts := tomlOptions(mtypef, an)
if !opts.include {
continue
}
baseKey := opts.name baseKey := opts.name
keysToTry := []string{ keysToTry := []string{
baseKey, baseKey,
@ -557,19 +595,22 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value,
} }
found := false found := false
for _, key := range keysToTry { if tval != nil {
exists := tval.Has(key) for _, key := range keysToTry {
if !exists { exists := tval.Has(key)
continue if !exists {
continue
}
val := tval.Get(key)
fval := mval.Field(i)
mvalf, err := d.valueFromToml(mtypef.Type, val, &fval)
if err != nil {
return mval, formatError(err, tval.GetPosition(key))
}
mval.Field(i).Set(mvalf)
found = true
break
} }
val := tval.Get(key)
mvalf, err := d.valueFromToml(mtypef.Type, val)
if err != nil {
return mval, formatError(err, tval.GetPosition(key))
}
mval.Field(i).Set(mvalf)
found = true
break
} }
if !found && opts.defaultValue != "" { if !found && opts.defaultValue != "" {
@ -604,6 +645,19 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value,
} }
mval.Field(i).Set(reflect.ValueOf(val)) mval.Field(i).Set(reflect.ValueOf(val))
} }
// save the old behavior above and try to check structs
if !found && opts.defaultValue == "" && mtypef.Type.Kind() == reflect.Struct {
tmpTval := tval
if !mtypef.Anonymous {
tmpTval = nil
}
v, err := d.valueFromTree(mtypef.Type, tmpTval, nil)
if err != nil {
return v, err
}
mval.Field(i).Set(v)
}
} }
} }
case reflect.Map: case reflect.Map:
@ -611,7 +665,7 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value,
for _, key := range tval.Keys() { for _, key := range tval.Keys() {
// TODO: path splits key // TODO: path splits key
val := tval.GetPath([]string{key}) val := tval.GetPath([]string{key})
mvalf, err := d.valueFromToml(mtype.Elem(), val) mvalf, err := d.valueFromToml(mtype.Elem(), val, nil)
if err != nil { if err != nil {
return mval, formatError(err, tval.GetPosition(key)) return mval, formatError(err, tval.GetPosition(key))
} }
@ -625,7 +679,7 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value,
func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) {
mval := reflect.MakeSlice(mtype, len(tval), len(tval)) mval := reflect.MakeSlice(mtype, len(tval), len(tval))
for i := 0; i < len(tval); i++ { for i := 0; i < len(tval); i++ {
val, err := d.valueFromTree(mtype.Elem(), tval[i]) val, err := d.valueFromTree(mtype.Elem(), tval[i], nil)
if err != nil { if err != nil {
return mval, err return mval, err
} }
@ -638,7 +692,7 @@ func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.
func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) {
mval := reflect.MakeSlice(mtype, len(tval), len(tval)) mval := reflect.MakeSlice(mtype, len(tval), len(tval))
for i := 0; i < len(tval); i++ { for i := 0; i < len(tval); i++ {
val, err := d.valueFromToml(mtype.Elem(), tval[i]) val, err := d.valueFromToml(mtype.Elem(), tval[i], nil)
if err != nil { if err != nil {
return mval, err return mval, err
} }
@ -647,33 +701,88 @@ func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (r
return mval, nil return mval, nil
} }
// Convert toml value to marshal value, using marshal type // Convert toml value to marshal value, using marshal type. When mval1 is non-nil
func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { // and the given type is a struct value, merge fields into it.
func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) {
if mtype.Kind() == reflect.Ptr { if mtype.Kind() == reflect.Ptr {
return d.unwrapPointer(mtype, tval) return d.unwrapPointer(mtype, tval, mval1)
} }
switch t := tval.(type) { switch t := tval.(type) {
case *Tree: case *Tree:
if isTree(mtype) { var mval11 *reflect.Value
return d.valueFromTree(mtype, t) if mtype.Kind() == reflect.Struct {
mval11 = mval1
} }
if isTree(mtype) {
return d.valueFromTree(mtype, t, mval11)
}
if mtype.Kind() == reflect.Interface {
if mval1 == nil || mval1.IsNil() {
return d.valueFromTree(reflect.TypeOf(map[string]interface{}{}), t, nil)
} else {
return d.valueFromToml(mval1.Elem().Type(), t, nil)
}
}
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval)
case []*Tree: case []*Tree:
if isTreeSlice(mtype) { if isTreeSequence(mtype) {
return d.valueFromTreeSlice(mtype, t) return d.valueFromTreeSlice(mtype, t)
} }
if mtype.Kind() == reflect.Interface {
if mval1 == nil || mval1.IsNil() {
return d.valueFromTreeSlice(reflect.TypeOf([]map[string]interface{}{}), t)
} else {
ival := mval1.Elem()
return d.valueFromToml(mval1.Elem().Type(), t, &ival)
}
}
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval)
case []interface{}: case []interface{}:
if isOtherSlice(mtype) { if isOtherSequence(mtype) {
return d.valueFromOtherSlice(mtype, t) return d.valueFromOtherSlice(mtype, t)
} }
if mtype.Kind() == reflect.Interface {
if mval1 == nil || mval1.IsNil() {
return d.valueFromOtherSlice(reflect.TypeOf([]interface{}{}), t)
} else {
ival := mval1.Elem()
return d.valueFromToml(mval1.Elem().Type(), t, &ival)
}
}
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval)
default: default:
switch mtype.Kind() { switch mtype.Kind() {
case reflect.Bool, reflect.Struct: case reflect.Bool, reflect.Struct:
val := reflect.ValueOf(tval) val := reflect.ValueOf(tval)
// if this passes for when mtype is reflect.Struct, tval is a time.Time
switch val.Type() {
case localDateType:
localDate := val.Interface().(LocalDate)
switch mtype {
case timeType:
return reflect.ValueOf(time.Date(localDate.Year, localDate.Month, localDate.Day, 0, 0, 0, 0, time.Local)), nil
}
case localDateTimeType:
localDateTime := val.Interface().(LocalDateTime)
switch mtype {
case timeType:
return reflect.ValueOf(time.Date(
localDateTime.Date.Year,
localDateTime.Date.Month,
localDateTime.Date.Day,
localDateTime.Time.Hour,
localDateTime.Time.Minute,
localDateTime.Time.Second,
localDateTime.Time.Nanosecond,
time.Local)), nil
}
}
// if this passes for when mtype is reflect.Struct, tval is a time.LocalTime
if !val.Type().ConvertibleTo(mtype) { if !val.Type().ConvertibleTo(mtype) {
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
} }
@ -728,14 +837,28 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.V
			}
			return val.Convert(mtype), nil
		case reflect.Interface:
			if mval1 == nil || mval1.IsNil() {
				return reflect.ValueOf(tval), nil
			} else {
				ival := mval1.Elem()
				return d.valueFromToml(mval1.Elem().Type(), t, &ival)
			}
		default:
			return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind())
		}
	}
}

func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}, mval1 *reflect.Value) (reflect.Value, error) {
	var melem *reflect.Value

	if mval1 != nil && !mval1.IsNil() && (mtype.Elem().Kind() == reflect.Struct || mtype.Elem().Kind() == reflect.Interface) {
		elem := mval1.Elem()
		melem = &elem
	}

	val, err := d.valueFromToml(mtype.Elem(), tval, melem)
	if err != nil {
		return reflect.ValueOf(nil), err
	}
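The mval1 plumbing above lets the decoder merge a TOML tree into an existing value or, when the target is an empty interface, fall back to a plain map. A minimal usage sketch follows, assuming the pelletier/go-toml v1 Unmarshal API; the struct and field names are illustrative only.

package main

import (
	"fmt"

	toml "github.com/pelletier/go-toml"
)

type config struct {
	Name  string      `toml:"name"`
	Extra interface{} `toml:"extra"`
}

func main() {
	doc := []byte("name = \"demo\"\n\n[extra]\nkey = \"value\"\n")

	var c config
	if err := toml.Unmarshal(doc, &c); err != nil {
		panic(err)
	}
	// With no concrete value behind the interface, the [extra] table is expected
	// to land as a map[string]interface{} per the branch above.
	fmt.Printf("%s -> %#v\n", c.Name, c.Extra)
}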

View File

@ -1,17 +0,0 @@
title = "TOML Marshal Testing"
[basic_map]
one = "one"
two = "two"
[long_map]
a7 = "1"
b3 = "2"
c8 = "3"
d4 = "4"
e6 = "5"
f5 = "6"
g10 = "7"
h1 = "8"
i2 = "9"
j9 = "10"

View File

@ -27,6 +27,7 @@ title = "TOML Marshal Testing"
uint = 5001 uint = 5001
bool = true bool = true
float = 123.4 float = 123.4
float64 = 123.456782132399
int = 5000 int = 5000
string = "Bite me" string = "Bite me"
date = 1979-05-27T07:32:00Z date = 1979-05-27T07:32:00Z

View File

@ -4,6 +4,7 @@ title = "TOML Marshal Testing"
bool = true bool = true
date = 1979-05-27T07:32:00Z date = 1979-05-27T07:32:00Z
float = 123.4 float = 123.4
float64 = 123.456782132399
int = 5000 int = 5000
string = "Bite me" string = "Bite me"
uint = 5001 uint = 5001

View File

@ -313,7 +313,41 @@ func (p *tomlParser) parseRvalue() interface{} {
} }
return val return val
		case tokenDate:
			layout := time.RFC3339Nano
if !strings.Contains(tok.val, "T") {
layout = strings.Replace(layout, "T", " ", 1)
}
val, err := time.ParseInLocation(layout, tok.val, time.UTC)
if err != nil {
p.raiseError(tok, "%s", err)
}
return val
case tokenLocalDate:
v := strings.Replace(tok.val, " ", "T", -1)
isDateTime := false
isTime := false
for _, c := range v {
if c == 'T' || c == 't' {
isDateTime = true
break
}
if c == ':' {
isTime = true
break
}
}
var val interface{}
var err error
if isDateTime {
val, err = ParseLocalDateTime(v)
} else if isTime {
val, err = ParseLocalTime(v)
} else {
val, err = ParseLocalDate(v)
}
if err != nil { if err != nil {
p.raiseError(tok, "%s", err) p.raiseError(tok, "%s", err)
} }
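The classification in the tokenLocalDate branch above is a simple scan: a 'T' or 't' marks a local date-time, a ':' seen before any 'T' marks a local time, and anything else is a plain local date. A standalone sketch of that rule (the function name is illustrative, not the library's API):

package main

import "fmt"

func classifyLocal(v string) string {
	for _, c := range v {
		if c == 'T' || c == 't' {
			return "local date-time"
		}
		if c == ':' {
			return "local time"
		}
	}
	return "local date"
}

func main() {
	for _, s := range []string{"1979-05-27", "07:32:00", "1979-05-27T07:32:00"} {
		fmt.Println(s, "->", classifyLocal(s))
	}
}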
@ -356,12 +390,15 @@ Loop:
			}
			key := p.getToken()
			p.assume(tokenEqual)

			parsedKey, err := parseKey(key.val)
			if err != nil {
				p.raiseError(key, "invalid key: %s", err)
			}

			value := p.parseRvalue()
			tree.SetPath(parsedKey, value)
		case tokenComma:
			if tokenIsComma(previous) {
				p.raiseError(follow, "need field between two commas in inline table")
			}

View File

@ -2,7 +2,6 @@ package toml
import ( import (
"fmt" "fmt"
"strconv"
"unicode" "unicode"
) )
@ -35,6 +34,7 @@ const (
tokenDoubleLeftBracket tokenDoubleLeftBracket
tokenDoubleRightBracket tokenDoubleRightBracket
tokenDate tokenDate
tokenLocalDate
tokenKeyGroup tokenKeyGroup
tokenKeyGroupArray tokenKeyGroupArray
tokenComma tokenComma
@ -68,7 +68,8 @@ var tokenTypeNames = []string{
")", ")",
"]]", "]]",
"[[", "[[",
"Date", "LocalDate",
"LocalDate",
"KeyGroup", "KeyGroup",
"KeyGroupArray", "KeyGroupArray",
",", ",",
@ -95,14 +96,6 @@ func (tt tokenType) String() string {
return "Unknown" return "Unknown"
} }
func (t token) Int() int {
if result, err := strconv.Atoi(t.val); err != nil {
panic(err)
} else {
return result
}
}
func (t token) String() string { func (t token) String() string {
switch t.typ { switch t.typ {
case tokenEOF: case tokenEOF:

View File

@ -222,8 +222,12 @@ func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interfac
switch v := value.(type) { switch v := value.(type) {
case *Tree: case *Tree:
v.comment = opts.Comment v.comment = opts.Comment
v.commented = opts.Commented
toInsert = value toInsert = value
case []*Tree: case []*Tree:
for i := range v {
v[i].commented = opts.Commented
}
toInsert = value toInsert = value
case *tomlValue: case *tomlValue:
v.comment = opts.Comment v.comment = opts.Comment

View File

@ -5,6 +5,7 @@ import (
"fmt" "fmt"
"io" "io"
"math" "math"
"math/big"
"reflect" "reflect"
"sort" "sort"
"strconv" "strconv"
@ -27,9 +28,10 @@ type sortNode struct {
// Encodes a string to a TOML-compliant multi-line string value
// This function is a clone of the existing encodeTomlString function, except that whitespace characters
// are preserved. Quotation marks and backslashes are also not escaped.
func encodeMultilineTomlString(value string, commented string) string {
	var b bytes.Buffer
	b.WriteString(commented)
for _, rr := range value { for _, rr := range value {
switch rr { switch rr {
case '\b': case '\b':
@ -37,7 +39,7 @@ func encodeMultilineTomlString(value string) string {
case '\t': case '\t':
b.WriteString("\t") b.WriteString("\t")
case '\n': case '\n':
b.WriteString("\n") b.WriteString("\n" + commented)
case '\f': case '\f':
b.WriteString(`\f`) b.WriteString(`\f`)
case '\r': case '\r':
@ -90,7 +92,7 @@ func encodeTomlString(value string) string {
return b.String() return b.String()
} }
func tomlValueStringRepresentation(v interface{}, commented string, indent string, arraysOneElementPerLine bool) (string, error) {
// this interface check is added to dereference the change made in the writeTo function. // this interface check is added to dereference the change made in the writeTo function.
// That change was made to allow this function to see formatting options. // That change was made to allow this function to see formatting options.
tv, ok := v.(*tomlValue) tv, ok := v.(*tomlValue)
@ -106,20 +108,28 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen
	case int64:
		return strconv.FormatInt(value, 10), nil
	case float64:
		// Default bit length is full 64
		bits := 64
		// Float panics if nan is used
		if !math.IsNaN(value) {
			// if 32 bit accuracy is enough to exactly show, use 32
			_, acc := big.NewFloat(value).Float32()
			if acc == big.Exact {
				bits = 32
			}
		}
		if math.Trunc(value) == value {
			return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits)), nil
		}
		return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits)), nil
	case string:
		if tv.multiline {
			return "\"\"\"\n" + encodeMultilineTomlString(value, commented) + "\"\"\"", nil
		}
		return "\"" + encodeTomlString(value) + "\"", nil
	case []byte:
		b, _ := v.([]byte)
		return tomlValueStringRepresentation(string(b), commented, indent, arraysOneElementPerLine)
	case bool:
		if value {
			return "true", nil
@ -127,6 +137,12 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen
return "false", nil return "false", nil
case time.Time: case time.Time:
return value.Format(time.RFC3339), nil return value.Format(time.RFC3339), nil
case LocalDate:
return value.String(), nil
case LocalDateTime:
return value.String(), nil
case LocalTime:
return value.String(), nil
case nil: case nil:
return "", nil return "", nil
} }
@ -137,7 +153,7 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen
	var values []string
	for i := 0; i < rv.Len(); i++ {
		item := rv.Index(i).Interface()
		itemRepr, err := tomlValueStringRepresentation(item, commented, indent, arraysOneElementPerLine)
		if err != nil {
			return "", err
		}
@ -151,12 +167,12 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen
		for _, value := range values {
			stringBuffer.WriteString(valueIndent)
			stringBuffer.WriteString(commented + value)
			stringBuffer.WriteString(`,`)
			stringBuffer.WriteString("\n")
		}

		stringBuffer.WriteString(indent + commented + "]")

		return stringBuffer.String(), nil
	}
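The float64 branch shown earlier picks between 32-bit and 64-bit formatting by asking math/big whether the value survives a float32 round trip exactly, and it keeps a trailing ".0" on whole numbers so the output re-parses as a float rather than an integer. A self-contained sketch of that selection, using only the standard library (the function name is illustrative):

package main

import (
	"fmt"
	"math"
	"math/big"
	"strconv"
	"strings"
)

func formatTomlFloat(value float64) string {
	bits := 64
	if !math.IsNaN(value) { // big.NewFloat panics on NaN
		// if 32-bit precision reproduces the value exactly, use it
		if _, acc := big.NewFloat(value).Float32(); acc == big.Exact {
			bits = 32
		}
	}
	if math.Trunc(value) == value {
		// keep a trailing ".0" so the value re-parses as a float, not an integer
		return strings.ToLower(strconv.FormatFloat(value, 'f', 1, bits))
	}
	return strings.ToLower(strconv.FormatFloat(value, 'f', -1, bits))
}

func main() {
	for _, f := range []float64{123.4, 123.456782132399, 5000} {
		fmt.Println(formatTomlFloat(f))
	}
}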
@ -255,10 +271,10 @@ func sortAlphabetical(t *Tree) (vals []sortNode) {
} }
func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) {
return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical) return t.writeToOrdered(w, indent, keyspace, bytesCount, arraysOneElementPerLine, OrderAlphabetical, false)
} }
func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder) (int64, error) { func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool, ord marshalOrder, parentCommented bool) (int64, error) {
var orderedVals []sortNode var orderedVals []sortNode
switch ord { switch ord {
@ -278,10 +294,6 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i
if keyspace != "" { if keyspace != "" {
combinedKey = keyspace + "." + combinedKey combinedKey = keyspace + "." + combinedKey
} }
var commented string
if t.commented {
commented = "# "
}
switch node := v.(type) { switch node := v.(type) {
// node has to be of those two types given how keys are sorted above // node has to be of those two types given how keys are sorted above
@ -302,24 +314,33 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i
return bytesCount, errc return bytesCount, errc
} }
} }
var commented string
if parentCommented || t.commented || tv.commented {
commented = "# "
}
writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
bytesCount += int64(writtenBytesCount) bytesCount += int64(writtenBytesCount)
if err != nil { if err != nil {
return bytesCount, err return bytesCount, err
} }
bytesCount, err = node.writeToOrdered(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine, ord) bytesCount, err = node.writeToOrdered(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine, ord, parentCommented || t.commented || tv.commented)
if err != nil { if err != nil {
return bytesCount, err return bytesCount, err
} }
case []*Tree: case []*Tree:
for _, subTree := range node { for _, subTree := range node {
var commented string
if parentCommented || t.commented || subTree.commented {
commented = "# "
}
writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
bytesCount += int64(writtenBytesCount) bytesCount += int64(writtenBytesCount)
if err != nil { if err != nil {
return bytesCount, err return bytesCount, err
} }
bytesCount, err = subTree.writeToOrdered(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine, ord) bytesCount, err = subTree.writeToOrdered(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine, ord, parentCommented || t.commented || subTree.commented)
if err != nil { if err != nil {
return bytesCount, err return bytesCount, err
} }
@ -332,7 +353,11 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i
return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
} }
repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine) var commented string
if parentCommented || t.commented || v.commented {
commented = "# "
}
repr, err := tomlValueStringRepresentation(v, commented, indent, arraysOneElementPerLine)
if err != nil { if err != nil {
return bytesCount, err return bytesCount, err
} }
@ -350,11 +375,8 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i
} }
} }
var commented string quotedKey := quoteKeyIfNeeded(k)
if v.commented { writtenBytesCount, err := writeStrings(w, indent, commented, quotedKey, " = ", repr, "\n")
commented = "# "
}
writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n")
bytesCount += int64(writtenBytesCount) bytesCount += int64(writtenBytesCount)
if err != nil { if err != nil {
return bytesCount, err return bytesCount, err
@ -365,6 +387,32 @@ func (t *Tree) writeToOrdered(w io.Writer, indent, keyspace string, bytesCount i
return bytesCount, nil return bytesCount, nil
} }
// quote a key if it does not fit the bare key format (A-Za-z0-9_-)
// quoted keys use the same rules as strings
func quoteKeyIfNeeded(k string) string {
// when encoding a map with the 'quoteMapKeys' option enabled, the tree will contain
// keys that have already been quoted.
// not an ideal situation, but good enough of a stop gap.
if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' {
return k
}
isBare := true
for _, r := range k {
if !isValidBareChar(r) {
isBare = false
break
}
}
if isBare {
return k
}
return quoteKey(k)
}
func quoteKey(k string) string {
return "\"" + encodeTomlString(k) + "\""
}
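quoteKeyIfNeeded above leaves bare keys (A-Za-z0-9_-) untouched, passes through keys that arrive pre-quoted, and quotes everything else. A self-contained sketch of the same rule follows; the isBareChar helper stands in for the library's isValidBareChar and is an assumption here, and real output would also escape the quoted contents.

package main

import "fmt"

func isBareChar(r rune) bool {
	return r == '_' || r == '-' ||
		(r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9')
}

func quoteKeyIfNeeded(k string) string {
	// keys that are already quoted (e.g. maps marshalled with quoted keys) pass through
	if len(k) >= 2 && k[0] == '"' && k[len(k)-1] == '"' {
		return k
	}
	for _, r := range k {
		if !isBareChar(r) {
			return "\"" + k + "\"" // a full implementation would escape the contents too
		}
	}
	return k
}

func main() {
	fmt.Println(quoteKeyIfNeeded("plain_key"))  // plain_key
	fmt.Println(quoteKeyIfNeeded("dotted.key")) // "dotted.key"
}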
func writeStrings(w io.Writer, s ...string) (int, error) { func writeStrings(w io.Writer, s ...string) (int, error) {
var n int var n int
for i := range s { for i := range s {
@ -387,12 +435,11 @@ func (t *Tree) WriteTo(w io.Writer) (int64, error) {
// Output spans multiple lines, and is suitable for ingest by a TOML parser. // Output spans multiple lines, and is suitable for ingest by a TOML parser.
// If the conversion cannot be performed, ToString returns a non-nil error. // If the conversion cannot be performed, ToString returns a non-nil error.
func (t *Tree) ToTomlString() (string, error) { func (t *Tree) ToTomlString() (string, error) {
var buf bytes.Buffer b, err := t.Marshal()
_, err := t.WriteTo(&buf)
if err != nil { if err != nil {
return "", err return "", err
} }
return buf.String(), nil return string(b), nil
} }
// String generates a human-readable representation of the current tree. // String generates a human-readable representation of the current tree.

View File

@ -0,0 +1,50 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package bsoncodec
import (
"reflect"
"go.mongodb.org/mongo-driver/bson/bsonrw"
"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)
// ArrayCodec is the Codec used for bsoncore.Array values.
type ArrayCodec struct{}
var defaultArrayCodec = NewArrayCodec()
// NewArrayCodec returns an ArrayCodec.
func NewArrayCodec() *ArrayCodec {
return &ArrayCodec{}
}
// EncodeValue is the ValueEncoder for bsoncore.Array values.
func (ac *ArrayCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tCoreArray {
return ValueEncoderError{Name: "CoreArrayEncodeValue", Types: []reflect.Type{tCoreArray}, Received: val}
}
arr := val.Interface().(bsoncore.Array)
return bsonrw.Copier{}.CopyArrayFromBytes(vw, arr)
}
// DecodeValue is the ValueDecoder for bsoncore.Array values.
func (ac *ArrayCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tCoreArray {
return ValueDecoderError{Name: "CoreArrayDecodeValue", Types: []reflect.Type{tCoreArray}, Received: val}
}
if val.IsNil() {
val.Set(reflect.MakeSlice(val.Type(), 0, 0))
}
val.SetLen(0)
arr, err := bsonrw.Copier{}.AppendArrayBytes(val.Interface().(bsoncore.Array), vr)
val.Set(reflect.ValueOf(arr))
return err
}

View File

@ -15,6 +15,10 @@ import (
"go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/bson/bsontype"
) )
var (
emptyValue = reflect.Value{}
)
// Marshaler is an interface implemented by types that can marshal themselves // Marshaler is an interface implemented by types that can marshal themselves
// into a BSON document represented as bytes. The bytes returned must be a valid // into a BSON document represented as bytes. The bytes returned must be a valid
// BSON document if the error is nil. // BSON document if the error is nil.
@ -156,6 +160,55 @@ func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader,
return fn(dc, vr, val) return fn(dc, vr, val)
} }
// typeDecoder is the interface implemented by types that can handle the decoding of a value given its type.
type typeDecoder interface {
decodeType(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error)
}
// typeDecoderFunc is an adapter function that allows a function with the correct signature to be used as a typeDecoder.
type typeDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Type) (reflect.Value, error)
func (fn typeDecoderFunc) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
return fn(dc, vr, t)
}
// decodeAdapter allows two functions with the correct signatures to be used as both a ValueDecoder and typeDecoder.
type decodeAdapter struct {
ValueDecoderFunc
typeDecoderFunc
}
var _ ValueDecoder = decodeAdapter{}
var _ typeDecoder = decodeAdapter{}
// decodeTypeOrValue calls decoder.decodeType if decoder is a typeDecoder. Otherwise, it allocates a new element of type
// t and calls decoder.DecodeValue on it.
func decodeTypeOrValue(decoder ValueDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
td, _ := decoder.(typeDecoder)
return decodeTypeOrValueWithInfo(decoder, td, dc, vr, t, true)
}
func decodeTypeOrValueWithInfo(vd ValueDecoder, td typeDecoder, dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type, convert bool) (reflect.Value, error) {
if td != nil {
val, err := td.decodeType(dc, vr, t)
if err == nil && convert && val.Type() != t {
// This conversion step is necessary for slices and maps. If a user declares variables like:
//
// type myBool bool
// var m map[string]myBool
//
// and tries to decode BSON bytes into the map, the decoding will fail if this conversion is not present
// because we'll try to assign a value of type bool to one of type myBool.
val = val.Convert(t)
}
return val, err
}
val := reflect.New(t).Elem()
err := vd.DecodeValue(dc, vr, val)
return val, err
}
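The comment above sketches why the Convert call matters for named element types. A compact, driver-independent illustration of the same point: a reflect.Value holding a plain bool cannot be stored into a map whose element type is a named bool until it is converted.

package main

import (
	"fmt"
	"reflect"
)

type myBool bool

func main() {
	m := map[string]myBool{}
	mv := reflect.ValueOf(m)

	decoded := reflect.ValueOf(true) // what a decoder naturally produces: a plain bool
	elemType := mv.Type().Elem()     // myBool

	// SetMapIndex with the unconverted bool would panic with a type mismatch;
	// converting first makes the assignment legal.
	mv.SetMapIndex(reflect.ValueOf("ok"), decoded.Convert(elemType))

	fmt.Println(m) // map[ok:true]
}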
// CodecZeroer is the interface implemented by Codecs that can also determine if // CodecZeroer is the interface implemented by Codecs that can also determine if
// a value of the type that would be encoded is zero. // a value of the type that would be encoded is zero.
type CodecZeroer interface { type CodecZeroer interface {

View File

@ -15,14 +15,17 @@ import (
"go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/bson/bsontype"
) )
var defaultByteSliceCodec = NewByteSliceCodec()
// ByteSliceCodec is the Codec used for []byte values. // ByteSliceCodec is the Codec used for []byte values.
type ByteSliceCodec struct { type ByteSliceCodec struct {
EncodeNilAsEmpty bool EncodeNilAsEmpty bool
} }
var _ ValueCodec = &ByteSliceCodec{} var (
defaultByteSliceCodec = NewByteSliceCodec()
_ ValueCodec = defaultByteSliceCodec
_ typeDecoder = defaultByteSliceCodec
)
// NewByteSliceCodec returns a ByteSliceCodec with options opts.
func NewByteSliceCodec(opts ...*bsonoptions.ByteSliceCodecOptions) *ByteSliceCodec {
@ -45,10 +48,13 @@ func (bsc *ByteSliceCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter,
return vw.WriteBinary(val.Interface().([]byte)) return vw.WriteBinary(val.Interface().([]byte))
} }
// DecodeValue is the ValueDecoder for []byte. func (bsc *ByteSliceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if t != tByteSlice {
if !val.CanSet() || val.Type() != tByteSlice { return emptyValue, ValueDecoderError{
return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val} Name: "ByteSliceDecodeValue",
Types: []reflect.Type{tByteSlice},
Received: reflect.Zero(t),
}
} }
var data []byte var data []byte
@ -57,31 +63,49 @@ func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader,
case bsontype.String: case bsontype.String:
str, err := vr.ReadString() str, err := vr.ReadString()
if err != nil { if err != nil {
return err return emptyValue, err
} }
data = []byte(str) data = []byte(str)
case bsontype.Symbol: case bsontype.Symbol:
sym, err := vr.ReadSymbol() sym, err := vr.ReadSymbol()
if err != nil { if err != nil {
return err return emptyValue, err
} }
data = []byte(sym) data = []byte(sym)
case bsontype.Binary: case bsontype.Binary:
var subtype byte var subtype byte
data, subtype, err = vr.ReadBinary() data, subtype, err = vr.ReadBinary()
if err != nil { if err != nil {
return err return emptyValue, err
} }
if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) return emptyValue, decodeBinaryError{subtype: subtype, typeName: "[]byte"}
} }
case bsontype.Null: case bsontype.Null:
val.Set(reflect.Zero(val.Type())) err = vr.ReadNull()
return vr.ReadNull() case bsontype.Undefined:
err = vr.ReadUndefined()
default: default:
return fmt.Errorf("cannot decode %v into a []byte", vrType) return emptyValue, fmt.Errorf("cannot decode %v into a []byte", vrType)
}
if err != nil {
return emptyValue, err
} }
val.Set(reflect.ValueOf(data)) return reflect.ValueOf(data), nil
}
// DecodeValue is the ValueDecoder for []byte.
func (bsc *ByteSliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tByteSlice {
return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
}
elem, err := bsc.decodeType(dc, vr, tByteSlice)
if err != nil {
return err
}
val.Set(elem)
return nil return nil
} }

File diff suppressed because it is too large

View File

@ -70,6 +70,7 @@ func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) {
RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec). RegisterTypeEncoder(tByteSlice, defaultByteSliceCodec).
RegisterTypeEncoder(tTime, defaultTimeCodec). RegisterTypeEncoder(tTime, defaultTimeCodec).
RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec). RegisterTypeEncoder(tEmpty, defaultEmptyInterfaceCodec).
RegisterTypeEncoder(tCoreArray, defaultArrayCodec).
RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)). RegisterTypeEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)).
RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)). RegisterTypeEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)).
RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)). RegisterTypeEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)).
@ -104,7 +105,7 @@ func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) {
RegisterDefaultEncoder(reflect.Map, defaultMapCodec). RegisterDefaultEncoder(reflect.Map, defaultMapCodec).
RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec). RegisterDefaultEncoder(reflect.Slice, defaultSliceCodec).
RegisterDefaultEncoder(reflect.String, defaultStringCodec). RegisterDefaultEncoder(reflect.String, defaultStringCodec).
RegisterDefaultEncoder(reflect.Struct, defaultStructCodec). RegisterDefaultEncoder(reflect.Struct, newDefaultStructCodec()).
RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()). RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec()).
RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)). RegisterHookEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)).
RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)). RegisterHookEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)).
@ -150,8 +151,8 @@ func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.Value
} }
// UintEncodeValue is the ValueEncoderFunc for uint types.
//
// Deprecated: UintEncodeValue is not registered by default. Use UintCodec.EncodeValue instead.
func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
switch val.Kind() { switch val.Kind() {
case reflect.Uint8, reflect.Uint16: case reflect.Uint8, reflect.Uint16:
@ -185,8 +186,8 @@ func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.Val
} }
// StringEncodeValue is the ValueEncoderFunc for string types. // StringEncodeValue is the ValueEncoderFunc for string types.
// This method is deprecated and does not have any stability guarantees. It may be removed in the //
// future. Use StringCodec.EncodeValue instead. // Deprecated: StringEncodeValue is not registered by default. Use StringCodec.EncodeValue instead.
func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if val.Kind() != reflect.String { if val.Kind() != reflect.String {
return ValueEncoderError{ return ValueEncoderError{
@ -245,19 +246,20 @@ func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.Value
} }
// TimeEncodeValue is the ValueEncoderFunc for time.Time.
//
// Deprecated: TimeEncodeValue is not registered by default. Use TimeCodec.EncodeValue instead.
func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tTime { if !val.IsValid() || val.Type() != tTime {
return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
} }
tt := val.Interface().(time.Time) tt := val.Interface().(time.Time)
return vw.WriteDateTime(tt.Unix()*1000 + int64(tt.Nanosecond()/1e6)) dt := primitive.NewDateTimeFromTime(tt)
return vw.WriteDateTime(int64(dt))
} }
// ByteSliceEncodeValue is the ValueEncoderFunc for []byte. // ByteSliceEncodeValue is the ValueEncoderFunc for []byte.
// This method is deprecated and does not have any stability guarantees. It may be removed in the //
// future. Use ByteSliceCodec.EncodeValue instead. // Deprecated: ByteSliceEncodeValue is not registered by default. Use ByteSliceCodec.EncodeValue instead.
func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tByteSlice { if !val.IsValid() || val.Type() != tByteSlice {
return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val} return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
@ -269,8 +271,8 @@ func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw
} }
// MapEncodeValue is the ValueEncoderFunc for map[string]* types. // MapEncodeValue is the ValueEncoderFunc for map[string]* types.
// This method is deprecated and does not have any stability guarantees. It may be removed in the //
// future. Use MapCodec.EncodeValue instead. // Deprecated: MapEncodeValue is not registered by default. Use MapCodec.EncodeValue instead.
func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String { if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val} return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
@ -419,8 +421,8 @@ func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.Val
} }
// SliceEncodeValue is the ValueEncoderFunc for slice types. // SliceEncodeValue is the ValueEncoderFunc for slice types.
// This method is deprecated and does not have any stability guarantees. It may be removed in the //
// future. Use SliceCodec.EncodeValue instead. // Deprecated: SliceEncodeValue is not registered by default. Use SliceCodec.EncodeValue instead.
func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Kind() != reflect.Slice { if !val.IsValid() || val.Kind() != reflect.Slice {
return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val} return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
@ -501,8 +503,8 @@ func (dve DefaultValueEncoders) lookupElementEncoder(ec EncodeContext, origEncod
} }
// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. // EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}.
// This method is deprecated and does not have any stability guarantees. It may be removed in the //
// future. Use EmptyInterfaceCodec.EncodeValue instead. // Deprecated: EmptyInterfaceEncodeValue is not registered by default. Use EmptyInterfaceCodec.EncodeValue instead.
func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error { func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
if !val.IsValid() || val.Type() != tEmpty { if !val.IsValid() || val.Type() != tEmpty {
return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val} return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val}

View File

@ -15,14 +15,17 @@ import (
"go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/bson/primitive"
) )
var defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec()
// EmptyInterfaceCodec is the Codec used for interface{} values. // EmptyInterfaceCodec is the Codec used for interface{} values.
type EmptyInterfaceCodec struct { type EmptyInterfaceCodec struct {
DecodeBinaryAsSlice bool DecodeBinaryAsSlice bool
} }
var _ ValueCodec = &EmptyInterfaceCodec{} var (
defaultEmptyInterfaceCodec = NewEmptyInterfaceCodec()
_ ValueCodec = defaultEmptyInterfaceCodec
_ typeDecoder = defaultEmptyInterfaceCodec
)
// NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts. // NewEmptyInterfaceCodec returns a EmptyInterfaceCodec with options opts.
func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec { func NewEmptyInterfaceCodec(opts ...*bsonoptions.EmptyInterfaceCodecOptions) *EmptyInterfaceCodec {
@ -86,33 +89,31 @@ func (eic EmptyInterfaceCodec) getEmptyInterfaceDecodeType(dc DecodeContext, val
return nil, err return nil, err
} }
// DecodeValue is the ValueDecoderFunc for interface{}. func (eic EmptyInterfaceCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if t != tEmpty {
if !val.CanSet() || val.Type() != tEmpty { return emptyValue, ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: reflect.Zero(t)}
return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val}
} }
rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type()) rtype, err := eic.getEmptyInterfaceDecodeType(dc, vr.Type())
if err != nil { if err != nil {
switch vr.Type() { switch vr.Type() {
case bsontype.Null: case bsontype.Null:
val.Set(reflect.Zero(val.Type())) return reflect.Zero(t), vr.ReadNull()
return vr.ReadNull()
default: default:
return err return emptyValue, err
} }
} }
decoder, err := dc.LookupDecoder(rtype) decoder, err := dc.LookupDecoder(rtype)
if err != nil { if err != nil {
return err return emptyValue, err
} }
elem := reflect.New(rtype).Elem() elem, err := decodeTypeOrValue(decoder, dc, vr, rtype)
err = decoder.DecodeValue(dc, vr, elem)
if err != nil { if err != nil {
return err return emptyValue, err
} }
if eic.DecodeBinaryAsSlice && rtype == tBinary { if eic.DecodeBinaryAsSlice && rtype == tBinary {
binElem := elem.Interface().(primitive.Binary) binElem := elem.Interface().(primitive.Binary)
if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld { if binElem.Subtype == bsontype.BinaryGeneric || binElem.Subtype == bsontype.BinaryBinaryOld {
@ -120,6 +121,20 @@ func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueRead
} }
} }
return elem, nil
}
// DecodeValue is the ValueDecoderFunc for interface{}.
func (eic EmptyInterfaceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tEmpty {
return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val}
}
elem, err := eic.decodeType(dc, vr, val.Type())
if err != nil {
return err
}
val.Set(elem) val.Set(elem)
return nil return nil
} }

View File

@ -20,12 +20,29 @@ var defaultMapCodec = NewMapCodec()
// MapCodec is the Codec used for map values. // MapCodec is the Codec used for map values.
type MapCodec struct { type MapCodec struct {
DecodeZerosMap bool DecodeZerosMap bool
EncodeNilAsEmpty bool EncodeNilAsEmpty bool
EncodeKeysWithStringer bool
} }
var _ ValueCodec = &MapCodec{} var _ ValueCodec = &MapCodec{}
// KeyMarshaler is the interface implemented by an object that can marshal itself into a string key.
// This applies to types used as map keys and is similar to encoding.TextMarshaler.
type KeyMarshaler interface {
MarshalKey() (key string, err error)
}
// KeyUnmarshaler is the interface implemented by an object that can unmarshal a string representation
// of itself. This applies to types used as map keys and is similar to encoding.TextUnmarshaler.
//
// UnmarshalKey must be able to decode the form generated by MarshalKey.
// UnmarshalKey must copy the text if it wishes to retain the text
// after returning.
type KeyUnmarshaler interface {
UnmarshalKey(key string) error
}
// NewMapCodec returns a MapCodec with options opts. // NewMapCodec returns a MapCodec with options opts.
func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec { func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec {
mapOpt := bsonoptions.MergeMapCodecOptions(opts...) mapOpt := bsonoptions.MergeMapCodecOptions(opts...)
@ -37,6 +54,9 @@ func NewMapCodec(opts ...*bsonoptions.MapCodecOptions) *MapCodec {
if mapOpt.EncodeNilAsEmpty != nil { if mapOpt.EncodeNilAsEmpty != nil {
codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty codec.EncodeNilAsEmpty = *mapOpt.EncodeNilAsEmpty
} }
if mapOpt.EncodeKeysWithStringer != nil {
codec.EncodeKeysWithStringer = *mapOpt.EncodeKeysWithStringer
}
return &codec return &codec
} }
@ -79,7 +99,11 @@ func (mc *MapCodec) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, v
keys := val.MapKeys() keys := val.MapKeys()
for _, key := range keys { for _, key := range keys {
keyStr := fmt.Sprint(key) keyStr, err := mc.encodeKey(key)
if err != nil {
return err
}
if collisionFn != nil && collisionFn(keyStr) { if collisionFn != nil && collisionFn(keyStr) {
return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key) return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key)
} }
@ -129,6 +153,9 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref
case bsontype.Null: case bsontype.Null:
val.Set(reflect.Zero(val.Type())) val.Set(reflect.Zero(val.Type()))
return vr.ReadNull() return vr.ReadNull()
case bsontype.Undefined:
val.Set(reflect.Zero(val.Type()))
return vr.ReadUndefined()
default: default:
return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type()) return fmt.Errorf("cannot decode %v into a %s", vrType, val.Type())
} }
@ -151,13 +178,13 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref
if err != nil { if err != nil {
return err return err
} }
eTypeDecoder, _ := decoder.(typeDecoder)
if eType == tEmpty { if eType == tEmpty {
dc.Ancestor = val.Type() dc.Ancestor = val.Type()
} }
keyType := val.Type().Key() keyType := val.Type().Key()
keyKind := keyType.Kind()
for { for {
key, vr, err := dr.ReadElement() key, vr, err := dr.ReadElement()
@ -168,31 +195,16 @@ func (mc *MapCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val ref
return err return err
} }
k := reflect.ValueOf(key) k, err := mc.decodeKey(key, keyType)
if keyType != tString {
switch keyKind {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Float32, reflect.Float64:
parsed, err := strconv.ParseFloat(k.String(), 64)
if err != nil {
return fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyKind, err)
}
k = reflect.ValueOf(parsed)
case reflect.String: // if keyType wraps string
default:
return fmt.Errorf("BSON map must have string or decimal keys. Got:%v", val.Type())
}
k = k.Convert(keyType)
}
elem := reflect.New(eType).Elem()
err = decoder.DecodeValue(dc, vr, elem)
if err != nil { if err != nil {
return err return err
} }
elem, err := decodeTypeOrValueWithInfo(decoder, eTypeDecoder, dc, vr, eType, true)
if err != nil {
return newDecodeError(key, err)
}
val.SetMapIndex(k, elem) val.SetMapIndex(k, elem)
} }
return nil return nil
@ -204,3 +216,82 @@ func clearMap(m reflect.Value) {
m.SetMapIndex(k, none) m.SetMapIndex(k, none)
} }
} }
func (mc *MapCodec) encodeKey(val reflect.Value) (string, error) {
if mc.EncodeKeysWithStringer {
return fmt.Sprint(val), nil
}
// keys of any string type are used directly
if val.Kind() == reflect.String {
return val.String(), nil
}
// KeyMarshalers are marshaled
if km, ok := val.Interface().(KeyMarshaler); ok {
if val.Kind() == reflect.Ptr && val.IsNil() {
return "", nil
}
buf, err := km.MarshalKey()
if err == nil {
return buf, nil
}
return "", err
}
switch val.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(val.Int(), 10), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return strconv.FormatUint(val.Uint(), 10), nil
}
return "", fmt.Errorf("unsupported key type: %v", val.Type())
}
var keyUnmarshalerType = reflect.TypeOf((*KeyUnmarshaler)(nil)).Elem()
func (mc *MapCodec) decodeKey(key string, keyType reflect.Type) (reflect.Value, error) {
keyVal := reflect.ValueOf(key)
var err error
switch {
// First, if EncodeKeysWithStringer is not enabled, try to decode with KeyUnmarshaler
case !mc.EncodeKeysWithStringer && reflect.PtrTo(keyType).Implements(keyUnmarshalerType):
keyVal = reflect.New(keyType)
v := keyVal.Interface().(KeyUnmarshaler)
err = v.UnmarshalKey(key)
keyVal = keyVal.Elem()
// Otherwise, go to type specific behavior
default:
switch keyType.Kind() {
case reflect.String:
keyVal = reflect.ValueOf(key).Convert(keyType)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
s := string(key)
n, parseErr := strconv.ParseInt(s, 10, 64)
if parseErr != nil || reflect.Zero(keyType).OverflowInt(n) {
err = fmt.Errorf("failed to unmarshal number key %v", s)
}
keyVal = reflect.ValueOf(n).Convert(keyType)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
s := string(key)
n, parseErr := strconv.ParseUint(s, 10, 64)
if parseErr != nil || reflect.Zero(keyType).OverflowUint(n) {
err = fmt.Errorf("failed to unmarshal number key %v", s)
break
}
keyVal = reflect.ValueOf(n).Convert(keyType)
case reflect.Float32, reflect.Float64:
if mc.EncodeKeysWithStringer {
parsed, err := strconv.ParseFloat(key, 64)
if err != nil {
return keyVal, fmt.Errorf("Map key is defined to be a decimal type (%v) but got error %v", keyType.Kind(), err)
}
keyVal = reflect.ValueOf(parsed)
break
}
fallthrough
default:
return keyVal, fmt.Errorf("unsupported key type: %v", keyType)
}
}
return keyVal, err
}
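The KeyMarshaler and KeyUnmarshaler interfaces above let a custom type control how it is rendered as a BSON map key and how it is read back. A hedged sketch of a type wired into those two methods; whether a given registry actually routes map keys through these hooks depends on the codec options in effect, and the type and field names here are invented for illustration.

package main

import (
	"fmt"
	"strings"
)

type RegionKey struct {
	Country string
	Zone    string
}

// MarshalKey renders the key as "country/zone" for use as a document key.
func (k RegionKey) MarshalKey() (string, error) {
	return k.Country + "/" + k.Zone, nil
}

// UnmarshalKey parses the "country/zone" form produced by MarshalKey.
func (k *RegionKey) UnmarshalKey(key string) error {
	parts := strings.SplitN(key, "/", 2)
	if len(parts) != 2 {
		return fmt.Errorf("malformed region key %q", key)
	}
	k.Country, k.Zone = parts[0], parts[1]
	return nil
}

func main() {
	k := RegionKey{Country: "de", Zone: "fra1"}
	s, _ := k.MarshalKey()

	var back RegionKey
	_ = back.UnmarshalKey(s)
	fmt.Println(s, back) // de/fra1 {de fra1}
}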

View File

@ -14,11 +14,6 @@ import (
"go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/bson/bsontype"
) )
var defaultPointerCodec = &PointerCodec{
ecache: make(map[reflect.Type]ValueEncoder),
dcache: make(map[reflect.Type]ValueDecoder),
}
var _ ValueEncoder = &PointerCodec{} var _ ValueEncoder = &PointerCodec{}
var _ ValueDecoder = &PointerCodec{} var _ ValueDecoder = &PointerCodec{}
@ -83,6 +78,10 @@ func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val
val.Set(reflect.Zero(val.Type())) val.Set(reflect.Zero(val.Type()))
return vr.ReadNull() return vr.ReadNull()
} }
if vr.Type() == bsontype.Undefined {
val.Set(reflect.Zero(val.Type()))
return vr.ReadUndefined()
}
if val.IsNil() { if val.IsNil() {
val.Set(reflect.New(val.Type().Elem())) val.Set(reflect.New(val.Type().Elem()))

View File

@ -187,8 +187,9 @@ func (rb *RegistryBuilder) RegisterHookDecoder(t reflect.Type, dec ValueDecoder)
return rb return rb
} }
// RegisterEncoder has been deprecated and will be removed in a future major version release. Use RegisterTypeEncoder // RegisterEncoder registers the provided type and encoder pair.
// or RegisterHookEncoder instead. //
// Deprecated: Use RegisterTypeEncoder or RegisterHookEncoder instead.
func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder { func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
if t == tEmpty { if t == tEmpty {
rb.typeEncoders[t] = enc rb.typeEncoders[t] = enc
@ -210,8 +211,9 @@ func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *Re
return rb return rb
} }
// RegisterDecoder has been deprecated and will be removed in a future major version release. Use RegisterTypeDecoder // RegisterDecoder registers the provided type and decoder pair.
// or RegisterHookDecoder instead. //
// Deprecated: Use RegisterTypeDecoder or RegisterHookDecoder instead.
func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder { func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
if t == nil { if t == nil {
rb.typeDecoders[nil] = dec rb.typeDecoders[nil] = dec

View File

@ -123,6 +123,9 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r
case bsontype.Null: case bsontype.Null:
val.Set(reflect.Zero(val.Type())) val.Set(reflect.Zero(val.Type()))
return vr.ReadNull() return vr.ReadNull()
case bsontype.Undefined:
val.Set(reflect.Zero(val.Type()))
return vr.ReadUndefined()
case bsontype.Type(0), bsontype.EmbeddedDocument: case bsontype.Type(0), bsontype.EmbeddedDocument:
if val.Type().Elem() != tE { if val.Type().Elem() != tE {
return fmt.Errorf("cannot decode document into %s", val.Type()) return fmt.Errorf("cannot decode document into %s", val.Type())
@ -149,8 +152,8 @@ func (sc *SliceCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r
} }
return nil return nil
case bsontype.String: case bsontype.String:
if val.Type().Elem() != tByte { if sliceType := val.Type().Elem(); sliceType != tByte {
return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", vrType) return fmt.Errorf("SliceDecodeValue can only decode a string into a byte array, got %v", sliceType)
} }
str, err := vr.ReadString() str, err := vr.ReadString()
if err != nil { if err != nil {

View File

@ -15,14 +15,17 @@ import (
"go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/bson/bsontype"
) )
var defaultStringCodec = NewStringCodec()
// StringCodec is the Codec used for struct values. // StringCodec is the Codec used for struct values.
type StringCodec struct { type StringCodec struct {
DecodeObjectIDAsHex bool DecodeObjectIDAsHex bool
} }
var _ ValueCodec = &StringCodec{} var (
defaultStringCodec = NewStringCodec()
_ ValueCodec = defaultStringCodec
_ typeDecoder = defaultStringCodec
)
// NewStringCodec returns a StringCodec with options opts. // NewStringCodec returns a StringCodec with options opts.
func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec { func NewStringCodec(opts ...*bsonoptions.StringCodecOptions) *StringCodec {
@ -43,23 +46,27 @@ func (sc *StringCodec) EncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, va
return vw.WriteString(val.String()) return vw.WriteString(val.String())
} }
// DecodeValue is the ValueDecoder for string types. func (sc *StringCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if t.Kind() != reflect.String {
if !val.CanSet() || val.Kind() != reflect.String { return emptyValue, ValueDecoderError{
return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val} Name: "StringDecodeValue",
Kinds: []reflect.Kind{reflect.String},
Received: reflect.Zero(t),
}
} }
var str string var str string
var err error var err error
switch vr.Type() { switch vr.Type() {
case bsontype.String: case bsontype.String:
str, err = vr.ReadString() str, err = vr.ReadString()
if err != nil { if err != nil {
return err return emptyValue, err
} }
case bsontype.ObjectID: case bsontype.ObjectID:
oid, err := vr.ReadObjectID() oid, err := vr.ReadObjectID()
if err != nil { if err != nil {
return err return emptyValue, err
} }
if sc.DecodeObjectIDAsHex { if sc.DecodeObjectIDAsHex {
str = oid.Hex() str = oid.Hex()
@ -70,25 +77,43 @@ func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, va
case bsontype.Symbol: case bsontype.Symbol:
str, err = vr.ReadSymbol() str, err = vr.ReadSymbol()
if err != nil { if err != nil {
return err return emptyValue, err
} }
case bsontype.Binary: case bsontype.Binary:
data, subtype, err := vr.ReadBinary() data, subtype, err := vr.ReadBinary()
if err != nil { if err != nil {
return err return emptyValue, err
} }
if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld { if subtype != bsontype.BinaryGeneric && subtype != bsontype.BinaryBinaryOld {
return fmt.Errorf("SliceDecodeValue can only be used to decode subtype 0x00 or 0x02 for %s, got %v", bsontype.Binary, subtype) return emptyValue, decodeBinaryError{subtype: subtype, typeName: "string"}
} }
str = string(data) str = string(data)
case bsontype.Null: case bsontype.Null:
if err = vr.ReadNull(); err != nil { if err = vr.ReadNull(); err != nil {
return err return emptyValue, err
}
case bsontype.Undefined:
if err = vr.ReadUndefined(); err != nil {
return emptyValue, err
} }
default: default:
return fmt.Errorf("cannot decode %v into a string type", vr.Type()) return emptyValue, fmt.Errorf("cannot decode %v into a string type", vr.Type())
} }
val.SetString(str) return reflect.ValueOf(str), nil
}
// DecodeValue is the ValueDecoder for string types.
func (sc *StringCodec) DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Kind() != reflect.String {
return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val}
}
elem, err := sc.decodeType(dctx, vr, val.Type())
if err != nil {
return err
}
val.SetString(elem.String())
return nil return nil
} }
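The refactor above splits decoding into a type-based helper (decodeType builds a value for a requested reflect.Type) and a thin DecodeValue wrapper that only checks settability and assigns the result. A minimal sketch of that split with illustrative names, not the driver's API:

package main

import (
	"fmt"
	"reflect"
)

// decodeStringType builds a value of the requested type from raw input.
func decodeStringType(raw string, t reflect.Type) (reflect.Value, error) {
	if t.Kind() != reflect.String {
		return reflect.Value{}, fmt.Errorf("cannot decode into %v", t)
	}
	return reflect.ValueOf(raw).Convert(t), nil
}

// decodeStringValue only validates the destination, then delegates and assigns.
func decodeStringValue(raw string, val reflect.Value) error {
	if !val.CanSet() || val.Kind() != reflect.String {
		return fmt.Errorf("value of type %v is not a settable string", val.Type())
	}
	elem, err := decodeStringType(raw, val.Type())
	if err != nil {
		return err
	}
	val.Set(elem)
	return nil
}

type name string

func main() {
	var n name
	_ = decodeStringValue("harbor", reflect.ValueOf(&n).Elem())
	fmt.Println(n) // harbor
}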

View File

@ -10,6 +10,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"reflect" "reflect"
"sort"
"strings" "strings"
"sync" "sync"
"time" "time"
@ -19,9 +20,35 @@ import (
"go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/bson/bsontype"
) )
// DecodeError represents an error that occurs when unmarshalling BSON bytes into a native Go type.
type DecodeError struct {
	keys    []string
	wrapped error
}
// Unwrap returns the underlying error
func (de *DecodeError) Unwrap() error {
return de.wrapped
}
// Error implements the error interface.
func (de *DecodeError) Error() string {
// The keys are stored in reverse order because the de.keys slice is built up while propagating the error up the
// stack of BSON keys, so we call de.Keys(), which reverses them.
keyPath := strings.Join(de.Keys(), ".")
return fmt.Sprintf("error decoding key %s: %v", keyPath, de.wrapped)
}
// Keys returns the BSON key path that caused an error as a slice of strings. The keys in the slice are in top-down
// order. For example, if the document being unmarshalled was {a: {b: {c: 1}}} and the value for c was supposed to be
// a string, the keys slice will be ["a", "b", "c"].
func (de *DecodeError) Keys() []string {
reversedKeys := make([]string, 0, len(de.keys))
for idx := len(de.keys) - 1; idx >= 0; idx-- {
reversedKeys = append(reversedKeys, de.keys[idx])
}
return reversedKeys
} }
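Each level that fails appends its own key while the error bubbles up, so the stored slice is innermost-first and Keys()/Error() reverse it for display. A small standalone sketch of that flow (plain types, independent of the driver):

package main

import (
	"errors"
	"fmt"
	"strings"
)

type decodeError struct {
	keys    []string // innermost key first
	wrapped error
}

func (de *decodeError) Error() string {
	// reverse for display so the path reads outermost-first, e.g. "a.b.c"
	path := make([]string, 0, len(de.keys))
	for i := len(de.keys) - 1; i >= 0; i-- {
		path = append(path, de.keys[i])
	}
	return fmt.Sprintf("error decoding key %s: %v", strings.Join(path, "."), de.wrapped)
}

func wrapKey(key string, err error) error {
	var de *decodeError
	if errors.As(err, &de) {
		de.keys = append(de.keys, key)
		return de
	}
	return &decodeError{keys: []string{key}, wrapped: err}
}

func main() {
	err := errors.New("expected string, found int32")
	err = wrapKey("c", err) // innermost field fails first
	err = wrapKey("b", err)
	err = wrapKey("a", err)
	fmt.Println(err) // error decoding key a.b.c: expected string, found int32
}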
// Zeroer allows custom struct types to implement a report of zero // Zeroer allows custom struct types to implement a report of zero
@ -33,13 +60,14 @@ type Zeroer interface {
// StructCodec is the Codec used for struct values. // StructCodec is the Codec used for struct values.
type StructCodec struct { type StructCodec struct {
cache map[reflect.Type]*structDescription cache map[reflect.Type]*structDescription
l sync.RWMutex l sync.RWMutex
parser StructTagParser parser StructTagParser
DecodeZeroStruct bool DecodeZeroStruct bool
DecodeDeepZeroInline bool DecodeDeepZeroInline bool
EncodeOmitDefaultStruct bool EncodeOmitDefaultStruct bool
AllowUnexportedFields bool AllowUnexportedFields bool
OverwriteDuplicatedInlinedFields bool
} }
var _ ValueEncoder = &StructCodec{} var _ ValueEncoder = &StructCodec{}
@ -67,6 +95,9 @@ func NewStructCodec(p StructTagParser, opts ...*bsonoptions.StructCodecOptions)
if structOpt.EncodeOmitDefaultStruct != nil { if structOpt.EncodeOmitDefaultStruct != nil {
codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct codec.EncodeOmitDefaultStruct = *structOpt.EncodeOmitDefaultStruct
} }
if structOpt.OverwriteDuplicatedInlinedFields != nil {
codec.OverwriteDuplicatedInlinedFields = *structOpt.OverwriteDuplicatedInlinedFields
}
if structOpt.AllowUnexportedFields != nil { if structOpt.AllowUnexportedFields != nil {
codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields codec.AllowUnexportedFields = *structOpt.AllowUnexportedFields
} }
@ -166,6 +197,19 @@ func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val r
return dw.WriteDocumentEnd() return dw.WriteDocumentEnd()
} }
func newDecodeError(key string, original error) error {
de, ok := original.(*DecodeError)
if !ok {
return &DecodeError{
keys: []string{key},
wrapped: original,
}
}
de.keys = append(de.keys, key)
return de
}
// DecodeValue implements the Codec interface. // DecodeValue implements the Codec interface.
// By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr. // By default, map types in val will not be cleared. If a map has existing key/value pairs, it will be extended with the new ones from vr.
// For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared. // For slices, the decoder will set the length of the slice to zero and append all elements. The underlying array will not be cleared.
@ -181,6 +225,13 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
return err return err
} }
val.Set(reflect.Zero(val.Type()))
return nil
case bsontype.Undefined:
if err := vr.ReadUndefined(); err != nil {
return err
}
val.Set(reflect.Zero(val.Type())) val.Set(reflect.Zero(val.Type()))
return nil return nil
default: default:
@ -267,7 +318,8 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
} }
if !field.CanSet() { // Being settable is a super set of being addressable. if !field.CanSet() { // Being settable is a super set of being addressable.
return fmt.Errorf("cannot decode element '%s' into field %v; it is not settable", name, field) innerErr := fmt.Errorf("field %v is not settable", field)
return newDecodeError(fd.name, innerErr)
} }
if field.Kind() == reflect.Ptr && field.IsNil() { if field.Kind() == reflect.Ptr && field.IsNil() {
field.Set(reflect.New(field.Type().Elem())) field.Set(reflect.New(field.Type().Elem()))
@ -276,19 +328,19 @@ func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val r
dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate || r.Truncate} dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate || r.Truncate}
if fd.decoder == nil { if fd.decoder == nil {
return ErrNoDecoder{Type: field.Elem().Type()} return newDecodeError(fd.name, ErrNoDecoder{Type: field.Elem().Type()})
} }
if decoder, ok := fd.decoder.(ValueDecoder); ok { if decoder, ok := fd.decoder.(ValueDecoder); ok {
err = decoder.DecodeValue(dctx, vr, field.Elem()) err = decoder.DecodeValue(dctx, vr, field.Elem())
if err != nil { if err != nil {
return err return newDecodeError(fd.name, err)
} }
continue continue
} }
err = fd.decoder.DecodeValue(dctx, vr, field) err = fd.decoder.DecodeValue(dctx, vr, field)
if err != nil { if err != nil {
return err return newDecodeError(fd.name, err)
} }
} }
@ -350,7 +402,8 @@ type structDescription struct {
} }
type fieldDescription struct { type fieldDescription struct {
name string name string // BSON key name
fieldName string // struct field name
idx int idx int
omitEmpty bool omitEmpty bool
minSize bool minSize bool
@ -360,6 +413,35 @@ type fieldDescription struct {
decoder ValueDecoder decoder ValueDecoder
} }
type byIndex []fieldDescription
func (bi byIndex) Len() int { return len(bi) }
func (bi byIndex) Swap(i, j int) { bi[i], bi[j] = bi[j], bi[i] }
func (bi byIndex) Less(i, j int) bool {
// If a field is inlined, its index in the top level struct is stored at inline[0]
iIdx, jIdx := bi[i].idx, bi[j].idx
if len(bi[i].inline) > 0 {
iIdx = bi[i].inline[0]
}
if len(bi[j].inline) > 0 {
jIdx = bi[j].inline[0]
}
if iIdx != jIdx {
return iIdx < jIdx
}
for k, biik := range bi[i].inline {
if k >= len(bi[j].inline) {
return false
}
if biik != bi[j].inline[k] {
return biik < bi[j].inline[k]
}
}
return len(bi[i].inline) < len(bi[j].inline)
}
func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) { func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) {
// We need to analyze the struct, including getting the tags, collecting // We need to analyze the struct, including getting the tags, collecting
// information about inlining, and create a map of the field name to the field. // information about inlining, and create a map of the field name to the field.
@ -377,6 +459,7 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
inlineMap: -1, inlineMap: -1,
} }
var fields []fieldDescription
for i := 0; i < numFields; i++ { for i := 0; i < numFields; i++ {
sf := t.Field(i) sf := t.Field(i)
if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) { if sf.PkgPath != "" && (!sc.AllowUnexportedFields || !sf.Anonymous) {
@ -394,7 +477,12 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
decoder = nil decoder = nil
} }
description := fieldDescription{idx: i, encoder: encoder, decoder: decoder} description := fieldDescription{
fieldName: sf.Name,
idx: i,
encoder: encoder,
decoder: decoder,
}
stags, err := sc.parser.ParseStructTags(sf) stags, err := sc.parser.ParseStructTags(sf)
if err != nil { if err != nil {
@ -431,31 +519,62 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
return nil, err return nil, err
} }
for _, fd := range inlinesf.fl { for _, fd := range inlinesf.fl {
if _, exists := sd.fm[fd.name]; exists {
return nil, fmt.Errorf("(struct %s) duplicated key %s", t.String(), fd.name)
}
if fd.inline == nil { if fd.inline == nil {
fd.inline = []int{i, fd.idx} fd.inline = []int{i, fd.idx}
} else { } else {
fd.inline = append([]int{i}, fd.inline...) fd.inline = append([]int{i}, fd.inline...)
} }
sd.fm[fd.name] = fd fields = append(fields, fd)
sd.fl = append(sd.fl, fd)
} }
default: default:
return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String()) return nil, fmt.Errorf("(struct %s) inline fields must be a struct, a struct pointer, or a map", t.String())
} }
continue continue
} }
fields = append(fields, description)
if _, exists := sd.fm[description.name]; exists {
return nil, fmt.Errorf("struct %s) duplicated key %s", t.String(), description.name)
}
sd.fm[description.name] = description
sd.fl = append(sd.fl, description)
} }
// Sort fieldDescriptions by name and use dominance rules to determine which should be added for each name
sort.Slice(fields, func(i, j int) bool {
x := fields
// sort fields by name, breaking ties with depth, then
// breaking ties with index sequence.
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].inline) != len(x[j].inline) {
return len(x[i].inline) < len(x[j].inline)
}
return byIndex(x).Less(i, j)
})
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
sd.fl = append(sd.fl, fi)
sd.fm[name] = fi
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if !ok || !sc.OverwriteDuplicatedInlinedFields {
return nil, fmt.Errorf("struct %s) duplicated key %s", t.String(), name)
}
sd.fl = append(sd.fl, dominant)
sd.fm[name] = dominant
}
sort.Sort(byIndex(sd.fl))
sc.l.Lock() sc.l.Lock()
sc.cache[t] = sd sc.cache[t] = sd
sc.l.Unlock() sc.l.Unlock()
@ -463,6 +582,22 @@ func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescr
return sd, nil return sd, nil
} }
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's inlining rules. If there are multiple top-level
// fields, the boolean will be false: This condition is an error in Go
// and we skip all the fields.
func dominantField(fields []fieldDescription) (fieldDescription, bool) {
// The fields are sorted in increasing index-length order, then by presence of tag.
// That means that the first field is the dominant one. We need only check
// for error cases: two fields at top level.
if len(fields) > 1 &&
len(fields[0].inline) == len(fields[1].inline) {
return fieldDescription{}, false
}
return fields[0], true
}
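A short sketch of the dominance rule described above, assuming the top-level bson.Marshal helper and the default overwrite behavior for duplicated inlined keys; the type names are illustrative:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

type Base struct {
	Name string `bson:"name"`
}

type Doc struct {
	Base `bson:",inline"`
	Name string `bson:"name"` // shallower than Base.Name, so it dominates
}

func main() {
	raw, err := bson.Marshal(Doc{Base: Base{Name: "inlined"}, Name: "top-level"})
	if err != nil {
		panic(err)
	}
	fmt.Println(bson.Raw(raw).String()) // a single "name" key holding "top-level"
}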
func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) { func fieldByIndexErr(v reflect.Value, index []int) (result reflect.Value, err error) {
defer func() { defer func() {
if recovered := recover(); recovered != nil { if recovered := recover(); recovered != nil {

View File

@ -91,6 +91,10 @@ var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (S
if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 { if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 {
tag = string(sf.Tag) tag = string(sf.Tag)
} }
return parseTags(key, tag)
}
func parseTags(key string, tag string) (StructTags, error) {
var st StructTags var st StructTags
if tag == "-" { if tag == "-" {
st.Skip = true st.Skip = true
@ -117,3 +121,19 @@ var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (S
return st, nil return st, nil
} }
// JSONFallbackStructTagParser has the same behavior as DefaultStructTagParser
// but will also fallback to parsing the json tag instead on a field where the
// bson tag isn't available.
var JSONFallbackStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
key := strings.ToLower(sf.Name)
tag, ok := sf.Tag.Lookup("bson")
if !ok {
tag, ok = sf.Tag.Lookup("json")
}
if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 {
tag = string(sf.Tag)
}
return parseTags(key, tag)
}
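A hedged sketch of wiring the fallback parser into a registry, assuming the RegistryBuilder's RegisterDefaultEncoder/RegisterDefaultDecoder methods and bson.MarshalWithRegistry:

package main

import (
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
)

func main() {
	structCodec, err := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser)
	if err != nil {
		panic(err)
	}
	reg := bson.NewRegistryBuilder().
		RegisterDefaultEncoder(reflect.Struct, structCodec).
		RegisterDefaultDecoder(reflect.Struct, structCodec).
		Build()

	type person struct {
		Name string `json:"full_name"` // honored because no bson tag is present
	}
	raw, err := bson.MarshalWithRegistry(reg, person{Name: "Ada"})
	if err != nil {
		panic(err)
	}
	_ = raw // expected to encode as {"full_name": "Ada"}
}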

View File

@ -14,20 +14,24 @@ import (
"go.mongodb.org/mongo-driver/bson/bsonoptions" "go.mongodb.org/mongo-driver/bson/bsonoptions"
"go.mongodb.org/mongo-driver/bson/bsonrw" "go.mongodb.org/mongo-driver/bson/bsonrw"
"go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/bson/bsontype"
"go.mongodb.org/mongo-driver/bson/primitive"
) )
const ( const (
timeFormatString = "2006-01-02T15:04:05.999Z07:00" timeFormatString = "2006-01-02T15:04:05.999Z07:00"
) )
var defaultTimeCodec = NewTimeCodec()
// TimeCodec is the Codec used for time.Time values. // TimeCodec is the Codec used for time.Time values.
type TimeCodec struct { type TimeCodec struct {
UseLocalTimeZone bool UseLocalTimeZone bool
} }
var _ ValueCodec = &TimeCodec{} var (
defaultTimeCodec = NewTimeCodec()
_ ValueCodec = defaultTimeCodec
_ typeDecoder = defaultTimeCodec
)
// NewTimeCodec returns a TimeCodec with options opts. // NewTimeCodec returns a TimeCodec with options opts.
func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec { func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec {
@ -40,10 +44,13 @@ func NewTimeCodec(opts ...*bsonoptions.TimeCodecOptions) *TimeCodec {
return &codec return &codec
} }
// DecodeValue is the ValueDecoderFunc for time.Time. func (tc *TimeCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { if t != tTime {
if !val.CanSet() || val.Type() != tTime { return emptyValue, ValueDecoderError{
return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val} Name: "TimeDecodeValue",
Types: []reflect.Type{tTime},
Received: reflect.Zero(t),
}
} }
var timeVal time.Time var timeVal time.Time
@ -51,43 +58,61 @@ func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val re
case bsontype.DateTime: case bsontype.DateTime:
dt, err := vr.ReadDateTime() dt, err := vr.ReadDateTime()
if err != nil { if err != nil {
return err return emptyValue, err
} }
timeVal = time.Unix(dt/1000, dt%1000*1000000) timeVal = time.Unix(dt/1000, dt%1000*1000000)
case bsontype.String: case bsontype.String:
// assume strings are in the isoTimeFormat // assume strings are in the isoTimeFormat
timeStr, err := vr.ReadString() timeStr, err := vr.ReadString()
if err != nil { if err != nil {
return err return emptyValue, err
} }
timeVal, err = time.Parse(timeFormatString, timeStr) timeVal, err = time.Parse(timeFormatString, timeStr)
if err != nil { if err != nil {
return err return emptyValue, err
} }
case bsontype.Int64: case bsontype.Int64:
i64, err := vr.ReadInt64() i64, err := vr.ReadInt64()
if err != nil { if err != nil {
return err return emptyValue, err
} }
timeVal = time.Unix(i64/1000, i64%1000*1000000) timeVal = time.Unix(i64/1000, i64%1000*1000000)
case bsontype.Timestamp: case bsontype.Timestamp:
t, _, err := vr.ReadTimestamp() t, _, err := vr.ReadTimestamp()
if err != nil { if err != nil {
return err return emptyValue, err
} }
timeVal = time.Unix(int64(t), 0) timeVal = time.Unix(int64(t), 0)
case bsontype.Null: case bsontype.Null:
if err := vr.ReadNull(); err != nil { if err := vr.ReadNull(); err != nil {
return err return emptyValue, err
}
case bsontype.Undefined:
if err := vr.ReadUndefined(); err != nil {
return emptyValue, err
} }
default: default:
return fmt.Errorf("cannot decode %v into a time.Time", vrType) return emptyValue, fmt.Errorf("cannot decode %v into a time.Time", vrType)
} }
if !tc.UseLocalTimeZone { if !tc.UseLocalTimeZone {
timeVal = timeVal.UTC() timeVal = timeVal.UTC()
} }
val.Set(reflect.ValueOf(timeVal)) return reflect.ValueOf(timeVal), nil
}
// DecodeValue is the ValueDecoderFunc for time.Time.
func (tc *TimeCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() || val.Type() != tTime {
return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val}
}
elem, err := tc.decodeType(dc, vr, tTime)
if err != nil {
return err
}
val.Set(elem)
return nil return nil
} }
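A minimal construction sketch, assuming the bsonoptions.TimeCodec().SetUseLocalTimeZone helper:

package main

import (
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonoptions"
)

func main() {
	// Keep decoded times in the local zone instead of converting to UTC (the default above).
	tc := bsoncodec.NewTimeCodec(bsonoptions.TimeCodec().SetUseLocalTimeZone(true))
	_ = tc
}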
@ -97,5 +122,6 @@ func (tc *TimeCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val re
return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val} return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
} }
tt := val.Interface().(time.Time) tt := val.Interface().(time.Time)
return vw.WriteDateTime(tt.Unix()*1000 + int64(tt.Nanosecond()/1e6)) dt := primitive.NewDateTimeFromTime(tt)
return vw.WriteDateTime(int64(dt))
} }

View File

@ -79,3 +79,4 @@ var tA = reflect.TypeOf(primitive.A{})
var tE = reflect.TypeOf(primitive.E{}) var tE = reflect.TypeOf(primitive.E{})
var tCoreDocument = reflect.TypeOf(bsoncore.Document{}) var tCoreDocument = reflect.TypeOf(bsoncore.Document{})
var tCoreArray = reflect.TypeOf(bsoncore.Array{})

View File

@ -7,7 +7,6 @@
package bsoncodec package bsoncodec
import ( import (
"errors"
"fmt" "fmt"
"math" "math"
"reflect" "reflect"
@ -17,14 +16,17 @@ import (
"go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/bson/bsontype"
) )
var defaultUIntCodec = NewUIntCodec()
// UIntCodec is the Codec used for uint values. // UIntCodec is the Codec used for uint values.
type UIntCodec struct { type UIntCodec struct {
EncodeToMinSize bool EncodeToMinSize bool
} }
var _ ValueCodec = &UIntCodec{} var (
defaultUIntCodec = NewUIntCodec()
_ ValueCodec = defaultUIntCodec
_ typeDecoder = defaultUIntCodec
)
// NewUIntCodec returns a UIntCodec with options opts. // NewUIntCodec returns a UIntCodec with options opts.
func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec { func NewUIntCodec(opts ...*bsonoptions.UIntCodecOptions) *UIntCodec {
@ -64,6 +66,93 @@ func (uic *UIntCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val r
} }
} }
func (uic *UIntCodec) decodeType(dc DecodeContext, vr bsonrw.ValueReader, t reflect.Type) (reflect.Value, error) {
var i64 int64
var err error
switch vrType := vr.Type(); vrType {
case bsontype.Int32:
i32, err := vr.ReadInt32()
if err != nil {
return emptyValue, err
}
i64 = int64(i32)
case bsontype.Int64:
i64, err = vr.ReadInt64()
if err != nil {
return emptyValue, err
}
case bsontype.Double:
f64, err := vr.ReadDouble()
if err != nil {
return emptyValue, err
}
if !dc.Truncate && math.Floor(f64) != f64 {
return emptyValue, errCannotTruncate
}
if f64 > float64(math.MaxInt64) {
return emptyValue, fmt.Errorf("%g overflows int64", f64)
}
i64 = int64(f64)
case bsontype.Boolean:
b, err := vr.ReadBoolean()
if err != nil {
return emptyValue, err
}
if b {
i64 = 1
}
case bsontype.Null:
if err = vr.ReadNull(); err != nil {
return emptyValue, err
}
case bsontype.Undefined:
if err = vr.ReadUndefined(); err != nil {
return emptyValue, err
}
default:
return emptyValue, fmt.Errorf("cannot decode %v into an integer type", vrType)
}
switch t.Kind() {
case reflect.Uint8:
if i64 < 0 || i64 > math.MaxUint8 {
return emptyValue, fmt.Errorf("%d overflows uint8", i64)
}
return reflect.ValueOf(uint8(i64)), nil
case reflect.Uint16:
if i64 < 0 || i64 > math.MaxUint16 {
return emptyValue, fmt.Errorf("%d overflows uint16", i64)
}
return reflect.ValueOf(uint16(i64)), nil
case reflect.Uint32:
if i64 < 0 || i64 > math.MaxUint32 {
return emptyValue, fmt.Errorf("%d overflows uint32", i64)
}
return reflect.ValueOf(uint32(i64)), nil
case reflect.Uint64:
if i64 < 0 {
return emptyValue, fmt.Errorf("%d overflows uint64", i64)
}
return reflect.ValueOf(uint64(i64)), nil
case reflect.Uint:
if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint
return emptyValue, fmt.Errorf("%d overflows uint", i64)
}
return reflect.ValueOf(uint(i64)), nil
default:
return emptyValue, ValueDecoderError{
Name: "UintDecodeValue",
Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
Received: reflect.Zero(t),
}
}
}
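A small illustration of the bounds checks above, assuming the top-level bson helpers: a numeric value that does not fit the destination uint type fails instead of silently wrapping.

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	doc, _ := bson.Marshal(bson.D{{Key: "n", Value: int32(300)}})
	var out struct {
		N uint8 `bson:"n"`
	}
	err := bson.Unmarshal(doc, &out)
	fmt.Println(err) // expected to report that 300 overflows uint8
}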
// DecodeValue is the ValueDecoder for uint types. // DecodeValue is the ValueDecoder for uint types.
func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error { func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
if !val.CanSet() { if !val.CanSet() {
@ -74,77 +163,11 @@ func (uic *UIntCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val r
} }
} }
var i64 int64 elem, err := uic.decodeType(dc, vr, val.Type())
var err error if err != nil {
switch vrType := vr.Type(); vrType { return err
case bsontype.Int32:
i32, err := vr.ReadInt32()
if err != nil {
return err
}
i64 = int64(i32)
case bsontype.Int64:
i64, err = vr.ReadInt64()
if err != nil {
return err
}
case bsontype.Double:
f64, err := vr.ReadDouble()
if err != nil {
return err
}
if !dc.Truncate && math.Floor(f64) != f64 {
return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled")
}
if f64 > float64(math.MaxInt64) {
return fmt.Errorf("%g overflows int64", f64)
}
i64 = int64(f64)
case bsontype.Boolean:
b, err := vr.ReadBoolean()
if err != nil {
return err
}
if b {
i64 = 1
}
case bsontype.Null:
if err = vr.ReadNull(); err != nil {
return err
}
default:
return fmt.Errorf("cannot decode %v into an integer type", vrType)
} }
switch val.Kind() { val.SetUint(elem.Uint())
case reflect.Uint8:
if i64 < 0 || i64 > math.MaxUint8 {
return fmt.Errorf("%d overflows uint8", i64)
}
case reflect.Uint16:
if i64 < 0 || i64 > math.MaxUint16 {
return fmt.Errorf("%d overflows uint16", i64)
}
case reflect.Uint32:
if i64 < 0 || i64 > math.MaxUint32 {
return fmt.Errorf("%d overflows uint32", i64)
}
case reflect.Uint64:
if i64 < 0 {
return fmt.Errorf("%d overflows uint64", i64)
}
case reflect.Uint:
if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint
return fmt.Errorf("%d overflows uint", i64)
}
default:
return ValueDecoderError{
Name: "UintDecodeValue",
Kinds: []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
Received: val,
}
}
val.SetUint(uint64(i64))
return nil return nil
} }

View File

@ -10,6 +10,12 @@ package bsonoptions
type MapCodecOptions struct { type MapCodecOptions struct {
DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false. DecodeZerosMap *bool // Specifies if the map should be zeroed before decoding into it. Defaults to false.
EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false. EncodeNilAsEmpty *bool // Specifies if a nil map should encode as an empty document instead of null. Defaults to false.
// Specifies how keys should be handled. If false, the behavior matches encoding/json, where the encoding key type must
// either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key type must either be a
// string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with fmt.Sprint() and the
// encoding key type must be a string, an integer type, or a float. If true, the use of Stringer will override
// TextMarshaler/TextUnmarshaler. Defaults to false.
EncodeKeysWithStringer *bool
} }
// MapCodec creates a new *MapCodecOptions // MapCodec creates a new *MapCodecOptions
@ -23,12 +29,22 @@ func (t *MapCodecOptions) SetDecodeZerosMap(b bool) *MapCodecOptions {
return t return t
} }
// SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false. // SetEncodeNilAsEmpty specifies if a nil map should encode as an empty document instead of null. Defaults to false.
func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions { func (t *MapCodecOptions) SetEncodeNilAsEmpty(b bool) *MapCodecOptions {
t.EncodeNilAsEmpty = &b t.EncodeNilAsEmpty = &b
return t return t
} }
// SetEncodeKeysWithStringer specifies how keys should be handled. If false, the behavior matches encoding/json, where the
// encoding key type must either be a string, an integer type, or implement bsoncodec.KeyMarshaler and the decoding key
// type must either be a string, an integer type, or implement bsoncodec.KeyUnmarshaler. If true, keys are encoded with
// fmt.Sprint() and the encoding key type must be a string, an integer type, or a float. If true, the use of Stringer
// will override TextMarshaler/TextUnmarshaler. Defaults to false.
func (t *MapCodecOptions) SetEncodeKeysWithStringer(b bool) *MapCodecOptions {
t.EncodeKeysWithStringer = &b
return t
}
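A hedged usage sketch, assuming NewMapCodec accepts these options and that the codec is registered for the map kind on a RegistryBuilder:

package main

import (
	"reflect"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonoptions"
)

func main() {
	// Encode map keys with fmt.Sprint, so non-string keys such as floats become document keys.
	mapCodec := bsoncodec.NewMapCodec(bsonoptions.MapCodec().SetEncodeKeysWithStringer(true))
	reg := bson.NewRegistryBuilder().
		RegisterDefaultEncoder(reflect.Map, mapCodec).
		RegisterDefaultDecoder(reflect.Map, mapCodec).
		Build()
	raw, _ := bson.MarshalWithRegistry(reg, map[float64]string{1.5: "a"})
	_ = raw // expected to encode as {"1.5": "a"}
}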
// MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion. // MergeMapCodecOptions combines the given *MapCodecOptions into a single *MapCodecOptions in a last one wins fashion.
func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions { func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions {
s := MapCodec() s := MapCodec()
@ -42,6 +58,9 @@ func MergeMapCodecOptions(opts ...*MapCodecOptions) *MapCodecOptions {
if opt.EncodeNilAsEmpty != nil { if opt.EncodeNilAsEmpty != nil {
s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty s.EncodeNilAsEmpty = opt.EncodeNilAsEmpty
} }
if opt.EncodeKeysWithStringer != nil {
s.EncodeKeysWithStringer = opt.EncodeKeysWithStringer
}
} }
return s return s

View File

@ -6,12 +6,15 @@
package bsonoptions package bsonoptions
var defaultOverwriteDuplicatedInlinedFields = true
// StructCodecOptions represents all possible options for struct encoding and decoding. // StructCodecOptions represents all possible options for struct encoding and decoding.
type StructCodecOptions struct { type StructCodecOptions struct {
DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false. DecodeZeroStruct *bool // Specifies if structs should be zeroed before decoding into them. Defaults to false.
DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false. DecodeDeepZeroInline *bool // Specifies if structs should be recursively zeroed when an inline value is decoded. Defaults to false.
EncodeOmitDefaultStruct *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false. EncodeOmitDefaultStruct *bool // Specifies if default structs should be considered empty by omitempty. Defaults to false.
AllowUnexportedFields *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. AllowUnexportedFields *bool // Specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
OverwriteDuplicatedInlinedFields *bool // Specifies if fields in inlined structs can be overwritten by higher level struct fields with the same key. Defaults to true.
} }
// StructCodec creates a new *StructCodecOptions // StructCodec creates a new *StructCodecOptions
@ -38,6 +41,15 @@ func (t *StructCodecOptions) SetEncodeOmitDefaultStruct(b bool) *StructCodecOpti
return t return t
} }
// SetOverwriteDuplicatedInlinedFields specifies if inlined struct fields can be overwritten by higher level struct fields with the
// same bson key. When true and decoding, values will be written to the outermost struct with a matching key, and when
// encoding, keys will have the value of the top-most matching field. When false, decoding and encoding will error if
// there are duplicate keys after the struct is inlined. Defaults to true.
func (t *StructCodecOptions) SetOverwriteDuplicatedInlinedFields(b bool) *StructCodecOptions {
t.OverwriteDuplicatedInlinedFields = &b
return t
}
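A brief sketch of opting back into the strict behavior, assuming NewStructCodec and the DefaultStructTagParser shown earlier:

package main

import (
	"go.mongodb.org/mongo-driver/bson/bsoncodec"
	"go.mongodb.org/mongo-driver/bson/bsonoptions"
)

func main() {
	// Error on duplicated keys from inlined structs instead of overwriting them.
	opts := bsonoptions.StructCodec().SetOverwriteDuplicatedInlinedFields(false)
	sc, err := bsoncodec.NewStructCodec(bsoncodec.DefaultStructTagParser, opts)
	if err != nil {
		panic(err)
	}
	_ = sc // register for reflect.Struct on a registry, as in the earlier sketch
}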
// SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false. // SetAllowUnexportedFields specifies if unexported fields should be marshaled/unmarshaled. Defaults to false.
func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions { func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOptions {
t.AllowUnexportedFields = &b t.AllowUnexportedFields = &b
@ -46,7 +58,9 @@ func (t *StructCodecOptions) SetAllowUnexportedFields(b bool) *StructCodecOption
// MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion. // MergeStructCodecOptions combines the given *StructCodecOptions into a single *StructCodecOptions in a last one wins fashion.
func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions { func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions {
s := StructCodec() s := &StructCodecOptions{
OverwriteDuplicatedInlinedFields: &defaultOverwriteDuplicatedInlinedFields,
}
for _, opt := range opts { for _, opt := range opts {
if opt == nil { if opt == nil {
continue continue
@ -61,6 +75,9 @@ func MergeStructCodecOptions(opts ...*StructCodecOptions) *StructCodecOptions {
if opt.EncodeOmitDefaultStruct != nil { if opt.EncodeOmitDefaultStruct != nil {
s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct s.EncodeOmitDefaultStruct = opt.EncodeOmitDefaultStruct
} }
if opt.OverwriteDuplicatedInlinedFields != nil {
s.OverwriteDuplicatedInlinedFields = opt.OverwriteDuplicatedInlinedFields
}
if opt.AllowUnexportedFields != nil { if opt.AllowUnexportedFields != nil {
s.AllowUnexportedFields = opt.AllowUnexportedFields s.AllowUnexportedFields = opt.AllowUnexportedFields
} }

View File

@ -45,6 +45,22 @@ func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error {
return c.copyDocumentCore(dw, dr) return c.copyDocumentCore(dw, dr)
} }
// CopyArrayFromBytes copies the values from a BSON array represented as a
// []byte to a ValueWriter.
func (c Copier) CopyArrayFromBytes(dst ValueWriter, src []byte) error {
aw, err := dst.WriteArray()
if err != nil {
return err
}
err = c.CopyBytesToArrayWriter(aw, src)
if err != nil {
return err
}
return aw.WriteArrayEnd()
}
// CopyDocumentFromBytes copies the values from a BSON document represented as a // CopyDocumentFromBytes copies the values from a BSON document represented as a
// []byte to a ValueWriter. // []byte to a ValueWriter.
func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error { func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error {
@ -61,9 +77,29 @@ func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error {
return dw.WriteDocumentEnd() return dw.WriteDocumentEnd()
} }
type writeElementFn func(key string) (ValueWriter, error)
// CopyBytesToArrayWriter copies the values from a BSON Array represented as a []byte to an
// ArrayWriter.
func (c Copier) CopyBytesToArrayWriter(dst ArrayWriter, src []byte) error {
wef := func(_ string) (ValueWriter, error) {
return dst.WriteArrayElement()
}
return c.copyBytesToValueWriter(src, wef)
}
// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a // CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a
// DocumentWriter. // DocumentWriter.
func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error { func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error {
wef := func(key string) (ValueWriter, error) {
return dst.WriteDocumentElement(key)
}
return c.copyBytesToValueWriter(src, wef)
}
func (c Copier) copyBytesToValueWriter(src []byte, wef writeElementFn) error {
// TODO(skriptble): Create error types here. Anything that's a tag should be a property. // TODO(skriptble): Create error types here. Anything that's a tag should be a property.
length, rem, ok := bsoncore.ReadLength(src) length, rem, ok := bsoncore.ReadLength(src)
if !ok { if !ok {
@ -93,15 +129,18 @@ func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error
if !ok { if !ok {
return fmt.Errorf("invalid key found. remaining bytes=%v", rem) return fmt.Errorf("invalid key found. remaining bytes=%v", rem)
} }
dvw, err := dst.WriteDocumentElement(key)
// write as either array element or document element using writeElementFn
vw, err := wef(key)
if err != nil { if err != nil {
return err return err
} }
val, rem, ok = bsoncore.ReadValue(rem, t) val, rem, ok = bsoncore.ReadValue(rem, t)
if !ok { if !ok {
return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t) return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t)
} }
err = c.CopyValueFromBytes(dvw, t, val.Data) err = c.CopyValueFromBytes(vw, t, val.Data)
if err != nil { if err != nil {
return err return err
} }
@ -133,6 +172,23 @@ func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error)
return dst, err return dst, err
} }
// AppendArrayBytes copies an array from the ValueReader to dst.
func (c Copier) AppendArrayBytes(dst []byte, src ValueReader) ([]byte, error) {
if br, ok := src.(BytesReader); ok {
_, dst, err := br.ReadValueBytes(dst)
return dst, err
}
vw := vwPool.Get().(*valueWriter)
defer vwPool.Put(vw)
vw.reset(dst)
err := c.copyArray(vw, src)
dst = vw.buf
return dst, err
}
// CopyValueFromBytes will write the value represented by t and src to dst. // CopyValueFromBytes will write the value represented by t and src to dst.
func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error { func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error {
if wvb, ok := dst.(BytesWriter); ok { if wvb, ok := dst.(BytesWriter); ok {

View File

@ -7,9 +7,12 @@
package bsonrw package bsonrw
import ( import (
"encoding/base64"
"encoding/hex"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"strings"
"go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/bson/bsontype"
) )
@ -66,6 +69,7 @@ type extJSONParser struct {
maxDepth int maxDepth int
emptyObject bool emptyObject bool
relaxedUUID bool
} }
// newExtJSONParser returns a new extended JSON parser, ready to begin // newExtJSONParser returns a new extended JSON parser, ready to begin
@ -119,6 +123,12 @@ func (ejp *extJSONParser) peekType() (bsontype.Type, error) {
} }
t = wrapperKeyBSONType(ejp.k) t = wrapperKeyBSONType(ejp.k)
// if $uuid is encountered, parse as binary subtype 4
if ejp.k == "$uuid" {
ejp.relaxedUUID = true
t = bsontype.Binary
}
switch t { switch t {
case bsontype.JavaScript: case bsontype.JavaScript:
// just saw $code, need to check for $scope at same level // just saw $code, need to check for $scope at same level
@ -273,6 +283,64 @@ func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) {
ejp.advanceState() ejp.advanceState()
if t == bsontype.Binary && ejp.s == jpsSawValue { if t == bsontype.Binary && ejp.s == jpsSawValue {
// convert relaxed $uuid format
if ejp.relaxedUUID {
defer func() { ejp.relaxedUUID = false }()
uuid, err := ejp.v.parseSymbol()
if err != nil {
return nil, err
}
// RFC 4122 defines the length of a UUID as 36 and the hyphens in a UUID as appearing
// in the 8th, 13th, 18th, and 23rd characters.
//
// See https://tools.ietf.org/html/rfc4122#section-3
valid := len(uuid) == 36 &&
string(uuid[8]) == "-" &&
string(uuid[13]) == "-" &&
string(uuid[18]) == "-" &&
string(uuid[23]) == "-"
if !valid {
return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens")
}
// remove hyphens
uuidNoHyphens := strings.Replace(uuid, "-", "", -1)
if len(uuidNoHyphens) != 32 {
return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding length and hyphens")
}
// convert hex to bytes
bytes, err := hex.DecodeString(uuidNoHyphens)
if err != nil {
return nil, fmt.Errorf("$uuid value does not follow RFC 4122 format regarding hex bytes: %v", err)
}
ejp.advanceState()
if ejp.s != jpsSawEndObject {
return nil, invalidJSONErrorForType("$uuid and value and then }", bsontype.Binary)
}
base64 := &extJSONValue{
t: bsontype.String,
v: base64.StdEncoding.EncodeToString(bytes),
}
subType := &extJSONValue{
t: bsontype.String,
v: "04",
}
v = &extJSONValue{
t: bsontype.EmbeddedDocument,
v: &extJSONObject{
keys: []string{"base64", "subType"},
values: []*extJSONValue{base64, subType},
},
}
break
}
// convert legacy $binary format // convert legacy $binary format
base64 := ejp.v base64 := ejp.v
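A small sketch of the relaxed $uuid form handled above, assuming the top-level bson.UnmarshalExtJSON helper; the UUID value itself is illustrative:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	ext := []byte(`{"id": {"$uuid": "c8edabc3-f738-4ca3-b68d-ab92a91478a3"}}`)
	var out struct {
		ID primitive.Binary `bson:"id"`
	}
	if err := bson.UnmarshalExtJSON(ext, false, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.ID.Subtype) // 4, the same as the equivalent $binary representation
}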

View File

@ -159,29 +159,18 @@ func (ejvr *extJSONValueReader) pop() {
} }
} }
func (ejvr *extJSONValueReader) skipDocument() error { func (ejvr *extJSONValueReader) skipObject() {
// read entire document until ErrEOD (using readKey and readValue) // read entire object until depth returns to 0 (last ending } or ] seen)
_, typ, err := ejvr.p.readKey() depth := 1
for err == nil { for depth > 0 {
_, err = ejvr.p.readValue(typ) ejvr.p.advanceState()
if err != nil { switch ejvr.p.s {
break case jpsSawBeginObject, jpsSawBeginArray:
depth++
case jpsSawEndObject, jpsSawEndArray:
depth--
} }
_, typ, err = ejvr.p.readKey()
} }
return err
}
func (ejvr *extJSONValueReader) skipArray() error {
// read entire array until ErrEOA (using peekType)
_, err := ejvr.p.peekType()
for err == nil {
_, err = ejvr.p.peekType()
}
return err
} }
func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error { func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error {
@ -234,30 +223,9 @@ func (ejvr *extJSONValueReader) Skip() error {
t := ejvr.stack[ejvr.frame].vType t := ejvr.stack[ejvr.frame].vType
switch t { switch t {
case bsontype.Array: case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope:
// read entire array until ErrEOA // read entire array, doc or CodeWithScope
err := ejvr.skipArray() ejvr.skipObject()
if err != ErrEOA {
return err
}
case bsontype.EmbeddedDocument:
// read entire doc until ErrEOD
err := ejvr.skipDocument()
if err != ErrEOD {
return err
}
case bsontype.CodeWithScope:
// read the code portion and set up parser in document mode
_, err := ejvr.p.readValue(t)
if err != nil {
return err
}
// read until ErrEOD
err = ejvr.skipDocument()
if err != ErrEOD {
return err
}
default: default:
_, err := ejvr.p.readValue(t) _, err := ejvr.p.readValue(t)
if err != nil { if err != nil {

View File

@ -217,7 +217,7 @@ func parseDatetimeString(data string) (int64, error) {
return 0, fmt.Errorf("invalid $date value string: %s", data) return 0, fmt.Errorf("invalid $date value string: %s", data)
} }
return t.Unix()*1e3 + int64(t.Nanosecond())/1e6, nil return int64(primitive.NewDateTimeFromTime(t)), nil
} }
func parseDatetimeObject(data *extJSONObject) (d int64, err error) { func parseDatetimeObject(data *extJSONObject) (d int64, err error) {

View File

@ -12,6 +12,7 @@ import (
"io" "io"
"math" "math"
"strconv" "strconv"
"strings"
"sync" "sync"
"go.mongodb.org/mongo-driver/bson/bsontype" "go.mongodb.org/mongo-driver/bson/bsontype"
@ -247,7 +248,12 @@ func (vw *valueWriter) invalidTransitionError(destination mode, name string, mod
func (vw *valueWriter) writeElementHeader(t bsontype.Type, destination mode, callerName string, addmodes ...mode) error { func (vw *valueWriter) writeElementHeader(t bsontype.Type, destination mode, callerName string, addmodes ...mode) error {
switch vw.stack[vw.frame].mode { switch vw.stack[vw.frame].mode {
case mElement: case mElement:
vw.buf = bsoncore.AppendHeader(vw.buf, t, vw.stack[vw.frame].key) key := vw.stack[vw.frame].key
if !isValidCString(key) {
return errors.New("BSON element key cannot contain null bytes")
}
vw.buf = bsoncore.AppendHeader(vw.buf, t, key)
case mValue: case mValue:
// TODO: Do this with a cache of the first 1000 or so array keys. // TODO: Do this with a cache of the first 1000 or so array keys.
vw.buf = bsoncore.AppendHeader(vw.buf, t, strconv.Itoa(vw.stack[vw.frame].arrkey)) vw.buf = bsoncore.AppendHeader(vw.buf, t, strconv.Itoa(vw.stack[vw.frame].arrkey))
@ -430,6 +436,9 @@ func (vw *valueWriter) WriteObjectID(oid primitive.ObjectID) error {
} }
func (vw *valueWriter) WriteRegex(pattern string, options string) error { func (vw *valueWriter) WriteRegex(pattern string, options string) error {
if !isValidCString(pattern) || !isValidCString(options) {
return errors.New("BSON regex values cannot contain null bytes")
}
if err := vw.writeElementHeader(bsontype.Regex, mode(0), "WriteRegex"); err != nil { if err := vw.writeElementHeader(bsontype.Regex, mode(0), "WriteRegex"); err != nil {
return err return err
} }
@ -602,3 +611,7 @@ func (vw *valueWriter) writeLength() error {
vw.buf[start+3] = byte(length >> 24) vw.buf[start+3] = byte(length >> 24)
return nil return nil
} }
func isValidCString(cs string) bool {
return !strings.ContainsRune(cs, '\x00')
}
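A tiny illustration of the new key validation, assuming the top-level bson.Marshal helper:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
)

func main() {
	_, err := bson.Marshal(bson.D{{Key: "bad\x00key", Value: 1}})
	fmt.Println(err) // expected to report that element keys cannot contain null bytes
}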

View File

@ -43,7 +43,7 @@
// 6. BSON embedded document unmarshals to the parent type (i.e. D for a D, M for an M). // 6. BSON embedded document unmarshals to the parent type (i.e. D for a D, M for an M).
// 7. BSON array unmarshals to a bson.A. // 7. BSON array unmarshals to a bson.A.
// 8. BSON ObjectId unmarshals to a primitive.ObjectID. // 8. BSON ObjectId unmarshals to a primitive.ObjectID.
// 9. BSON datetime unmarshals to a primitive.Datetime. // 9. BSON datetime unmarshals to a primitive.DateTime.
// 10. BSON binary unmarshals to a primitive.Binary. // 10. BSON binary unmarshals to a primitive.Binary.
// 11. BSON regular expression unmarshals to a primitive.Regex. // 11. BSON regular expression unmarshals to a primitive.Regex.
// 12. BSON JavaScript unmarshals to a primitive.JavaScript. // 12. BSON JavaScript unmarshals to a primitive.JavaScript.
@ -53,7 +53,7 @@
// 16. BSON min key unmarshals to an primitive.MinKey. // 16. BSON min key unmarshals to an primitive.MinKey.
// 17. BSON max key unmarshals to an primitive.MaxKey. // 17. BSON max key unmarshals to an primitive.MaxKey.
// 18. BSON undefined unmarshals to a primitive.Undefined. // 18. BSON undefined unmarshals to a primitive.Undefined.
// 19. BSON null unmarshals to a primitive.Null. // 19. BSON null unmarshals to nil.
// 20. BSON DBPointer unmarshals to a primitive.DBPointer. // 20. BSON DBPointer unmarshals to a primitive.DBPointer.
// 21. BSON symbol unmarshals to a primitive.Symbol. // 21. BSON symbol unmarshals to a primitive.Symbol.
// //
@ -67,13 +67,13 @@
// 5. uint8 and uint16 marshal to a BSON int32. // 5. uint8 and uint16 marshal to a BSON int32.
// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, // 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32,
// inclusive, and BSON int64 otherwise. // inclusive, and BSON int64 otherwise.
// 7. BSON null values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null value into a string // 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or
// will yield the empty string.). // undefined value into a string will yield the empty string.).
// //
// Structs // Structs
// //
// Structs can be marshalled/unmarshalled to/from BSON. When transforming structs to/from BSON, the following rules // Structs can be marshalled/unmarshalled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended
// apply: // JSON, the following rules apply:
// //
// 1. Only exported fields in structs will be marshalled or unmarshalled. // 1. Only exported fields in structs will be marshalled or unmarshalled.
// //
@ -89,12 +89,27 @@
// 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents // 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents
// unmarshalled into an interface{} field will be unmarshalled as a D. // unmarshalled into an interface{} field will be unmarshalled as a D.
// //
// The following struct tags can be used to configure behavior: // The encoding of each struct field can be customized by the "bson" struct tag.
//
// This tag behavior is configurable, and different struct tag behavior can be configured by initializing a new
// bsoncodec.StructCodec with the desired tag parser and registering that StructCodec onto the Registry. By default, JSON tags
// are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below:
//
// Example:
// structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser)
//
// The bson tag gives the name of the field, possibly followed by a comma-separated list of options.
// The name may be empty in order to specify options without overriding the default field name. The following options can be used
// to configure behavior:
// //
// 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to // 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to
// the zero value. By default, a struct field is only considered empty if the field's type implements the Zeroer // the zero value. Fields with language primitive types such as integers, booleans, and strings are considered empty if
// interface and the IsZero method returns true. Struct fields of types that do not implement Zeroer are always // their value is equal to the zero value for the type (i.e. 0 for integers, false for booleans, and "" for strings).
// marshalled as embedded documents. This tag should be used for all slice and map values. // Slices, maps, and arrays are considered empty if they are of length zero. Interfaces and pointers are considered
// empty if their value is nil. By default, structs are only considered empty if the struct type implements the
// bsoncodec.Zeroer interface and the IsZero method returns true. Struct fields whose types do not implement Zeroer are
// never considered empty and will be marshalled as embedded documents.
// NOTE: It is recommended that this tag be used for all slice and map fields.
// //
// 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of // 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of
// the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For other // the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For other
@ -110,9 +125,10 @@
// pulled up one level and will become top-level fields rather than being fields in a nested document. For example, if a // pulled up one level and will become top-level fields rather than being fields in a nested document. For example, if a
// map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will be // map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will be
// {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If there are // {"foo": "bar"} instead of {"map": {"foo": "bar"}}. There can only be one inlined map field in a struct. If there are
// duplicated fields in the resulting document when an inlined field is marshalled, an error will be returned. This tag // duplicated fields in the resulting document when an inlined struct is marshalled, the inlined field will be overwritten.
// can be used with fields that are pointers to structs. If an inlined pointer field is nil, it will not be marshalled. // If there are duplicated fields in the resulting document when an inlined map is marshalled, an error will be returned.
// For fields that are not maps or structs, this tag is ignored. // This tag can be used with fields that are pointers to structs. If an inlined pointer field is nil, it will not be
// marshalled. For fields that are not maps or structs, this tag is ignored.
// //
// Marshalling and Unmarshalling // Marshalling and Unmarshalling
// //
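A compact sketch of the tag options documented above; the field and type names are illustrative:

package main

import "go.mongodb.org/mongo-driver/bson"

type Address struct {
	City string `bson:"city,omitempty"`
}

type Person struct {
	Name    string   `bson:"name"`
	Count   int64    `bson:"count,minsize"`  // stored as int32 when the value fits
	Address `bson:",inline"`                 // "city" becomes a top-level key
	Tags    []string `bson:"tags,omitempty"` // omitted when the slice is empty
}

func main() {
	raw, _ := bson.Marshal(Person{Name: "Ada", Count: 7, Address: Address{City: "London"}})
	_ = raw // expected: {"name": "Ada", "count": 7, "city": "London"}
}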

View File

@ -10,6 +10,7 @@
package primitive package primitive
import ( import (
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"math/big" "math/big"
@ -206,6 +207,54 @@ func (d Decimal128) IsInf() int {
return -1 return -1
} }
// IsZero returns true if d is the empty Decimal128.
func (d Decimal128) IsZero() bool {
return d.h == 0 && d.l == 0
}
// MarshalJSON returns Decimal128 as a string.
func (d Decimal128) MarshalJSON() ([]byte, error) {
return json.Marshal(d.String())
}
// UnmarshalJSON creates a primitive.Decimal128 from a JSON string, an extended JSON $numberDecimal value, or the string
// "null". If b is a JSON string or extended JSON value, d will have the value of that string, and if b is "null", d will
// be unchanged.
func (d *Decimal128) UnmarshalJSON(b []byte) error {
// Ignore "null" to keep parity with the standard library. Decoding a JSON null into a non-pointer Decimal128 field
// will leave the field unchanged. For pointer values, encoding/json will set the pointer to nil and will not
// enter the UnmarshalJSON hook.
if string(b) == "null" {
return nil
}
var res interface{}
err := json.Unmarshal(b, &res)
if err != nil {
return err
}
str, ok := res.(string)
// Extended JSON
if !ok {
m, ok := res.(map[string]interface{})
if !ok {
return errors.New("not an extended JSON Decimal128: expected document")
}
d128, ok := m["$numberDecimal"]
if !ok {
return errors.New("not an extended JSON Decimal128: expected key $numberDecimal")
}
str, ok = d128.(string)
if !ok {
return errors.New("not an extended JSON Decimal128: expected decimal to be string")
}
}
*d, err = ParseDecimal128(str)
return err
}
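A short round-trip sketch for the new JSON hooks, assuming encoding/json and the existing ParseDecimal128 helper:

package main

import (
	"encoding/json"
	"fmt"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	d, _ := primitive.ParseDecimal128("1.5")
	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // "1.5"

	var out primitive.Decimal128
	_ = json.Unmarshal([]byte(`{"$numberDecimal": "2.5"}`), &out) // extended JSON form is also accepted
	fmt.Println(out.String())                                     // 2.5
}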
func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) { func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
div64 := uint64(div) div64 := uint64(div)
a := h >> 32 a := h >> 32

View File

@ -88,16 +88,29 @@ func ObjectIDFromHex(s string) (ObjectID, error) {
return oid, nil return oid, nil
} }
// IsValidObjectID returns true if the provided hex string represents a valid ObjectID and false if not.
func IsValidObjectID(s string) bool {
_, err := ObjectIDFromHex(s)
return err == nil
}
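A usage sketch for the new helper:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	fmt.Println(primitive.IsValidObjectID("5ef7fdd91c19e3222b41b839")) // true
	fmt.Println(primitive.IsValidObjectID("not-an-object-id"))        // false
}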
// MarshalJSON returns the ObjectID as a string // MarshalJSON returns the ObjectID as a string
func (id ObjectID) MarshalJSON() ([]byte, error) { func (id ObjectID) MarshalJSON() ([]byte, error) {
return json.Marshal(id.Hex()) return json.Marshal(id.Hex())
} }
// UnmarshalJSON populates the byte slice with the ObjectID. If the byte slice is 64 bytes long, it // UnmarshalJSON populates the byte slice with the ObjectID. If the byte slice is 24 bytes long, it
// will be populated with the hex representation of the ObjectID. If the byte slice is twelve bytes // will be populated with the hex representation of the ObjectID. If the byte slice is twelve bytes
// long, it will be populated with the BSON representation of the ObjectID. Otherwise, it will // long, it will be populated with the BSON representation of the ObjectID. This method also accepts empty strings and
// return an error. // decodes them as NilObjectID. For any other inputs, an error will be returned.
func (id *ObjectID) UnmarshalJSON(b []byte) error { func (id *ObjectID) UnmarshalJSON(b []byte) error {
// Ignore "null" to keep parity with the standard library. Decoding a JSON null into a non-pointer ObjectID field
// will leave the field unchanged. For pointer values, encoding/json will set the pointer to nil and will not
// enter the UnmarshalJSON hook.
if string(b) == "null" {
return nil
}
var err error var err error
switch len(b) { switch len(b) {
case 12: case 12:
@ -125,6 +138,12 @@ func (id *ObjectID) UnmarshalJSON(b []byte) error {
} }
} }
// An empty string is not a valid ObjectID, but we treat it as a special value that decodes as NilObjectID.
if len(str) == 0 {
copy(id[:], NilObjectID[:])
return nil
}
if len(str) != 24 { if len(str) != 24 {
return fmt.Errorf("cannot unmarshal into an ObjectID, the length must be 24 but it is %d", len(str)) return fmt.Errorf("cannot unmarshal into an ObjectID, the length must be 24 but it is %d", len(str))
} }

View File

@ -40,11 +40,32 @@ type Undefined struct{}
// DateTime represents the BSON datetime value. // DateTime represents the BSON datetime value.
type DateTime int64 type DateTime int64
var _ json.Marshaler = DateTime(0)
var _ json.Unmarshaler = (*DateTime)(nil)
// MarshalJSON marshal to time type // MarshalJSON marshal to time type
func (d DateTime) MarshalJSON() ([]byte, error) { func (d DateTime) MarshalJSON() ([]byte, error) {
return json.Marshal(d.Time()) return json.Marshal(d.Time())
} }
// UnmarshalJSON creates a primitive.DateTime from a JSON string.
func (d *DateTime) UnmarshalJSON(data []byte) error {
// Ignore "null" to keep parity with the time.Time type and the standard library. Decoding "null" into a non-pointer
// DateTime field will leave the field unchanged. For pointer values, the encoding/json will set the pointer to nil
// and will not defer to the UnmarshalJSON hook.
if string(data) == "null" {
return nil
}
var tempTime time.Time
if err := json.Unmarshal(data, &tempTime); err != nil {
return err
}
*d = NewDateTimeFromTime(tempTime)
return nil
}
// Time returns the date as a time type. // Time returns the date as a time type.
func (d DateTime) Time() time.Time { func (d DateTime) Time() time.Time {
return time.Unix(int64(d)/1000, int64(d)%1000*1000000) return time.Unix(int64(d)/1000, int64(d)%1000*1000000)
@ -52,7 +73,7 @@ func (d DateTime) Time() time.Time {
// NewDateTimeFromTime creates a new DateTime from a Time. // NewDateTimeFromTime creates a new DateTime from a Time.
func NewDateTimeFromTime(t time.Time) DateTime { func NewDateTimeFromTime(t time.Time) DateTime {
return DateTime(t.UnixNano() / 1000000) return DateTime(t.Unix()*1e3 + int64(t.Nanosecond())/1e6)
} }
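A brief JSON round-trip sketch for DateTime, assuming encoding/json; precision is milliseconds, matching NewDateTimeFromTime:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"go.mongodb.org/mongo-driver/bson/primitive"
)

func main() {
	dt := primitive.NewDateTimeFromTime(time.Date(2021, 9, 23, 0, 0, 0, 0, time.UTC))
	b, _ := json.Marshal(dt) // an RFC 3339 string, via time.Time
	var out primitive.DateTime
	_ = json.Unmarshal(b, &out)
	fmt.Println(out == dt) // true: millisecond precision survives the round trip
}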
// Null represents the BSON null value. // Null represents the BSON null value.

View File

@ -104,7 +104,9 @@ func (rv RawValue) UnmarshalWithContext(dc *bsoncodec.DecodeContext, val interfa
} }
func convertFromCoreValue(v bsoncore.Value) RawValue { return RawValue{Type: v.Type, Value: v.Data} } func convertFromCoreValue(v bsoncore.Value) RawValue { return RawValue{Type: v.Type, Value: v.Data} }
func convertToCoreValue(v RawValue) bsoncore.Value { return bsoncore.Value{Type: v.Type, Data: v.Value} } func convertToCoreValue(v RawValue) bsoncore.Value {
return bsoncore.Value{Type: v.Type, Data: v.Value}
}
// Validate ensures the value is a valid BSON value. // Validate ensures the value is a valid BSON value.
func (rv RawValue) Validate() error { return convertToCoreValue(rv).Validate() } func (rv RawValue) Validate() error { return convertToCoreValue(rv).Validate() }
@ -176,7 +178,9 @@ func (rv RawValue) ObjectID() primitive.ObjectID { return convertToCoreValue(rv)
// ObjectIDOK is the same as ObjectID, except it returns a boolean instead of // ObjectIDOK is the same as ObjectID, except it returns a boolean instead of
// panicking. // panicking.
func (rv RawValue) ObjectIDOK() (primitive.ObjectID, bool) { return convertToCoreValue(rv).ObjectIDOK() } func (rv RawValue) ObjectIDOK() (primitive.ObjectID, bool) {
return convertToCoreValue(rv).ObjectIDOK()
}
// Boolean returns the boolean value the Value represents. It panics if the // Boolean returns the boolean value the Value represents. It panics if the
// value is a BSON type other than boolean. // value is a BSON type other than boolean.
@ -214,7 +218,9 @@ func (rv RawValue) RegexOK() (pattern, options string, ok bool) {
// DBPointer returns the BSON dbpointer value the Value represents. It panics if the value is a BSON // DBPointer returns the BSON dbpointer value the Value represents. It panics if the value is a BSON
// type other than DBPointer. // type other than DBPointer.
func (rv RawValue) DBPointer() (string, primitive.ObjectID) { return convertToCoreValue(rv).DBPointer() } func (rv RawValue) DBPointer() (string, primitive.ObjectID) {
return convertToCoreValue(rv).DBPointer()
}
// DBPointerOK is the same as DBPointer, except that it returns a boolean // DBPointerOK is the same as DBPointer, except that it returns a boolean
// instead of panicking. // instead of panicking.
@ -260,6 +266,14 @@ func (rv RawValue) Int32() int32 { return convertToCoreValue(rv).Int32() }
// panicking. // panicking.
func (rv RawValue) Int32OK() (int32, bool) { return convertToCoreValue(rv).Int32OK() } func (rv RawValue) Int32OK() (int32, bool) { return convertToCoreValue(rv).Int32OK() }
// AsInt32 returns a BSON number as an int32. If the BSON type is not a numeric one, this method
// will panic.
func (rv RawValue) AsInt32() int32 { return convertToCoreValue(rv).AsInt32() }
// AsInt32OK is the same as AsInt32, except that it returns a boolean instead of
// panicking.
func (rv RawValue) AsInt32OK() (int32, bool) { return convertToCoreValue(rv).AsInt32OK() }
// Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a // Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a
// BSON type other than timestamp. // BSON type other than timestamp.
func (rv RawValue) Timestamp() (t, i uint32) { return convertToCoreValue(rv).Timestamp() } func (rv RawValue) Timestamp() (t, i uint32) { return convertToCoreValue(rv).Timestamp() }
@ -276,6 +290,14 @@ func (rv RawValue) Int64() int64 { return convertToCoreValue(rv).Int64() }
// panicking. // panicking.
func (rv RawValue) Int64OK() (int64, bool) { return convertToCoreValue(rv).Int64OK() } func (rv RawValue) Int64OK() (int64, bool) { return convertToCoreValue(rv).Int64OK() }
// AsInt64 returns a BSON number as an int64. If the BSON type is not a numeric one, this method
// will panic.
func (rv RawValue) AsInt64() int64 { return convertToCoreValue(rv).AsInt64() }
// AsInt64OK is the same as AsInt64, except that it returns a boolean instead of
// panicking.
func (rv RawValue) AsInt64OK() (int64, bool) { return convertToCoreValue(rv).AsInt64OK() }
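A small sketch of the new conversion accessors, assuming bsoncore.AppendInt32 for building the raw value:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/bson"
	"go.mongodb.org/mongo-driver/bson/bsontype"
	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

func main() {
	rv := bson.RawValue{Type: bsontype.Int32, Value: bsoncore.AppendInt32(nil, 7)}
	fmt.Println(rv.AsInt64())   // 7, regardless of the underlying numeric width
	fmt.Println(rv.AsInt32OK()) // 7 true
}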
// Decimal128 returns the decimal the Value represents. It panics if the value is a BSON type other than // Decimal128 returns the decimal the Value represents. It panics if the value is a BSON type other than
// decimal. // decimal.
func (rv RawValue) Decimal128() primitive.Decimal128 { return convertToCoreValue(rv).Decimal128() } func (rv RawValue) Decimal128() primitive.Decimal128 { return convertToCoreValue(rv).Decimal128() }

View File

@ -0,0 +1,164 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package bsoncore
import (
"bytes"
"fmt"
"io"
"strconv"
)
// NewArrayLengthError creates and returns an error for when the length of an array exceeds the
// bytes available.
func NewArrayLengthError(length, rem int) error {
return lengthError("array", length, rem)
}
// Array is a raw bytes representation of a BSON array.
type Array []byte
// NewArrayFromReader reads an array from r. This function will only validate the length is
// correct and that the array ends with a null byte.
func NewArrayFromReader(r io.Reader) (Array, error) {
return newBufferFromReader(r)
}
// Index searches for and retrieves the value at the given index. This method will panic if
// the array is invalid or if the index is out of bounds.
func (a Array) Index(index uint) Value {
value, err := a.IndexErr(index)
if err != nil {
panic(err)
}
return value
}
// IndexErr searches for and retrieves the value at the given index.
func (a Array) IndexErr(index uint) (Value, error) {
elem, err := indexErr(a, index)
if err != nil {
return Value{}, err
}
return elem.Value(), err
}
// DebugString outputs a human readable version of Array. It will attempt to stringify the
// valid components of the array even if the entire array is not valid.
func (a Array) DebugString() string {
if len(a) < 5 {
return "<malformed>"
}
var buf bytes.Buffer
buf.WriteString("Array")
length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length
buf.WriteByte('(')
buf.WriteString(strconv.Itoa(int(length)))
length -= 4
buf.WriteString(")[")
var elem Element
var ok bool
for length > 1 {
elem, rem, ok = ReadElement(rem)
length -= int32(len(elem))
if !ok {
buf.WriteString(fmt.Sprintf("<malformed (%d)>", length))
break
}
fmt.Fprintf(&buf, "%s", elem.Value().DebugString())
if length != 1 {
buf.WriteByte(',')
}
}
buf.WriteByte(']')
return buf.String()
}
// String outputs an ExtendedJSON version of Array. If the Array is not valid, this method
// returns an empty string.
func (a Array) String() string {
if len(a) < 5 {
return ""
}
var buf bytes.Buffer
buf.WriteByte('[')
length, rem, _ := ReadLength(a) // We know we have enough bytes to read the length
length -= 4
var elem Element
var ok bool
for length > 1 {
elem, rem, ok = ReadElement(rem)
length -= int32(len(elem))
if !ok {
return ""
}
fmt.Fprintf(&buf, "%s", elem.Value().String())
if length > 1 {
buf.WriteByte(',')
}
}
if length != 1 { // Missing final null byte or inaccurate length
return ""
}
buf.WriteByte(']')
return buf.String()
}
// Values returns this array as a slice of values. The returned slice will contain valid values.
// If the array is not valid, the values up to the invalid point will be returned along with an
// error.
func (a Array) Values() ([]Value, error) {
return values(a)
}
// Validate validates the array and ensures the elements contained within are valid.
func (a Array) Validate() error {
length, rem, ok := ReadLength(a)
if !ok {
return NewInsufficientBytesError(a, rem)
}
if int(length) > len(a) {
return NewArrayLengthError(int(length), len(a))
}
if a[length-1] != 0x00 {
return ErrMissingNull
}
length -= 4
var elem Element
var keyNum int64
for length > 1 {
elem, rem, ok = ReadElement(rem)
length -= int32(len(elem))
if !ok {
return NewInsufficientBytesError(a, rem)
}
// validate element
err := elem.Validate()
if err != nil {
return err
}
// validate keys increase numerically
if fmt.Sprint(keyNum) != elem.Key() {
return fmt.Errorf("array key %q is out of order or invalid", elem.Key())
}
keyNum++
}
if len(rem) < 1 || rem[0] != 0x00 {
return ErrMissingNull
}
return nil
}
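
Usage sketch (not part of the vendored file): a minimal program that assembles a raw BSON array with the package's low-level append helpers and then exercises Validate, String, and Index from the Array type above. The element values and the output shown in the comments are illustrative only.

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

func main() {
	// Reserve the length bytes, append two elements keyed "0" and "1",
	// then back-fill the array length.
	idx, raw := bsoncore.AppendArrayStart(nil)
	raw = bsoncore.AppendInt32Element(raw, "0", 1)
	raw = bsoncore.AppendStringElement(raw, "1", "two")
	raw, _ = bsoncore.AppendArrayEnd(raw, idx)

	arr := bsoncore.Array(raw)
	if err := arr.Validate(); err != nil { // checks length, key order, trailing null byte
		panic(err)
	}
	fmt.Println(arr.String())               // Extended JSON, e.g. [{"$numberInt":"1"},"two"]
	fmt.Println(arr.Index(1).StringValue()) // two
}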

View File

@ -0,0 +1,201 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package bsoncore
import (
"strconv"
"go.mongodb.org/mongo-driver/bson/bsontype"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// ArrayBuilder builds a bson array
type ArrayBuilder struct {
arr []byte
indexes []int32
keys []int
}
// NewArrayBuilder creates a new ArrayBuilder
func NewArrayBuilder() *ArrayBuilder {
return (&ArrayBuilder{}).startArray()
}
// startArray reserves the array's length and sets the index to where the length begins
func (a *ArrayBuilder) startArray() *ArrayBuilder {
var index int32
index, a.arr = AppendArrayStart(a.arr)
a.indexes = append(a.indexes, index)
a.keys = append(a.keys, 0)
return a
}
// Build updates the length of the array using the index that marks the beginning of the array's
// length bytes, then returns the array (BSON bytes)
func (a *ArrayBuilder) Build() Array {
lastIndex := len(a.indexes) - 1
lastKey := len(a.keys) - 1
a.arr, _ = AppendArrayEnd(a.arr, a.indexes[lastIndex])
a.indexes = a.indexes[:lastIndex]
a.keys = a.keys[:lastKey]
return a.arr
}
// incrementKey increments the current index key and returns it as the string key to be used by the Append* methods
func (a *ArrayBuilder) incrementKey() string {
idx := len(a.keys) - 1
key := strconv.Itoa(a.keys[idx])
a.keys[idx]++
return key
}
// AppendInt32 will append i32 to ArrayBuilder.arr
func (a *ArrayBuilder) AppendInt32(i32 int32) *ArrayBuilder {
a.arr = AppendInt32Element(a.arr, a.incrementKey(), i32)
return a
}
// AppendDocument will append doc to ArrayBuilder.arr
func (a *ArrayBuilder) AppendDocument(doc []byte) *ArrayBuilder {
a.arr = AppendDocumentElement(a.arr, a.incrementKey(), doc)
return a
}
// AppendArray will append arr to ArrayBuilder.arr
func (a *ArrayBuilder) AppendArray(arr []byte) *ArrayBuilder {
a.arr = AppendArrayElement(a.arr, a.incrementKey(), arr)
return a
}
// AppendDouble will append f to ArrayBuilder.arr
func (a *ArrayBuilder) AppendDouble(f float64) *ArrayBuilder {
a.arr = AppendDoubleElement(a.arr, a.incrementKey(), f)
return a
}
// AppendString will append str to ArrayBuilder.arr
func (a *ArrayBuilder) AppendString(str string) *ArrayBuilder {
a.arr = AppendStringElement(a.arr, a.incrementKey(), str)
return a
}
// AppendObjectID will append oid to ArrayBuilder.arr
func (a *ArrayBuilder) AppendObjectID(oid primitive.ObjectID) *ArrayBuilder {
a.arr = AppendObjectIDElement(a.arr, a.incrementKey(), oid)
return a
}
// AppendBinary will append a BSON binary element using subtype and b to a.arr
func (a *ArrayBuilder) AppendBinary(subtype byte, b []byte) *ArrayBuilder {
a.arr = AppendBinaryElement(a.arr, a.incrementKey(), subtype, b)
return a
}
// AppendUndefined will append a BSON undefined element to a.arr
func (a *ArrayBuilder) AppendUndefined() *ArrayBuilder {
a.arr = AppendUndefinedElement(a.arr, a.incrementKey())
return a
}
// AppendBoolean will append a boolean element using b to a.arr
func (a *ArrayBuilder) AppendBoolean(b bool) *ArrayBuilder {
a.arr = AppendBooleanElement(a.arr, a.incrementKey(), b)
return a
}
// AppendDateTime will append datetime element dt to a.arr
func (a *ArrayBuilder) AppendDateTime(dt int64) *ArrayBuilder {
a.arr = AppendDateTimeElement(a.arr, a.incrementKey(), dt)
return a
}
// AppendNull will append a null element to a.arr
func (a *ArrayBuilder) AppendNull() *ArrayBuilder {
a.arr = AppendNullElement(a.arr, a.incrementKey())
return a
}
// AppendRegex will append pattern and options to a.arr
func (a *ArrayBuilder) AppendRegex(pattern, options string) *ArrayBuilder {
a.arr = AppendRegexElement(a.arr, a.incrementKey(), pattern, options)
return a
}
// AppendDBPointer will append ns and oid to a.arr
func (a *ArrayBuilder) AppendDBPointer(ns string, oid primitive.ObjectID) *ArrayBuilder {
a.arr = AppendDBPointerElement(a.arr, a.incrementKey(), ns, oid)
return a
}
// AppendJavaScript will append js to a.arr
func (a *ArrayBuilder) AppendJavaScript(js string) *ArrayBuilder {
a.arr = AppendJavaScriptElement(a.arr, a.incrementKey(), js)
return a
}
// AppendSymbol will append symbol to a.arr
func (a *ArrayBuilder) AppendSymbol(symbol string) *ArrayBuilder {
a.arr = AppendSymbolElement(a.arr, a.incrementKey(), symbol)
return a
}
// AppendCodeWithScope will append code and scope to a.arr
func (a *ArrayBuilder) AppendCodeWithScope(code string, scope Document) *ArrayBuilder {
a.arr = AppendCodeWithScopeElement(a.arr, a.incrementKey(), code, scope)
return a
}
// AppendTimestamp will append t and i to a.arr
func (a *ArrayBuilder) AppendTimestamp(t, i uint32) *ArrayBuilder {
a.arr = AppendTimestampElement(a.arr, a.incrementKey(), t, i)
return a
}
// AppendInt64 will append i64 to a.arr
func (a *ArrayBuilder) AppendInt64(i64 int64) *ArrayBuilder {
a.arr = AppendInt64Element(a.arr, a.incrementKey(), i64)
return a
}
// AppendDecimal128 will append d128 to a.arr
func (a *ArrayBuilder) AppendDecimal128(d128 primitive.Decimal128) *ArrayBuilder {
a.arr = AppendDecimal128Element(a.arr, a.incrementKey(), d128)
return a
}
// AppendMaxKey will append a max key element to a.arr
func (a *ArrayBuilder) AppendMaxKey() *ArrayBuilder {
a.arr = AppendMaxKeyElement(a.arr, a.incrementKey())
return a
}
// AppendMinKey will append a min key element to a.arr
func (a *ArrayBuilder) AppendMinKey() *ArrayBuilder {
a.arr = AppendMinKeyElement(a.arr, a.incrementKey())
return a
}
// AppendValue appends a BSON value to the array.
func (a *ArrayBuilder) AppendValue(val Value) *ArrayBuilder {
a.arr = AppendValueElement(a.arr, a.incrementKey(), val)
return a
}
// StartArray starts building an inline array. After this array is completed,
// the user must call a.FinishArray
func (a *ArrayBuilder) StartArray() *ArrayBuilder {
a.arr = AppendHeader(a.arr, bsontype.Array, a.incrementKey())
a.startArray()
return a
}
// FinishArray builds the most recent array created
func (a *ArrayBuilder) FinishArray() *ArrayBuilder {
a.arr = a.Build()
return a
}
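
Usage sketch (not part of the vendored file; same bsoncore and fmt imports as the sketch above): the builder generates the numeric string keys itself via incrementKey, so callers only supply values.

// Build the BSON array ["hello", 42, [true]] without managing keys by hand.
ab := bsoncore.NewArrayBuilder()
ab.AppendString("hello").AppendInt64(42)
ab.StartArray().AppendBoolean(true).FinishArray() // nested array keeps its own key counter
arr := ab.Build()
fmt.Println(arr.String()) // Extended JSON form of the array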

View File

@ -0,0 +1,189 @@
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package bsoncore
import (
"go.mongodb.org/mongo-driver/bson/bsontype"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// DocumentBuilder builds a bson document
type DocumentBuilder struct {
doc []byte
indexes []int32
}
// startDocument reserves the document's length and sets the index to where the length begins
func (db *DocumentBuilder) startDocument() *DocumentBuilder {
var index int32
index, db.doc = AppendDocumentStart(db.doc)
db.indexes = append(db.indexes, index)
return db
}
// NewDocumentBuilder creates a new DocumentBuilder
func NewDocumentBuilder() *DocumentBuilder {
return (&DocumentBuilder{}).startDocument()
}
// Build updates the length of the document using the index that marks the beginning of the
// document's length bytes, then returns the document (BSON bytes)
func (db *DocumentBuilder) Build() Document {
last := len(db.indexes) - 1
db.doc, _ = AppendDocumentEnd(db.doc, db.indexes[last])
db.indexes = db.indexes[:last]
return db.doc
}
// AppendInt32 will append an int32 element using key and i32 to DocumentBuilder.doc
func (db *DocumentBuilder) AppendInt32(key string, i32 int32) *DocumentBuilder {
db.doc = AppendInt32Element(db.doc, key, i32)
return db
}
// AppendDocument will append a BSON embedded document element using key
// and doc to DocumentBuilder.doc
func (db *DocumentBuilder) AppendDocument(key string, doc []byte) *DocumentBuilder {
db.doc = AppendDocumentElement(db.doc, key, doc)
return db
}
// AppendArray will append a bson array using key and arr to DocumentBuilder.doc
func (db *DocumentBuilder) AppendArray(key string, arr []byte) *DocumentBuilder {
db.doc = AppendHeader(db.doc, bsontype.Array, key)
db.doc = AppendArray(db.doc, arr)
return db
}
// AppendDouble will append a double element using key and f to DocumentBuilder.doc
func (db *DocumentBuilder) AppendDouble(key string, f float64) *DocumentBuilder {
db.doc = AppendDoubleElement(db.doc, key, f)
return db
}
// AppendString will append str to DocumentBuilder.doc with the given key
func (db *DocumentBuilder) AppendString(key string, str string) *DocumentBuilder {
db.doc = AppendStringElement(db.doc, key, str)
return db
}
// AppendObjectID will append oid to DocumentBuilder.doc with the given key
func (db *DocumentBuilder) AppendObjectID(key string, oid primitive.ObjectID) *DocumentBuilder {
db.doc = AppendObjectIDElement(db.doc, key, oid)
return db
}
// AppendBinary will append a BSON binary element using key, subtype, and
// b to db.doc
func (db *DocumentBuilder) AppendBinary(key string, subtype byte, b []byte) *DocumentBuilder {
db.doc = AppendBinaryElement(db.doc, key, subtype, b)
return db
}
// AppendUndefined will append a BSON undefined element using key to db.doc
func (db *DocumentBuilder) AppendUndefined(key string) *DocumentBuilder {
db.doc = AppendUndefinedElement(db.doc, key)
return db
}
// AppendBoolean will append a boolean element using key and b to db.doc
func (db *DocumentBuilder) AppendBoolean(key string, b bool) *DocumentBuilder {
db.doc = AppendBooleanElement(db.doc, key, b)
return db
}
// AppendDateTime will append a datetime element using key and dt to db.doc
func (db *DocumentBuilder) AppendDateTime(key string, dt int64) *DocumentBuilder {
db.doc = AppendDateTimeElement(db.doc, key, dt)
return db
}
// AppendNull will append a null element using key to db.doc
func (db *DocumentBuilder) AppendNull(key string) *DocumentBuilder {
db.doc = AppendNullElement(db.doc, key)
return db
}
// AppendRegex will append pattern and options using key to db.doc
func (db *DocumentBuilder) AppendRegex(key, pattern, options string) *DocumentBuilder {
db.doc = AppendRegexElement(db.doc, key, pattern, options)
return db
}
// AppendDBPointer will append ns and oid using key to db.doc
func (db *DocumentBuilder) AppendDBPointer(key string, ns string, oid primitive.ObjectID) *DocumentBuilder {
db.doc = AppendDBPointerElement(db.doc, key, ns, oid)
return db
}
// AppendJavaScript will append js using the provided key to db.doc
func (db *DocumentBuilder) AppendJavaScript(key, js string) *DocumentBuilder {
db.doc = AppendJavaScriptElement(db.doc, key, js)
return db
}
// AppendSymbol will append a BSON symbol element using key and symbol to db.doc
func (db *DocumentBuilder) AppendSymbol(key, symbol string) *DocumentBuilder {
db.doc = AppendSymbolElement(db.doc, key, symbol)
return db
}
// AppendCodeWithScope will append code and scope using key to db.doc
func (db *DocumentBuilder) AppendCodeWithScope(key string, code string, scope Document) *DocumentBuilder {
db.doc = AppendCodeWithScopeElement(db.doc, key, code, scope)
return db
}
// AppendTimestamp will append t and i to db.doc using provided key
func (db *DocumentBuilder) AppendTimestamp(key string, t, i uint32) *DocumentBuilder {
db.doc = AppendTimestampElement(db.doc, key, t, i)
return db
}
// AppendInt64 will append i64 using key to db.doc
func (db *DocumentBuilder) AppendInt64(key string, i64 int64) *DocumentBuilder {
db.doc = AppendInt64Element(db.doc, key, i64)
return db
}
// AppendDecimal128 will append d128 to db.doc using provided key
func (db *DocumentBuilder) AppendDecimal128(key string, d128 primitive.Decimal128) *DocumentBuilder {
db.doc = AppendDecimal128Element(db.doc, key, d128)
return db
}
// AppendMaxKey will append a max key element using key to db.doc
func (db *DocumentBuilder) AppendMaxKey(key string) *DocumentBuilder {
db.doc = AppendMaxKeyElement(db.doc, key)
return db
}
// AppendMinKey will append a min key element using key to db.doc
func (db *DocumentBuilder) AppendMinKey(key string) *DocumentBuilder {
db.doc = AppendMinKeyElement(db.doc, key)
return db
}
// AppendValue will append a BSON element with the provided key and value to the document.
func (db *DocumentBuilder) AppendValue(key string, val Value) *DocumentBuilder {
db.doc = AppendValueElement(db.doc, key, val)
return db
}
// StartDocument starts building an inline document element with the provided key.
// After this document is completed, the user must call FinishDocument
func (db *DocumentBuilder) StartDocument(key string) *DocumentBuilder {
db.doc = AppendHeader(db.doc, bsontype.EmbeddedDocument, key)
db = db.startDocument()
return db
}
// FinishDocument builds the most recent document created
func (db *DocumentBuilder) FinishDocument() *DocumentBuilder {
db.doc = db.Build()
return db
}
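
Usage sketch (not part of the vendored file; same imports assumed): DocumentBuilder mirrors ArrayBuilder but takes explicit keys, including for nested inline documents. The key and value literals are arbitrary examples.

// Build {"name": "example", "count": 3, "meta": {"ok": true}}.
b := bsoncore.NewDocumentBuilder()
b.AppendString("name", "example").AppendInt32("count", 3)
b.StartDocument("meta").AppendBoolean("ok", true).FinishDocument()
doc := b.Build()
fmt.Println(doc.String()) // Extended JSON form of the document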

View File

@ -30,24 +30,35 @@ import (
"fmt"
"math"
"strconv"
+"strings"
"time"
"go.mongodb.org/mongo-driver/bson/bsontype"
"go.mongodb.org/mongo-driver/bson/primitive"
)
-// EmptyDocumentLength is the length of a document that has been started/ended but has no elements.
-const EmptyDocumentLength = 5
+const (
+// EmptyDocumentLength is the length of a document that has been started/ended but has no elements.
+EmptyDocumentLength = 5
+// nullTerminator is a string version of the 0 byte that is appended at the end of cstrings.
+nullTerminator = string(byte(0))
+invalidKeyPanicMsg = "BSON element keys cannot contain null bytes"
+invalidRegexPanicMsg = "BSON regex values cannot contain null bytes"
+)
// AppendType will append t to dst and return the extended buffer.
func AppendType(dst []byte, t bsontype.Type) []byte { return append(dst, byte(t)) }
// AppendKey will append key to dst and return the extended buffer.
-func AppendKey(dst []byte, key string) []byte { return append(dst, key+string(0x00)...) }
+func AppendKey(dst []byte, key string) []byte { return append(dst, key+nullTerminator...) }
// AppendHeader will append Type t and key to dst and return the extended
// buffer.
func AppendHeader(dst []byte, t bsontype.Type, key string) []byte {
+if !isValidCString(key) {
+panic(invalidKeyPanicMsg)
+}
dst = AppendType(dst, t)
dst = append(dst, key...)
return append(dst, 0x00)
@ -427,7 +438,11 @@ func AppendNullElement(dst []byte, key string) []byte { return AppendHeader(dst,
// AppendRegex will append pattern and options to dst and return the extended buffer.
func AppendRegex(dst []byte, pattern, options string) []byte {
-return append(dst, pattern+string(0x00)+options+string(0x00)...)
+if !isValidCString(pattern) || !isValidCString(options) {
+panic(invalidRegexPanicMsg)
+}
+return append(dst, pattern+nullTerminator+options+nullTerminator...)
}
// AppendRegexElement will append a BSON regex element using key, pattern, and
@ -815,7 +830,7 @@ func readstring(src []byte) (string, []byte, bool) {
if !ok {
return "", src, false
}
-if len(src[4:]) < int(l) {
+if len(src[4:]) < int(l) || l == 0 {
return "", src, false
}
@ -841,3 +856,7 @@ func appendBinarySubtype2(dst []byte, subtype byte, b []byte) []byte {
dst = appendLength(dst, int32(len(b)))
return append(dst, b...)
}
+func isValidCString(cs string) bool {
+return !strings.ContainsRune(cs, '\x00')
+}
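
The net effect of this hunk is that element keys and regex components containing a null byte are now rejected eagerly with a panic instead of silently producing a corrupt document. A sketch of that behaviour (the helper name below is hypothetical):

// AppendStringElement goes through AppendHeader, which now panics with
// invalidKeyPanicMsg when the key contains an embedded null byte.
func appendWithBadKey() (recovered interface{}) {
	defer func() { recovered = recover() }()
	_ = bsoncore.AppendStringElement(nil, "bad\x00key", "value")
	return
}
// appendWithBadKey() returns "BSON element keys cannot contain null bytes"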

View File

@ -17,17 +17,20 @@ import (
"go.mongodb.org/mongo-driver/bson/bsontype"
)
-// DocumentValidationError is an error type returned when attempting to validate a document.
-type DocumentValidationError string
-func (dve DocumentValidationError) Error() string { return string(dve) }
+// ValidationError is an error type returned when attempting to validate a document or array.
+type ValidationError string
+func (ve ValidationError) Error() string { return string(ve) }
// NewDocumentLengthError creates and returns an error for when the length of a document exceeds the
// bytes available.
func NewDocumentLengthError(length, rem int) error {
-return DocumentValidationError(
-fmt.Sprintf("document length exceeds available bytes. length=%d remainingBytes=%d", length, rem),
-)
+return lengthError("document", length, rem)
+}
+func lengthError(bufferType string, length, rem int) error {
+return ValidationError(fmt.Sprintf("%v length exceeds available bytes. length=%d remainingBytes=%d",
+bufferType, length, rem))
}
// InsufficientBytesError indicates that there were not enough bytes to read the next component.
@ -94,15 +97,16 @@ func (idte InvalidDepthTraversalError) Error() string {
)
}
-// ErrMissingNull is returned when a document's last byte is not null.
-const ErrMissingNull DocumentValidationError = "document end is missing null byte"
+// ErrMissingNull is returned when a document or array's last byte is not null.
+const ErrMissingNull ValidationError = "document or array end is missing null byte"
+// ErrInvalidLength indicates that a length in a binary representation of a BSON document or array
+// is invalid.
+const ErrInvalidLength ValidationError = "document or array length is invalid"
// ErrNilReader indicates that an operation was attempted on a nil io.Reader.
var ErrNilReader = errors.New("nil reader")
-// ErrInvalidLength indicates that a length in a binary representation of a BSON document is invalid.
-var ErrInvalidLength = errors.New("document length is invalid")
// ErrEmptyKey indicates that no key was provided to a Lookup method.
var ErrEmptyKey = errors.New("empty key provided")
@ -115,12 +119,13 @@ var ErrOutOfBounds = errors.New("out of bounds")
// Document is a raw bytes representation of a BSON document.
type Document []byte
-// Array is a raw bytes representation of a BSON array.
-type Array = Document
// NewDocumentFromReader reads a document from r. This function will only validate the length is
// correct and that the document ends with a null byte.
func NewDocumentFromReader(r io.Reader) (Document, error) {
+return newBufferFromReader(r)
+}
+func newBufferFromReader(r io.Reader) ([]byte, error) {
if r == nil {
return nil, ErrNilReader
}
@ -137,20 +142,20 @@ func NewDocumentFromReader(r io.Reader) (Document, error) {
if length < 0 {
return nil, ErrInvalidLength
}
-document := make([]byte, length)
-copy(document, lengthBytes[:])
-_, err = io.ReadFull(r, document[4:])
+buffer := make([]byte, length)
+copy(buffer, lengthBytes[:])
+_, err = io.ReadFull(r, buffer[4:])
if err != nil {
return nil, err
}
-if document[length-1] != 0x00 {
+if buffer[length-1] != 0x00 {
return nil, ErrMissingNull
}
-return document, nil
+return buffer, nil
}
// Lookup searches the document, potentially recursively, for the given key. If there are multiple
@ -181,7 +186,8 @@ func (d Document) LookupErr(key ...string) (Value, error) {
if !ok {
return Value{}, NewInsufficientBytesError(d, rem)
}
-if elem.Key() != key[0] {
+// We use `KeyBytes` rather than `Key` to avoid a needless string alloc.
+if string(elem.KeyBytes()) != key[0] {
continue
}
if len(key) > 1 {
@ -220,9 +226,13 @@ func (d Document) Index(index uint) Element {
// IndexErr searches for and retrieves the element at the given index.
func (d Document) IndexErr(index uint) (Element, error) {
-length, rem, ok := ReadLength(d)
+return indexErr(d, index)
+}
+func indexErr(b []byte, index uint) (Element, error) {
+length, rem, ok := ReadLength(b)
if !ok {
-return nil, NewInsufficientBytesError(d, rem)
+return nil, NewInsufficientBytesError(b, rem)
}
length -= 4
@ -233,7 +243,7 @@ func (d Document) IndexErr(index uint) (Element, error) {
elem, rem, ok = ReadElement(rem)
length -= int32(len(elem))
if !ok {
-return nil, NewInsufficientBytesError(d, rem)
+return nil, NewInsufficientBytesError(b, rem)
}
if current != index {
current++
@ -337,9 +347,13 @@ func (d Document) Elements() ([]Element, error) {
// If the document is not valid, the values up to the invalid point will be returned along with an
// error.
func (d Document) Values() ([]Value, error) {
-length, rem, ok := ReadLength(d)
+return values(d)
+}
+func values(b []byte) ([]Value, error) {
+length, rem, ok := ReadLength(b)
if !ok {
-return nil, NewInsufficientBytesError(d, rem)
+return nil, NewInsufficientBytesError(b, rem)
}
length -= 4
@ -350,7 +364,7 @@ func (d Document) Values() ([]Value, error) {
elem, rem, ok = ReadElement(rem)
length -= int32(len(elem))
if !ok {
-return vals, NewInsufficientBytesError(d, rem)
+return vals, NewInsufficientBytesError(b, rem)
}
if err := elem.Value().Validate(); err != nil {
return vals, err
@ -367,7 +381,7 @@ func (d Document) Validate() error {
return NewInsufficientBytesError(d, rem)
}
if int(length) > len(d) {
-return d.lengtherror(int(length), len(d))
+return NewDocumentLengthError(int(length), len(d))
}
if d[length-1] != 0x00 {
return ErrMissingNull
@ -393,7 +407,3 @@ func (d Document) Validate() error {
}
return nil
}
-func (Document) lengtherror(length, rem int) error {
-return DocumentValidationError(fmt.Sprintf("document length exceeds available bytes. length=%d remainingBytes=%d", length, rem))
-}
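
To make the refactor above concrete, here is a short sketch (not from the commit; imports assumed as before) of the shared error path: truncating a document so its declared length exceeds the bytes actually present now yields a ValidationError built by lengthError.

doc := bsoncore.NewDocumentBuilder().AppendInt32("a", 1).Build()
truncated := bsoncore.Document(doc[:len(doc)-2]) // drop the last two bytes, including the trailing null
if err := truncated.Validate(); err != nil {
	fmt.Println(err) // e.g. "document length exceeds available bytes. length=12 remainingBytes=10"
}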

View File

@ -105,7 +105,7 @@ github.com/astaxie/beego/session/redis_sentinel
github.com/astaxie/beego/toolbox
github.com/astaxie/beego/utils
github.com/astaxie/beego/validation
-# github.com/aws/aws-sdk-go v1.32.5
+# github.com/aws/aws-sdk-go v1.34.28
## explicit
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/arn
@ -186,7 +186,8 @@ github.com/cespare/xxhash/v2
## explicit
# github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59
github.com/containerd/cgroups/stats/v1
-# github.com/containerd/containerd v1.4.4
+# github.com/containerd/containerd v1.4.8
+## explicit
github.com/containerd/containerd/archive/compression
github.com/containerd/containerd/content
github.com/containerd/containerd/content/local
@ -497,7 +498,7 @@ github.com/imdario/mergo
github.com/inconshreveable/mousetrap
# github.com/jinzhu/gorm v1.9.8
## explicit
-# github.com/jmespath/go-jmespath v0.3.0
+# github.com/jmespath/go-jmespath v0.4.0
github.com/jmespath/go-jmespath
# github.com/jpillora/backoff v1.0.0
## explicit
@ -558,13 +559,14 @@ github.com/opencontainers/go-digest
## explicit
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v0.1.1
+# github.com/opencontainers/runc v1.0.0-rc95
+## explicit
github.com/opencontainers/runc/libcontainer/user
# github.com/opentracing/opentracing-go v1.2.0
## explicit
github.com/opentracing/opentracing-go
github.com/opentracing/opentracing-go/log
-# github.com/pelletier/go-toml v1.4.0
+# github.com/pelletier/go-toml v1.7.0
github.com/pelletier/go-toml
# github.com/peterbourgon/diskv v2.0.1+incompatible
github.com/peterbourgon/diskv
@ -661,7 +663,8 @@ github.com/xeipuuv/gojsonreference
github.com/xeipuuv/gojsonschema
# github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca
github.com/xlab/treeprint
-# go.mongodb.org/mongo-driver v1.3.4
+# go.mongodb.org/mongo-driver v1.5.1
+## explicit
go.mongodb.org/mongo-driver/bson
go.mongodb.org/mongo-driver/bson/bsoncodec
go.mongodb.org/mongo-driver/bson/bsonoptions