Mirror of https://github.com/goharbor/harbor.git

bump up github.com/prometheus/client_golang to v1.13.0 (#17688)

Signed-off-by: yminer <yminer@vmware.com>

This commit is contained in:
parent 1a1a6d2fd2
commit b193f3f717

src/go.mod (10 lines changed)
@@ -49,7 +49,7 @@ require (
     github.com/opencontainers/go-digest v1.0.0
     github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799
     github.com/pkg/errors v0.9.1
-    github.com/prometheus/client_golang v1.12.1
+    github.com/prometheus/client_golang v1.13.0
     github.com/robfig/cron v1.0.0 // indirect
     github.com/robfig/cron/v3 v3.0.0
     github.com/spf13/viper v1.8.1
@@ -67,7 +67,7 @@ require (
     go.uber.org/ratelimit v0.2.0
     golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
     golang.org/x/net v0.0.0-20220909164309-bea034e7d591
-    golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
+    golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b
     golang.org/x/time v0.0.0-20220210224613-90d013bbcef8
     gopkg.in/h2non/gock.v1 v1.0.16
     gopkg.in/yaml.v2 v2.4.0
@@ -179,8 +179,8 @@ require (
     github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
     github.com/pmezard/go-difflib v1.0.0 // indirect
     github.com/prometheus/client_model v0.2.0 // indirect
-    github.com/prometheus/common v0.32.1 // indirect
-    github.com/prometheus/procfs v0.7.3 // indirect
+    github.com/prometheus/common v0.37.0 // indirect
+    github.com/prometheus/procfs v0.8.0 // indirect
    github.com/satori/go.uuid v1.2.0 // indirect
     github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644 // indirect
     github.com/sirupsen/logrus v1.8.1 // indirect
@@ -213,7 +213,7 @@ require (
     google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 // indirect
     google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
     google.golang.org/grpc v1.47.0 // indirect
-    google.golang.org/protobuf v1.28.0 // indirect
+    google.golang.org/protobuf v1.28.1 // indirect
     gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
     gopkg.in/fatih/pool.v2 v2.0.0 // indirect
     gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect

src/go.sum (20 lines changed)
@@ -493,12 +493,14 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
 github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
 github.com/go-ldap/ldap/v3 v3.2.4 h1:PFavAq2xTgzo/loE8qNXcQaofAaqIpI4WgaLdv+1l3E=
 github.com/go-ldap/ldap/v3 v3.2.4/go.mod h1:iYS1MdmrmceOJ1QOTnRXrIs7i3kloqtmGQjRvjKpyMg=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
 github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -1173,8 +1175,9 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD
 github.com/prometheus/client_golang v1.7.0/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
+github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -1190,8 +1193,9 @@ github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+
 github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -1204,8 +1208,9 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
@@ -1589,6 +1594,8 @@ golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211013171255-e13a2654a71e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI=
 golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
 golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1600,8 +1607,9 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1712,6 +1720,7 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
 golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1915,8 +1924,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
 google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
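
The go.mod and go.sum hunks above are the whole dependency bump. As a quick, hedged sanity check that the bumped client_golang still builds and serves metrics on its own, a throwaway program along the lines below can be compiled against it; the package layout, metric name, and port are illustrative only and are not part of this commit.

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    // A dedicated registry keeps the smoke test independent of the global default.
    reg := prometheus.NewRegistry()

    // Any metric will do; a plain counter is the simplest thing to register.
    requests := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "example_requests_total", // illustrative name, not a Harbor metric
        Help: "Total number of example requests handled.",
    })
    reg.MustRegister(requests)
    requests.Inc()

    // Expose the registry on /metrics and scrape it manually.
    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
    log.Fatal(http.ListenAndServe(":9090", nil))
}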

src/vendor/github.com/prometheus/client_golang/prometheus/counter.go (2 lines changed, generated, vendored)
@@ -51,7 +51,7 @@ type Counter interface {
 // will lead to a valid (label-less) exemplar. But if Labels is nil, the current
 // exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
 // of the provided labels are invalid, or if the provided labels contain more
-// than 64 runes in total.
+// than 128 runes in total.
 type ExemplarAdder interface {
     AddWithExemplar(value float64, exemplar Labels)
 }
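
The only change to counter.go is documentation: the total rune budget for exemplar labels passed to AddWithExemplar is now stated as 128 instead of 64. A minimal sketch of how that API is typically exercised, assuming nothing beyond the prometheus package itself (the metric name and trace ID below are made up):

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    handled := prometheus.NewCounter(prometheus.CounterOpts{
        Name: "example_handled_total",
        Help: "Example counter carrying an exemplar.",
    })

    // The concrete counter returned by NewCounter implements ExemplarAdder.
    // AddWithExemplar panics if the value is negative, if a label is invalid,
    // or if the labels exceed the documented rune budget (128 in v1.13.0).
    if adder, ok := handled.(prometheus.ExemplarAdder); ok {
        adder.AddWithExemplar(1, prometheus.Labels{"trace_id": "abc123"})
    }

    fmt.Println("counter incremented with exemplar")
}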

src/vendor/github.com/prometheus/client_golang/prometheus/desc.go (5 lines changed, generated, vendored)
@@ -20,6 +20,9 @@ import (
     "strings"
 
     "github.com/cespare/xxhash/v2"
+
+    "github.com/prometheus/client_golang/prometheus/internal"
+
     //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
     "github.com/golang/protobuf/proto"
     "github.com/prometheus/common/model"
@@ -154,7 +157,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
             Value: proto.String(v),
         })
     }
-    sort.Sort(labelPairSorter(d.constLabelPairs))
+    sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
     return d
 }
 

src/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go (new file, 26 lines, generated, vendored)
@@ -0,0 +1,26 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "os"
+
+func getPIDFn() func() (int, error) {
+    pid := os.Getpid()
+    return func() (int, error) {
+        return pid, nil
+    }
+}

src/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go (new file, 23 lines, generated, vendored)
@@ -0,0 +1,23 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+func getPIDFn() func() (int, error) {
+    return func() (int, error) {
+        return 1, nil
+    }
+}
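
The two new files above give the library a build-tag-aware PID lookup: the real process ID normally, a constant 1 under GopherJS. Application code can supply its own lookup in the same spirit through the process collector options; the sketch below is a hedged illustration against the public collectors package of v1.13.0 and is not code from this commit.

package main

import (
    "log"
    "net/http"
    "os"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()

    // PidFn already defaults to the current process; it is spelled out here
    // only to mirror the getPIDFn pattern introduced above.
    reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{
        PidFn: func() (int, error) { return os.Getpid(), nil },
    }))

    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
    log.Fatal(http.ListenAndServe(":9091", nil))
}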

src/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go (20 lines changed, generated, vendored)
@@ -19,6 +19,10 @@ import (
     "time"
 )
 
+// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
+// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so
+// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is
+// populated using runtime/metrics.
 func goRuntimeMemStats() memStatsMetrics {
     return memStatsMetrics{
         {
@@ -197,14 +201,6 @@ func goRuntimeMemStats() memStatsMetrics {
             ),
             eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
             valType: GaugeValue,
-        }, {
-            desc: NewDesc(
-                memstatNamespace("gc_cpu_fraction"),
-                "The fraction of this program's available CPU time used by the GC since the program started.",
-                nil, nil,
-            ),
-            eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
-            valType: GaugeValue,
         },
     }
 }
@@ -232,7 +228,7 @@ func newBaseGoCollector() baseGoCollector {
             "A summary of the pause duration of garbage collection cycles.",
             nil, nil),
         gcLastTimeDesc: NewDesc(
-            memstatNamespace("last_gc_time_seconds"),
+            "go_memstats_last_gc_time_seconds",
             "Number of seconds since 1970 of last garbage collection.",
             nil, nil),
         goInfoDesc: NewDesc(
@@ -254,8 +250,9 @@ func (c *baseGoCollector) Describe(ch chan<- *Desc) {
 // Collect returns the current state of all metrics of the collector.
 func (c *baseGoCollector) Collect(ch chan<- Metric) {
     ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
-    n, _ := runtime.ThreadCreateProfile(nil)
-    ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
+
+    n := getRuntimeNumThreads()
+    ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
 
     var stats debug.GCStats
     stats.PauseQuantiles = make([]time.Duration, 5)
@@ -268,7 +265,6 @@ func (c *baseGoCollector) Collect(ch chan<- Metric) {
     quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
     ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
     ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
-
     ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
 }
 
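
Two behavioural consequences of these hunks: thread counting now goes through getRuntimeNumThreads, and the MemStats-based path no longer emits go_memstats_gc_cpu_fraction (the Go 1.16 file that follows keeps it for older toolchains). A hedged way to check which go_* families a binary actually exposes after the bump is to gather from a registry directly; the snippet below is illustrative only and not part of this commit.

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
)

func main() {
    reg := prometheus.NewRegistry()
    reg.MustRegister(collectors.NewGoCollector())

    families, err := reg.Gather()
    if err != nil {
        log.Fatal(err)
    }
    for _, mf := range families {
        // Prints names such as go_goroutines, go_threads and the
        // go_memstats_* series contributed by the Go collector.
        fmt.Println(mf.GetName())
    }
}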

src/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go (17 lines changed, generated, vendored)
@@ -40,13 +40,28 @@ type goCollector struct {
 //
 // Deprecated: Use collectors.NewGoCollector instead.
 func NewGoCollector() Collector {
+    msMetrics := goRuntimeMemStats()
+    msMetrics = append(msMetrics, struct {
+        desc *Desc
+        eval func(*runtime.MemStats) float64
+        valType ValueType
+    }{
+        // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+        desc: NewDesc(
+            memstatNamespace("gc_cpu_fraction"),
+            "The fraction of this program's available CPU time used by the GC since the program started.",
+            nil, nil,
+        ),
+        eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+        valType: GaugeValue,
+    })
     return &goCollector{
         base: newBaseGoCollector(),
         msLast: &runtime.MemStats{},
         msRead: runtime.ReadMemStats,
         msMaxWait: time.Second,
         msMaxAge: 5 * time.Minute,
-        msMetrics: goRuntimeMemStats(),
+        msMetrics: msMetrics,
     }
 }
 

src/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go (generated, vendored)
@@ -25,10 +25,72 @@ import (
 
     //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
     "github.com/golang/protobuf/proto"
-    "github.com/prometheus/client_golang/prometheus/internal"
     dto "github.com/prometheus/client_model/go"
+
+    "github.com/prometheus/client_golang/prometheus/internal"
 )
+
+const (
+    // constants for strings referenced more than once.
+    goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects"
+    goGCHeapAllocsObjects = "/gc/heap/allocs:objects"
+    goGCHeapFreesObjects = "/gc/heap/frees:objects"
+    goGCHeapFreesBytes = "/gc/heap/frees:bytes"
+    goGCHeapAllocsBytes = "/gc/heap/allocs:bytes"
+    goGCHeapObjects = "/gc/heap/objects:objects"
+    goGCHeapGoalBytes = "/gc/heap/goal:bytes"
+    goMemoryClassesTotalBytes = "/memory/classes/total:bytes"
+    goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes"
+    goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes"
+    goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes"
+    goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes"
+    goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes"
+    goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes"
+    goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes"
+    goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes"
+    goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
+    goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes"
+    goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes"
+    goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes"
+    goMemoryClassesOtherBytes = "/memory/classes/other:bytes"
+)
+
+// rmNamesForMemStatsMetrics represents runtime/metrics names required to populate goRuntimeMemStats from like logic.
+var rmNamesForMemStatsMetrics = []string{
+    goGCHeapTinyAllocsObjects,
+    goGCHeapAllocsObjects,
+    goGCHeapFreesObjects,
+    goGCHeapAllocsBytes,
+    goGCHeapObjects,
+    goGCHeapGoalBytes,
+    goMemoryClassesTotalBytes,
+    goMemoryClassesHeapObjectsBytes,
+    goMemoryClassesHeapUnusedBytes,
+    goMemoryClassesHeapReleasedBytes,
+    goMemoryClassesHeapFreeBytes,
+    goMemoryClassesHeapStacksBytes,
+    goMemoryClassesOSStacksBytes,
+    goMemoryClassesMetadataMSpanInuseBytes,
+    goMemoryClassesMetadataMSPanFreeBytes,
+    goMemoryClassesMetadataMCacheInuseBytes,
+    goMemoryClassesMetadataMCacheFreeBytes,
+    goMemoryClassesProfilingBucketsBytes,
+    goMemoryClassesMetadataOtherBytes,
+    goMemoryClassesOtherBytes,
+}
+
+func bestEffortLookupRM(lookup []string) []metrics.Description {
+    ret := make([]metrics.Description, 0, len(lookup))
+    for _, rm := range metrics.All() {
+        for _, m := range lookup {
+            if m == rm.Name {
+                ret = append(ret, rm)
+            }
+        }
+    }
+    return ret
+}
+
 type goCollector struct {
     base baseGoCollector
 
@@ -36,70 +98,124 @@ type goCollector struct {
     // snapshot is always produced by Collect.
     mu sync.Mutex
 
-    // rm... fields all pertain to the runtime/metrics package.
-    rmSampleBuf []metrics.Sample
-    rmSampleMap map[string]*metrics.Sample
-    rmMetrics []collectorMetric
+    // Contains all samples that has to retrieved from runtime/metrics (not all of them will be exposed).
+    sampleBuf []metrics.Sample
+    // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
+    sampleMap map[string]*metrics.Sample
+
+    // rmExposedMetrics represents all runtime/metrics package metrics
+    // that were configured to be exposed.
+    rmExposedMetrics []collectorMetric
+    rmExactSumMapForHist map[string]string
 
     // With Go 1.17, the runtime/metrics package was introduced.
     // From that point on, metric names produced by the runtime/metrics
     // package could be generated from runtime/metrics names. However,
     // these differ from the old names for the same values.
     //
-    // This field exist to export the same values under the old names
+    // This field exists to export the same values under the old names
     // as well.
     msMetrics memStatsMetrics
+    msMetricsEnabled bool
+}
+
+type rmMetricDesc struct {
+    metrics.Description
+}
+
+func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
+    var descs []rmMetricDesc
+    for _, d := range metrics.All() {
+        var (
+            deny = true
+            desc rmMetricDesc
+        )
+
+        for _, r := range rules {
+            if !r.Matcher.MatchString(d.Name) {
+                continue
+            }
+            deny = r.Deny
+        }
+        if deny {
+            continue
+        }
+
+        desc.Description = d
+        descs = append(descs, desc)
+    }
+    return descs
+}
+
+func defaultGoCollectorOptions() internal.GoCollectorOptions {
+    return internal.GoCollectorOptions{
+        RuntimeMetricSumForHist: map[string]string{
+            "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
+            "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
+        },
+        RuntimeMetricRules: []internal.GoCollectorRule{
+            //{Matcher: regexp.MustCompile("")},
+        },
+    }
 }
 
 // NewGoCollector is the obsolete version of collectors.NewGoCollector.
 // See there for documentation.
 //
 // Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector() Collector {
-    descriptions := metrics.All()
+func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
+    opt := defaultGoCollectorOptions()
+    for _, o := range opts {
+        o(&opt)
+    }
+
+    exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)
+
     // Collect all histogram samples so that we can get their buckets.
     // The API guarantees that the buckets are always fixed for the lifetime
     // of the process.
     var histograms []metrics.Sample
-    for _, d := range descriptions {
+    for _, d := range exposedDescriptions {
         if d.Kind == metrics.KindFloat64Histogram {
             histograms = append(histograms, metrics.Sample{Name: d.Name})
         }
     }
+
+    if len(histograms) > 0 {
     metrics.Read(histograms)
+    }
+
     bucketsMap := make(map[string][]float64)
     for i := range histograms {
         bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
     }
 
-    // Generate a Desc and ValueType for each runtime/metrics metric.
-    metricSet := make([]collectorMetric, 0, len(descriptions))
-    sampleBuf := make([]metrics.Sample, 0, len(descriptions))
-    sampleMap := make(map[string]*metrics.Sample, len(descriptions))
-    for i := range descriptions {
-        d := &descriptions[i]
-        namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d)
+    // Generate a collector for each exposed runtime/metrics metric.
+    metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
+    // SampleBuf is used for reading from runtime/metrics.
+    // We are assuming the largest case to have stable pointers for sampleMap purposes.
+    sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
+    sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
+    for _, d := range exposedDescriptions {
+        namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
         if !ok {
             // Just ignore this metric; we can't do anything with it here.
             // If a user decides to use the latest version of Go, we don't want
-            // to fail here. This condition is tested elsewhere.
+            // to fail here. This condition is tested in TestExpectedRuntimeMetrics.
             continue
         }
 
-        // Set up sample buffer for reading, and a map
-        // for quick lookup of sample values.
         sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
         sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
 
         var m collectorMetric
         if d.Kind == metrics.KindFloat64Histogram {
-            _, hasSum := rmExactSumMap[d.Name]
+            _, hasSum := opt.RuntimeMetricSumForHist[d.Name]
             unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
             m = newBatchHistogram(
                 NewDesc(
                     BuildFQName(namespace, subsystem, name),
-                    d.Description,
+                    d.Description.Description,
                     nil,
                     nil,
                 ),
@@ -111,24 +227,61 @@ func NewGoCollector() Collector {
                 Namespace: namespace,
                 Subsystem: subsystem,
                 Name: name,
-                Help: d.Description,
-            })
+                Help: d.Description.Description,
+            },
+            )
         } else {
             m = NewGauge(GaugeOpts{
                 Namespace: namespace,
                 Subsystem: subsystem,
                 Name: name,
-                Help: d.Description,
+                Help: d.Description.Description,
             })
         }
         metricSet = append(metricSet, m)
     }
+
+    // Add exact sum metrics to sampleBuf if not added before.
+    for _, h := range histograms {
+        sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
+        if !ok {
+            continue
+        }
+
+        if _, ok := sampleMap[sumMetric]; ok {
+            continue
+        }
+        sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
+        sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
+    }
+
+    var (
+        msMetrics memStatsMetrics
+        msDescriptions []metrics.Description
+    )
+
+    if !opt.DisableMemStatsLikeMetrics {
+        msMetrics = goRuntimeMemStats()
+        msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)
+
+        // Check if metric was not exposed before and if not, add to sampleBuf.
+        for _, mdDesc := range msDescriptions {
+            if _, ok := sampleMap[mdDesc.Name]; ok {
+                continue
+            }
+            sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
+            sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
+        }
+    }
+
     return &goCollector{
         base: newBaseGoCollector(),
-        rmSampleBuf: sampleBuf,
-        rmSampleMap: sampleMap,
-        rmMetrics: metricSet,
-        msMetrics: goRuntimeMemStats(),
+        sampleBuf: sampleBuf,
+        sampleMap: sampleMap,
+        rmExposedMetrics: metricSet,
+        rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
+        msMetrics: msMetrics,
+        msMetricsEnabled: !opt.DisableMemStatsLikeMetrics,
     }
 }
 
@@ -138,7 +291,7 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
     for _, i := range c.msMetrics {
         ch <- i.desc
     }
-    for _, m := range c.rmMetrics {
+    for _, m := range c.rmExposedMetrics {
         ch <- m.Desc()
     }
 }
@@ -148,8 +301,12 @@ func (c *goCollector) Collect(ch chan<- Metric) {
     // Collect base non-memory metrics.
     c.base.Collect(ch)
 
+    if len(c.sampleBuf) == 0 {
+        return
+    }
+
     // Collect must be thread-safe, so prevent concurrent use of
-    // rmSampleBuf. Just read into rmSampleBuf but write all the data
+    // sampleBuf elements. Just read into sampleBuf but write all the data
     // we get into our Metrics or MemStats.
     //
     // This lock also ensures that the Metrics we send out are all from
@@ -164,14 +321,17 @@ func (c *goCollector) Collect(ch chan<- Metric) {
     defer c.mu.Unlock()
 
     // Populate runtime/metrics sample buffer.
-    metrics.Read(c.rmSampleBuf)
+    metrics.Read(c.sampleBuf)
+
+    // Collect all our runtime/metrics user chose to expose from sampleBuf (if any).
+    for i, metric := range c.rmExposedMetrics {
+        // We created samples for exposed metrics first in order, so indexes match.
+        sample := c.sampleBuf[i]
 
-    // Update all our metrics from rmSampleBuf.
-    for i, sample := range c.rmSampleBuf {
         // N.B. switch on concrete type because it's significantly more efficient
         // than checking for the Counter and Gauge interface implementations. In
         // this case, we control all the types here.
-        switch m := c.rmMetrics[i].(type) {
+        switch m := metric.(type) {
         case *counter:
             // Guard against decreases. This should never happen, but a failure
             // to do so will result in a panic, which is a harsh consequence for
@@ -191,13 +351,16 @@ func (c *goCollector) Collect(ch chan<- Metric) {
             panic("unexpected metric type")
         }
     }
+
+    if c.msMetricsEnabled {
     // ms is a dummy MemStats that we populate ourselves so that we can
-    // populate the old metrics from it.
+    // populate the old metrics from it if goMemStatsCollection is enabled.
     var ms runtime.MemStats
-    memStatsFromRM(&ms, c.rmSampleMap)
+    memStatsFromRM(&ms, c.sampleMap)
     for _, i := range c.msMetrics {
         ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
     }
+    }
 }
 
 // unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
@@ -224,11 +387,6 @@ func unwrapScalarRMValue(v metrics.Value) float64 {
     }
 }
 
-var rmExactSumMap = map[string]string{
-    "/gc/heap/allocs-by-size:bytes": "/gc/heap/allocs:bytes",
-    "/gc/heap/frees-by-size:bytes": "/gc/heap/frees:bytes",
-}
-
 // exactSumFor takes a runtime/metrics metric name (that is assumed to
 // be of kind KindFloat64Histogram) and returns its exact sum and whether
 // its exact sum exists.
@@ -236,11 +394,11 @@ var rmExactSumMap = map[string]string{
 // The runtime/metrics API for histograms doesn't currently expose exact
 // sums, but some of the other metrics are in fact exact sums of histograms.
 func (c *goCollector) exactSumFor(rmName string) float64 {
-    sumName, ok := rmExactSumMap[rmName]
+    sumName, ok := c.rmExactSumMapForHist[rmName]
     if !ok {
         return 0
     }
-    s, ok := c.rmSampleMap[sumName]
+    s, ok := c.sampleMap[sumName]
     if !ok {
         return 0
     }
@@ -261,35 +419,30 @@ func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
     // while having Mallocs - Frees still represent a live object count.
     // Unfortunately, MemStats doesn't actually export a large allocation count,
     // so it's impossible to pull this number out directly.
-    tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects")
-    ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs
-    ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs
+    tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
+    ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
+    ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
 
-    ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes")
-    ms.Sys = lookupOrZero("/memory/classes/total:bytes")
+    ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
+    ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
     ms.Lookups = 0 // Already always zero.
-    ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes")
+    ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
     ms.Alloc = ms.HeapAlloc
-    ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes")
-    ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes")
-    ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes")
+    ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
+    ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
+    ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
     ms.HeapSys = ms.HeapInuse + ms.HeapIdle
-    ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects")
-    ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes")
-    ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes")
-    ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes")
-    ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes")
-    ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes")
-    ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes")
-    ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes")
-    ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes")
-    ms.OtherSys = lookupOrZero("/memory/classes/other:bytes")
-    ms.NextGC = lookupOrZero("/gc/heap/goal:bytes")
+    ms.HeapObjects = lookupOrZero(goGCHeapObjects)
+    ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
+    ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
+    ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
+    ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
+    ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
+    ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
+    ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
+    ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
+    ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
+    ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
 
-    // N.B. LastGC is omitted because runtime.GCStats already has this.
-    // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
-    // for more details.
-    ms.LastGC = 0
-
     // N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
     // and often misleading due to the fact that it's an average over the lifetime
@@ -324,6 +477,11 @@ type batchHistogram struct {
 // buckets must always be from the runtime/metrics package, following
 // the same conventions.
 func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
+    // We need to remove -Inf values. runtime/metrics keeps them around.
+    // But -Inf bucket should not be allowed for prometheus histograms.
+    if buckets[0] == math.Inf(-1) {
+        buckets = buckets[1:]
+    }
     h := &batchHistogram{
         desc: desc,
         buckets: buckets,
@@ -382,9 +540,11 @@ func (h *batchHistogram) Write(out *dto.Metric) error {
     for i, count := range h.counts {
         totalCount += count
         if !h.hasSum {
+            if count != 0 {
             // N.B. This computed sum is an underestimate.
             sum += h.buckets[i] * float64(count)
         }
+            }
 
         // Skip the +Inf bucket, but only for the bucket list.
         // It must still count for sum and totalCount.
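
The hunks above rework NewGoCollector around an options struct (internal.GoCollectorOptions) with deny-by-default runtime/metrics rules and an optional MemStats-like view. That internal package is not importable by applications; the supported way to reach the same switches is the collectors package. The sketch below assumes the v1.13.0 option helpers collectors.WithGoCollectorRuntimeMetrics and collectors.GoRuntimeMetricsRule; treat those names as an assumption to verify against the release, not as something this diff shows.

package main

import (
    "log"
    "net/http"
    "regexp"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    reg := prometheus.NewRegistry()

    // Keep the default MemStats-like metrics and additionally opt in to the
    // scheduler latency histogram from runtime/metrics. The rule matcher is
    // an assumption based on the v1.13.0 collectors API.
    reg.MustRegister(collectors.NewGoCollector(
        collectors.WithGoCollectorRuntimeMetrics(collectors.GoRuntimeMetricsRule{
            Matcher: regexp.MustCompile(`^/sched/latencies:seconds$`),
        }),
    ))

    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
    log.Fatal(http.ListenAndServe(":9092", nil))
}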

src/vendor/github.com/prometheus/client_golang/prometheus/histogram.go (2 lines changed, generated, vendored)
@@ -581,11 +581,11 @@ func (h *constHistogram) Desc() *Desc {
 
 func (h *constHistogram) Write(out *dto.Metric) error {
     his := &dto.Histogram{}
 
     buckets := make([]*dto.Bucket, 0, len(h.buckets))
 
     his.SampleCount = proto.Uint64(h.count)
     his.SampleSum = proto.Float64(h.sum)
 
     for upperBound, count := range h.buckets {
         buckets = append(buckets, &dto.Bucket{
             CumulativeCount: proto.Uint64(count),

src/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go (new file, 651 lines, generated, vendored)
@@ -0,0 +1,651 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// Maintaining `GetUnifiedDiffString` here because original repository
+// (https://github.com/pmezard/go-difflib) is no loger maintained.
+package internal
+
+import (
+    "bufio"
+    "bytes"
+    "fmt"
+    "io"
+    "strings"
+)
+
+func min(a, b int) int {
+    if a < b {
+        return a
+    }
+    return b
+}
+
+func max(a, b int) int {
+    if a > b {
+        return a
+    }
+    return b
+}
+
+func calculateRatio(matches, length int) float64 {
+    if length > 0 {
+        return 2.0 * float64(matches) / float64(length)
+    }
+    return 1.0
+}
+
+type Match struct {
+    A int
+    B int
+    Size int
+}
+
+type OpCode struct {
+    Tag byte
+    I1 int
+    I2 int
+    J1 int
+    J2 int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files). That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing: Basic R-O is cubic time worst case and quadratic time expected
+// case. SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+    a []string
+    b []string
+    b2j map[string][]int
+    IsJunk func(string) bool
+    autoJunk bool
+    bJunk map[string]struct{}
+    matchingBlocks []Match
+    fullBCount map[string]int
+    bPopular map[string]struct{}
+    opCodes []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+    m := SequenceMatcher{autoJunk: true}
+    m.SetSeqs(a, b)
+    return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+    isJunk func(string) bool,
+) *SequenceMatcher {
+    m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+    m.SetSeqs(a, b)
+    return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+    m.SetSeq1(a)
+    m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
|
||||||
|
// sequence, so if you want to compare one sequence S against many sequences,
|
||||||
|
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
|
||||||
|
// sequences.
|
||||||
|
//
|
||||||
|
// See also SetSeqs() and SetSeq2().
|
||||||
|
func (m *SequenceMatcher) SetSeq1(a []string) {
|
||||||
|
if &a == &m.a {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.a = a
|
||||||
|
m.matchingBlocks = nil
|
||||||
|
m.opCodes = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the second sequence to be compared. The first sequence to be compared is
|
||||||
|
// not changed.
|
||||||
|
func (m *SequenceMatcher) SetSeq2(b []string) {
|
||||||
|
if &b == &m.b {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m.b = b
|
||||||
|
m.matchingBlocks = nil
|
||||||
|
m.opCodes = nil
|
||||||
|
m.fullBCount = nil
|
||||||
|
m.chainB()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SequenceMatcher) chainB() {
|
||||||
|
// Populate line -> index mapping
|
||||||
|
b2j := map[string][]int{}
|
||||||
|
for i, s := range m.b {
|
||||||
|
indices := b2j[s]
|
||||||
|
indices = append(indices, i)
|
||||||
|
b2j[s] = indices
|
||||||
|
}
|
||||||
|
|
||||||
|
// Purge junk elements
|
||||||
|
m.bJunk = map[string]struct{}{}
|
||||||
|
if m.IsJunk != nil {
|
||||||
|
junk := m.bJunk
|
||||||
|
for s := range b2j {
|
||||||
|
if m.IsJunk(s) {
|
||||||
|
junk[s] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for s := range junk {
|
||||||
|
delete(b2j, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Purge remaining popular elements
|
||||||
|
popular := map[string]struct{}{}
|
||||||
|
n := len(m.b)
|
||||||
|
if m.autoJunk && n >= 200 {
|
||||||
|
ntest := n/100 + 1
|
||||||
|
for s, indices := range b2j {
|
||||||
|
if len(indices) > ntest {
|
||||||
|
popular[s] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for s := range popular {
|
||||||
|
delete(b2j, s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.bPopular = popular
|
||||||
|
m.b2j = b2j
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *SequenceMatcher) isBJunk(s string) bool {
|
||||||
|
_, ok := m.bJunk[s]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
|
||||||
|
//
|
||||||
|
// If IsJunk is not defined:
|
||||||
|
//
|
||||||
|
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
|
||||||
|
// alo <= i <= i+k <= ahi
|
||||||
|
// blo <= j <= j+k <= bhi
|
||||||
|
// and for all (i',j',k') meeting those conditions,
|
||||||
|
// k >= k'
|
||||||
|
// i <= i'
|
||||||
|
// and if i == i', j <= j'
|
||||||
|
//
|
||||||
|
// In other words, of all maximal matching blocks, return one that
|
||||||
|
// starts earliest in a, and of all those maximal matching blocks that
|
||||||
|
// start earliest in a, return the one that starts earliest in b.
|
||||||
|
//
|
||||||
|
// If IsJunk is defined, first the longest matching block is
|
||||||
|
// determined as above, but with the additional restriction that no
|
||||||
|
// junk element appears in the block. Then that block is extended as
|
||||||
|
// far as possible by matching (only) junk elements on both sides. So
|
||||||
|
// the resulting block never matches on junk except as identical junk
|
||||||
|
// happens to be adjacent to an "interesting" match.
|
||||||
|
//
|
||||||
|
// If no blocks match, return (alo, blo, 0).
|
||||||
|
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
|
||||||
|
// CAUTION: stripping common prefix or suffix would be incorrect.
|
||||||
|
// E.g.,
|
||||||
|
// ab
|
||||||
|
// acab
|
||||||
|
// Longest matching block is "ab", but if common prefix is
|
||||||
|
// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
|
||||||
|
// strip, so ends up claiming that ab is changed to acab by
|
||||||
|
// inserting "ca" in the middle. That's minimal but unintuitive:
|
||||||
|
// "it's obvious" that someone inserted "ac" at the front.
|
||||||
|
// Windiff ends up at the same place as diff, but by pairing up
|
||||||
|
// the unique 'b's and then matching the first two 'a's.
|
||||||
|
besti, bestj, bestsize := alo, blo, 0
|
||||||
|
|
||||||
|
// find longest junk-free match
|
||||||
|
// during an iteration of the loop, j2len[j] = length of longest
|
||||||
|
// junk-free match ending with a[i-1] and b[j]
|
||||||
|
j2len := map[int]int{}
|
||||||
|
for i := alo; i != ahi; i++ {
|
||||||
|
// look at all instances of a[i] in b; note that because
|
||||||
|
// b2j has no junk keys, the loop is skipped if a[i] is junk
|
||||||
|
newj2len := map[int]int{}
|
||||||
|
for _, j := range m.b2j[m.a[i]] {
|
||||||
|
// a[i] matches b[j]
|
||||||
|
if j < blo {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if j >= bhi {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
k := j2len[j-1] + 1
|
||||||
|
newj2len[j] = k
|
||||||
|
if k > bestsize {
|
||||||
|
besti, bestj, bestsize = i-k+1, j-k+1, k
|
||||||
|
}
|
||||||
|
}
|
||||||
|
j2len = newj2len
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extend the best by non-junk elements on each end. In particular,
|
||||||
|
// "popular" non-junk elements aren't in b2j, which greatly speeds
|
||||||
|
// the inner loop above, but also means "the best" match so far
|
||||||
|
// doesn't contain any junk *or* popular non-junk elements.
|
||||||
|
for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
|
||||||
|
m.a[besti-1] == m.b[bestj-1] {
|
||||||
|
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||||
|
}
|
||||||
|
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||||
|
!m.isBJunk(m.b[bestj+bestsize]) &&
|
||||||
|
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||||
|
bestsize++
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now that we have a wholly interesting match (albeit possibly
|
||||||
|
// empty!), we may as well suck up the matching junk on each
|
||||||
|
// side of it too. Can't think of a good reason not to, and it
|
||||||
|
// saves post-processing the (possibly considerable) expense of
|
||||||
|
// figuring out what to do with it. In the case of an empty
|
||||||
|
// interesting match, this is clearly the right thing to do,
|
||||||
|
// because no other kind of match is possible in the regions.
|
||||||
|
for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
|
||||||
|
m.a[besti-1] == m.b[bestj-1] {
|
||||||
|
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
|
||||||
|
}
|
||||||
|
for besti+bestsize < ahi && bestj+bestsize < bhi &&
|
||||||
|
m.isBJunk(m.b[bestj+bestsize]) &&
|
||||||
|
m.a[besti+bestsize] == m.b[bestj+bestsize] {
|
||||||
|
bestsize++
|
||||||
|
}
|
||||||
|
|
||||||
|
return Match{A: besti, B: bestj, Size: bestsize}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return list of triples describing matching subsequences.
|
||||||
|
//
|
||||||
|
// Each triple is of the form (i, j, n), and means that
|
||||||
|
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
|
||||||
|
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
|
||||||
|
// adjacent triples in the list, and the second is not the last triple in the
|
||||||
|
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
|
||||||
|
// adjacent equal blocks.
|
||||||
|
//
|
||||||
|
// The last triple is a dummy, (len(a), len(b), 0), and is the only
|
||||||
|
// triple with n==0.
|
||||||
|
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
|
||||||
|
if m.matchingBlocks != nil {
|
||||||
|
return m.matchingBlocks
|
||||||
|
}
|
||||||
|
|
||||||
|
var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
|
||||||
|
matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
|
||||||
|
match := m.findLongestMatch(alo, ahi, blo, bhi)
|
||||||
|
i, j, k := match.A, match.B, match.Size
|
||||||
|
if match.Size > 0 {
|
||||||
|
if alo < i && blo < j {
|
||||||
|
matched = matchBlocks(alo, i, blo, j, matched)
|
||||||
|
}
|
||||||
|
matched = append(matched, match)
|
||||||
|
if i+k < ahi && j+k < bhi {
|
||||||
|
matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return matched
|
||||||
|
}
|
||||||
|
matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
|
||||||
|
|
||||||
|
// It's possible that we have adjacent equal blocks in the
|
||||||
|
// matching_blocks list now.
|
||||||
|
nonAdjacent := []Match{}
|
||||||
|
i1, j1, k1 := 0, 0, 0
|
||||||
|
for _, b := range matched {
|
||||||
|
// Is this block adjacent to i1, j1, k1?
|
||||||
|
i2, j2, k2 := b.A, b.B, b.Size
|
||||||
|
if i1+k1 == i2 && j1+k1 == j2 {
|
||||||
|
// Yes, so collapse them -- this just increases the length of
|
||||||
|
// the first block by the length of the second, and the first
|
||||||
|
// block so lengthened remains the block to compare against.
|
||||||
|
k1 += k2
|
||||||
|
} else {
|
||||||
|
// Not adjacent. Remember the first block (k1==0 means it's
|
||||||
|
// the dummy we started with), and make the second block the
|
||||||
|
// new block to compare against.
|
||||||
|
if k1 > 0 {
|
||||||
|
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||||
|
}
|
||||||
|
i1, j1, k1 = i2, j2, k2
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if k1 > 0 {
|
||||||
|
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
|
||||||
|
}
|
||||||
|
|
||||||
|
nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
|
||||||
|
m.matchingBlocks = nonAdjacent
|
||||||
|
return m.matchingBlocks
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return list of 5-tuples describing how to turn a into b.
|
||||||
|
//
|
||||||
|
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
|
||||||
|
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
|
||||||
|
// tuple preceding it, and likewise for j1 == the previous j2.
|
||||||
|
//
|
||||||
|
// The tags are characters, with these meanings:
|
||||||
|
//
|
||||||
|
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
|
||||||
|
//
|
||||||
|
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
|
||||||
|
//
|
||||||
|
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
|
||||||
|
//
|
||||||
|
// 'e' (equal): a[i1:i2] == b[j1:j2]
|
||||||
|
func (m *SequenceMatcher) GetOpCodes() []OpCode {
|
||||||
|
if m.opCodes != nil {
|
||||||
|
return m.opCodes
|
||||||
|
}
|
||||||
|
i, j := 0, 0
|
||||||
|
matching := m.GetMatchingBlocks()
|
||||||
|
opCodes := make([]OpCode, 0, len(matching))
|
||||||
|
for _, m := range matching {
|
||||||
|
// invariant: we've pumped out correct diffs to change
|
||||||
|
// a[:i] into b[:j], and the next matching block is
|
||||||
|
// a[ai:ai+size] == b[bj:bj+size]. So we need to pump
|
||||||
|
// out a diff to change a[i:ai] into b[j:bj], pump out
|
||||||
|
// the matching block, and move (i,j) beyond the match
|
||||||
|
ai, bj, size := m.A, m.B, m.Size
|
||||||
|
tag := byte(0)
|
||||||
|
if i < ai && j < bj {
|
||||||
|
tag = 'r'
|
||||||
|
} else if i < ai {
|
||||||
|
tag = 'd'
|
||||||
|
} else if j < bj {
|
||||||
|
tag = 'i'
|
||||||
|
}
|
||||||
|
if tag > 0 {
|
||||||
|
opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
|
||||||
|
}
|
||||||
|
i, j = ai+size, bj+size
|
||||||
|
// the list of matching blocks is terminated by a
|
||||||
|
// sentinel with size 0
|
||||||
|
if size > 0 {
|
||||||
|
opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
m.opCodes = opCodes
|
||||||
|
return m.opCodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// Isolate change clusters by eliminating ranges with no changes.
|
||||||
|
//
|
||||||
|
// Return a generator of groups with up to n lines of context.
|
||||||
|
// Each group is in the same format as returned by GetOpCodes().
|
||||||
|
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
|
||||||
|
if n < 0 {
|
||||||
|
n = 3
|
||||||
|
}
|
||||||
|
codes := m.GetOpCodes()
|
||||||
|
if len(codes) == 0 {
|
||||||
|
codes = []OpCode{{'e', 0, 1, 0, 1}}
|
||||||
|
}
|
||||||
|
// Fixup leading and trailing groups if they show no changes.
|
||||||
|
if codes[0].Tag == 'e' {
|
||||||
|
c := codes[0]
|
||||||
|
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||||
|
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
|
||||||
|
}
|
||||||
|
if codes[len(codes)-1].Tag == 'e' {
|
||||||
|
c := codes[len(codes)-1]
|
||||||
|
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||||
|
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
|
||||||
|
}
|
||||||
|
nn := n + n
|
||||||
|
groups := [][]OpCode{}
|
||||||
|
group := []OpCode{}
|
||||||
|
for _, c := range codes {
|
||||||
|
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||||
|
// End the current group and start a new one whenever
|
||||||
|
// there is a large range with no changes.
|
||||||
|
if c.Tag == 'e' && i2-i1 > nn {
|
||||||
|
group = append(group, OpCode{
|
||||||
|
c.Tag, i1, min(i2, i1+n),
|
||||||
|
j1, min(j2, j1+n),
|
||||||
|
})
|
||||||
|
groups = append(groups, group)
|
||||||
|
group = []OpCode{}
|
||||||
|
i1, j1 = max(i1, i2-n), max(j1, j2-n)
|
||||||
|
}
|
||||||
|
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
|
||||||
|
}
|
||||||
|
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
|
||||||
|
groups = append(groups, group)
|
||||||
|
}
|
||||||
|
return groups
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return a measure of the sequences' similarity (float in [0,1]).
|
||||||
|
//
|
||||||
|
// Where T is the total number of elements in both sequences, and
|
||||||
|
// M is the number of matches, this is 2.0*M / T.
|
||||||
|
// Note that this is 1 if the sequences are identical, and 0 if
|
||||||
|
// they have nothing in common.
|
||||||
|
//
|
||||||
|
// .Ratio() is expensive to compute if you haven't already computed
|
||||||
|
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
|
||||||
|
// want to try .QuickRatio() or .RealQuickRation() first to get an
|
||||||
|
// upper bound.
|
||||||
|
func (m *SequenceMatcher) Ratio() float64 {
|
||||||
|
matches := 0
|
||||||
|
for _, m := range m.GetMatchingBlocks() {
|
||||||
|
matches += m.Size
|
||||||
|
}
|
||||||
|
return calculateRatio(matches, len(m.a)+len(m.b))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return an upper bound on ratio() relatively quickly.
|
||||||
|
//
|
||||||
|
// This isn't defined beyond that it is an upper bound on .Ratio(), and
|
||||||
|
// is faster to compute.
|
||||||
|
func (m *SequenceMatcher) QuickRatio() float64 {
|
||||||
|
// viewing a and b as multisets, set matches to the cardinality
|
||||||
|
// of their intersection; this counts the number of matches
|
||||||
|
// without regard to order, so is clearly an upper bound
|
||||||
|
if m.fullBCount == nil {
|
||||||
|
m.fullBCount = map[string]int{}
|
||||||
|
for _, s := range m.b {
|
||||||
|
m.fullBCount[s]++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// avail[x] is the number of times x appears in 'b' less the
|
||||||
|
// number of times we've seen it in 'a' so far ... kinda
|
||||||
|
avail := map[string]int{}
|
||||||
|
matches := 0
|
||||||
|
for _, s := range m.a {
|
||||||
|
n, ok := avail[s]
|
||||||
|
if !ok {
|
||||||
|
n = m.fullBCount[s]
|
||||||
|
}
|
||||||
|
avail[s] = n - 1
|
||||||
|
if n > 0 {
|
||||||
|
matches++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return calculateRatio(matches, len(m.a)+len(m.b))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return an upper bound on ratio() very quickly.
|
||||||
|
//
|
||||||
|
// This isn't defined beyond that it is an upper bound on .Ratio(), and
|
||||||
|
// is faster to compute than either .Ratio() or .QuickRatio().
|
||||||
|
func (m *SequenceMatcher) RealQuickRatio() float64 {
|
||||||
|
la, lb := len(m.a), len(m.b)
|
||||||
|
return calculateRatio(min(la, lb), la+lb)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert range to the "ed" format
|
||||||
|
func formatRangeUnified(start, stop int) string {
|
||||||
|
// Per the diff spec at http://www.unix.org/single_unix_specification/
|
||||||
|
beginning := start + 1 // lines start numbering with one
|
||||||
|
length := stop - start
|
||||||
|
if length == 1 {
|
||||||
|
return fmt.Sprintf("%d", beginning)
|
||||||
|
}
|
||||||
|
if length == 0 {
|
||||||
|
beginning-- // empty ranges begin at line just before the range
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d,%d", beginning, length)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unified diff parameters
|
||||||
|
type UnifiedDiff struct {
|
||||||
|
A []string // First sequence lines
|
||||||
|
FromFile string // First file name
|
||||||
|
FromDate string // First file time
|
||||||
|
B []string // Second sequence lines
|
||||||
|
ToFile string // Second file name
|
||||||
|
ToDate string // Second file time
|
||||||
|
Eol string // Headers end of line, defaults to LF
|
||||||
|
Context int // Number of context lines
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare two sequences of lines; generate the delta as a unified diff.
|
||||||
|
//
|
||||||
|
// Unified diffs are a compact way of showing line changes and a few
|
||||||
|
// lines of context. The number of context lines is set by 'n' which
|
||||||
|
// defaults to three.
|
||||||
|
//
|
||||||
|
// By default, the diff control lines (those with ---, +++, or @@) are
|
||||||
|
// created with a trailing newline. This is helpful so that inputs
|
||||||
|
// created from file.readlines() result in diffs that are suitable for
|
||||||
|
// file.writelines() since both the inputs and outputs have trailing
|
||||||
|
// newlines.
|
||||||
|
//
|
||||||
|
// For inputs that do not have trailing newlines, set the lineterm
|
||||||
|
// argument to "" so that the output will be uniformly newline free.
|
||||||
|
//
|
||||||
|
// The unidiff format normally has a header for filenames and modification
|
||||||
|
// times. Any or all of these may be specified using strings for
|
||||||
|
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
|
||||||
|
// The modification times are normally expressed in the ISO 8601 format.
|
||||||
|
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
|
||||||
|
buf := bufio.NewWriter(writer)
|
||||||
|
defer buf.Flush()
|
||||||
|
wf := func(format string, args ...interface{}) error {
|
||||||
|
_, err := buf.WriteString(fmt.Sprintf(format, args...))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ws := func(s string) error {
|
||||||
|
_, err := buf.WriteString(s)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(diff.Eol) == 0 {
|
||||||
|
diff.Eol = "\n"
|
||||||
|
}
|
||||||
|
|
||||||
|
started := false
|
||||||
|
m := NewMatcher(diff.A, diff.B)
|
||||||
|
for _, g := range m.GetGroupedOpCodes(diff.Context) {
|
||||||
|
if !started {
|
||||||
|
started = true
|
||||||
|
fromDate := ""
|
||||||
|
if len(diff.FromDate) > 0 {
|
||||||
|
fromDate = "\t" + diff.FromDate
|
||||||
|
}
|
||||||
|
toDate := ""
|
||||||
|
if len(diff.ToDate) > 0 {
|
||||||
|
toDate = "\t" + diff.ToDate
|
||||||
|
}
|
||||||
|
if diff.FromFile != "" || diff.ToFile != "" {
|
||||||
|
err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
first, last := g[0], g[len(g)-1]
|
||||||
|
range1 := formatRangeUnified(first.I1, last.I2)
|
||||||
|
range2 := formatRangeUnified(first.J1, last.J2)
|
||||||
|
if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, c := range g {
|
||||||
|
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
|
||||||
|
if c.Tag == 'e' {
|
||||||
|
for _, line := range diff.A[i1:i2] {
|
||||||
|
if err := ws(" " + line); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if c.Tag == 'r' || c.Tag == 'd' {
|
||||||
|
for _, line := range diff.A[i1:i2] {
|
||||||
|
if err := ws("-" + line); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if c.Tag == 'r' || c.Tag == 'i' {
|
||||||
|
for _, line := range diff.B[j1:j2] {
|
||||||
|
if err := ws("+" + line); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Like WriteUnifiedDiff but returns the diff a string.
|
||||||
|
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
|
||||||
|
w := &bytes.Buffer{}
|
||||||
|
err := WriteUnifiedDiff(w, diff)
|
||||||
|
return w.String(), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Split a string on "\n" while preserving them. The output can be used
|
||||||
|
// as input for UnifiedDiff and ContextDiff structures.
|
||||||
|
func SplitLines(s string) []string {
|
||||||
|
lines := strings.SplitAfter(s, "\n")
|
||||||
|
lines[len(lines)-1] += "\n"
|
||||||
|
return lines
|
||||||
|
}
|
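The vendored helpers above produce standard unified diffs from two string slices. A minimal sketch of how the pieces fit together is shown below; it is illustrative only, since this copy lives in client_golang's `internal` package and cannot be imported by Harbor code, and the input strings and file names are hypothetical.

```go
// exampleUnifiedDiff shows how SplitLines, UnifiedDiff and GetUnifiedDiffString
// combine; it would only compile inside the internal package (e.g. in a test).
func exampleUnifiedDiff() (string, error) {
	before := "one\ntwo\nthree\n" // hypothetical "before" content
	after := "one\n2\nthree\n"    // hypothetical "after" content
	diff := UnifiedDiff{
		A:        SplitLines(before),
		B:        SplitLines(after),
		FromFile: "before.txt",
		ToFile:   "after.txt",
		Context:  3, // three lines of context, like plain `diff -u`
	}
	return GetUnifiedDiffString(diff)
}
```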
32  src/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go  generated  vendored  Normal file

@@ -0,0 +1,32 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import "regexp"

type GoCollectorRule struct {
    Matcher *regexp.Regexp
    Deny    bool
}

// GoCollectorOptions should not be used be directly by anything, except `collectors` package.
// Use it via collectors package instead. See issue
// https://github.com/prometheus/client_golang/issues/1030.
//
// This is internal, so external users only can use it via `collector.WithGoCollector*` methods
type GoCollectorOptions struct {
    DisableMemStatsLikeMetrics bool
    RuntimeMetricSumForHist    map[string]string
    RuntimeMetricRules         []GoCollectorRule
}
18  src/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go  generated  vendored

@@ -61,9 +61,9 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
 	// name has - replaced with _ and is concatenated with the unit and
 	// other data.
 	name = strings.ReplaceAll(name, "-", "_")
-	name = name + "_" + unit
-	if d.Cumulative {
-		name = name + "_total"
+	name += "_" + unit
+	if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
+		name += "_total"
 	}
 
 	valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
@@ -84,12 +84,12 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
 func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
 	switch unit {
 	case "bytes":
-		// Rebucket as powers of 2.
-		return rebucketExp(buckets, 2)
+		// Re-bucket as powers of 2.
+		return reBucketExp(buckets, 2)
 	case "seconds":
-		// Rebucket as powers of 10 and then merge all buckets greater
+		// Re-bucket as powers of 10 and then merge all buckets greater
 		// than 1 second into the +Inf bucket.
-		b := rebucketExp(buckets, 10)
+		b := reBucketExp(buckets, 10)
 		for i := range b {
 			if b[i] <= 1 {
 				continue
@@ -103,11 +103,11 @@ func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
 	return buckets
 }
 
-// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and
+// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
 // downsamples the buckets to those a multiple of base apart. The end result
 // is a roughly exponential (in many cases, perfectly exponential) bucketing
 // scheme.
-func rebucketExp(buckets []float64, base float64) []float64 {
+func reBucketExp(buckets []float64, base float64) []float64 {
 	bucket := buckets[0]
 	var newBuckets []float64
 	// We may see a -Inf here, in which case, add it and skip it
28  src/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go  generated  vendored

@@ -19,18 +19,34 @@ import (
 	dto "github.com/prometheus/client_model/go"
 )
 
-// metricSorter is a sortable slice of *dto.Metric.
-type metricSorter []*dto.Metric
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type LabelPairSorter []*dto.LabelPair
 
-func (s metricSorter) Len() int {
+func (s LabelPairSorter) Len() int {
 	return len(s)
 }
 
-func (s metricSorter) Swap(i, j int) {
+func (s LabelPairSorter) Swap(i, j int) {
 	s[i], s[j] = s[j], s[i]
 }
 
-func (s metricSorter) Less(i, j int) bool {
+func (s LabelPairSorter) Less(i, j int) bool {
+	return s[i].GetName() < s[j].GetName()
+}
+
+// MetricSorter is a sortable slice of *dto.Metric.
+type MetricSorter []*dto.Metric
+
+func (s MetricSorter) Len() int {
+	return len(s)
+}
+
+func (s MetricSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s MetricSorter) Less(i, j int) bool {
 	if len(s[i].Label) != len(s[j].Label) {
 		// This should not happen. The metrics are
 		// inconsistent. However, we have to deal with the fact, as
@@ -68,7 +84,7 @@ func (s metricSorter) Less(i, j int) bool {
 // the slice, with the contained Metrics sorted within each MetricFamily.
 func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
 	for _, mf := range metricFamiliesByName {
-		sort.Sort(metricSorter(mf.Metric))
+		sort.Sort(MetricSorter(mf.Metric))
 	}
 	names := make([]string, 0, len(metricFamiliesByName))
 	for name, mf := range metricFamiliesByName {
6  src/vendor/github.com/prometheus/client_golang/prometheus/labels.go  generated  vendored

@@ -39,7 +39,7 @@ var errInconsistentCardinality = errors.New("inconsistent label cardinality")
 
 func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
 	return fmt.Errorf(
-		"%s: %q has %d variable labels named %q but %d values %q were provided",
+		"%w: %q has %d variable labels named %q but %d values %q were provided",
 		errInconsistentCardinality, fqName,
 		len(labels), labels,
 		len(labelValues), labelValues,
@@ -49,7 +49,7 @@ func makeInconsistentCardinalityError(fqName string, labels, labelValues []strin
 func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
 	if len(labels) != expectedNumberOfValues {
 		return fmt.Errorf(
-			"%s: expected %d label values but got %d in %#v",
+			"%w: expected %d label values but got %d in %#v",
 			errInconsistentCardinality, expectedNumberOfValues,
 			len(labels), labels,
 		)
@@ -67,7 +67,7 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
 func validateLabelValues(vals []string, expectedNumberOfValues int) error {
 	if len(vals) != expectedNumberOfValues {
 		return fmt.Errorf(
-			"%s: expected %d label values but got %d in %#v",
+			"%w: expected %d label values but got %d in %#v",
 			errInconsistentCardinality, expectedNumberOfValues,
 			len(vals), vals,
 		)
112  src/vendor/github.com/prometheus/client_golang/prometheus/metric.go  generated  vendored

@@ -14,6 +14,9 @@
 package prometheus
 
 import (
+	"errors"
+	"math"
+	"sort"
 	"strings"
 	"time"
 
@@ -115,22 +118,6 @@ func BuildFQName(namespace, subsystem, name string) string {
 	return name
 }
 
-// labelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers.
-type labelPairSorter []*dto.LabelPair
-
-func (s labelPairSorter) Len() int {
-	return len(s)
-}
-
-func (s labelPairSorter) Swap(i, j int) {
-	s[i], s[j] = s[j], s[i]
-}
-
-func (s labelPairSorter) Less(i, j int) bool {
-	return s[i].GetName() < s[j].GetName()
-}
-
 type invalidMetric struct {
 	desc *Desc
 	err  error
@@ -174,3 +161,96 @@ func (m timestampedMetric) Write(pb *dto.Metric) error {
 func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
 	return timestampedMetric{Metric: m, t: t}
 }
+
+type withExemplarsMetric struct {
+	Metric
+
+	exemplars []*dto.Exemplar
+}
+
+func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
+	if err := m.Metric.Write(pb); err != nil {
+		return err
+	}
+
+	switch {
+	case pb.Counter != nil:
+		pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
+	case pb.Histogram != nil:
+		for _, e := range m.exemplars {
+			// pb.Histogram.Bucket are sorted by UpperBound.
+			i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
+				return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
+			})
+			if i < len(pb.Histogram.Bucket) {
+				pb.Histogram.Bucket[i].Exemplar = e
+			} else {
+				// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
+				b := &dto.Bucket{
+					CumulativeCount: proto.Uint64(pb.Histogram.Bucket[len(pb.Histogram.GetBucket())-1].GetCumulativeCount()),
+					UpperBound:      proto.Float64(math.Inf(1)),
+					Exemplar:        e,
+				}
+				pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
+			}
+		}
+	default:
+		// TODO(bwplotka): Implement Gauge?
+		return errors.New("cannot inject exemplar into Gauge, Summary or Untyped")
+	}
+
+	return nil
+}
+
+// Exemplar is easier to use, user-facing representation of *dto.Exemplar.
+type Exemplar struct {
+	Value  float64
+	Labels Labels
+	// Optional.
+	// Default value (time.Time{}) indicates its empty, which should be
+	// understood as time.Now() time at the moment of creation of metric.
+	Timestamp time.Time
+}
+
+// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given
+// exemplars. Exemplars are validated.
+//
+// Only last applicable exemplar is injected from the list.
+// For example for Counter it means last exemplar is injected.
+// For Histogram, it means last applicable exemplar for each bucket is injected.
+//
+// NewMetricWithExemplars works best with MustNewConstMetric and
+// MustNewConstHistogram, see example.
+func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
+	if len(exemplars) == 0 {
+		return nil, errors.New("no exemplar was passed for NewMetricWithExemplars")
+	}
+
+	var (
+		now = time.Now()
+		exs = make([]*dto.Exemplar, len(exemplars))
+		err error
+	)
+	for i, e := range exemplars {
+		ts := e.Timestamp
+		if ts == (time.Time{}) {
+			ts = now
+		}
+		exs[i], err = newExemplar(e.Value, ts, e.Labels)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &withExemplarsMetric{Metric: m, exemplars: exs}, nil
+}
+
+// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where
+// NewMetricWithExemplars would have returned an error.
+func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric {
+	ret, err := NewMetricWithExemplars(m, exemplars...)
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
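The new exemplar API above wraps an existing Metric with one or more Exemplar values at Write time. A minimal sketch of attaching a trace-ID exemplar to a const counter follows; the descriptor name, value and trace ID are illustrative, not taken from Harbor code.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// constCounterWithExemplar builds a const counter and wraps it with a single
// exemplar, as a custom Collector might do inside its Collect method.
func constCounterWithExemplar() prometheus.Metric {
	desc := prometheus.NewDesc("http_requests_total", "Total handled requests.", nil, nil)
	m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42)
	return prometheus.MustNewMetricWithExemplars(m, prometheus.Exemplar{
		Value:  42,
		Labels: prometheus.Labels{"trace_id": "abc123"}, // hypothetical trace ID
	})
}
```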
25  src/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go  generated  vendored  Normal file

@@ -0,0 +1,25 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !js || wasm
// +build !js wasm

package prometheus

import "runtime"

// getRuntimeNumThreads returns the number of open OS threads.
func getRuntimeNumThreads() float64 {
    n, _ := runtime.ThreadCreateProfile(nil)
    return float64(n)
}
22  src/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go  generated  vendored  Normal file

@@ -0,0 +1,22 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build js && !wasm
// +build js,!wasm

package prometheus

// getRuntimeNumThreads returns the number of open OS threads.
func getRuntimeNumThreads() float64 {
    return 1
}
2  src/vendor/github.com/prometheus/client_golang/prometheus/observer.go  generated  vendored

@@ -58,7 +58,7 @@ type ObserverVec interface {
 // current time as timestamp, and the provided Labels. Empty Labels will lead to
 // a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
 // left in place. ObserveWithExemplar panics if any of the provided labels are
-// invalid or if the provided labels contain more than 64 runes in total.
+// invalid or if the provided labels contain more than 128 runes in total.
 type ExemplarObserver interface {
 	ObserveWithExemplar(value float64, exemplar Labels)
 }
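ExemplarObserver is an optional interface implemented by the built-in histogram. A minimal sketch of using it, with a fallback when the underlying Observer does not support exemplars, is below; the observation value and label are illustrative.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// observeWithTrace records a latency observation and, when supported, attaches
// a trace-ID exemplar (total exemplar label length must stay within the new
// 128-rune limit documented above).
func observeWithTrace(obs prometheus.Observer, seconds float64, traceID string) {
	if eo, ok := obs.(prometheus.ExemplarObserver); ok {
		eo.ObserveWithExemplar(seconds, prometheus.Labels{"trace_id": traceID})
		return
	}
	obs.Observe(seconds)
}
```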
10  src/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go  generated  vendored

@@ -16,7 +16,6 @@ package prometheus
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"strconv"
 	"strings"
@@ -104,8 +103,7 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
 	}
 
 	if opts.PidFn == nil {
-		pid := os.Getpid()
-		c.pidFn = func() (int, error) { return pid, nil }
+		c.pidFn = getPIDFn()
 	} else {
 		c.pidFn = opts.PidFn
 	}
@@ -152,13 +150,13 @@ func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error)
 // It is meant to be used for the PidFn field in ProcessCollectorOpts.
 func NewPidFileFn(pidFilePath string) func() (int, error) {
 	return func() (int, error) {
-		content, err := ioutil.ReadFile(pidFilePath)
+		content, err := os.ReadFile(pidFilePath)
 		if err != nil {
-			return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err)
+			return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err)
 		}
 		pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
 		if err != nil {
-			return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err)
+			return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err)
 		}
 
 		return pid, nil
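NewPidFileFn, touched above, plugs into ProcessCollectorOpts to collect process metrics for a process other than the current one. A short sketch follows; the pid-file path and namespace are illustrative.

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// registerExternalProcessCollector exports process_* metrics for a process
// whose PID is read from a pid file on each scrape.
func registerExternalProcessCollector() {
	pc := prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		PidFn:     prometheus.NewPidFileFn("/var/run/myapp.pid"), // hypothetical path
		Namespace: "myapp",
	})
	prometheus.MustRegister(pc)
}
```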
26  src/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go  generated  vendored  Normal file

@@ -0,0 +1,26 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build js
// +build js

package prometheus

func canCollectProcess() bool {
    return false
}

func (c *processCollector) processCollect(ch chan<- Metric) {
    // noop on this platform
    return
}
4  src/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go  generated  vendored

@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//go:build !windows
-// +build !windows
+//go:build !windows && !js
+// +build !windows,!js
 
 package prometheus
 
18  src/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go  generated  vendored

@@ -76,16 +76,19 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) {
 	return n, err
 }
 
-type closeNotifierDelegator struct{ *responseWriterDelegator }
-type flusherDelegator struct{ *responseWriterDelegator }
-type hijackerDelegator struct{ *responseWriterDelegator }
-type readerFromDelegator struct{ *responseWriterDelegator }
-type pusherDelegator struct{ *responseWriterDelegator }
+type (
+	closeNotifierDelegator struct{ *responseWriterDelegator }
+	flusherDelegator       struct{ *responseWriterDelegator }
+	hijackerDelegator      struct{ *responseWriterDelegator }
+	readerFromDelegator    struct{ *responseWriterDelegator }
+	pusherDelegator        struct{ *responseWriterDelegator }
+)
 
 func (d closeNotifierDelegator) CloseNotify() <-chan bool {
 	//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
 	return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
 }
 
 func (d flusherDelegator) Flush() {
 	// If applicable, call WriteHeader here so that observeWriteHeader is
 	// handled appropriately.
@@ -94,9 +97,11 @@ func (d flusherDelegator) Flush() {
 	}
 	d.ResponseWriter.(http.Flusher).Flush()
 }
+
 func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
 	return d.ResponseWriter.(http.Hijacker).Hijack()
 }
+
 func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
 	// If applicable, call WriteHeader here so that observeWriteHeader is
 	// handled appropriately.
@@ -107,6 +112,7 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
 	d.written += n
 	return n, err
 }
+
 func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
 	return d.ResponseWriter.(http.Pusher).Push(target, opts)
 }
@@ -261,7 +267,7 @@ func init() {
 			http.Flusher
 		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
 	}
-	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
 		return struct {
 			*responseWriterDelegator
 			http.Pusher
20  src/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go  generated  vendored

@@ -33,6 +33,7 @@ package promhttp
 
 import (
 	"compress/gzip"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -84,6 +85,13 @@ func Handler() http.Handler {
 // instrumentation. Use the InstrumentMetricHandler function to apply the same
 // kind of instrumentation as it is used by the Handler function.
 func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+	return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts)
+}
+
+// HandlerForTransactional is like HandlerFor, but it uses transactional gather, which
+// can safely change in-place returned *dto.MetricFamily before call to `Gather` and after
+// call to `done` of that `Gather`.
+func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler {
 	var (
 		inFlightSem chan struct{}
 		errCnt      = prometheus.NewCounterVec(
@@ -103,7 +111,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 		errCnt.WithLabelValues("gathering")
 		errCnt.WithLabelValues("encoding")
 		if err := opts.Registry.Register(errCnt); err != nil {
-			if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			are := &prometheus.AlreadyRegisteredError{}
+			if errors.As(err, are) {
 				errCnt = are.ExistingCollector.(*prometheus.CounterVec)
 			} else {
 				panic(err)
@@ -123,7 +132,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
 				return
 			}
 		}
-		mfs, err := reg.Gather()
+		mfs, done, err := reg.Gather()
+		defer done()
 		if err != nil {
 			if opts.ErrorLog != nil {
 				opts.ErrorLog.Println("error gathering metrics:", err)
@@ -242,7 +252,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht
 		cnt.WithLabelValues("500")
 		cnt.WithLabelValues("503")
 		if err := reg.Register(cnt); err != nil {
-			if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			are := &prometheus.AlreadyRegisteredError{}
+			if errors.As(err, are) {
 				cnt = are.ExistingCollector.(*prometheus.CounterVec)
 			} else {
 				panic(err)
@@ -254,7 +265,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht
 			Help: "Current number of scrapes being served.",
 		})
 		if err := reg.Register(gge); err != nil {
-			if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+			are := &prometheus.AlreadyRegisteredError{}
+			if errors.As(err, are) {
 				gge = are.ExistingCollector.(prometheus.Gauge)
 			} else {
 				panic(err)
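With this version, HandlerFor simply delegates to the new HandlerForTransactional, so existing callers keep working. A minimal sketch of exposing a custom registry over HTTP is below; the listen address is illustrative.

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(collectors.NewGoCollector())
	// HandlerFor wraps the registry via prometheus.ToTransactionalGatherer internally.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":9090", nil)
}
```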
39  src/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go  generated  vendored

@@ -38,11 +38,11 @@ func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
 //
 // See the example for ExampleInstrumentRoundTripperDuration for example usage.
 func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
-	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+	return func(r *http.Request) (*http.Response, error) {
 		gauge.Inc()
 		defer gauge.Dec()
 		return next.RoundTrip(r)
-	})
+	}
 }
 
 // InstrumentRoundTripperCounter is a middleware that wraps the provided
@@ -59,22 +59,29 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp
 // If the wrapped RoundTripper panics or returns a non-nil error, the Counter
 // is not incremented.
 //
+// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests.
+//
 // See the example for ExampleInstrumentRoundTripperDuration for example usage.
 func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
-	rtOpts := &option{}
+	rtOpts := defaultOptions()
 	for _, o := range opts {
-		o(rtOpts)
+		o.apply(rtOpts)
 	}
 
 	code, method := checkLabels(counter)
 
-	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+	return func(r *http.Request) (*http.Response, error) {
 		resp, err := next.RoundTrip(r)
 		if err == nil {
+			exemplarAdd(
+				counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
+				1,
+				rtOpts.getExemplarFn(r.Context()),
+			)
 			counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
 		}
 		return resp, err
-	})
+	}
 }
 
 // InstrumentRoundTripperDuration is a middleware that wraps the provided
@@ -94,24 +101,30 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
 // If the wrapped RoundTripper panics or returns a non-nil error, no values are
 // reported.
 //
+// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms.
+//
 // Note that this method is only guaranteed to never observe negative durations
 // if used with Go1.9+.
 func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
-	rtOpts := &option{}
+	rtOpts := defaultOptions()
 	for _, o := range opts {
-		o(rtOpts)
+		o.apply(rtOpts)
 	}
 
 	code, method := checkLabels(obs)
 
-	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+	return func(r *http.Request) (*http.Response, error) {
 		start := time.Now()
 		resp, err := next.RoundTrip(r)
 		if err == nil {
-			obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds())
+			exemplarObserve(
+				obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
+				time.Since(start).Seconds(),
+				rtOpts.getExemplarFn(r.Context()),
+			)
 		}
 		return resp, err
})
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// InstrumentTrace is used to offer flexibility in instrumenting the available
|
// InstrumentTrace is used to offer flexibility in instrumenting the available
|
||||||
@ -149,7 +162,7 @@ type InstrumentTrace struct {
|
|||||||
//
|
//
|
||||||
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
|
||||||
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
|
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
|
||||||
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
|
return func(r *http.Request) (*http.Response, error) {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
|
|
||||||
trace := &httptrace.ClientTrace{
|
trace := &httptrace.ClientTrace{
|
||||||
@ -231,5 +244,5 @@ func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) Ro
|
|||||||
r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
|
r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
|
||||||
|
|
||||||
return next.RoundTrip(r)
|
return next.RoundTrip(r)
|
||||||
})
|
}
|
||||||
}
|
}
|
||||||
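Usage sketch for the round-tripper middlewares above (not part of the commit; metric names are illustrative): the wrappers nest around an http.RoundTripper, and the counter and histogram may only carry "code" and/or "method" labels.

```go
package example

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func newInstrumentedClient(reg prometheus.Registerer) *http.Client {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests", // illustrative name
		Help: "In-flight outbound HTTP requests.",
	})
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "client_api_requests_total", // illustrative name
		Help: "Outbound requests by status code and method.",
	}, []string{"code", "method"})
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "client_request_duration_seconds", // illustrative name
		Help:    "Outbound request latencies.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	reg.MustRegister(inFlight, counter, duration)

	// Wrap the default transport; the innermost middleware runs closest to the request.
	rt := promhttp.InstrumentRoundTripperInFlight(inFlight,
		promhttp.InstrumentRoundTripperCounter(counter,
			promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport),
		),
	)
	return &http.Client{Transport: rt}
}
```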
119 src/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go generated vendored
@@ -28,6 +28,22 @@ import (
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"

+func exemplarObserve(obs prometheus.Observer, val float64, labels map[string]string) {
+if labels == nil {
+obs.Observe(val)
+return
+}
+obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
+}
+
+func exemplarAdd(obs prometheus.Counter, val float64, labels map[string]string) {
+if labels == nil {
+obs.Add(val)
+return
+}
+obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels)
+}
+
// InstrumentHandlerInFlight is a middleware that wraps the provided
// http.Handler. It sets the provided prometheus.Gauge to the number of
// requests currently handled by the wrapped http.Handler.
@@ -48,7 +64,7 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
// names are "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
-//`WithExtraMethods` can be used to add more methods to the set. The Observe
+// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the request duration
// in seconds. Partitioning happens by HTTP status code and/or HTTP method if
// the respective instance label names are present in the ObserverVec. For
@@ -62,28 +78,37 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
-mwOpts := &option{}
+hOpts := defaultOptions()
for _, o := range opts {
-o(mwOpts)
+o.apply(hOpts)
}

code, method := checkLabels(obs)

if code {
-return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, nil)
next.ServeHTTP(d, r)

-obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
-})
+exemplarObserve(
+obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
+time.Since(now).Seconds(),
+hOpts.getExemplarFn(r.Context()),
+)
+}
}

-return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
-obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
-})
+exemplarObserve(
+obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
+time.Since(now).Seconds(),
+hOpts.getExemplarFn(r.Context()),
+)
+}
}

// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
@@ -104,25 +129,34 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
-mwOpts := &option{}
+hOpts := defaultOptions()
for _, o := range opts {
-o(mwOpts)
+o.apply(hOpts)
}

code, method := checkLabels(counter)

if code {
-return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
-counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc()
-})
+exemplarAdd(
+counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
+1,
+hOpts.getExemplarFn(r.Context()),
+)
+}
}

-return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
-counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc()
-})
+exemplarAdd(
+counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
+1,
+hOpts.getExemplarFn(r.Context()),
+)
+}
}

// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
@@ -148,20 +182,24 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
-mwOpts := &option{}
+hOpts := defaultOptions()
for _, o := range opts {
-o(mwOpts)
+o.apply(hOpts)
}

code, method := checkLabels(obs)

-return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
-obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
+exemplarObserve(
+obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
+time.Since(now).Seconds(),
+hOpts.getExemplarFn(r.Context()),
+)
})
next.ServeHTTP(d, r)
-})
+}
}

// InstrumentHandlerRequestSize is a middleware that wraps the provided
@@ -184,27 +222,34 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
-mwOpts := &option{}
+hOpts := defaultOptions()
for _, o := range opts {
-o(mwOpts)
+o.apply(hOpts)
}

code, method := checkLabels(obs)

if code {
-return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
-obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size))
-})
+exemplarObserve(
+obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
+float64(size),
+hOpts.getExemplarFn(r.Context()),
+)
+}
}

-return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
-obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size))
-})
+exemplarObserve(
+obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
+float64(size),
+hOpts.getExemplarFn(r.Context()),
+)
+}
}

// InstrumentHandlerResponseSize is a middleware that wraps the provided
@@ -227,9 +272,9 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
-mwOpts := &option{}
+hOpts := defaultOptions()
for _, o := range opts {
-o(mwOpts)
+o.apply(hOpts)
}

code, method := checkLabels(obs)
@@ -237,7 +282,11 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
-obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written()))
+exemplarObserve(
+obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
+float64(d.Written()),
+hOpts.getExemplarFn(r.Context()),
+)
})
}

@@ -246,7 +295,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
// Collector does not have a Desc or has more than one Desc or its Desc is
// invalid. It also panics if the Collector has any non-const, non-curried
// labels that are not named "code" or "method".
-func checkLabels(c prometheus.Collector) (code bool, method bool) {
+func checkLabels(c prometheus.Collector) (code, method bool) {
// TODO(beorn7): Remove this hacky way to check for instance labels
// once Descriptors can have their dimensionality queried.
var (
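Usage sketch for the server-side middlewares above (not part of the commit; metric names are illustrative): InstrumentHandlerDuration and InstrumentHandlerCounter return http.HandlerFunc, so they nest directly around a handler.

```go
package example

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func instrument(reg prometheus.Registerer, next http.Handler) http.Handler {
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "http_requests_total", // illustrative name
		Help: "Requests by status code and method.",
	}, []string{"code", "method"})
	latency := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds", // illustrative name
		Help:    "Request latencies.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	reg.MustRegister(requests, latency)

	// Outer middleware observes duration, inner one counts requests.
	return promhttp.InstrumentHandlerDuration(latency,
		promhttp.InstrumentHandlerCounter(requests, next),
	)
}
```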
39 src/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go generated vendored
@@ -13,19 +13,46 @@

package promhttp

-// Option are used to configure a middleware or round tripper..
-type Option func(*option)
+import (
+"context"

-type option struct {
-extraMethods []string
+"github.com/prometheus/client_golang/prometheus"
+)

+// Option are used to configure both handler (middleware) or round tripper.
+type Option interface {
+apply(*options)
}

+// options store options for both a handler or round tripper.
+type options struct {
+extraMethods []string
+getExemplarFn func(requestCtx context.Context) prometheus.Labels
+}
+
+func defaultOptions() *options {
+return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }}
+}
+
+type optionApplyFunc func(*options)
+
+func (o optionApplyFunc) apply(opt *options) { o(opt) }
+
// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
//
// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage.
func WithExtraMethods(methods ...string) Option {
-return func(o *option) {
+return optionApplyFunc(func(o *options) {
o.extraMethods = methods
-}
+})
+}
+
+// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics.
+// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric
+// will get instrumented without exemplar.
+func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
+return optionApplyFunc(func(o *options) {
+o.getExemplarFn = getExemplarFn
+})
}
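A sketch of the new WithExemplarFromContext option in use (assumed application-side code; the context key type and the "trace_id" exemplar label are illustrative, not part of the commit). The returned Option can be passed to any of the Instrument* middlewares above; returning nil labels means the metric is updated without an exemplar.

```go
package example

import (
	"context"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// traceIDKey is a placeholder for however the application stores its trace ID.
type traceIDKey struct{}

func exemplarOption() promhttp.Option {
	return promhttp.WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
		if id, ok := ctx.Value(traceIDKey{}).(string); ok && id != "" {
			return prometheus.Labels{"trace_id": id} // illustrative exemplar label
		}
		return nil // no exemplar for this request
	})
}
```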
118 src/vendor/github.com/prometheus/client_golang/prometheus/registry.go generated vendored
@@ -15,8 +15,8 @@ package prometheus

import (
"bytes"
+"errors"
"fmt"
-"io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -289,7 +289,7 @@ func (r *Registry) Register(c Collector) error {

// Is the descriptor valid at all?
if desc.err != nil {
-return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err)
}

// Is the descID unique?
@@ -407,6 +407,14 @@ func (r *Registry) MustRegister(cs ...Collector) {

// Gather implements Gatherer.
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+r.mtx.RLock()
+
+if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 {
+// Fast path.
+r.mtx.RUnlock()
+return nil, nil
+}
+
var (
checkedMetricChan = make(chan Metric, capMetricChan)
uncheckedMetricChan = make(chan Metric, capMetricChan)
@@ -416,7 +424,6 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
)

-r.mtx.RLock()
goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
checkedCollectors := make(chan Collector, len(r.collectorsByID))
@@ -556,7 +563,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
// This is intended for use with the textfile collector of the node exporter.
// Note that the node exporter expects the filename to be suffixed with ".prom".
func WriteToTextfile(filename string, g Gatherer) error {
-tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
+tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
if err != nil {
return err
}
@@ -575,7 +582,7 @@ func WriteToTextfile(filename string, g Gatherer) error {
return err
}

-if err := os.Chmod(tmp.Name(), 0644); err != nil {
+if err := os.Chmod(tmp.Name(), 0o644); err != nil {
return err
}
return os.Rename(tmp.Name(), filename)
@@ -596,7 +603,7 @@ func processMetric(
}
dtoMetric := &dto.Metric{}
if err := metric.Write(dtoMetric); err != nil {
-return fmt.Errorf("error collecting metric %v: %s", desc, err)
+return fmt.Errorf("error collecting metric %v: %w", desc, err)
}
metricFamily, ok := metricFamiliesByName[desc.fqName]
if ok { // Existing name.
@@ -718,12 +725,13 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
for i, g := range gs {
mfs, err := g.Gather()
if err != nil {
-if multiErr, ok := err.(MultiError); ok {
+multiErr := MultiError{}
+if errors.As(err, &multiErr) {
for _, err := range multiErr {
-errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
}
} else {
-errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
}
}
for _, mf := range mfs {
@@ -884,11 +892,11 @@ func checkMetricConsistency(
h.Write(separatorByteSlice)
// Make sure label pairs are sorted. We depend on it for the consistency
// check.
-if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
+if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) {
// We cannot sort dtoMetric.Label in place as it is immutable by contract.
copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
copy(copiedLabels, dtoMetric.Label)
-sort.Sort(labelPairSorter(copiedLabels))
+sort.Sort(internal.LabelPairSorter(copiedLabels))
dtoMetric.Label = copiedLabels
}
for _, lp := range dtoMetric.Label {
@@ -935,7 +943,7 @@ func checkDescConsistency(
metricFamily.GetName(), dtoMetric, desc,
)
}
-sort.Sort(labelPairSorter(lpsFromDesc))
+sort.Sort(internal.LabelPairSorter(lpsFromDesc))
for i, lpFromDesc := range lpsFromDesc {
lpFromMetric := dtoMetric.Label[i]
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
@@ -948,3 +956,89 @@ func checkDescConsistency(
}
return nil
}
+
+var _ TransactionalGatherer = &MultiTRegistry{}
+
+// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple
+// transactional gatherers.
+//
+// It is caller responsibility to ensure two registries have mutually exclusive metric families,
+// no deduplication will happen.
+type MultiTRegistry struct {
+tGatherers []TransactionalGatherer
+}
+
+// NewMultiTRegistry creates MultiTRegistry.
+func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry {
+return &MultiTRegistry{
+tGatherers: tGatherers,
+}
+}
+
+// Gather implements TransactionalGatherer interface.
+func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) {
+errs := MultiError{}
+
+dFns := make([]func(), 0, len(r.tGatherers))
+// TODO(bwplotka): Implement concurrency for those?
+for _, g := range r.tGatherers {
+// TODO(bwplotka): Check for duplicates?
+m, d, err := g.Gather()
+errs.Append(err)
+
+mfs = append(mfs, m...)
+dFns = append(dFns, d)
+}
+
+// TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already.
+sort.Slice(mfs, func(i, j int) bool {
+return *mfs[i].Name < *mfs[j].Name
+})
+return mfs, func() {
+for _, d := range dFns {
+d()
+}
+}, errs.MaybeUnwrap()
+}
+
+// TransactionalGatherer represents transactional gatherer that can be triggered to notify gatherer that memory
+// used by metric family is no longer used by a caller. This allows implementations with cache.
+type TransactionalGatherer interface {
+// Gather returns metrics in a lexicographically sorted slice
+// of uniquely named MetricFamily protobufs. Gather ensures that the
+// returned slice is valid and self-consistent so that it can be used
+// for valid exposition. As an exception to the strict consistency
+// requirements described for metric.Desc, Gather will tolerate
+// different sets of label names for metrics of the same metric family.
+//
+// Even if an error occurs, Gather attempts to gather as many metrics as
+// possible. Hence, if a non-nil error is returned, the returned
+// MetricFamily slice could be nil (in case of a fatal error that
+// prevented any meaningful metric collection) or contain a number of
+// MetricFamily protobufs, some of which might be incomplete, and some
+// might be missing altogether. The returned error (which might be a
+// MultiError) explains the details. Note that this is mostly useful for
+// debugging purposes. If the gathered protobufs are to be used for
+// exposition in actual monitoring, it is almost always better to not
+// expose an incomplete result and instead disregard the returned
+// MetricFamily protobufs in case the returned error is non-nil.
+//
+// Important: done is expected to be triggered (even if the error occurs!)
+// once caller does not need returned slice of dto.MetricFamily.
+Gather() (_ []*dto.MetricFamily, done func(), err error)
+}
+
+// ToTransactionalGatherer transforms Gatherer to transactional one with noop as done function.
+func ToTransactionalGatherer(g Gatherer) TransactionalGatherer {
+return &noTransactionGatherer{g: g}
+}
+
+type noTransactionGatherer struct {
+g Gatherer
+}
+
+// Gather implements TransactionalGatherer interface.
+func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) {
+mfs, err := g.g.Gather()
+return mfs, func() {}, err
+}
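A sketch of the transactional gatherer API added above (assumed application-side code, using only the names introduced in this hunk): two registries with mutually exclusive metric families are joined, and done() is called even on error, as the interface comment requires.

```go
package example

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func gatherBoth(a, b *prometheus.Registry) error {
	multi := prometheus.NewMultiTRegistry(
		prometheus.ToTransactionalGatherer(a),
		prometheus.ToTransactionalGatherer(b),
	)

	mfs, done, err := multi.Gather()
	// done must run even when err is non-nil, per the TransactionalGatherer contract.
	defer done()
	if err != nil {
		return err
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName()) // families arrive sorted by name
	}
	return nil
}
```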
43 src/vendor/github.com/prometheus/client_golang/prometheus/value.go generated vendored
@@ -23,6 +23,8 @@ import (
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"

+"github.com/prometheus/client_golang/prometheus/internal"
+
dto "github.com/prometheus/client_model/go"
)

@@ -38,6 +40,23 @@ const (
UntypedValue
)

+var (
+CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }()
+GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }()
+UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }()
+)
+
+func (v ValueType) ToDTO() *dto.MetricType {
+switch v {
+case CounterValue:
+return CounterMetricTypePtr
+case GaugeValue:
+return GaugeMetricTypePtr
+default:
+return UntypedMetricTypePtr
+}
+}
+
// valueFunc is a generic metric for simple values retrieved on collect time
// from a function. It implements Metric and Collector. Its effective type is
// determined by ValueType. This is a low-level building block used by the
@@ -91,11 +110,15 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}

+metric := &dto.Metric{}
+if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil {
+return nil, err
+}
+
return &constMetric{
desc: desc,
-valType: valueType,
-val: value,
-labelPairs: MakeLabelPairs(desc, labelValues),
+metric: metric,
}, nil
}

@@ -111,9 +134,7 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal

type constMetric struct {
desc *Desc
-valType ValueType
-val float64
-labelPairs []*dto.LabelPair
+metric *dto.Metric
}

func (m *constMetric) Desc() *Desc {
@@ -121,7 +142,11 @@ func (m *constMetric) Desc() *Desc {
}

func (m *constMetric) Write(out *dto.Metric) error {
-return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
+out.Label = m.metric.Label
+out.Counter = m.metric.Counter
+out.Gauge = m.metric.Gauge
+out.Untyped = m.metric.Untyped
+return nil
}

func populateMetric(
@@ -170,12 +195,12 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
})
}
labelPairs = append(labelPairs, desc.constLabelPairs...)
-sort.Sort(labelPairSorter(labelPairs))
+sort.Sort(internal.LabelPairSorter(labelPairs))
return labelPairs
}

// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
-const ExemplarMaxRunes = 64
+const ExemplarMaxRunes = 128

// newExemplar creates a new dto.Exemplar from the provided values. An error is
// returned if any of the label names or values are invalid or if the total
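The const-metric change above precomputes the underlying dto.Metric at construction time, so Write only copies the cached payload; the public constructors are unchanged. A sketch of a custom collector that relies on MustNewConstMetric (collector and metric names are illustrative, not part of the commit):

```go
package example

import "github.com/prometheus/client_golang/prometheus"

// queueDepthCollector exports a value read on the fly as a const metric.
type queueDepthCollector struct {
	desc  *prometheus.Desc
	depth func() float64
}

func newQueueDepthCollector(depth func() float64) *queueDepthCollector {
	return &queueDepthCollector{
		desc: prometheus.NewDesc(
			"queue_depth", // illustrative name
			"Current depth of the work queue.",
			[]string{"queue"}, nil,
		),
		depth: depth,
	}
}

func (c *queueDepthCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *queueDepthCollector) Collect(ch chan<- prometheus.Metric) {
	// The dto.Metric behind this const metric is now built here, up front.
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.depth(), "default")
}
```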
88 src/vendor/github.com/prometheus/client_golang/prometheus/vec.go generated vendored
@@ -99,6 +99,16 @@ func (m *MetricVec) Delete(labels Labels) bool {
return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
}

+// DeletePartialMatch deletes all metrics where the variable labels contain all of those
+// passed in as labels. The order of the labels does not matter.
+// It returns the number of metrics deleted.
+//
+// Note that curried labels will never be matched if deleting from the curried vector.
+// To match curried labels with DeletePartialMatch, it must be called on the base vector.
+func (m *MetricVec) DeletePartialMatch(labels Labels) int {
+return m.metricMap.deleteByLabels(labels, m.curry)
+}
+
// Without explicit forwarding of Describe, Collect, Reset, those methods won't
// show up in GoDoc.

@@ -381,6 +391,82 @@ func (m *metricMap) deleteByHashWithLabels(
return true
}

+// deleteByLabels deletes a metric if the given labels are present in the metric.
+func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
+m.mtx.Lock()
+defer m.mtx.Unlock()
+
+var numDeleted int
+
+for h, metrics := range m.metrics {
+i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
+if i >= len(metrics) {
+// Didn't find matching labels in this metric slice.
+continue
+}
+delete(m.metrics, h)
+numDeleted++
+}
+
+return numDeleted
+}
+
+// findMetricWithPartialLabel returns the index of the matching metric or
+// len(metrics) if not found.
+func findMetricWithPartialLabels(
+desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+for i, metric := range metrics {
+if matchPartialLabels(desc, metric.values, labels, curry) {
+return i
+}
+}
+return len(metrics)
+}
+
+// indexOf searches the given slice of strings for the target string and returns
+// the index or len(items) as well as a boolean whether the search succeeded.
+func indexOf(target string, items []string) (int, bool) {
+for i, l := range items {
+if l == target {
+return i, true
+}
+}
+return len(items), false
+}
+
+// valueMatchesVariableOrCurriedValue determines if a value was previously curried,
+// and returns whether it matches either the "base" value or the curried value accordingly.
+// It also indicates whether the match is against a curried or uncurried value.
+func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
+for _, curriedValue := range curry {
+if curriedValue.index == index {
+// This label was curried. See if the curried value matches our target.
+return curriedValue.value == targetValue, true
+}
+}
+// This label was not curried. See if the current value matches our target label.
+return values[index] == targetValue, false
+}
+
+// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
+func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+for l, v := range labels {
+// Check if the target label exists in our metrics and get the index.
+varLabelIndex, validLabel := indexOf(l, desc.variableLabels)
+if validLabel {
+// Check the value of that label against the target value.
+// We don't consider curried values in partial matches.
+matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
+if matches && !curried {
+continue
+}
+}
+return false
+}
+return true
+}
+
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
@@ -485,7 +571,7 @@ func findMetricWithLabels(
return len(metrics)
}

-func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
+func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
if len(values) != len(lvs)+len(curry) {
return false
}
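A sketch of the new DeletePartialMatch method in use (assumed application-side code; the method is defined on MetricVec and, since the concrete vector types embed it, should be callable on a CounterVec as shown; label names and values are illustrative):

```go
package example

import "github.com/prometheus/client_golang/prometheus"

// pruneServerErrors drops every child series whose label set contains
// code="500", regardless of the values of its other labels, and returns
// the number of series deleted.
func pruneServerErrors(requests *prometheus.CounterVec) int {
	return requests.DeletePartialMatch(prometheus.Labels{"code": "500"})
}
```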
4 src/vendor/github.com/prometheus/client_golang/prometheus/wrap.go generated vendored
@@ -21,6 +21,8 @@ import (
"github.com/golang/protobuf/proto"

dto "github.com/prometheus/client_model/go"
+
+"github.com/prometheus/client_golang/prometheus/internal"
)

// WrapRegistererWith returns a Registerer wrapping the provided
@@ -182,7 +184,7 @@ func (m *wrappingMetric) Write(out *dto.Metric) error {
Value: proto.String(lv),
})
}
-sort.Sort(labelPairSorter(out.Label))
+sort.Sort(internal.LabelPairSorter(out.Label))
return nil
}

1 src/vendor/github.com/prometheus/common/expfmt/fuzz.go generated vendored
@@ -12,6 +12,7 @@
// limitations under the License.

// Build only when actually fuzzing
+//go:build gofuzz
// +build gofuzz

package expfmt
4 src/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go generated vendored
@@ -22,7 +22,6 @@ import (
"strconv"
"strings"

-"github.com/golang/protobuf/ptypes"
"github.com/prometheus/common/model"

dto "github.com/prometheus/client_model/go"
@@ -473,10 +472,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
if err != nil {
return written, err
}
-ts, err := ptypes.Timestamp((*e).Timestamp)
+err = (*e).Timestamp.CheckValid()
if err != nil {
return written, err
}
+ts := (*e).Timestamp.AsTime()
// TODO(beorn7): Format this directly from components of ts to
// avoid overflow/underflow and precision issues of the float
// conversion.
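The change above swaps the deprecated ptypes helper for the timestamppb methods. Sketched outside the vendored code, the general replacement pattern is:

```go
package example

import (
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

// toTime validates a protobuf timestamp and converts it to time.Time,
// mirroring the CheckValid/AsTime sequence used above instead of the
// removed ptypes.Timestamp call.
func toTime(ts *timestamppb.Timestamp) (time.Time, error) {
	if err := ts.CheckValid(); err != nil {
		return time.Time{}, err
	}
	return ts.AsTime(), nil
}
```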
2 src/vendor/github.com/prometheus/common/model/time.go generated vendored
@@ -193,7 +193,7 @@ func ParseDuration(durationStr string) (Duration, error) {
// Allow 0 without a unit.
return 0, nil
case "":
-return 0, fmt.Errorf("empty duration string")
+return 0, errors.New("empty duration string")
}
matches := durationRE.FindStringSubmatch(durationStr)
if matches == nil {
3 src/vendor/github.com/prometheus/procfs/.gitignore generated vendored
@@ -1 +1,2 @@
-/fixtures/
+/testdata/fixtures/
+/fixtures
10 src/vendor/github.com/prometheus/procfs/.golangci.yml generated vendored
@@ -1,4 +1,12 @@
---
linters:
enable:
-- golint
+- godot
+- revive
+
+linter-settings:
+godot:
+capital: true
+exclude:
+# Ignore "See: URL"
+- 'See:'
4 src/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md generated vendored
@@ -1,3 +1,3 @@
-## Prometheus Community Code of Conduct
+# Prometheus Community Code of Conduct

-Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
+Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
4 src/vendor/github.com/prometheus/procfs/CONTRIBUTING.md generated vendored
@@ -97,7 +97,7 @@ Many of the files are changing continuously and the data being read can in some
reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls
to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the
full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
-This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of
+This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of
the file.

Note that parsing the file's contents can still be performed one line at a time. This is done by first reading
@@ -113,7 +113,7 @@ the full file, and then using a scanner on the `[]byte` or `string` containing t
```

The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files
-can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does
+can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does
not bother to check the size of the file before reading.
```
data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
10 src/vendor/github.com/prometheus/procfs/Makefile generated vendored
@@ -14,18 +14,18 @@
include Makefile.common

%/.unpacked: %.ttar
-@echo ">> extracting fixtures"
+@echo ">> extracting fixtures $*"
./ttar -C $(dir $*) -x -f $*.ttar
touch $@

-fixtures: fixtures/.unpacked
+fixtures: testdata/fixtures/.unpacked

update_fixtures:
-rm -vf fixtures/.unpacked
-./ttar -c -f fixtures.ttar fixtures/
+rm -vf testdata/fixtures/.unpacked
+./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/

.PHONY: build
build:

.PHONY: test
-test: fixtures/.unpacked common-test
+test: testdata/fixtures/.unpacked common-test
87 src/vendor/github.com/prometheus/procfs/Makefile.common generated vendored
@@ -36,29 +36,6 @@ GO_VERSION ?= $(shell $(GO) version)
GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')

-GOVENDOR :=
-GO111MODULE :=
-ifeq (, $(PRE_GO_111))
-ifneq (,$(wildcard go.mod))
-# Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
-GO111MODULE := on
-
-ifneq (,$(wildcard vendor))
-# Always use the local vendor/ directory to satisfy the dependencies.
-GOOPTS := $(GOOPTS) -mod=vendor
-endif
-endif
-else
-ifneq (,$(wildcard go.mod))
-ifneq (,$(wildcard vendor))
-$(warning This repository requires Go >= 1.11 because of Go modules)
-$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
-endif
-else
-# This repository isn't using Go modules (yet).
-GOVENDOR := $(FIRST_GOPATH)/bin/govendor
-endif
-endif
PROMU := $(FIRST_GOPATH)/bin/promu
pkgs = ./...

@@ -78,17 +55,23 @@ ifneq ($(shell which gotestsum),)
endif
endif

-PROMU_VERSION ?= 0.12.0
+PROMU_VERSION ?= 0.13.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz

GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.39.0
+GOLANGCI_LINT_VERSION ?= v1.45.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
+# If we're in CI and there is an Actions file, that means the linter
+# is being run in Actions, so we don't need to run it here.
+ifeq (,$(CIRCLE_JOB))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
+GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+endif
endif
endif

@@ -144,32 +127,25 @@ common-check_license:
.PHONY: common-deps
common-deps:
@echo ">> getting dependencies"
-ifdef GO111MODULE
-GO111MODULE=$(GO111MODULE) $(GO) mod download
-else
-$(GO) get $(GOOPTS) -t ./...
-endif
+$(GO) mod download

.PHONY: update-go-deps
update-go-deps:
@echo ">> updating Go dependencies"
@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
-$(GO) get $$m; \
+$(GO) get -d $$m; \
done
-GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifneq (,$(wildcard vendor))
-GO111MODULE=$(GO111MODULE) $(GO) mod vendor
-endif
+$(GO) mod tidy

.PHONY: common-test-short
common-test-short: $(GOTEST_DIR)
@echo ">> running short tests"
-GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs)
+$(GOTEST) -short $(GOOPTS) $(pkgs)

.PHONY: common-test
common-test: $(GOTEST_DIR)
@echo ">> running all tests"
-GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
+$(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)

$(GOTEST_DIR):
@mkdir -p $@
@@ -177,25 +153,21 @@ $(GOTEST_DIR):
.PHONY: common-format
common-format:
@echo ">> formatting code"
-GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)
+$(GO) fmt $(pkgs)

.PHONY: common-vet
common-vet:
@echo ">> vetting code"
-GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
+$(GO) vet $(GOOPTS) $(pkgs)

.PHONY: common-lint
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint"
-ifdef GO111MODULE
# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
# Otherwise staticcheck might fail randomly for some reason not yet explained.
-GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
-GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
-else
-$(GOLANGCI_LINT) run $(pkgs)
-endif
+$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
+$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
endif

.PHONY: common-yamllint
@@ -212,28 +184,15 @@ endif
common-staticcheck: lint

.PHONY: common-unused
-common-unused: $(GOVENDOR)
-ifdef GOVENDOR
-@echo ">> running check for unused packages"
-@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
-else
-ifdef GO111MODULE
+common-unused:
@echo ">> running check for unused/missing packages in go.mod"
-GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifeq (,$(wildcard vendor))
+$(GO) mod tidy
@git diff --exit-code -- go.sum go.mod
-else
-@echo ">> running check for unused packages in vendor/"
-GO111MODULE=$(GO111MODULE) $(GO) mod vendor
-@git diff --exit-code -- go.sum go.mod vendor/
-endif
-endif
-endif

.PHONY: common-build
common-build: promu
@echo ">> building binaries"
-GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
+$(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)

.PHONY: common-tarball
common-tarball: promu
@@ -289,12 +248,6 @@ $(GOLANGCI_LINT):
| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
endif

-ifdef GOVENDOR
-.PHONY: $(GOVENDOR)
-$(GOVENDOR):
-GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
-endif
-
.PHONY: precheck
precheck::

|
2  src/vendor/github.com/prometheus/procfs/SECURITY.md  generated  vendored
@@ -3,4 +3,4 @@
 The Prometheus security policy, including how to report vulnerabilities, can be
 found here:

-https://prometheus.io/docs/operating/security/
+<https://prometheus.io/docs/operating/security/>
45  src/vendor/github.com/prometheus/procfs/arp.go  generated  vendored
@@ -15,11 +15,28 @@ package procfs

 import (
     "fmt"
-    "io/ioutil"
     "net"
+    "os"
+    "strconv"
     "strings"
 )

+// Learned from include/uapi/linux/if_arp.h.
+const (
+    // completed entry (ha valid).
+    ATFComplete = 0x02
+    // permanent entry.
+    ATFPermanent = 0x04
+    // Publish entry.
+    ATFPublish = 0x08
+    // Has requested trailers.
+    ATFUseTrailers = 0x10
+    // Obsoleted: Want to use a netmask (only for proxy entries).
+    ATFNetmask = 0x20
+    // Don't answer this addresses.
+    ATFDontPublish = 0x40
+)
+
 // ARPEntry contains a single row of the columnar data represented in
 // /proc/net/arp.
 type ARPEntry struct {
@@ -29,12 +46,14 @@ type ARPEntry struct {
     HWAddr net.HardwareAddr
     // Name of the device
     Device string
+    // Flags
+    Flags byte
 }

 // GatherARPEntries retrieves all the ARP entries, parse the relevant columns,
 // and then return a slice of ARPEntry's.
 func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
-    data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
+    data, err := os.ReadFile(fs.proc.Path("net/arp"))
     if err != nil {
         return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
     }
@@ -72,14 +91,26 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
 }

 func parseARPEntry(columns []string) (ARPEntry, error) {
+    entry := ARPEntry{Device: columns[5]}
     ip := net.ParseIP(columns[0])
-    mac := net.HardwareAddr(columns[3])
+    entry.IPAddr = ip

-    entry := ARPEntry{
-        IPAddr: ip,
-        HWAddr: mac,
-        Device: columns[5],
+    if mac, err := net.ParseMAC(columns[3]); err == nil {
+        entry.HWAddr = mac
+    } else {
+        return ARPEntry{}, err
+    }
+
+    if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil {
+        entry.Flags = byte(flags)
+    } else {
+        return ARPEntry{}, err
     }

     return entry, nil
 }
+
+// IsComplete returns true if ARP entry is marked with complete flag.
+func (entry *ARPEntry) IsComplete() bool {
+    return entry.Flags&ATFComplete != 0
+}
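For reference, a minimal usage sketch of the updated ARP API (not part of this commit), assuming the vendored package is importable as github.com/prometheus/procfs and that /proc is mounted at its default location; the Flags field and IsComplete method are the additions shown above.

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc") // default procfs mount point
    if err != nil {
        panic(err)
    }
    entries, err := fs.GatherARPEntries()
    if err != nil {
        panic(err)
    }
    for _, e := range entries {
        // IsComplete checks the new Flags field against ATFComplete (0x02).
        fmt.Printf("%-15s %-17s %-8s complete=%v\n", e.IPAddr, e.HWAddr, e.Device, e.IsComplete())
    }
}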
5  src/vendor/github.com/prometheus/procfs/cpuinfo.go  generated  vendored
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build linux
 // +build linux

 package procfs
@@ -27,7 +28,7 @@ import (
     "github.com/prometheus/procfs/internal/util"
 )

-// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
+// CPUInfo contains general information about a system CPU found in /proc/cpuinfo.
 type CPUInfo struct {
     Processor uint
     VendorID  string
@@ -469,7 +470,7 @@ func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
 }

 // firstNonEmptyLine advances the scanner to the first non-empty line
-// and returns the contents of that line
+// and returns the contents of that line.
 func firstNonEmptyLine(scanner *bufio.Scanner) string {
     for scanner.Scan() {
         line := scanner.Text()
1  src/vendor/github.com/prometheus/procfs/cpuinfo_armx.go  generated  vendored
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build linux && (arm || arm64)
 // +build linux
 // +build arm arm64

1  src/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go  generated  vendored
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build linux && (mips || mipsle || mips64 || mips64le)
 // +build linux
 // +build mips mipsle mips64 mips64le

4  src/vendor/github.com/prometheus/procfs/cpuinfo_others.go  generated  vendored
@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// +build linux
-// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x

 package procfs

1  src/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go  generated  vendored
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build linux && (ppc64 || ppc64le)
 // +build linux
 // +build ppc64 ppc64le

1  src/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go  generated  vendored
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build linux && (riscv || riscv64)
 // +build linux
 // +build riscv riscv64

1  src/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go  generated  vendored
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build linux
 // +build linux

 package procfs
1  src/vendor/github.com/prometheus/procfs/cpuinfo_x86.go  generated  vendored
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build linux && (386 || amd64)
 // +build linux
 // +build 386 amd64

7673  src/vendor/github.com/prometheus/procfs/fixtures.ttar  generated  vendored
File diff suppressed because it is too large.
2  src/vendor/github.com/prometheus/procfs/internal/fs/fs.go  generated  vendored
@@ -26,7 +26,7 @@ const (
     // DefaultSysMountPoint is the common mount point of the sys filesystem.
     DefaultSysMountPoint = "/sys"

-    // DefaultConfigfsMountPoint is the common mount point of the configfs
+    // DefaultConfigfsMountPoint is the common mount point of the configfs.
     DefaultConfigfsMountPoint = "/sys/kernel/config"
 )

6  src/vendor/github.com/prometheus/procfs/internal/util/parse.go  generated  vendored
@@ -14,7 +14,7 @@
 package util

 import (
-    "io/ioutil"
+    "os"
     "strconv"
     "strings"
 )
@@ -66,7 +66,7 @@ func ParsePInt64s(ss []string) ([]*int64, error) {

 // ReadUintFromFile reads a file and attempts to parse a uint64 from it.
 func ReadUintFromFile(path string) (uint64, error) {
-    data, err := ioutil.ReadFile(path)
+    data, err := os.ReadFile(path)
     if err != nil {
         return 0, err
     }
@@ -75,7 +75,7 @@ func ReadUintFromFile(path string) (uint64, error) {

 // ReadIntFromFile reads a file and attempts to parse a int64 from it.
 func ReadIntFromFile(path string) (int64, error) {
-    data, err := ioutil.ReadFile(path)
+    data, err := os.ReadFile(path)
     if err != nil {
         return 0, err
     }
11  src/vendor/github.com/prometheus/procfs/internal/util/readfile.go  generated  vendored
@@ -15,17 +15,16 @@ package util

 import (
     "io"
-    "io/ioutil"
     "os"
 )

-// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file.
-// This is similar to ioutil.ReadFile but without the call to os.Stat, because
+// ReadFileNoStat uses io.ReadAll to read contents of entire file.
+// This is similar to os.ReadFile but without the call to os.Stat, because
 // many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
-// Reads a max file size of 512kB. For files larger than this, a scanner
+// Reads a max file size of 1024kB. For files larger than this, a scanner
 // should be used.
 func ReadFileNoStat(filename string) ([]byte, error) {
-    const maxBufferSize = 1024 * 512
+    const maxBufferSize = 1024 * 1024

     f, err := os.Open(filename)
     if err != nil {
@@ -34,5 +33,5 @@ func ReadFileNoStat(filename string) ([]byte, error) {
     defer f.Close()

     reader := io.LimitReader(f, maxBufferSize)
-    return ioutil.ReadAll(reader)
+    return io.ReadAll(reader)
 }
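As a standalone illustration of the read pattern above (a sketch, not code from this commit): open the file, cap the read with io.LimitReader, and drain it with io.ReadAll; the 1024*1024 limit mirrors the new maxBufferSize.

package main

import (
    "fmt"
    "io"
    "os"
)

// readCapped reads at most limit bytes from path without relying on the size
// reported by os.Stat, which is unreliable for /proc and /sys files.
func readCapped(path string, limit int64) ([]byte, error) {
    f, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer f.Close()
    return io.ReadAll(io.LimitReader(f, limit))
}

func main() {
    b, err := readCapped("/proc/self/status", 1024*1024)
    if err != nil {
        panic(err)
    }
    fmt.Print(string(b))
}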
8  src/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go  generated  vendored
@@ -11,7 +11,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// +build linux,!appengine
+//go:build (linux || darwin) && !appengine
+// +build linux darwin
+// +build !appengine

 package util

@@ -21,7 +23,7 @@ import (
     "syscall"
 )

-// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
+// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly.
 // https://github.com/prometheus/node_exporter/pull/728/files
 //
 // Note that this function will not read files larger than 128 bytes.
@@ -33,7 +35,7 @@ func SysReadFile(file string) (string, error) {
     defer f.Close()

     // On some machines, hwmon drivers are broken and return EAGAIN. This causes
-    // Go's ioutil.ReadFile implementation to poll forever.
+    // Go's os.ReadFile implementation to poll forever.
     //
     // Since we either want to read data or bail immediately, do the simplest
     // possible read using syscall directly.
3  src/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go  generated  vendored
@@ -11,7 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-// +build linux,appengine !linux
+//go:build (linux && appengine) || (!linux && !darwin)
+// +build linux,appengine !linux,!darwin

 package util

3  src/vendor/github.com/prometheus/procfs/ipvs.go  generated  vendored
@@ -20,7 +20,6 @@ import (
     "errors"
     "fmt"
     "io"
-    "io/ioutil"
     "net"
     "os"
     "strconv"
@@ -84,7 +83,7 @@ func parseIPVSStats(r io.Reader) (IPVSStats, error) {
         stats IPVSStats
     )

-    statContent, err := ioutil.ReadAll(r)
+    statContent, err := io.ReadAll(r)
     if err != nil {
         return IPVSStats{}, err
     }
1  src/vendor/github.com/prometheus/procfs/kernel_random.go  generated  vendored
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build !windows
 // +build !windows

 package procfs
2  src/vendor/github.com/prometheus/procfs/loadavg.go  generated  vendored
@@ -21,7 +21,7 @@ import (
     "github.com/prometheus/procfs/internal/util"
 )

-// LoadAvg represents an entry in /proc/loadavg
+// LoadAvg represents an entry in /proc/loadavg.
 type LoadAvg struct {
     Load1 float64
     Load5 float64
10  src/vendor/github.com/prometheus/procfs/mdstat.go  generated  vendored
@@ -15,7 +15,7 @@ package procfs

 import (
     "fmt"
-    "io/ioutil"
+    "os"
     "regexp"
     "strconv"
     "strings"
@@ -64,7 +64,7 @@ type MDStat struct {
 // structs containing the relevant info. More information available here:
 // https://raid.wiki.kernel.org/index.php/Mdstat
 func (fs FS) MDStat() ([]MDStat, error) {
-    data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
+    data, err := os.ReadFile(fs.proc.Path("mdstat"))
     if err != nil {
         return nil, err
     }
@@ -166,8 +166,12 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 }

 func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
+    statusFields := strings.Fields(statusLine)
+    if len(statusFields) < 1 {
+        return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine)
+    }

-    sizeStr := strings.Fields(statusLine)[0]
+    sizeStr := statusFields[0]
     size, err = strconv.ParseInt(sizeStr, 10, 64)
     if err != nil {
         return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
src/vendor/github.com/prometheus/procfs/net_conntrackstat.go
generated
vendored
12
src/vendor/github.com/prometheus/procfs/net_conntrackstat.go
generated
vendored
@ -25,7 +25,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
|
// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
|
||||||
// and contains netfilter conntrack statistics at one CPU core
|
// and contains netfilter conntrack statistics at one CPU core.
|
||||||
type ConntrackStatEntry struct {
|
type ConntrackStatEntry struct {
|
||||||
Entries uint64
|
Entries uint64
|
||||||
Found uint64
|
Found uint64
|
||||||
@ -38,12 +38,12 @@ type ConntrackStatEntry struct {
|
|||||||
SearchRestart uint64
|
SearchRestart uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores
|
// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores.
|
||||||
func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
|
func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
|
||||||
return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
|
return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses a slice of ConntrackStatEntries from the given filepath
|
// Parses a slice of ConntrackStatEntries from the given filepath.
|
||||||
func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
|
func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
|
||||||
// This file is small and can be read with one syscall.
|
// This file is small and can be read with one syscall.
|
||||||
b, err := util.ReadFileNoStat(path)
|
b, err := util.ReadFileNoStat(path)
|
||||||
@ -61,7 +61,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
|
|||||||
return stat, nil
|
return stat, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
|
// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries.
|
||||||
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
|
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
|
||||||
var entries []ConntrackStatEntry
|
var entries []ConntrackStatEntry
|
||||||
|
|
||||||
@ -79,7 +79,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
|
|||||||
return entries, nil
|
return entries, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses a ConntrackStatEntry from given array of fields
|
// Parses a ConntrackStatEntry from given array of fields.
|
||||||
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
|
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
|
||||||
if len(fields) != 17 {
|
if len(fields) != 17 {
|
||||||
return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
|
return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
|
||||||
@ -143,7 +143,7 @@ func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
|
|||||||
return entry, nil
|
return entry, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses a uint64 from given hex in string
|
// Parses a uint64 from given hex in string.
|
||||||
func parseConntrackStatField(field string) (uint64, error) {
|
func parseConntrackStatField(field string) (uint64, error) {
|
||||||
val, err := strconv.ParseUint(field, 16, 64)
|
val, err := strconv.ParseUint(field, 16, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
8
src/vendor/github.com/prometheus/procfs/net_dev.go
generated
vendored
8
src/vendor/github.com/prometheus/procfs/net_dev.go
generated
vendored
@ -87,17 +87,17 @@ func newNetDev(file string) (NetDev, error) {
|
|||||||
// parseLine parses a single line from the /proc/net/dev file. Header lines
|
// parseLine parses a single line from the /proc/net/dev file. Header lines
|
||||||
// must be filtered prior to calling this method.
|
// must be filtered prior to calling this method.
|
||||||
func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
|
func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
|
||||||
parts := strings.SplitN(rawLine, ":", 2)
|
idx := strings.LastIndex(rawLine, ":")
|
||||||
if len(parts) != 2 {
|
if idx == -1 {
|
||||||
return nil, errors.New("invalid net/dev line, missing colon")
|
return nil, errors.New("invalid net/dev line, missing colon")
|
||||||
}
|
}
|
||||||
fields := strings.Fields(strings.TrimSpace(parts[1]))
|
fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:]))
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
line := &NetDevLine{}
|
line := &NetDevLine{}
|
||||||
|
|
||||||
// Interface Name
|
// Interface Name
|
||||||
line.Name = strings.TrimSpace(parts[0])
|
line.Name = strings.TrimSpace(rawLine[:idx])
|
||||||
if line.Name == "" {
|
if line.Name == "" {
|
||||||
return nil, errors.New("invalid net/dev line, empty interface name")
|
return nil, errors.New("invalid net/dev line, empty interface name")
|
||||||
}
|
}
|
||||||
|
2
src/vendor/github.com/prometheus/procfs/net_ip_socket.go
generated
vendored
2
src/vendor/github.com/prometheus/procfs/net_ip_socket.go
generated
vendored
@ -34,7 +34,7 @@ const (
|
|||||||
readLimit = 4294967296 // Byte -> 4 GiB
|
readLimit = 4294967296 // Byte -> 4 GiB
|
||||||
)
|
)
|
||||||
|
|
||||||
// this contains generic data structures for both udp and tcp sockets
|
// This contains generic data structures for both udp and tcp sockets.
|
||||||
type (
|
type (
|
||||||
// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
|
// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
|
||||||
NetIPSocket []*netIPSocketLine
|
NetIPSocket []*netIPSocketLine
|
||||||
|
4
src/vendor/github.com/prometheus/procfs/net_protocols.go
generated
vendored
4
src/vendor/github.com/prometheus/procfs/net_protocols.go
generated
vendored
@ -23,7 +23,7 @@ import (
|
|||||||
"github.com/prometheus/procfs/internal/util"
|
"github.com/prometheus/procfs/internal/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NetProtocolStats stores the contents from /proc/net/protocols
|
// NetProtocolStats stores the contents from /proc/net/protocols.
|
||||||
type NetProtocolStats map[string]NetProtocolStatLine
|
type NetProtocolStats map[string]NetProtocolStatLine
|
||||||
|
|
||||||
// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
|
// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
|
||||||
@ -41,7 +41,7 @@ type NetProtocolStatLine struct {
|
|||||||
Capabilities NetProtocolCapabilities
|
Capabilities NetProtocolCapabilities
|
||||||
}
|
}
|
||||||
|
|
||||||
// NetProtocolCapabilities contains a list of capabilities for each protocol
|
// NetProtocolCapabilities contains a list of capabilities for each protocol.
|
||||||
type NetProtocolCapabilities struct {
|
type NetProtocolCapabilities struct {
|
||||||
Close bool // 8
|
Close bool // 8
|
||||||
Connect bool // 9
|
Connect bool // 9
|
||||||
|
8
src/vendor/github.com/prometheus/procfs/net_softnet.go
generated
vendored
8
src/vendor/github.com/prometheus/procfs/net_softnet.go
generated
vendored
@ -30,13 +30,13 @@ import (
|
|||||||
// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
|
// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
|
||||||
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
|
// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
|
||||||
|
|
||||||
// SoftnetStat contains a single row of data from /proc/net/softnet_stat
|
// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
|
||||||
type SoftnetStat struct {
|
type SoftnetStat struct {
|
||||||
// Number of processed packets
|
// Number of processed packets.
|
||||||
Processed uint32
|
Processed uint32
|
||||||
// Number of dropped packets
|
// Number of dropped packets.
|
||||||
Dropped uint32
|
Dropped uint32
|
||||||
// Number of times processing packets ran out of quota
|
// Number of times processing packets ran out of quota.
|
||||||
TimeSqueezed uint32
|
TimeSqueezed uint32
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -80,8 +80,11 @@ type XfrmStat struct {
|
|||||||
XfrmOutPolDead int
|
XfrmOutPolDead int
|
||||||
// Policy Error
|
// Policy Error
|
||||||
XfrmOutPolError int
|
XfrmOutPolError int
|
||||||
|
// Forward routing of a packet is not allowed
|
||||||
XfrmFwdHdrError int
|
XfrmFwdHdrError int
|
||||||
|
// State is invalid, perhaps expired
|
||||||
XfrmOutStateInvalid int
|
XfrmOutStateInvalid int
|
||||||
|
// State hasn’t been fully acquired before use
|
||||||
XfrmAcquireError int
|
XfrmAcquireError int
|
||||||
}
|
}
|
||||||
|
|
8  src/vendor/github.com/prometheus/procfs/netstat.go  generated  vendored
@@ -21,13 +21,13 @@ import (
     "strings"
 )

-// NetStat contains statistics for all the counters from one file
+// NetStat contains statistics for all the counters from one file.
 type NetStat struct {
-    Filename string
     Stats    map[string][]uint64
+    Filename string
 }

-// NetStat retrieves stats from /proc/net/stat/
+// NetStat retrieves stats from `/proc/net/stat/`.
 func (fs FS) NetStat() ([]NetStat, error) {
     statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
     if err != nil {
@@ -55,7 +55,7 @@ func (fs FS) NetStat() ([]NetStat, error) {
         // Other strings represent per-CPU counters
         for scanner.Scan() {
             for num, counter := range strings.Fields(scanner.Text()) {
-                value, err := strconv.ParseUint(counter, 16, 32)
+                value, err := strconv.ParseUint(counter, 16, 64)
                 if err != nil {
                     return nil, err
                 }
src/vendor/github.com/prometheus/procfs/proc.go
generated
vendored
10
src/vendor/github.com/prometheus/procfs/proc.go
generated
vendored
@ -16,7 +16,7 @@ package procfs
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -82,7 +82,7 @@ func (fs FS) Self() (Proc, error) {
|
|||||||
|
|
||||||
// NewProc returns a process for the given pid.
|
// NewProc returns a process for the given pid.
|
||||||
//
|
//
|
||||||
// Deprecated: use fs.Proc() instead
|
// Deprecated: Use fs.Proc() instead.
|
||||||
func (fs FS) NewProc(pid int) (Proc, error) {
|
func (fs FS) NewProc(pid int) (Proc, error) {
|
||||||
return fs.Proc(pid)
|
return fs.Proc(pid)
|
||||||
}
|
}
|
||||||
@ -142,7 +142,7 @@ func (p Proc) Wchan() (string, error) {
|
|||||||
}
|
}
|
||||||
defer f.Close()
|
defer f.Close()
|
||||||
|
|
||||||
data, err := ioutil.ReadAll(f)
|
data, err := io.ReadAll(f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@ -185,7 +185,7 @@ func (p Proc) Cwd() (string, error) {
|
|||||||
return wd, err
|
return wd, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// RootDir returns the absolute path to the process's root directory (as set by chroot)
|
// RootDir returns the absolute path to the process's root directory (as set by chroot).
|
||||||
func (p Proc) RootDir() (string, error) {
|
func (p Proc) RootDir() (string, error) {
|
||||||
rdir, err := os.Readlink(p.path("root"))
|
rdir, err := os.Readlink(p.path("root"))
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
@ -311,7 +311,7 @@ func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
|
|||||||
|
|
||||||
// Schedstat returns task scheduling information for the process.
|
// Schedstat returns task scheduling information for the process.
|
||||||
func (p Proc) Schedstat() (ProcSchedstat, error) {
|
func (p Proc) Schedstat() (ProcSchedstat, error) {
|
||||||
contents, err := ioutil.ReadFile(p.path("schedstat"))
|
contents, err := os.ReadFile(p.path("schedstat"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return ProcSchedstat{}, err
|
return ProcSchedstat{}, err
|
||||||
}
|
}
|
||||||
|
6  src/vendor/github.com/prometheus/procfs/proc_cgroup.go  generated  vendored
@@ -45,7 +45,7 @@ type Cgroup struct {
 }

 // parseCgroupString parses each line of the /proc/[pid]/cgroup file
-// Line format is hierarchyID:[controller1,controller2]:path
+// Line format is hierarchyID:[controller1,controller2]:path.
 func parseCgroupString(cgroupStr string) (*Cgroup, error) {
     var err error

@@ -69,7 +69,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
     return cgroup, nil
 }

-// parseCgroups reads each line of the /proc/[pid]/cgroup file
+// parseCgroups reads each line of the /proc/[pid]/cgroup file.
 func parseCgroups(data []byte) ([]Cgroup, error) {
     var cgroups []Cgroup
     scanner := bufio.NewScanner(bytes.NewReader(data))
@@ -88,7 +88,7 @@ func parseCgroups(data []byte) ([]Cgroup, error) {

 // Cgroups reads from /proc/<pid>/cgroups and returns a []*Cgroup struct locating this PID in each process
 // control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
-// so the len of the returned struct is equal to the number of active hierarchies on this system
+// so the len of the returned struct is equal to the number of active hierarchies on this system.
 func (p Proc) Cgroups() ([]Cgroup, error) {
     data, err := util.ReadFileNoStat(p.path("cgroup"))
     if err != nil {
98  src/vendor/github.com/prometheus/procfs/proc_cgroups.go  generated  vendored  (new file)
@@ -0,0 +1,98 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
    "bufio"
    "bytes"
    "fmt"
    "strconv"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

// CgroupSummary models one line from /proc/cgroups.
// This file contains information about the controllers that are compiled into the kernel.
//
// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
type CgroupSummary struct {
    // The name of the controller. controller is also known as subsystem.
    SubsysName string
    // The unique ID of the cgroup hierarchy on which this controller is mounted.
    Hierarchy int
    // The number of control groups in this hierarchy using this controller.
    Cgroups int
    // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled
    Enabled int
}

// parseCgroupSummary parses each line of the /proc/cgroup file
// Line format is `subsys_name hierarchy num_cgroups enabled`.
func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
    var err error

    fields := strings.Fields(CgroupSummaryStr)
    // require at least 4 fields
    if len(fields) < 4 {
        return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr)
    }

    CgroupSummary := &CgroupSummary{
        SubsysName: fields[0],
    }
    CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
    if err != nil {
        return nil, fmt.Errorf("failed to parse hierarchy ID")
    }
    CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
    if err != nil {
        return nil, fmt.Errorf("failed to parse Cgroup Num")
    }
    CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
    if err != nil {
        return nil, fmt.Errorf("failed to parse Enabled")
    }
    return CgroupSummary, nil
}

// parseCgroupSummary reads each line of the /proc/cgroup file.
func parseCgroupSummary(data []byte) ([]CgroupSummary, error) {
    var CgroupSummarys []CgroupSummary
    scanner := bufio.NewScanner(bytes.NewReader(data))
    for scanner.Scan() {
        CgroupSummaryString := scanner.Text()
        // ignore comment lines
        if strings.HasPrefix(CgroupSummaryString, "#") {
            continue
        }
        CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString)
        if err != nil {
            return nil, err
        }
        CgroupSummarys = append(CgroupSummarys, *CgroupSummary)
    }

    err := scanner.Err()
    return CgroupSummarys, err
}

// CgroupSummarys returns information about current /proc/cgroups.
func (fs FS) CgroupSummarys() ([]CgroupSummary, error) {
    data, err := util.ReadFileNoStat(fs.proc.Path("cgroups"))
    if err != nil {
        return nil, err
    }
    return parseCgroupSummary(data)
}
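A minimal usage sketch (not part of this commit) for the new /proc/cgroups reader added above, assuming the package is imported as github.com/prometheus/procfs:

package main

import (
    "fmt"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS("/proc")
    if err != nil {
        panic(err)
    }
    summaries, err := fs.CgroupSummarys()
    if err != nil {
        panic(err)
    }
    for _, s := range summaries {
        // One line per controller compiled into the kernel.
        fmt.Printf("%-12s hierarchy=%d cgroups=%d enabled=%d\n",
            s.SubsysName, s.Hierarchy, s.Cgroups, s.Enabled)
    }
}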
2  src/vendor/github.com/prometheus/procfs/proc_environ.go  generated  vendored
@@ -19,7 +19,7 @@ import (
     "github.com/prometheus/procfs/internal/util"
 )

-// Environ reads process environments from /proc/<pid>/environ
+// Environ reads process environments from `/proc/<pid>/environ`.
 func (p Proc) Environ() ([]string, error) {
     environments := make([]string, 0)

3  src/vendor/github.com/prometheus/procfs/proc_fdinfo.go  generated  vendored
@@ -22,7 +22,6 @@ import (
     "github.com/prometheus/procfs/internal/util"
 )

-// Regexp variables
 var (
     rPos   = regexp.MustCompile(`^pos:\s+(\d+)$`)
     rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
@@ -122,7 +121,7 @@ func (p ProcFDInfos) Len() int { return len(p) }
 func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
 func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }

-// InotifyWatchLen returns the total number of inotify watches
+// InotifyWatchLen returns the total number of inotify watches.
 func (p ProcFDInfos) InotifyWatchLen() (int, error) {
     length := 0
     for _, f := range p {
2  src/vendor/github.com/prometheus/procfs/proc_limits.go  generated  vendored
@@ -79,7 +79,7 @@ var (

 // NewLimits returns the current soft limits of the process.
 //
-// Deprecated: use p.Limits() instead
+// Deprecated: Use p.Limits() instead.
 func (p Proc) NewLimits() (ProcLimits, error) {
     return p.Limits()
 }
12  src/vendor/github.com/prometheus/procfs/proc_maps.go  generated  vendored
@@ -11,7 +11,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js
 // +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build !js

 package procfs

@@ -25,7 +27,7 @@ import (
     "golang.org/x/sys/unix"
 )

-// ProcMapPermissions contains permission settings read from /proc/[pid]/maps
+// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`.
 type ProcMapPermissions struct {
     // mapping has the [R]ead flag set
     Read bool
@@ -39,8 +41,8 @@ type ProcMapPermissions struct {
     Private bool
 }

-// ProcMap contains the process memory-mappings of the process,
-// read from /proc/[pid]/maps
+// ProcMap contains the process memory-mappings of the process
+// read from `/proc/[pid]/maps`.
 type ProcMap struct {
     // The start address of current mapping.
     StartAddr uintptr
@@ -79,7 +81,7 @@ func parseDevice(s string) (uint64, error) {
     return unix.Mkdev(uint32(major), uint32(minor)), nil
 }

-// parseAddress just converts a hex-string to a uintptr
+// parseAddress converts a hex-string to a uintptr.
 func parseAddress(s string) (uintptr, error) {
     a, err := strconv.ParseUint(s, 16, 0)
     if err != nil {
@@ -89,7 +91,7 @@ func parseAddress(s string) (uintptr, error) {
     return uintptr(a), nil
 }

-// parseAddresses parses the start-end address
+// parseAddresses parses the start-end address.
 func parseAddresses(s string) (uintptr, uintptr, error) {
     toks := strings.Split(s, "-")
     if len(toks) < 2 {
440  src/vendor/github.com/prometheus/procfs/proc_netstat.go  generated  vendored  (new file)
@@ -0,0 +1,440 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
    "bufio"
    "bytes"
    "fmt"
    "io"
    "strconv"
    "strings"

    "github.com/prometheus/procfs/internal/util"
)

// ProcNetstat models the content of /proc/<pid>/net/netstat.
type ProcNetstat struct {
    // The process ID.
    PID int
    TcpExt
    IpExt
}

type TcpExt struct { // nolint:revive
    SyncookiesSent float64
    SyncookiesRecv float64
    SyncookiesFailed float64
    EmbryonicRsts float64
    PruneCalled float64
    RcvPruned float64
    OfoPruned float64
    OutOfWindowIcmps float64
    LockDroppedIcmps float64
    ArpFilter float64
    TW float64
    TWRecycled float64
    TWKilled float64
    PAWSActive float64
    PAWSEstab float64
    DelayedACKs float64
    DelayedACKLocked float64
    DelayedACKLost float64
    ListenOverflows float64
    ListenDrops float64
    TCPHPHits float64
    TCPPureAcks float64
    TCPHPAcks float64
    TCPRenoRecovery float64
    TCPSackRecovery float64
    TCPSACKReneging float64
    TCPSACKReorder float64
    TCPRenoReorder float64
    TCPTSReorder float64
    TCPFullUndo float64
    TCPPartialUndo float64
    TCPDSACKUndo float64
    TCPLossUndo float64
    TCPLostRetransmit float64
    TCPRenoFailures float64
    TCPSackFailures float64
    TCPLossFailures float64
    TCPFastRetrans float64
    TCPSlowStartRetrans float64
    TCPTimeouts float64
    TCPLossProbes float64
    TCPLossProbeRecovery float64
    TCPRenoRecoveryFail float64
    TCPSackRecoveryFail float64
    TCPRcvCollapsed float64
    TCPDSACKOldSent float64
    TCPDSACKOfoSent float64
    TCPDSACKRecv float64
    TCPDSACKOfoRecv float64
    TCPAbortOnData float64
    TCPAbortOnClose float64
    TCPAbortOnMemory float64
    TCPAbortOnTimeout float64
    TCPAbortOnLinger float64
    TCPAbortFailed float64
    TCPMemoryPressures float64
    TCPMemoryPressuresChrono float64
    TCPSACKDiscard float64
    TCPDSACKIgnoredOld float64
    TCPDSACKIgnoredNoUndo float64
    TCPSpuriousRTOs float64
    TCPMD5NotFound float64
    TCPMD5Unexpected float64
    TCPMD5Failure float64
    TCPSackShifted float64
    TCPSackMerged float64
    TCPSackShiftFallback float64
    TCPBacklogDrop float64
    PFMemallocDrop float64
    TCPMinTTLDrop float64
    TCPDeferAcceptDrop float64
    IPReversePathFilter float64
    TCPTimeWaitOverflow float64
    TCPReqQFullDoCookies float64
    TCPReqQFullDrop float64
    TCPRetransFail float64
    TCPRcvCoalesce float64
    TCPOFOQueue float64
    TCPOFODrop float64
    TCPOFOMerge float64
    TCPChallengeACK float64
    TCPSYNChallenge float64
    TCPFastOpenActive float64
    TCPFastOpenActiveFail float64
    TCPFastOpenPassive float64
    TCPFastOpenPassiveFail float64
    TCPFastOpenListenOverflow float64
    TCPFastOpenCookieReqd float64
    TCPFastOpenBlackhole float64
    TCPSpuriousRtxHostQueues float64
    BusyPollRxPackets float64
    TCPAutoCorking float64
    TCPFromZeroWindowAdv float64
    TCPToZeroWindowAdv float64
    TCPWantZeroWindowAdv float64
    TCPSynRetrans float64
    TCPOrigDataSent float64
    TCPHystartTrainDetect float64
    TCPHystartTrainCwnd float64
    TCPHystartDelayDetect float64
    TCPHystartDelayCwnd float64
    TCPACKSkippedSynRecv float64
    TCPACKSkippedPAWS float64
    TCPACKSkippedSeq float64
    TCPACKSkippedFinWait2 float64
    TCPACKSkippedTimeWait float64
    TCPACKSkippedChallenge float64
    TCPWinProbe float64
    TCPKeepAlive float64
    TCPMTUPFail float64
    TCPMTUPSuccess float64
    TCPWqueueTooBig float64
}

type IpExt struct { // nolint:revive
    InNoRoutes float64
    InTruncatedPkts float64
    InMcastPkts float64
    OutMcastPkts float64
    InBcastPkts float64
    OutBcastPkts float64
    InOctets float64
    OutOctets float64
    InMcastOctets float64
    OutMcastOctets float64
    InBcastOctets float64
    OutBcastOctets float64
    InCsumErrors float64
    InNoECTPkts float64
    InECT1Pkts float64
    InECT0Pkts float64
    InCEPkts float64
    ReasmOverlaps float64
}

func (p Proc) Netstat() (ProcNetstat, error) {
    filename := p.path("net/netstat")
    data, err := util.ReadFileNoStat(filename)
    if err != nil {
        return ProcNetstat{PID: p.PID}, err
    }
    procNetstat, err := parseNetstat(bytes.NewReader(data), filename)
    procNetstat.PID = p.PID
    return procNetstat, err
}

// parseNetstat parses the metrics from proc/<pid>/net/netstat file
// and returns a ProcNetstat structure.
func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
    var (
        scanner     = bufio.NewScanner(r)
        procNetstat = ProcNetstat{}
    )

    for scanner.Scan() {
        nameParts := strings.Split(scanner.Text(), " ")
        scanner.Scan()
        valueParts := strings.Split(scanner.Text(), " ")
        // Remove trailing :.
        protocol := strings.TrimSuffix(nameParts[0], ":")
        if len(nameParts) != len(valueParts) {
            return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s",
                fileName, protocol)
        }
        for i := 1; i < len(nameParts); i++ {
            value, err := strconv.ParseFloat(valueParts[i], 64)
            if err != nil {
                return procNetstat, err
            }
            key := nameParts[i]

            switch protocol {
            case "TcpExt":
                switch key {
                case "SyncookiesSent":
                    procNetstat.TcpExt.SyncookiesSent = value
                case "SyncookiesRecv":
                    procNetstat.TcpExt.SyncookiesRecv = value
                case "SyncookiesFailed":
                    procNetstat.TcpExt.SyncookiesFailed = value
                case "EmbryonicRsts":
                    procNetstat.TcpExt.EmbryonicRsts = value
                case "PruneCalled":
                    procNetstat.TcpExt.PruneCalled = value
                case "RcvPruned":
                    procNetstat.TcpExt.RcvPruned = value
                case "OfoPruned":
                    procNetstat.TcpExt.OfoPruned = value
                case "OutOfWindowIcmps":
                    procNetstat.TcpExt.OutOfWindowIcmps = value
                case "LockDroppedIcmps":
                    procNetstat.TcpExt.LockDroppedIcmps = value
                case "ArpFilter":
                    procNetstat.TcpExt.ArpFilter = value
                case "TW":
                    procNetstat.TcpExt.TW = value
                case "TWRecycled":
                    procNetstat.TcpExt.TWRecycled = value
                case "TWKilled":
                    procNetstat.TcpExt.TWKilled = value
                case "PAWSActive":
                    procNetstat.TcpExt.PAWSActive = value
                case "PAWSEstab":
                    procNetstat.TcpExt.PAWSEstab = value
                case "DelayedACKs":
                    procNetstat.TcpExt.DelayedACKs = value
                case "DelayedACKLocked":
                    procNetstat.TcpExt.DelayedACKLocked = value
                case "DelayedACKLost":
                    procNetstat.TcpExt.DelayedACKLost = value
                case "ListenOverflows":
                    procNetstat.TcpExt.ListenOverflows = value
                case "ListenDrops":
                    procNetstat.TcpExt.ListenDrops = value
                case "TCPHPHits":
                    procNetstat.TcpExt.TCPHPHits = value
                case "TCPPureAcks":
                    procNetstat.TcpExt.TCPPureAcks = value
                case "TCPHPAcks":
                    procNetstat.TcpExt.TCPHPAcks = value
                case "TCPRenoRecovery":
                    procNetstat.TcpExt.TCPRenoRecovery = value
                case "TCPSackRecovery":
                    procNetstat.TcpExt.TCPSackRecovery = value
                case "TCPSACKReneging":
                    procNetstat.TcpExt.TCPSACKReneging = value
                case "TCPSACKReorder":
                    procNetstat.TcpExt.TCPSACKReorder = value
                case "TCPRenoReorder":
                    procNetstat.TcpExt.TCPRenoReorder = value
                case "TCPTSReorder":
                    procNetstat.TcpExt.TCPTSReorder = value
                case "TCPFullUndo":
                    procNetstat.TcpExt.TCPFullUndo = value
                case "TCPPartialUndo":
                    procNetstat.TcpExt.TCPPartialUndo = value
                case "TCPDSACKUndo":
                    procNetstat.TcpExt.TCPDSACKUndo = value
                case "TCPLossUndo":
                    procNetstat.TcpExt.TCPLossUndo = value
                case "TCPLostRetransmit":
                    procNetstat.TcpExt.TCPLostRetransmit = value
                case "TCPRenoFailures":
                    procNetstat.TcpExt.TCPRenoFailures = value
                case "TCPSackFailures":
                    procNetstat.TcpExt.TCPSackFailures = value
                case "TCPLossFailures":
                    procNetstat.TcpExt.TCPLossFailures = value
                case "TCPFastRetrans":
                    procNetstat.TcpExt.TCPFastRetrans = value
                case "TCPSlowStartRetrans":
                    procNetstat.TcpExt.TCPSlowStartRetrans = value
                case "TCPTimeouts":
                    procNetstat.TcpExt.TCPTimeouts = value
                case "TCPLossProbes":
                    procNetstat.TcpExt.TCPLossProbes = value
                case "TCPLossProbeRecovery":
                    procNetstat.TcpExt.TCPLossProbeRecovery = value
                case "TCPRenoRecoveryFail":
                    procNetstat.TcpExt.TCPRenoRecoveryFail = value
                case "TCPSackRecoveryFail":
                    procNetstat.TcpExt.TCPSackRecoveryFail = value
                case "TCPRcvCollapsed":
                    procNetstat.TcpExt.TCPRcvCollapsed = value
                case "TCPDSACKOldSent":
                    procNetstat.TcpExt.TCPDSACKOldSent = value
                case "TCPDSACKOfoSent":
                    procNetstat.TcpExt.TCPDSACKOfoSent = value
                case "TCPDSACKRecv":
                    procNetstat.TcpExt.TCPDSACKRecv = value
                case "TCPDSACKOfoRecv":
                    procNetstat.TcpExt.TCPDSACKOfoRecv = value
                case "TCPAbortOnData":
                    procNetstat.TcpExt.TCPAbortOnData = value
                case "TCPAbortOnClose":
                    procNetstat.TcpExt.TCPAbortOnClose = value
                case "TCPDeferAcceptDrop":
                    procNetstat.TcpExt.TCPDeferAcceptDrop = value
                case "IPReversePathFilter":
                    procNetstat.TcpExt.IPReversePathFilter = value
                case "TCPTimeWaitOverflow":
                    procNetstat.TcpExt.TCPTimeWaitOverflow = value
                case "TCPReqQFullDoCookies":
                    procNetstat.TcpExt.TCPReqQFullDoCookies = value
                case "TCPReqQFullDrop":
                    procNetstat.TcpExt.TCPReqQFullDrop = value
                case "TCPRetransFail":
                    procNetstat.TcpExt.TCPRetransFail = value
                case "TCPRcvCoalesce":
                    procNetstat.TcpExt.TCPRcvCoalesce = value
                case "TCPOFOQueue":
                    procNetstat.TcpExt.TCPOFOQueue = value
                case "TCPOFODrop":
                    procNetstat.TcpExt.TCPOFODrop = value
                case "TCPOFOMerge":
                    procNetstat.TcpExt.TCPOFOMerge = value
                case "TCPChallengeACK":
                    procNetstat.TcpExt.TCPChallengeACK = value
                case "TCPSYNChallenge":
                    procNetstat.TcpExt.TCPSYNChallenge = value
                case "TCPFastOpenActive":
                    procNetstat.TcpExt.TCPFastOpenActive = value
                case "TCPFastOpenActiveFail":
                    procNetstat.TcpExt.TCPFastOpenActiveFail = value
|
case "TCPFastOpenPassive":
|
||||||
|
procNetstat.TcpExt.TCPFastOpenPassive = value
|
||||||
|
case "TCPFastOpenPassiveFail":
|
||||||
|
procNetstat.TcpExt.TCPFastOpenPassiveFail = value
|
||||||
|
case "TCPFastOpenListenOverflow":
|
||||||
|
procNetstat.TcpExt.TCPFastOpenListenOverflow = value
|
||||||
|
case "TCPFastOpenCookieReqd":
|
||||||
|
procNetstat.TcpExt.TCPFastOpenCookieReqd = value
|
||||||
|
case "TCPFastOpenBlackhole":
|
||||||
|
procNetstat.TcpExt.TCPFastOpenBlackhole = value
|
||||||
|
case "TCPSpuriousRtxHostQueues":
|
||||||
|
procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value
|
||||||
|
case "BusyPollRxPackets":
|
||||||
|
procNetstat.TcpExt.BusyPollRxPackets = value
|
||||||
|
case "TCPAutoCorking":
|
||||||
|
procNetstat.TcpExt.TCPAutoCorking = value
|
||||||
|
case "TCPFromZeroWindowAdv":
|
||||||
|
procNetstat.TcpExt.TCPFromZeroWindowAdv = value
|
||||||
|
case "TCPToZeroWindowAdv":
|
||||||
|
procNetstat.TcpExt.TCPToZeroWindowAdv = value
|
||||||
|
case "TCPWantZeroWindowAdv":
|
||||||
|
procNetstat.TcpExt.TCPWantZeroWindowAdv = value
|
||||||
|
case "TCPSynRetrans":
|
||||||
|
procNetstat.TcpExt.TCPSynRetrans = value
|
||||||
|
case "TCPOrigDataSent":
|
||||||
|
procNetstat.TcpExt.TCPOrigDataSent = value
|
||||||
|
case "TCPHystartTrainDetect":
|
||||||
|
procNetstat.TcpExt.TCPHystartTrainDetect = value
|
||||||
|
case "TCPHystartTrainCwnd":
|
||||||
|
procNetstat.TcpExt.TCPHystartTrainCwnd = value
|
||||||
|
case "TCPHystartDelayDetect":
|
||||||
|
procNetstat.TcpExt.TCPHystartDelayDetect = value
|
||||||
|
case "TCPHystartDelayCwnd":
|
||||||
|
procNetstat.TcpExt.TCPHystartDelayCwnd = value
|
||||||
|
case "TCPACKSkippedSynRecv":
|
||||||
|
procNetstat.TcpExt.TCPACKSkippedSynRecv = value
|
||||||
|
case "TCPACKSkippedPAWS":
|
||||||
|
procNetstat.TcpExt.TCPACKSkippedPAWS = value
|
||||||
|
case "TCPACKSkippedSeq":
|
||||||
|
procNetstat.TcpExt.TCPACKSkippedSeq = value
|
||||||
|
case "TCPACKSkippedFinWait2":
|
||||||
|
procNetstat.TcpExt.TCPACKSkippedFinWait2 = value
|
||||||
|
case "TCPACKSkippedTimeWait":
|
||||||
|
procNetstat.TcpExt.TCPACKSkippedTimeWait = value
|
||||||
|
case "TCPACKSkippedChallenge":
|
||||||
|
procNetstat.TcpExt.TCPACKSkippedChallenge = value
|
||||||
|
case "TCPWinProbe":
|
||||||
|
procNetstat.TcpExt.TCPWinProbe = value
|
||||||
|
case "TCPKeepAlive":
|
||||||
|
procNetstat.TcpExt.TCPKeepAlive = value
|
||||||
|
case "TCPMTUPFail":
|
||||||
|
procNetstat.TcpExt.TCPMTUPFail = value
|
||||||
|
case "TCPMTUPSuccess":
|
||||||
|
procNetstat.TcpExt.TCPMTUPSuccess = value
|
||||||
|
case "TCPWqueueTooBig":
|
||||||
|
procNetstat.TcpExt.TCPWqueueTooBig = value
|
||||||
|
}
|
||||||
|
case "IpExt":
|
||||||
|
switch key {
|
||||||
|
case "InNoRoutes":
|
||||||
|
procNetstat.IpExt.InNoRoutes = value
|
||||||
|
case "InTruncatedPkts":
|
||||||
|
procNetstat.IpExt.InTruncatedPkts = value
|
||||||
|
case "InMcastPkts":
|
||||||
|
procNetstat.IpExt.InMcastPkts = value
|
||||||
|
case "OutMcastPkts":
|
||||||
|
procNetstat.IpExt.OutMcastPkts = value
|
||||||
|
case "InBcastPkts":
|
||||||
|
procNetstat.IpExt.InBcastPkts = value
|
||||||
|
case "OutBcastPkts":
|
||||||
|
procNetstat.IpExt.OutBcastPkts = value
|
||||||
|
case "InOctets":
|
||||||
|
procNetstat.IpExt.InOctets = value
|
||||||
|
case "OutOctets":
|
||||||
|
procNetstat.IpExt.OutOctets = value
|
||||||
|
case "InMcastOctets":
|
||||||
|
procNetstat.IpExt.InMcastOctets = value
|
||||||
|
case "OutMcastOctets":
|
||||||
|
procNetstat.IpExt.OutMcastOctets = value
|
||||||
|
case "InBcastOctets":
|
||||||
|
procNetstat.IpExt.InBcastOctets = value
|
||||||
|
case "OutBcastOctets":
|
||||||
|
procNetstat.IpExt.OutBcastOctets = value
|
||||||
|
case "InCsumErrors":
|
||||||
|
procNetstat.IpExt.InCsumErrors = value
|
||||||
|
case "InNoECTPkts":
|
||||||
|
procNetstat.IpExt.InNoECTPkts = value
|
||||||
|
case "InECT1Pkts":
|
||||||
|
procNetstat.IpExt.InECT1Pkts = value
|
||||||
|
case "InECT0Pkts":
|
||||||
|
procNetstat.IpExt.InECT0Pkts = value
|
||||||
|
case "InCEPkts":
|
||||||
|
procNetstat.IpExt.InCEPkts = value
|
||||||
|
case "ReasmOverlaps":
|
||||||
|
procNetstat.IpExt.ReasmOverlaps = value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return procNetstat, scanner.Err()
|
||||||
|
}
|
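The per-process netstat reader added above can be exercised directly from the vendored package. A minimal, hypothetical sketch follows (the wrapper program is an assumption for illustration; only Proc.Netstat and the TcpExt fields come from this diff):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	// Open the calling process's entry under /proc.
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	// Reads and parses /proc/self/net/netstat.
	ns, err := p.Netstat()
	if err != nil {
		panic(err)
	}
	fmt.Println("SYN retransmits:", ns.TcpExt.TCPSynRetrans)
}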
14
src/vendor/github.com/prometheus/procfs/proc_psi.go
generated
vendored
@ -35,9 +35,10 @@ import (

const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"

// PSILine is a single line of values as returned by /proc/pressure/*
// PSILine is a single line of values as returned by `/proc/pressure/*`.
// The Avg entries are averages over n seconds, as a percentage
//
// The Total line is in microseconds
// The Avg entries are averages over n seconds, as a percentage.
// The Total line is in microseconds.
type PSILine struct {
type PSILine struct {
Avg10 float64
Avg10 float64
Avg60 float64
Avg60 float64
@ -46,8 +47,9 @@ type PSILine struct {
}
}

// PSIStats represent pressure stall information from /proc/pressure/*
// PSIStats represent pressure stall information from /proc/pressure/*
// Some indicates the share of time in which at least some tasks are stalled
//
// Full indicates the share of time in which all non-idle tasks are stalled simultaneously
// "Some" indicates the share of time in which at least some tasks are stalled.
// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously.
type PSIStats struct {
type PSIStats struct {
Some *PSILine
Some *PSILine
Full *PSILine
Full *PSILine
@ -65,7 +67,7 @@ func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
return parsePSIStats(resource, bytes.NewReader(data))
return parsePSIStats(resource, bytes.NewReader(data))
}
}

// parsePSIStats parses the specified file for pressure stall information
// parsePSIStats parses the specified file for pressure stall information.
func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
psiStats := PSIStats{}
psiStats := PSIStats{}
23
src/vendor/github.com/prometheus/procfs/proc_smaps.go
generated
vendored
@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// See the License for the specific language governing permissions and
// limitations under the License.
// limitations under the License.

//go:build !windows
// +build !windows
// +build !windows

package procfs
package procfs
@ -28,30 +29,30 @@ import (
)
)

var (
var (
// match the header line before each mapped zone in /proc/pid/smaps
// match the header line before each mapped zone in `/proc/pid/smaps`.
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
)
)

type ProcSMapsRollup struct {
type ProcSMapsRollup struct {
// Amount of the mapping that is currently resident in RAM
// Amount of the mapping that is currently resident in RAM.
Rss uint64
Rss uint64
// Process's proportional share of this mapping
// Process's proportional share of this mapping.
Pss uint64
Pss uint64
// Size in bytes of clean shared pages
// Size in bytes of clean shared pages.
SharedClean uint64
SharedClean uint64
// Size in bytes of dirty shared pages
// Size in bytes of dirty shared pages.
SharedDirty uint64
SharedDirty uint64
// Size in bytes of clean private pages
// Size in bytes of clean private pages.
PrivateClean uint64
PrivateClean uint64
// Size in bytes of dirty private pages
// Size in bytes of dirty private pages.
PrivateDirty uint64
PrivateDirty uint64
// Amount of memory currently marked as referenced or accessed
// Amount of memory currently marked as referenced or accessed.
Referenced uint64
Referenced uint64
// Amount of memory that does not belong to any file
// Amount of memory that does not belong to any file.
Anonymous uint64
Anonymous uint64
// Amount would-be-anonymous memory currently on swap
// Amount would-be-anonymous memory currently on swap.
Swap uint64
Swap uint64
// Process's proportional memory on swap
// Process's proportional memory on swap.
SwapPss uint64
SwapPss uint64
}
}
353
src/vendor/github.com/prometheus/procfs/proc_snmp.go
generated
vendored
Normal file
@ -0,0 +1,353 @@
|
|||||||
|
// Copyright 2022 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package procfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/prometheus/procfs/internal/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProcSnmp models the content of /proc/<pid>/net/snmp.
|
||||||
|
type ProcSnmp struct {
|
||||||
|
// The process ID.
|
||||||
|
PID int
|
||||||
|
Ip
|
||||||
|
Icmp
|
||||||
|
IcmpMsg
|
||||||
|
Tcp
|
||||||
|
Udp
|
||||||
|
UdpLite
|
||||||
|
}
|
||||||
|
|
||||||
|
type Ip struct { // nolint:revive
|
||||||
|
Forwarding float64
|
||||||
|
DefaultTTL float64
|
||||||
|
InReceives float64
|
||||||
|
InHdrErrors float64
|
||||||
|
InAddrErrors float64
|
||||||
|
ForwDatagrams float64
|
||||||
|
InUnknownProtos float64
|
||||||
|
InDiscards float64
|
||||||
|
InDelivers float64
|
||||||
|
OutRequests float64
|
||||||
|
OutDiscards float64
|
||||||
|
OutNoRoutes float64
|
||||||
|
ReasmTimeout float64
|
||||||
|
ReasmReqds float64
|
||||||
|
ReasmOKs float64
|
||||||
|
ReasmFails float64
|
||||||
|
FragOKs float64
|
||||||
|
FragFails float64
|
||||||
|
FragCreates float64
|
||||||
|
}
|
||||||
|
|
||||||
|
type Icmp struct {
|
||||||
|
InMsgs float64
|
||||||
|
InErrors float64
|
||||||
|
InCsumErrors float64
|
||||||
|
InDestUnreachs float64
|
||||||
|
InTimeExcds float64
|
||||||
|
InParmProbs float64
|
||||||
|
InSrcQuenchs float64
|
||||||
|
InRedirects float64
|
||||||
|
InEchos float64
|
||||||
|
InEchoReps float64
|
||||||
|
InTimestamps float64
|
||||||
|
InTimestampReps float64
|
||||||
|
InAddrMasks float64
|
||||||
|
InAddrMaskReps float64
|
||||||
|
OutMsgs float64
|
||||||
|
OutErrors float64
|
||||||
|
OutDestUnreachs float64
|
||||||
|
OutTimeExcds float64
|
||||||
|
OutParmProbs float64
|
||||||
|
OutSrcQuenchs float64
|
||||||
|
OutRedirects float64
|
||||||
|
OutEchos float64
|
||||||
|
OutEchoReps float64
|
||||||
|
OutTimestamps float64
|
||||||
|
OutTimestampReps float64
|
||||||
|
OutAddrMasks float64
|
||||||
|
OutAddrMaskReps float64
|
||||||
|
}
|
||||||
|
|
||||||
|
type IcmpMsg struct {
|
||||||
|
InType3 float64
|
||||||
|
OutType3 float64
|
||||||
|
}
|
||||||
|
|
||||||
|
type Tcp struct { // nolint:revive
|
||||||
|
RtoAlgorithm float64
|
||||||
|
RtoMin float64
|
||||||
|
RtoMax float64
|
||||||
|
MaxConn float64
|
||||||
|
ActiveOpens float64
|
||||||
|
PassiveOpens float64
|
||||||
|
AttemptFails float64
|
||||||
|
EstabResets float64
|
||||||
|
CurrEstab float64
|
||||||
|
InSegs float64
|
||||||
|
OutSegs float64
|
||||||
|
RetransSegs float64
|
||||||
|
InErrs float64
|
||||||
|
OutRsts float64
|
||||||
|
InCsumErrors float64
|
||||||
|
}
|
||||||
|
|
||||||
|
type Udp struct { // nolint:revive
|
||||||
|
InDatagrams float64
|
||||||
|
NoPorts float64
|
||||||
|
InErrors float64
|
||||||
|
OutDatagrams float64
|
||||||
|
RcvbufErrors float64
|
||||||
|
SndbufErrors float64
|
||||||
|
InCsumErrors float64
|
||||||
|
IgnoredMulti float64
|
||||||
|
}
|
||||||
|
|
||||||
|
type UdpLite struct { // nolint:revive
|
||||||
|
InDatagrams float64
|
||||||
|
NoPorts float64
|
||||||
|
InErrors float64
|
||||||
|
OutDatagrams float64
|
||||||
|
RcvbufErrors float64
|
||||||
|
SndbufErrors float64
|
||||||
|
InCsumErrors float64
|
||||||
|
IgnoredMulti float64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Proc) Snmp() (ProcSnmp, error) {
|
||||||
|
filename := p.path("net/snmp")
|
||||||
|
data, err := util.ReadFileNoStat(filename)
|
||||||
|
if err != nil {
|
||||||
|
return ProcSnmp{PID: p.PID}, err
|
||||||
|
}
|
||||||
|
procSnmp, err := parseSnmp(bytes.NewReader(data), filename)
|
||||||
|
procSnmp.PID = p.PID
|
||||||
|
return procSnmp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseSnmp parses the metrics from /proc/<pid>/net/snmp file
// and returns a ProcSnmp structure (e.g. Ip.Forwarding = 2).
|
||||||
|
func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
|
||||||
|
var (
|
||||||
|
scanner = bufio.NewScanner(r)
|
||||||
|
procSnmp = ProcSnmp{}
|
||||||
|
)
|
||||||
|
|
||||||
|
for scanner.Scan() {
|
||||||
|
nameParts := strings.Split(scanner.Text(), " ")
|
||||||
|
scanner.Scan()
|
||||||
|
valueParts := strings.Split(scanner.Text(), " ")
|
||||||
|
// Remove trailing :.
|
||||||
|
protocol := strings.TrimSuffix(nameParts[0], ":")
|
||||||
|
if len(nameParts) != len(valueParts) {
|
||||||
|
return procSnmp, fmt.Errorf("field count mismatch in %s: %s",
|
||||||
|
fileName, protocol)
|
||||||
|
}
|
||||||
|
for i := 1; i < len(nameParts); i++ {
|
||||||
|
value, err := strconv.ParseFloat(valueParts[i], 64)
|
||||||
|
if err != nil {
|
||||||
|
return procSnmp, err
|
||||||
|
}
|
||||||
|
key := nameParts[i]
|
||||||
|
|
||||||
|
switch protocol {
|
||||||
|
case "Ip":
|
||||||
|
switch key {
|
||||||
|
case "Forwarding":
|
||||||
|
procSnmp.Ip.Forwarding = value
|
||||||
|
case "DefaultTTL":
|
||||||
|
procSnmp.Ip.DefaultTTL = value
|
||||||
|
case "InReceives":
|
||||||
|
procSnmp.Ip.InReceives = value
|
||||||
|
case "InHdrErrors":
|
||||||
|
procSnmp.Ip.InHdrErrors = value
|
||||||
|
case "InAddrErrors":
|
||||||
|
procSnmp.Ip.InAddrErrors = value
|
||||||
|
case "ForwDatagrams":
|
||||||
|
procSnmp.Ip.ForwDatagrams = value
|
||||||
|
case "InUnknownProtos":
|
||||||
|
procSnmp.Ip.InUnknownProtos = value
|
||||||
|
case "InDiscards":
|
||||||
|
procSnmp.Ip.InDiscards = value
|
||||||
|
case "InDelivers":
|
||||||
|
procSnmp.Ip.InDelivers = value
|
||||||
|
case "OutRequests":
|
||||||
|
procSnmp.Ip.OutRequests = value
|
||||||
|
case "OutDiscards":
|
||||||
|
procSnmp.Ip.OutDiscards = value
|
||||||
|
case "OutNoRoutes":
|
||||||
|
procSnmp.Ip.OutNoRoutes = value
|
||||||
|
case "ReasmTimeout":
|
||||||
|
procSnmp.Ip.ReasmTimeout = value
|
||||||
|
case "ReasmReqds":
|
||||||
|
procSnmp.Ip.ReasmReqds = value
|
||||||
|
case "ReasmOKs":
|
||||||
|
procSnmp.Ip.ReasmOKs = value
|
||||||
|
case "ReasmFails":
|
||||||
|
procSnmp.Ip.ReasmFails = value
|
||||||
|
case "FragOKs":
|
||||||
|
procSnmp.Ip.FragOKs = value
|
||||||
|
case "FragFails":
|
||||||
|
procSnmp.Ip.FragFails = value
|
||||||
|
case "FragCreates":
|
||||||
|
procSnmp.Ip.FragCreates = value
|
||||||
|
}
|
||||||
|
case "Icmp":
|
||||||
|
switch key {
|
||||||
|
case "InMsgs":
|
||||||
|
procSnmp.Icmp.InMsgs = value
|
||||||
|
case "InErrors":
|
||||||
|
procSnmp.Icmp.InErrors = value
|
||||||
|
case "InCsumErrors":
|
||||||
|
procSnmp.Icmp.InCsumErrors = value
|
||||||
|
case "InDestUnreachs":
|
||||||
|
procSnmp.Icmp.InDestUnreachs = value
|
||||||
|
case "InTimeExcds":
|
||||||
|
procSnmp.Icmp.InTimeExcds = value
|
||||||
|
case "InParmProbs":
|
||||||
|
procSnmp.Icmp.InParmProbs = value
|
||||||
|
case "InSrcQuenchs":
|
||||||
|
procSnmp.Icmp.InSrcQuenchs = value
|
||||||
|
case "InRedirects":
|
||||||
|
procSnmp.Icmp.InRedirects = value
|
||||||
|
case "InEchos":
|
||||||
|
procSnmp.Icmp.InEchos = value
|
||||||
|
case "InEchoReps":
|
||||||
|
procSnmp.Icmp.InEchoReps = value
|
||||||
|
case "InTimestamps":
|
||||||
|
procSnmp.Icmp.InTimestamps = value
|
||||||
|
case "InTimestampReps":
|
||||||
|
procSnmp.Icmp.InTimestampReps = value
|
||||||
|
case "InAddrMasks":
|
||||||
|
procSnmp.Icmp.InAddrMasks = value
|
||||||
|
case "InAddrMaskReps":
|
||||||
|
procSnmp.Icmp.InAddrMaskReps = value
|
||||||
|
case "OutMsgs":
|
||||||
|
procSnmp.Icmp.OutMsgs = value
|
||||||
|
case "OutErrors":
|
||||||
|
procSnmp.Icmp.OutErrors = value
|
||||||
|
case "OutDestUnreachs":
|
||||||
|
procSnmp.Icmp.OutDestUnreachs = value
|
||||||
|
case "OutTimeExcds":
|
||||||
|
procSnmp.Icmp.OutTimeExcds = value
|
||||||
|
case "OutParmProbs":
|
||||||
|
procSnmp.Icmp.OutParmProbs = value
|
||||||
|
case "OutSrcQuenchs":
|
||||||
|
procSnmp.Icmp.OutSrcQuenchs = value
|
||||||
|
case "OutRedirects":
|
||||||
|
procSnmp.Icmp.OutRedirects = value
|
||||||
|
case "OutEchos":
|
||||||
|
procSnmp.Icmp.OutEchos = value
|
||||||
|
case "OutEchoReps":
|
||||||
|
procSnmp.Icmp.OutEchoReps = value
|
||||||
|
case "OutTimestamps":
|
||||||
|
procSnmp.Icmp.OutTimestamps = value
|
||||||
|
case "OutTimestampReps":
|
||||||
|
procSnmp.Icmp.OutTimestampReps = value
|
||||||
|
case "OutAddrMasks":
|
||||||
|
procSnmp.Icmp.OutAddrMasks = value
|
||||||
|
case "OutAddrMaskReps":
|
||||||
|
procSnmp.Icmp.OutAddrMaskReps = value
|
||||||
|
}
|
||||||
|
case "IcmpMsg":
|
||||||
|
switch key {
|
||||||
|
case "InType3":
|
||||||
|
procSnmp.IcmpMsg.InType3 = value
|
||||||
|
case "OutType3":
|
||||||
|
procSnmp.IcmpMsg.OutType3 = value
|
||||||
|
}
|
||||||
|
case "Tcp":
|
||||||
|
switch key {
|
||||||
|
case "RtoAlgorithm":
|
||||||
|
procSnmp.Tcp.RtoAlgorithm = value
|
||||||
|
case "RtoMin":
|
||||||
|
procSnmp.Tcp.RtoMin = value
|
||||||
|
case "RtoMax":
|
||||||
|
procSnmp.Tcp.RtoMax = value
|
||||||
|
case "MaxConn":
|
||||||
|
procSnmp.Tcp.MaxConn = value
|
||||||
|
case "ActiveOpens":
|
||||||
|
procSnmp.Tcp.ActiveOpens = value
|
||||||
|
case "PassiveOpens":
|
||||||
|
procSnmp.Tcp.PassiveOpens = value
|
||||||
|
case "AttemptFails":
|
||||||
|
procSnmp.Tcp.AttemptFails = value
|
||||||
|
case "EstabResets":
|
||||||
|
procSnmp.Tcp.EstabResets = value
|
||||||
|
case "CurrEstab":
|
||||||
|
procSnmp.Tcp.CurrEstab = value
|
||||||
|
case "InSegs":
|
||||||
|
procSnmp.Tcp.InSegs = value
|
||||||
|
case "OutSegs":
|
||||||
|
procSnmp.Tcp.OutSegs = value
|
||||||
|
case "RetransSegs":
|
||||||
|
procSnmp.Tcp.RetransSegs = value
|
||||||
|
case "InErrs":
|
||||||
|
procSnmp.Tcp.InErrs = value
|
||||||
|
case "OutRsts":
|
||||||
|
procSnmp.Tcp.OutRsts = value
|
||||||
|
case "InCsumErrors":
|
||||||
|
procSnmp.Tcp.InCsumErrors = value
|
||||||
|
}
|
||||||
|
case "Udp":
|
||||||
|
switch key {
|
||||||
|
case "InDatagrams":
|
||||||
|
procSnmp.Udp.InDatagrams = value
|
||||||
|
case "NoPorts":
|
||||||
|
procSnmp.Udp.NoPorts = value
|
||||||
|
case "InErrors":
|
||||||
|
procSnmp.Udp.InErrors = value
|
||||||
|
case "OutDatagrams":
|
||||||
|
procSnmp.Udp.OutDatagrams = value
|
||||||
|
case "RcvbufErrors":
|
||||||
|
procSnmp.Udp.RcvbufErrors = value
|
||||||
|
case "SndbufErrors":
|
||||||
|
procSnmp.Udp.SndbufErrors = value
|
||||||
|
case "InCsumErrors":
|
||||||
|
procSnmp.Udp.InCsumErrors = value
|
||||||
|
case "IgnoredMulti":
|
||||||
|
procSnmp.Udp.IgnoredMulti = value
|
||||||
|
}
|
||||||
|
case "UdpLite":
|
||||||
|
switch key {
|
||||||
|
case "InDatagrams":
|
||||||
|
procSnmp.UdpLite.InDatagrams = value
|
||||||
|
case "NoPorts":
|
||||||
|
procSnmp.UdpLite.NoPorts = value
|
||||||
|
case "InErrors":
|
||||||
|
procSnmp.UdpLite.InErrors = value
|
||||||
|
case "OutDatagrams":
|
||||||
|
procSnmp.UdpLite.OutDatagrams = value
|
||||||
|
case "RcvbufErrors":
|
||||||
|
procSnmp.UdpLite.RcvbufErrors = value
|
||||||
|
case "SndbufErrors":
|
||||||
|
procSnmp.UdpLite.SndbufErrors = value
|
||||||
|
case "InCsumErrors":
|
||||||
|
procSnmp.UdpLite.InCsumErrors = value
|
||||||
|
case "IgnoredMulti":
|
||||||
|
procSnmp.UdpLite.IgnoredMulti = value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return procSnmp, scanner.Err()
|
||||||
|
}
|
381
src/vendor/github.com/prometheus/procfs/proc_snmp6.go
generated
vendored
Normal file
@ -0,0 +1,381 @@
|
|||||||
|
// Copyright 2022 The Prometheus Authors
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package procfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/prometheus/procfs/internal/util"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProcSnmp6 models the content of /proc/<pid>/net/snmp6.
|
||||||
|
type ProcSnmp6 struct {
|
||||||
|
// The process ID.
|
||||||
|
PID int
|
||||||
|
Ip6
|
||||||
|
Icmp6
|
||||||
|
Udp6
|
||||||
|
UdpLite6
|
||||||
|
}
|
||||||
|
|
||||||
|
type Ip6 struct { // nolint:revive
|
||||||
|
InReceives float64
|
||||||
|
InHdrErrors float64
|
||||||
|
InTooBigErrors float64
|
||||||
|
InNoRoutes float64
|
||||||
|
InAddrErrors float64
|
||||||
|
InUnknownProtos float64
|
||||||
|
InTruncatedPkts float64
|
||||||
|
InDiscards float64
|
||||||
|
InDelivers float64
|
||||||
|
OutForwDatagrams float64
|
||||||
|
OutRequests float64
|
||||||
|
OutDiscards float64
|
||||||
|
OutNoRoutes float64
|
||||||
|
ReasmTimeout float64
|
||||||
|
ReasmReqds float64
|
||||||
|
ReasmOKs float64
|
||||||
|
ReasmFails float64
|
||||||
|
FragOKs float64
|
||||||
|
FragFails float64
|
||||||
|
FragCreates float64
|
||||||
|
InMcastPkts float64
|
||||||
|
OutMcastPkts float64
|
||||||
|
InOctets float64
|
||||||
|
OutOctets float64
|
||||||
|
InMcastOctets float64
|
||||||
|
OutMcastOctets float64
|
||||||
|
InBcastOctets float64
|
||||||
|
OutBcastOctets float64
|
||||||
|
InNoECTPkts float64
|
||||||
|
InECT1Pkts float64
|
||||||
|
InECT0Pkts float64
|
||||||
|
InCEPkts float64
|
||||||
|
}
|
||||||
|
|
||||||
|
type Icmp6 struct {
|
||||||
|
InMsgs float64
|
||||||
|
InErrors float64
|
||||||
|
OutMsgs float64
|
||||||
|
OutErrors float64
|
||||||
|
InCsumErrors float64
|
||||||
|
InDestUnreachs float64
|
||||||
|
InPktTooBigs float64
|
||||||
|
InTimeExcds float64
|
||||||
|
InParmProblems float64
|
||||||
|
InEchos float64
|
||||||
|
InEchoReplies float64
|
||||||
|
InGroupMembQueries float64
|
||||||
|
InGroupMembResponses float64
|
||||||
|
InGroupMembReductions float64
|
||||||
|
InRouterSolicits float64
|
||||||
|
InRouterAdvertisements float64
|
||||||
|
InNeighborSolicits float64
|
||||||
|
InNeighborAdvertisements float64
|
||||||
|
InRedirects float64
|
||||||
|
InMLDv2Reports float64
|
||||||
|
OutDestUnreachs float64
|
||||||
|
OutPktTooBigs float64
|
||||||
|
OutTimeExcds float64
|
||||||
|
OutParmProblems float64
|
||||||
|
OutEchos float64
|
||||||
|
OutEchoReplies float64
|
||||||
|
OutGroupMembQueries float64
|
||||||
|
OutGroupMembResponses float64
|
||||||
|
OutGroupMembReductions float64
|
||||||
|
OutRouterSolicits float64
|
||||||
|
OutRouterAdvertisements float64
|
||||||
|
OutNeighborSolicits float64
|
||||||
|
OutNeighborAdvertisements float64
|
||||||
|
OutRedirects float64
|
||||||
|
OutMLDv2Reports float64
|
||||||
|
InType1 float64
|
||||||
|
InType134 float64
|
||||||
|
InType135 float64
|
||||||
|
InType136 float64
|
||||||
|
InType143 float64
|
||||||
|
OutType133 float64
|
||||||
|
OutType135 float64
|
||||||
|
OutType136 float64
|
||||||
|
OutType143 float64
|
||||||
|
}
|
||||||
|
|
||||||
|
type Udp6 struct { // nolint:revive
|
||||||
|
InDatagrams float64
|
||||||
|
NoPorts float64
|
||||||
|
InErrors float64
|
||||||
|
OutDatagrams float64
|
||||||
|
RcvbufErrors float64
|
||||||
|
SndbufErrors float64
|
||||||
|
InCsumErrors float64
|
||||||
|
IgnoredMulti float64
|
||||||
|
}
|
||||||
|
|
||||||
|
type UdpLite6 struct { // nolint:revive
|
||||||
|
InDatagrams float64
|
||||||
|
NoPorts float64
|
||||||
|
InErrors float64
|
||||||
|
OutDatagrams float64
|
||||||
|
RcvbufErrors float64
|
||||||
|
SndbufErrors float64
|
||||||
|
InCsumErrors float64
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Proc) Snmp6() (ProcSnmp6, error) {
|
||||||
|
filename := p.path("net/snmp6")
|
||||||
|
data, err := util.ReadFileNoStat(filename)
|
||||||
|
if err != nil {
|
||||||
|
// On systems with IPv6 disabled, this file won't exist.
|
||||||
|
// Do nothing.
|
||||||
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
return ProcSnmp6{PID: p.PID}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return ProcSnmp6{PID: p.PID}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data))
|
||||||
|
procSnmp6.PID = p.PID
|
||||||
|
return procSnmp6, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseSNMP6Stats parses the metrics from /proc/<pid>/net/snmp6 file
// and returns a ProcSnmp6 structure.
|
||||||
|
func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
|
||||||
|
var (
|
||||||
|
scanner = bufio.NewScanner(r)
|
||||||
|
procSnmp6 = ProcSnmp6{}
|
||||||
|
)
|
||||||
|
|
||||||
|
for scanner.Scan() {
|
||||||
|
stat := strings.Fields(scanner.Text())
|
||||||
|
if len(stat) < 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Expect to have "6" in metric name, skip line otherwise
|
||||||
|
if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 {
|
||||||
|
protocol := stat[0][:sixIndex+1]
|
||||||
|
key := stat[0][sixIndex+1:]
|
||||||
|
value, err := strconv.ParseFloat(stat[1], 64)
|
||||||
|
if err != nil {
|
||||||
|
return procSnmp6, err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch protocol {
|
||||||
|
case "Ip6":
|
||||||
|
switch key {
|
||||||
|
case "InReceives":
|
||||||
|
procSnmp6.Ip6.InReceives = value
|
||||||
|
case "InHdrErrors":
|
||||||
|
procSnmp6.Ip6.InHdrErrors = value
|
||||||
|
case "InTooBigErrors":
|
||||||
|
procSnmp6.Ip6.InTooBigErrors = value
|
||||||
|
case "InNoRoutes":
|
||||||
|
procSnmp6.Ip6.InNoRoutes = value
|
||||||
|
case "InAddrErrors":
|
||||||
|
procSnmp6.Ip6.InAddrErrors = value
|
||||||
|
case "InUnknownProtos":
|
||||||
|
procSnmp6.Ip6.InUnknownProtos = value
|
||||||
|
case "InTruncatedPkts":
|
||||||
|
procSnmp6.Ip6.InTruncatedPkts = value
|
||||||
|
case "InDiscards":
|
||||||
|
procSnmp6.Ip6.InDiscards = value
|
||||||
|
case "InDelivers":
|
||||||
|
procSnmp6.Ip6.InDelivers = value
|
||||||
|
case "OutForwDatagrams":
|
||||||
|
procSnmp6.Ip6.OutForwDatagrams = value
|
||||||
|
case "OutRequests":
|
||||||
|
procSnmp6.Ip6.OutRequests = value
|
||||||
|
case "OutDiscards":
|
||||||
|
procSnmp6.Ip6.OutDiscards = value
|
||||||
|
case "OutNoRoutes":
|
||||||
|
procSnmp6.Ip6.OutNoRoutes = value
|
||||||
|
case "ReasmTimeout":
|
||||||
|
procSnmp6.Ip6.ReasmTimeout = value
|
||||||
|
case "ReasmReqds":
|
||||||
|
procSnmp6.Ip6.ReasmReqds = value
|
||||||
|
case "ReasmOKs":
|
||||||
|
procSnmp6.Ip6.ReasmOKs = value
|
||||||
|
case "ReasmFails":
|
||||||
|
procSnmp6.Ip6.ReasmFails = value
|
||||||
|
case "FragOKs":
|
||||||
|
procSnmp6.Ip6.FragOKs = value
|
||||||
|
case "FragFails":
|
||||||
|
procSnmp6.Ip6.FragFails = value
|
||||||
|
case "FragCreates":
|
||||||
|
procSnmp6.Ip6.FragCreates = value
|
||||||
|
case "InMcastPkts":
|
||||||
|
procSnmp6.Ip6.InMcastPkts = value
|
||||||
|
case "OutMcastPkts":
|
||||||
|
procSnmp6.Ip6.OutMcastPkts = value
|
||||||
|
case "InOctets":
|
||||||
|
procSnmp6.Ip6.InOctets = value
|
||||||
|
case "OutOctets":
|
||||||
|
procSnmp6.Ip6.OutOctets = value
|
||||||
|
case "InMcastOctets":
|
||||||
|
procSnmp6.Ip6.InMcastOctets = value
|
||||||
|
case "OutMcastOctets":
|
||||||
|
procSnmp6.Ip6.OutMcastOctets = value
|
||||||
|
case "InBcastOctets":
|
||||||
|
procSnmp6.Ip6.InBcastOctets = value
|
||||||
|
case "OutBcastOctets":
|
||||||
|
procSnmp6.Ip6.OutBcastOctets = value
|
||||||
|
case "InNoECTPkts":
|
||||||
|
procSnmp6.Ip6.InNoECTPkts = value
|
||||||
|
case "InECT1Pkts":
|
||||||
|
procSnmp6.Ip6.InECT1Pkts = value
|
||||||
|
case "InECT0Pkts":
|
||||||
|
procSnmp6.Ip6.InECT0Pkts = value
|
||||||
|
case "InCEPkts":
|
||||||
|
procSnmp6.Ip6.InCEPkts = value
|
||||||
|
|
||||||
|
}
|
||||||
|
case "Icmp6":
|
||||||
|
switch key {
|
||||||
|
case "InMsgs":
|
||||||
|
procSnmp6.Icmp6.InMsgs = value
|
||||||
|
case "InErrors":
|
||||||
|
procSnmp6.Icmp6.InErrors = value
|
||||||
|
case "OutMsgs":
|
||||||
|
procSnmp6.Icmp6.OutMsgs = value
|
||||||
|
case "OutErrors":
|
||||||
|
procSnmp6.Icmp6.OutErrors = value
|
||||||
|
case "InCsumErrors":
|
||||||
|
procSnmp6.Icmp6.InCsumErrors = value
|
||||||
|
case "InDestUnreachs":
|
||||||
|
procSnmp6.Icmp6.InDestUnreachs = value
|
||||||
|
case "InPktTooBigs":
|
||||||
|
procSnmp6.Icmp6.InPktTooBigs = value
|
||||||
|
case "InTimeExcds":
|
||||||
|
procSnmp6.Icmp6.InTimeExcds = value
|
||||||
|
case "InParmProblems":
|
||||||
|
procSnmp6.Icmp6.InParmProblems = value
|
||||||
|
case "InEchos":
|
||||||
|
procSnmp6.Icmp6.InEchos = value
|
||||||
|
case "InEchoReplies":
|
||||||
|
procSnmp6.Icmp6.InEchoReplies = value
|
||||||
|
case "InGroupMembQueries":
|
||||||
|
procSnmp6.Icmp6.InGroupMembQueries = value
|
||||||
|
case "InGroupMembResponses":
|
||||||
|
procSnmp6.Icmp6.InGroupMembResponses = value
|
||||||
|
case "InGroupMembReductions":
|
||||||
|
procSnmp6.Icmp6.InGroupMembReductions = value
|
||||||
|
case "InRouterSolicits":
|
||||||
|
procSnmp6.Icmp6.InRouterSolicits = value
|
||||||
|
case "InRouterAdvertisements":
|
||||||
|
procSnmp6.Icmp6.InRouterAdvertisements = value
|
||||||
|
case "InNeighborSolicits":
|
||||||
|
procSnmp6.Icmp6.InNeighborSolicits = value
|
||||||
|
case "InNeighborAdvertisements":
|
||||||
|
procSnmp6.Icmp6.InNeighborAdvertisements = value
|
||||||
|
case "InRedirects":
|
||||||
|
procSnmp6.Icmp6.InRedirects = value
|
||||||
|
case "InMLDv2Reports":
|
||||||
|
procSnmp6.Icmp6.InMLDv2Reports = value
|
||||||
|
case "OutDestUnreachs":
|
||||||
|
procSnmp6.Icmp6.OutDestUnreachs = value
|
||||||
|
case "OutPktTooBigs":
|
||||||
|
procSnmp6.Icmp6.OutPktTooBigs = value
|
||||||
|
case "OutTimeExcds":
|
||||||
|
procSnmp6.Icmp6.OutTimeExcds = value
|
||||||
|
case "OutParmProblems":
|
||||||
|
procSnmp6.Icmp6.OutParmProblems = value
|
||||||
|
case "OutEchos":
|
||||||
|
procSnmp6.Icmp6.OutEchos = value
|
||||||
|
case "OutEchoReplies":
|
||||||
|
procSnmp6.Icmp6.OutEchoReplies = value
|
||||||
|
case "OutGroupMembQueries":
|
||||||
|
procSnmp6.Icmp6.OutGroupMembQueries = value
|
||||||
|
case "OutGroupMembResponses":
|
||||||
|
procSnmp6.Icmp6.OutGroupMembResponses = value
|
||||||
|
case "OutGroupMembReductions":
|
||||||
|
procSnmp6.Icmp6.OutGroupMembReductions = value
|
||||||
|
case "OutRouterSolicits":
|
||||||
|
procSnmp6.Icmp6.OutRouterSolicits = value
|
||||||
|
case "OutRouterAdvertisements":
|
||||||
|
procSnmp6.Icmp6.OutRouterAdvertisements = value
|
||||||
|
case "OutNeighborSolicits":
|
||||||
|
procSnmp6.Icmp6.OutNeighborSolicits = value
|
||||||
|
case "OutNeighborAdvertisements":
|
||||||
|
procSnmp6.Icmp6.OutNeighborAdvertisements = value
|
||||||
|
case "OutRedirects":
|
||||||
|
procSnmp6.Icmp6.OutRedirects = value
|
||||||
|
case "OutMLDv2Reports":
|
||||||
|
procSnmp6.Icmp6.OutMLDv2Reports = value
|
||||||
|
case "InType1":
|
||||||
|
procSnmp6.Icmp6.InType1 = value
|
||||||
|
case "InType134":
|
||||||
|
procSnmp6.Icmp6.InType134 = value
|
||||||
|
case "InType135":
|
||||||
|
procSnmp6.Icmp6.InType135 = value
|
||||||
|
case "InType136":
|
||||||
|
procSnmp6.Icmp6.InType136 = value
|
||||||
|
case "InType143":
|
||||||
|
procSnmp6.Icmp6.InType143 = value
|
||||||
|
case "OutType133":
|
||||||
|
procSnmp6.Icmp6.OutType133 = value
|
||||||
|
case "OutType135":
|
||||||
|
procSnmp6.Icmp6.OutType135 = value
|
||||||
|
case "OutType136":
|
||||||
|
procSnmp6.Icmp6.OutType136 = value
|
||||||
|
case "OutType143":
|
||||||
|
procSnmp6.Icmp6.OutType143 = value
|
||||||
|
}
|
||||||
|
case "Udp6":
|
||||||
|
switch key {
|
||||||
|
case "InDatagrams":
|
||||||
|
procSnmp6.Udp6.InDatagrams = value
|
||||||
|
case "NoPorts":
|
||||||
|
procSnmp6.Udp6.NoPorts = value
|
||||||
|
case "InErrors":
|
||||||
|
procSnmp6.Udp6.InErrors = value
|
||||||
|
case "OutDatagrams":
|
||||||
|
procSnmp6.Udp6.OutDatagrams = value
|
||||||
|
case "RcvbufErrors":
|
||||||
|
procSnmp6.Udp6.RcvbufErrors = value
|
||||||
|
case "SndbufErrors":
|
||||||
|
procSnmp6.Udp6.SndbufErrors = value
|
||||||
|
case "InCsumErrors":
|
||||||
|
procSnmp6.Udp6.InCsumErrors = value
|
||||||
|
case "IgnoredMulti":
|
||||||
|
procSnmp6.Udp6.IgnoredMulti = value
|
||||||
|
}
|
||||||
|
case "UdpLite6":
|
||||||
|
switch key {
|
||||||
|
case "InDatagrams":
|
||||||
|
procSnmp6.UdpLite6.InDatagrams = value
|
||||||
|
case "NoPorts":
|
||||||
|
procSnmp6.UdpLite6.NoPorts = value
|
||||||
|
case "InErrors":
|
||||||
|
procSnmp6.UdpLite6.InErrors = value
|
||||||
|
case "OutDatagrams":
|
||||||
|
procSnmp6.UdpLite6.OutDatagrams = value
|
||||||
|
case "RcvbufErrors":
|
||||||
|
procSnmp6.UdpLite6.RcvbufErrors = value
|
||||||
|
case "SndbufErrors":
|
||||||
|
procSnmp6.UdpLite6.SndbufErrors = value
|
||||||
|
case "InCsumErrors":
|
||||||
|
procSnmp6.UdpLite6.InCsumErrors = value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return procSnmp6, scanner.Err()
|
||||||
|
}
|
11
src/vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
@ -81,10 +81,10 @@ type ProcStat struct {
STime uint
STime uint
// Amount of time that this process's waited-for children have been
// Amount of time that this process's waited-for children have been
// scheduled in user mode, measured in clock ticks.
// scheduled in user mode, measured in clock ticks.
CUTime uint
CUTime int
// Amount of time that this process's waited-for children have been
// Amount of time that this process's waited-for children have been
// scheduled in kernel mode, measured in clock ticks.
// scheduled in kernel mode, measured in clock ticks.
CSTime uint
CSTime int
// For processes running a real-time scheduling policy, this is the negated
// For processes running a real-time scheduling policy, this is the negated
// scheduling priority, minus one.
// scheduling priority, minus one.
Priority int
Priority int
@ -115,7 +115,7 @@ type ProcStat struct {

// NewStat returns the current status information of the process.
// NewStat returns the current status information of the process.
//
//
// Deprecated: use p.Stat() instead
// Deprecated: Use p.Stat() instead.
func (p Proc) NewStat() (ProcStat, error) {
func (p Proc) NewStat() (ProcStat, error) {
return p.Stat()
return p.Stat()
}
}
@ -141,6 +141,11 @@ func (p Proc) Stat() (ProcStat, error) {
}
}

s.Comm = string(data[l+1 : r])
s.Comm = string(data[l+1 : r])

// Check the following resources for the details about the particular stat
// fields and their data types:
// * https://man7.org/linux/man-pages/man5/proc.5.html
// * https://man7.org/linux/man-pages/man3/scanf.3.html
_, err = fmt.Fscan(
_, err = fmt.Fscan(
bytes.NewBuffer(data[r+2:]),
bytes.NewBuffer(data[r+2:]),
&s.State,
&s.State,
32
src/vendor/github.com/prometheus/procfs/proc_status.go
generated
vendored
@ -33,37 +33,37 @@ type ProcStatus struct {
TGID int
TGID int

// Peak virtual memory size.
// Peak virtual memory size.
VmPeak uint64 // nolint:golint
VmPeak uint64 // nolint:revive
// Virtual memory size.
// Virtual memory size.
VmSize uint64 // nolint:golint
VmSize uint64 // nolint:revive
// Locked memory size.
// Locked memory size.
VmLck uint64 // nolint:golint
VmLck uint64 // nolint:revive
// Pinned memory size.
// Pinned memory size.
VmPin uint64 // nolint:golint
VmPin uint64 // nolint:revive
// Peak resident set size.
// Peak resident set size.
VmHWM uint64 // nolint:golint
VmHWM uint64 // nolint:revive
// Resident set size (sum of RssAnnon RssFile and RssShmem).
// Resident set size (sum of RssAnnon RssFile and RssShmem).
VmRSS uint64 // nolint:golint
VmRSS uint64 // nolint:revive
// Size of resident anonymous memory.
// Size of resident anonymous memory.
RssAnon uint64 // nolint:golint
RssAnon uint64 // nolint:revive
// Size of resident file mappings.
// Size of resident file mappings.
RssFile uint64 // nolint:golint
RssFile uint64 // nolint:revive
// Size of resident shared memory.
// Size of resident shared memory.
RssShmem uint64 // nolint:golint
RssShmem uint64 // nolint:revive
// Size of data segments.
// Size of data segments.
VmData uint64 // nolint:golint
VmData uint64 // nolint:revive
// Size of stack segments.
// Size of stack segments.
VmStk uint64 // nolint:golint
VmStk uint64 // nolint:revive
// Size of text segments.
// Size of text segments.
VmExe uint64 // nolint:golint
VmExe uint64 // nolint:revive
// Shared library code size.
// Shared library code size.
VmLib uint64 // nolint:golint
VmLib uint64 // nolint:revive
// Page table entries size.
// Page table entries size.
VmPTE uint64 // nolint:golint
VmPTE uint64 // nolint:revive
// Size of second-level page tables.
// Size of second-level page tables.
VmPMD uint64 // nolint:golint
VmPMD uint64 // nolint:revive
// Swapped-out virtual memory size by anonymous private.
// Swapped-out virtual memory size by anonymous private.
VmSwap uint64 // nolint:golint
VmSwap uint64 // nolint:revive
// Size of hugetlb memory portions
// Size of hugetlb memory portions
HugetlbPages uint64
HugetlbPages uint64
51
src/vendor/github.com/prometheus/procfs/proc_sys.go
generated
vendored
Normal file
@ -0,0 +1,51 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
	"fmt"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

func sysctlToPath(sysctl string) string {
	return strings.Replace(sysctl, ".", "/", -1)
}

func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
	value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl)))
	if err != nil {
		return nil, err
	}
	return strings.Fields(value), nil
}

func (fs FS) SysctlInts(sysctl string) ([]int, error) {
	fields, err := fs.SysctlStrings(sysctl)
	if err != nil {
		return nil, err
	}

	values := make([]int, len(fields))
	for i, f := range fields {
		vp := util.NewValueParser(f)
		values[i] = vp.Int()
		if err := vp.Err(); err != nil {
			return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err)
		}
	}
	return values, nil
}
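The sysctl helpers in the new proc_sys.go accept dotted sysctl names and resolve them under /proc/sys. A small, hypothetical usage sketch (the program and the choice of kernel.pid_max are assumptions, not part of the diff):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	// Reads /proc/sys/kernel/pid_max and parses its fields as integers.
	vals, err := fs.SysctlInts("kernel.pid_max")
	if err != nil {
		panic(err)
	}
	fmt.Println("pid_max:", vals[0])
}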
6
src/vendor/github.com/prometheus/procfs/schedstat.go
generated
vendored
@ -40,7 +40,7 @@ type Schedstat struct {
CPUs []*SchedstatCPU
CPUs []*SchedstatCPU
}
}

// SchedstatCPU contains the values from one "cpu<N>" line
// SchedstatCPU contains the values from one "cpu<N>" line.
type SchedstatCPU struct {
type SchedstatCPU struct {
CPUNum string
CPUNum string

@ -49,14 +49,14 @@ type SchedstatCPU struct {
RunTimeslices uint64
RunTimeslices uint64
}
}

// ProcSchedstat contains the values from /proc/<pid>/schedstat
// ProcSchedstat contains the values from `/proc/<pid>/schedstat`.
type ProcSchedstat struct {
type ProcSchedstat struct {
RunningNanoseconds uint64
RunningNanoseconds uint64
WaitingNanoseconds uint64
WaitingNanoseconds uint64
RunTimeslices uint64
RunTimeslices uint64
}
}

// Schedstat reads data from /proc/schedstat
// Schedstat reads data from `/proc/schedstat`.
func (fs FS) Schedstat() (*Schedstat, error) {
func (fs FS) Schedstat() (*Schedstat, error) {
file, err := os.Open(fs.proc.Path("schedstat"))
file, err := os.Open(fs.proc.Path("schedstat"))
if err != nil {
if err != nil {
2
src/vendor/github.com/prometheus/procfs/slab.go
generated
vendored
@ -137,7 +137,7 @@ func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) {
return s, nil
return s, nil
}
}

// SlabInfo reads data from /proc/slabinfo
// SlabInfo reads data from `/proc/slabinfo`.
func (fs FS) SlabInfo() (SlabInfo, error) {
func (fs FS) SlabInfo() (SlabInfo, error) {
// TODO: Consider passing options to allow for parsing different
// TODO: Consider passing options to allow for parsing different
// slabinfo versions. However, slabinfo 2.1 has been stable since
// slabinfo versions. However, slabinfo 2.1 has been stable since
160
src/vendor/github.com/prometheus/procfs/softirqs.go
generated
vendored
Normal file
@ -0,0 +1,160 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package procfs

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strconv"
	"strings"

	"github.com/prometheus/procfs/internal/util"
)

// Softirqs represents the softirq statistics.
type Softirqs struct {
	Hi      []uint64
	Timer   []uint64
	NetTx   []uint64
	NetRx   []uint64
	Block   []uint64
	IRQPoll []uint64
	Tasklet []uint64
	Sched   []uint64
	HRTimer []uint64
	RCU     []uint64
}

func (fs FS) Softirqs() (Softirqs, error) {
	fileName := fs.proc.Path("softirqs")
	data, err := util.ReadFileNoStat(fileName)
	if err != nil {
		return Softirqs{}, err
	}

	reader := bytes.NewReader(data)

	return parseSoftirqs(reader)
}

func parseSoftirqs(r io.Reader) (Softirqs, error) {
	var (
		softirqs = Softirqs{}
		scanner  = bufio.NewScanner(r)
	)

	if !scanner.Scan() {
		return Softirqs{}, fmt.Errorf("softirqs empty")
	}

	for scanner.Scan() {
		parts := strings.Fields(scanner.Text())
		var err error

		// require at least one cpu
		if len(parts) < 2 {
			continue
		}
		switch {
		case parts[0] == "HI:":
			perCPU := parts[1:]
			softirqs.Hi = make([]uint64, len(perCPU))
			for i, count := range perCPU {
				if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
					return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err)
				}
			}
		case parts[0] == "TIMER:":
			perCPU := parts[1:]
			softirqs.Timer = make([]uint64, len(perCPU))
			for i, count := range perCPU {
				if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
					return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err)
				}
			}
		case parts[0] == "NET_TX:":
			perCPU := parts[1:]
			softirqs.NetTx = make([]uint64, len(perCPU))
			for i, count := range perCPU {
				if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
					return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err)
				}
			}
		case parts[0] == "NET_RX:":
			perCPU := parts[1:]
			softirqs.NetRx = make([]uint64, len(perCPU))
			for i, count := range perCPU {
				if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
					return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err)
				}
			}
		case parts[0] == "BLOCK:":
			perCPU := parts[1:]
			softirqs.Block = make([]uint64, len(perCPU))
			for i, count := range perCPU {
				if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
					return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err)
				}
			}
		case parts[0] == "IRQ_POLL:":
			perCPU := parts[1:]
			softirqs.IRQPoll = make([]uint64, len(perCPU))
			for i, count := range perCPU {
				if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
					return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err)
				}
			}
		case parts[0] == "TASKLET:":
			perCPU := parts[1:]
			softirqs.Tasklet = make([]uint64, len(perCPU))
			for i, count := range perCPU {
				if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
					return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err)
				}
			}
		case parts[0] == "SCHED:":
			perCPU := parts[1:]
			softirqs.Sched = make([]uint64, len(perCPU))
			for i, count := range perCPU {
				if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
					return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err)
				}
			}
		case parts[0] == "HRTIMER:":
			perCPU := parts[1:]
			softirqs.HRTimer = make([]uint64, len(perCPU))
			for i, count := range perCPU {
				if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
					return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err)
				}
			}
		case parts[0] == "RCU:":
			perCPU := parts[1:]
			softirqs.RCU = make([]uint64, len(perCPU))
			for i, count := range perCPU {
				if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
					return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err)
				}
			}
		}
	}

	if err := scanner.Err(); err != nil {
		return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err)
	}

	return softirqs, scanner.Err()
}
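The new softirqs reader exposes one counter per CPU column for each softirq class. A minimal, hypothetical consumption sketch (the summing program is an assumption; FS.Softirqs and the Timer field come from the file above):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	irqs, err := fs.Softirqs()
	if err != nil {
		panic(err)
	}
	var timer uint64
	for _, c := range irqs.Timer { // one counter per CPU column in /proc/softirqs
		timer += c
	}
	fmt.Println("TIMER softirqs across all CPUs:", timer)
}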
10  src/vendor/github.com/prometheus/procfs/stat.go  generated vendored

@@ -41,7 +41,7 @@ type CPUStat struct {

// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
-// It is possible to get per-cpu stats by reading /proc/softirqs
+// It is possible to get per-cpu stats by reading `/proc/softirqs`.
type SoftIRQStat struct {
Hi uint64
Timer uint64

@@ -145,7 +145,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
// NewStat returns information about current cpu/process statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
//
-// Deprecated: use fs.Stat() instead
+// Deprecated: Use fs.Stat() instead.
func NewStat() (Stat, error) {
fs, err := NewFS(fs.DefaultProcMountPoint)
if err != nil {

@@ -155,15 +155,15 @@ func NewStat() (Stat, error) {
}

// NewStat returns information about current cpu/process statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
//
-// Deprecated: use fs.Stat() instead
+// Deprecated: Use fs.Stat() instead.
func (fs FS) NewStat() (Stat, error) {
return fs.Stat()
}

// Stat returns information about current cpu/process statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) Stat() (Stat, error) {
fileName := fs.proc.Path("stat")
data, err := util.ReadFileNoStat(fileName)
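The stat.go hunks above only touch documentation: the package-level NewStat() is marked deprecated in favour of the FS.Stat() method. For orientation, a hypothetical caller-side sketch of the preferred path (the "/proc" mount point and the printed field are illustrative choices, not part of this diff):

    package main

    import (
        "fmt"

        "github.com/prometheus/procfs"
    )

    func main() {
        // Prefer FS.Stat() over the deprecated package-level NewStat(),
        // as the updated doc comments recommend.
        fs, err := procfs.NewFS("/proc")
        if err != nil {
            panic(err)
        }
        st, err := fs.Stat()
        if err != nil {
            panic(err)
        }
        fmt.Println("boot time:", st.BootTime)
    }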
6  src/vendor/github.com/prometheus/procfs/vm.go  generated vendored

@@ -11,13 +11,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+//go:build !windows
// +build !windows

package procfs

import (
"fmt"
-"io/ioutil"
"os"
"path/filepath"
"strings"

@@ -29,7 +29,7 @@ import (
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
-// and numa_zonelist_order (deprecated) which is a string
+// and numa_zonelist_order (deprecated) which is a string.
type VM struct {
AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes
BlockDump *int64 // /proc/sys/vm/block_dump

@@ -87,7 +87,7 @@ func (fs FS) VM() (*VM, error) {
return nil, fmt.Errorf("%s is not a directory", path)
}

-files, err := ioutil.ReadDir(path)
+files, err := os.ReadDir(path)
if err != nil {
return nil, err
}
5  src/vendor/github.com/prometheus/procfs/zoneinfo.go  generated vendored

@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+//go:build !windows
// +build !windows

package procfs

@@ -18,7 +19,7 @@ package procfs
import (
"bytes"
"fmt"
-"io/ioutil"
+"os"
"regexp"
"strings"

@@ -72,7 +73,7 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
// structs containing the relevant info. More information available here:
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
-data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
+data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil {
return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
}
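The vm.go and zoneinfo.go hunks are part of the wider migration off the deprecated io/ioutil package: ioutil.ReadDir becomes os.ReadDir (which returns []os.DirEntry rather than []fs.FileInfo) and ioutil.ReadFile becomes os.ReadFile (same signature). A minimal sketch of the same calls outside the vendored code, assuming a readable /proc/sys/vm directory; the exact paths are illustrative:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // os.ReadDir (Go 1.16+) replaces ioutil.ReadDir; os.ReadFile replaces ioutil.ReadFile.
        entries, err := os.ReadDir("/proc/sys/vm")
        if err != nil {
            panic(err)
        }
        for _, e := range entries {
            data, err := os.ReadFile("/proc/sys/vm/" + e.Name())
            if err != nil {
                continue // some files may be unreadable; skip them
            }
            fmt.Printf("%s = %s", e.Name(), data)
        }
    }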
3  src/vendor/google.golang.org/protobuf/AUTHORS  generated vendored

@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at https://tip.golang.org/AUTHORS.
3  src/vendor/google.golang.org/protobuf/CONTRIBUTORS  generated vendored

@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at https://tip.golang.org/CONTRIBUTORS.
174  src/vendor/google.golang.org/protobuf/encoding/protojson/decode.go  generated vendored

@@ -19,7 +19,7 @@ import (
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/set"
"google.golang.org/protobuf/proto"
-pref "google.golang.org/protobuf/reflect/protoreflect"
+"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)

@@ -113,7 +113,7 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
}

// unmarshalMessage unmarshals a message into the given protoreflect.Message.
-func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error {
+func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error {
if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
return unmarshal(d, m)
}

@@ -159,10 +159,10 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error {
}

// Get the FieldDescriptor.
-var fd pref.FieldDescriptor
+var fd protoreflect.FieldDescriptor
if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") {
// Only extension names are in [name] format.
-extName := pref.FullName(name[1 : len(name)-1])
+extName := protoreflect.FullName(name[1 : len(name)-1])
extType, err := d.opts.Resolver.FindExtensionByName(extName)
if err != nil && err != protoregistry.NotFound {
return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err)

@@ -240,23 +240,23 @@ func (d decoder) unmarshalMessage(m pref.Message, skipTypeURL bool) error {
}
}

-func isKnownValue(fd pref.FieldDescriptor) bool {
+func isKnownValue(fd protoreflect.FieldDescriptor) bool {
md := fd.Message()
return md != nil && md.FullName() == genid.Value_message_fullname
}

-func isNullValue(fd pref.FieldDescriptor) bool {
+func isNullValue(fd protoreflect.FieldDescriptor) bool {
ed := fd.Enum()
return ed != nil && ed.FullName() == genid.NullValue_enum_fullname
}

// unmarshalSingular unmarshals to the non-repeated field specified
// by the given FieldDescriptor.
-func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) error {
+func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error {
-var val pref.Value
+var val protoreflect.Value
var err error
switch fd.Kind() {
-case pref.MessageKind, pref.GroupKind:
+case protoreflect.MessageKind, protoreflect.GroupKind:
val = m.NewField(fd)
err = d.unmarshalMessage(val.Message(), false)
default:

@@ -272,63 +272,63 @@ func (d decoder) unmarshalSingular(m pref.Message, fd pref.FieldDescriptor) erro

// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by
// the given FieldDescriptor.
-func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) {
+func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
const b32 int = 32
const b64 int = 64

tok, err := d.Read()
if err != nil {
-return pref.Value{}, err
+return protoreflect.Value{}, err
}

kind := fd.Kind()
switch kind {
-case pref.BoolKind:
+case protoreflect.BoolKind:
if tok.Kind() == json.Bool {
-return pref.ValueOfBool(tok.Bool()), nil
+return protoreflect.ValueOfBool(tok.Bool()), nil
}

-case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if v, ok := unmarshalInt(tok, b32); ok {
return v, nil
}

-case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if v, ok := unmarshalInt(tok, b64); ok {
return v, nil
}

-case pref.Uint32Kind, pref.Fixed32Kind:
+case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if v, ok := unmarshalUint(tok, b32); ok {
return v, nil
}

-case pref.Uint64Kind, pref.Fixed64Kind:
+case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if v, ok := unmarshalUint(tok, b64); ok {
return v, nil
}

-case pref.FloatKind:
+case protoreflect.FloatKind:
if v, ok := unmarshalFloat(tok, b32); ok {
return v, nil
}

-case pref.DoubleKind:
+case protoreflect.DoubleKind:
if v, ok := unmarshalFloat(tok, b64); ok {
return v, nil
}

-case pref.StringKind:
+case protoreflect.StringKind:
if tok.Kind() == json.String {
-return pref.ValueOfString(tok.ParsedString()), nil
+return protoreflect.ValueOfString(tok.ParsedString()), nil
}

-case pref.BytesKind:
+case protoreflect.BytesKind:
if v, ok := unmarshalBytes(tok); ok {
return v, nil
}

-case pref.EnumKind:
+case protoreflect.EnumKind:
if v, ok := unmarshalEnum(tok, fd); ok {
return v, nil
}

@@ -337,10 +337,10 @@ func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) {
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
}

-return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
}

-func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) {
+func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.Number:
return getInt(tok, bitSize)

@@ -349,30 +349,30 @@ func unmarshalInt(tok json.Token, bitSize int) (pref.Value, bool) {
// Decode number from string.
s := strings.TrimSpace(tok.ParsedString())
if len(s) != len(tok.ParsedString()) {
-return pref.Value{}, false
+return protoreflect.Value{}, false
}
dec := json.NewDecoder([]byte(s))
tok, err := dec.Read()
if err != nil {
-return pref.Value{}, false
+return protoreflect.Value{}, false
}
return getInt(tok, bitSize)
}
-return pref.Value{}, false
+return protoreflect.Value{}, false
}

-func getInt(tok json.Token, bitSize int) (pref.Value, bool) {
+func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
n, ok := tok.Int(bitSize)
if !ok {
-return pref.Value{}, false
+return protoreflect.Value{}, false
}
if bitSize == 32 {
-return pref.ValueOfInt32(int32(n)), true
+return protoreflect.ValueOfInt32(int32(n)), true
}
-return pref.ValueOfInt64(n), true
+return protoreflect.ValueOfInt64(n), true
}

-func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) {
+func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.Number:
return getUint(tok, bitSize)

@@ -381,30 +381,30 @@ func unmarshalUint(tok json.Token, bitSize int) (pref.Value, bool) {
// Decode number from string.
s := strings.TrimSpace(tok.ParsedString())
if len(s) != len(tok.ParsedString()) {
-return pref.Value{}, false
+return protoreflect.Value{}, false
}
dec := json.NewDecoder([]byte(s))
tok, err := dec.Read()
if err != nil {
-return pref.Value{}, false
+return protoreflect.Value{}, false
}
return getUint(tok, bitSize)
}
-return pref.Value{}, false
+return protoreflect.Value{}, false
}

-func getUint(tok json.Token, bitSize int) (pref.Value, bool) {
+func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) {
n, ok := tok.Uint(bitSize)
if !ok {
-return pref.Value{}, false
+return protoreflect.Value{}, false
}
if bitSize == 32 {
-return pref.ValueOfUint32(uint32(n)), true
+return protoreflect.ValueOfUint32(uint32(n)), true
}
-return pref.ValueOfUint64(n), true
+return protoreflect.ValueOfUint64(n), true
}

-func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) {
+func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.Number:
return getFloat(tok, bitSize)

@@ -414,49 +414,49 @@ func unmarshalFloat(tok json.Token, bitSize int) (pref.Value, bool) {
switch s {
case "NaN":
if bitSize == 32 {
-return pref.ValueOfFloat32(float32(math.NaN())), true
+return protoreflect.ValueOfFloat32(float32(math.NaN())), true
}
-return pref.ValueOfFloat64(math.NaN()), true
+return protoreflect.ValueOfFloat64(math.NaN()), true
case "Infinity":
if bitSize == 32 {
-return pref.ValueOfFloat32(float32(math.Inf(+1))), true
+return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true
}
-return pref.ValueOfFloat64(math.Inf(+1)), true
+return protoreflect.ValueOfFloat64(math.Inf(+1)), true
case "-Infinity":
if bitSize == 32 {
-return pref.ValueOfFloat32(float32(math.Inf(-1))), true
+return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true
}
-return pref.ValueOfFloat64(math.Inf(-1)), true
+return protoreflect.ValueOfFloat64(math.Inf(-1)), true
}

// Decode number from string.
if len(s) != len(strings.TrimSpace(s)) {
-return pref.Value{}, false
+return protoreflect.Value{}, false
}
dec := json.NewDecoder([]byte(s))
tok, err := dec.Read()
if err != nil {
-return pref.Value{}, false
+return protoreflect.Value{}, false
}
return getFloat(tok, bitSize)
}
-return pref.Value{}, false
+return protoreflect.Value{}, false
}

-func getFloat(tok json.Token, bitSize int) (pref.Value, bool) {
+func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) {
n, ok := tok.Float(bitSize)
if !ok {
-return pref.Value{}, false
+return protoreflect.Value{}, false
}
if bitSize == 32 {
-return pref.ValueOfFloat32(float32(n)), true
+return protoreflect.ValueOfFloat32(float32(n)), true
}
-return pref.ValueOfFloat64(n), true
+return protoreflect.ValueOfFloat64(n), true
}

-func unmarshalBytes(tok json.Token) (pref.Value, bool) {
+func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) {
if tok.Kind() != json.String {
-return pref.Value{}, false
+return protoreflect.Value{}, false
}

s := tok.ParsedString()

@@ -469,36 +469,36 @@ func unmarshalBytes(tok json.Token) (pref.Value, bool) {
}
b, err := enc.DecodeString(s)
if err != nil {
-return pref.Value{}, false
+return protoreflect.Value{}, false
}
-return pref.ValueOfBytes(b), true
+return protoreflect.ValueOfBytes(b), true
}

-func unmarshalEnum(tok json.Token, fd pref.FieldDescriptor) (pref.Value, bool) {
+func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) {
switch tok.Kind() {
case json.String:
// Lookup EnumNumber based on name.
s := tok.ParsedString()
-if enumVal := fd.Enum().Values().ByName(pref.Name(s)); enumVal != nil {
+if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil {
-return pref.ValueOfEnum(enumVal.Number()), true
+return protoreflect.ValueOfEnum(enumVal.Number()), true
}

case json.Number:
if n, ok := tok.Int(32); ok {
-return pref.ValueOfEnum(pref.EnumNumber(n)), true
+return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true
}

case json.Null:
// This is only valid for google.protobuf.NullValue.
if isNullValue(fd) {
-return pref.ValueOfEnum(0), true
+return protoreflect.ValueOfEnum(0), true
}
}

-return pref.Value{}, false
+return protoreflect.Value{}, false
}

-func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error {
+func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
tok, err := d.Read()
if err != nil {
return err

@@ -508,7 +508,7 @@ func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error {
}

switch fd.Kind() {
-case pref.MessageKind, pref.GroupKind:
+case protoreflect.MessageKind, protoreflect.GroupKind:
for {
tok, err := d.Peek()
if err != nil {

@@ -549,7 +549,7 @@ func (d decoder) unmarshalList(list pref.List, fd pref.FieldDescriptor) error {
return nil
}

-func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error {
+func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
tok, err := d.Read()
if err != nil {
return err

@@ -561,18 +561,18 @@ func (d decoder) unmarshalMap(mmap pref.Map, fd pref.FieldDescriptor) error {
// Determine ahead whether map entry is a scalar type or a message type in
// order to call the appropriate unmarshalMapValue func inside the for loop
// below.
-var unmarshalMapValue func() (pref.Value, error)
+var unmarshalMapValue func() (protoreflect.Value, error)
switch fd.MapValue().Kind() {
-case pref.MessageKind, pref.GroupKind:
+case protoreflect.MessageKind, protoreflect.GroupKind:
-unmarshalMapValue = func() (pref.Value, error) {
+unmarshalMapValue = func() (protoreflect.Value, error) {
val := mmap.NewValue()
if err := d.unmarshalMessage(val.Message(), false); err != nil {
-return pref.Value{}, err
+return protoreflect.Value{}, err
}
return val, nil
}
default:
-unmarshalMapValue = func() (pref.Value, error) {
+unmarshalMapValue = func() (protoreflect.Value, error) {
return d.unmarshalScalar(fd.MapValue())
}
}

@@ -618,7 +618,7 @@ Loop:

// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey.
// A map key type is any integral or string type.
-func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref.MapKey, error) {
+func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) {
const b32 = 32
const b64 = 64
const base10 = 10

@@ -626,40 +626,40 @@ func (d decoder) unmarshalMapKey(tok json.Token, fd pref.FieldDescriptor) (pref.
name := tok.Name()
kind := fd.Kind()
switch kind {
-case pref.StringKind:
+case protoreflect.StringKind:
-return pref.ValueOfString(name).MapKey(), nil
+return protoreflect.ValueOfString(name).MapKey(), nil

-case pref.BoolKind:
+case protoreflect.BoolKind:
switch name {
case "true":
-return pref.ValueOfBool(true).MapKey(), nil
+return protoreflect.ValueOfBool(true).MapKey(), nil
case "false":
-return pref.ValueOfBool(false).MapKey(), nil
+return protoreflect.ValueOfBool(false).MapKey(), nil
}

-case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
if n, err := strconv.ParseInt(name, base10, b32); err == nil {
-return pref.ValueOfInt32(int32(n)).MapKey(), nil
+return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil
}

-case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
if n, err := strconv.ParseInt(name, base10, b64); err == nil {
-return pref.ValueOfInt64(int64(n)).MapKey(), nil
+return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil
}

-case pref.Uint32Kind, pref.Fixed32Kind:
+case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
if n, err := strconv.ParseUint(name, base10, b32); err == nil {
-return pref.ValueOfUint32(uint32(n)).MapKey(), nil
+return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil
}

-case pref.Uint64Kind, pref.Fixed64Kind:
+case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
if n, err := strconv.ParseUint(name, base10, b64); err == nil {
-return pref.ValueOfUint64(uint64(n)).MapKey(), nil
+return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil
}

default:
panic(fmt.Sprintf("invalid kind for map key: %v", kind))
}

-return pref.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString())
+return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString())
}
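Nearly all of the protojson changes in the protobuf v1.28.0 -> v1.28.1 bump are a mechanical rename: the vendored sources drop the local "pref" import alias and refer to the reflect package by its own name, protoreflect. A tiny hedged sketch of what that spelling change looks like at a call site (the value and the printed output are illustrative, not part of the diff):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoreflect"
    )

    func main() {
        // Before the rename a call site would read pref.ValueOfString("x");
        // after it, the same call is spelled with the package's real name.
        // The constructed Value is identical either way.
        v := protoreflect.ValueOfString("x")
        fmt.Println(v.String()) // prints the wrapped string
    }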
51  src/vendor/google.golang.org/protobuf/encoding/protojson/encode.go  generated vendored

@@ -18,7 +18,6 @@ import (
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
-pref "google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)

@@ -164,8 +163,8 @@ type typeURLFieldRanger struct {
typeURL string
}

-func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
-if !f(typeFieldDesc, pref.ValueOfString(m.typeURL)) {
+if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) {
return
}
m.FieldRanger.Range(f)

@@ -173,9 +172,9 @@ func (m typeURLFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool)

// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
// method to additionally iterate over unpopulated fields.
-type unpopulatedFieldRanger struct{ pref.Message }
+type unpopulatedFieldRanger struct{ protoreflect.Message }

-func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) bool) {
+func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
fds := m.Descriptor().Fields()
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)

@@ -184,10 +183,10 @@ func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) b
}

v := m.Get(fd)
-isProto2Scalar := fd.Syntax() == pref.Proto2 && fd.Default().IsValid()
+isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
-isSingularMessage := fd.Cardinality() != pref.Repeated && fd.Message() != nil
+isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
if isProto2Scalar || isSingularMessage {
-v = pref.Value{} // use invalid value to emit null
+v = protoreflect.Value{} // use invalid value to emit null
}
if !f(fd, v) {
return

@@ -199,7 +198,7 @@ func (m unpopulatedFieldRanger) Range(f func(pref.FieldDescriptor, pref.Value) b
// marshalMessage marshals the fields in the given protoreflect.Message.
// If the typeURL is non-empty, then a synthetic "@type" field is injected
// containing the URL as the value.
-func (e encoder) marshalMessage(m pref.Message, typeURL string) error {
+func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error {
if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) {
return errors.New("no support for proto1 MessageSets")
}

@@ -220,7 +219,7 @@ func (e encoder) marshalMessage(m pref.Message, typeURL string) error {
}

var err error
-order.RangeFields(fields, order.IndexNameFieldOrder, func(fd pref.FieldDescriptor, v pref.Value) bool {
+order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
name := fd.JSONName()
if e.opts.UseProtoNames {
name = fd.TextName()

@@ -238,7 +237,7 @@ func (e encoder) marshalMessage(m pref.Message, typeURL string) error {
}

// marshalValue marshals the given protoreflect.Value.
-func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error {
+func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
switch {
case fd.IsList():
return e.marshalList(val.List(), fd)

@@ -251,44 +250,44 @@ func (e encoder) marshalValue(val pref.Value, fd pref.FieldDescriptor) error {

// marshalSingular marshals the given non-repeated field value. This includes
// all scalar types, enums, messages, and groups.
-func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error {
+func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
if !val.IsValid() {
e.WriteNull()
return nil
}

switch kind := fd.Kind(); kind {
-case pref.BoolKind:
+case protoreflect.BoolKind:
e.WriteBool(val.Bool())

-case pref.StringKind:
+case protoreflect.StringKind:
if e.WriteString(val.String()) != nil {
return errors.InvalidUTF8(string(fd.FullName()))
}

-case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
e.WriteInt(val.Int())

-case pref.Uint32Kind, pref.Fixed32Kind:
+case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
e.WriteUint(val.Uint())

-case pref.Int64Kind, pref.Sint64Kind, pref.Uint64Kind,
+case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind,
-pref.Sfixed64Kind, pref.Fixed64Kind:
+protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind:
// 64-bit integers are written out as JSON string.
e.WriteString(val.String())

-case pref.FloatKind:
+case protoreflect.FloatKind:
// Encoder.WriteFloat handles the special numbers NaN and infinites.
e.WriteFloat(val.Float(), 32)

-case pref.DoubleKind:
+case protoreflect.DoubleKind:
// Encoder.WriteFloat handles the special numbers NaN and infinites.
e.WriteFloat(val.Float(), 64)

-case pref.BytesKind:
+case protoreflect.BytesKind:
e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes()))

-case pref.EnumKind:
+case protoreflect.EnumKind:
if fd.Enum().FullName() == genid.NullValue_enum_fullname {
e.WriteNull()
} else {

@@ -300,7 +299,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error
}
}

-case pref.MessageKind, pref.GroupKind:
+case protoreflect.MessageKind, protoreflect.GroupKind:
if err := e.marshalMessage(val.Message(), ""); err != nil {
return err
}

@@ -312,7 +311,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error
}

// marshalList marshals the given protoreflect.List.
-func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error {
+func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error {
e.StartArray()
defer e.EndArray()

@@ -326,12 +325,12 @@ func (e encoder) marshalList(list pref.List, fd pref.FieldDescriptor) error {
}

// marshalMap marshals given protoreflect.Map.
-func (e encoder) marshalMap(mmap pref.Map, fd pref.FieldDescriptor) error {
+func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
e.StartObject()
defer e.EndObject()

var err error
-order.RangeEntries(mmap, order.GenericKeyOrder, func(k pref.MapKey, v pref.Value) bool {
+order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool {
if err = e.WriteName(k.String()); err != nil {
return false
}
76  src/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go  generated vendored

@@ -17,14 +17,14 @@ import (
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/proto"
-pref "google.golang.org/protobuf/reflect/protoreflect"
+"google.golang.org/protobuf/reflect/protoreflect"
)

-type marshalFunc func(encoder, pref.Message) error
+type marshalFunc func(encoder, protoreflect.Message) error

// wellKnownTypeMarshaler returns a marshal function if the message type
// has specialized serialization behavior. It returns nil otherwise.
-func wellKnownTypeMarshaler(name pref.FullName) marshalFunc {
+func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc {
if name.Parent() == genid.GoogleProtobuf_package {
switch name.Name() {
case genid.Any_message_name:

@@ -58,11 +58,11 @@ func wellKnownTypeMarshaler(name pref.FullName) marshalFunc {
return nil
}

-type unmarshalFunc func(decoder, pref.Message) error
+type unmarshalFunc func(decoder, protoreflect.Message) error

// wellKnownTypeUnmarshaler returns a unmarshal function if the message type
// has specialized serialization behavior. It returns nil otherwise.
-func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc {
+func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc {
if name.Parent() == genid.GoogleProtobuf_package {
switch name.Name() {
case genid.Any_message_name:

@@ -102,7 +102,7 @@ func wellKnownTypeUnmarshaler(name pref.FullName) unmarshalFunc {
// custom JSON representation, that representation will be embedded adding a
// field `value` which holds the custom JSON in addition to the `@type` field.

-func (e encoder) marshalAny(m pref.Message) error {
+func (e encoder) marshalAny(m protoreflect.Message) error {
fds := m.Descriptor().Fields()
fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
fdValue := fds.ByNumber(genid.Any_Value_field_number)

@@ -163,7 +163,7 @@ func (e encoder) marshalAny(m pref.Message) error {
return nil
}

-func (d decoder) unmarshalAny(m pref.Message) error {
+func (d decoder) unmarshalAny(m protoreflect.Message) error {
// Peek to check for json.ObjectOpen to avoid advancing a read.
start, err := d.Peek()
if err != nil {

@@ -233,8 +233,8 @@ func (d decoder) unmarshalAny(m pref.Message) error {
fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
fdValue := fds.ByNumber(genid.Any_Value_field_number)

-m.Set(fdType, pref.ValueOfString(typeURL))
+m.Set(fdType, protoreflect.ValueOfString(typeURL))
-m.Set(fdValue, pref.ValueOfBytes(b))
+m.Set(fdValue, protoreflect.ValueOfBytes(b))
return nil
}

@@ -354,7 +354,7 @@ func (d decoder) skipJSONValue() error {

// unmarshalAnyValue unmarshals the given custom-type message from the JSON
// object's "value" field.
-func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) error {
+func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error {
// Skip ObjectOpen, and start reading the fields.
d.Read()

@@ -402,13 +402,13 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m pref.Message) erro

// Wrapper types are encoded as JSON primitives like string, number or boolean.

-func (e encoder) marshalWrapperType(m pref.Message) error {
+func (e encoder) marshalWrapperType(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
val := m.Get(fd)
return e.marshalSingular(val, fd)
}

-func (d decoder) unmarshalWrapperType(m pref.Message) error {
+func (d decoder) unmarshalWrapperType(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number)
val, err := d.unmarshalScalar(fd)
if err != nil {

@@ -420,13 +420,13 @@ func (d decoder) unmarshalWrapperType(m pref.Message) error {

// The JSON representation for Empty is an empty JSON object.

-func (e encoder) marshalEmpty(pref.Message) error {
+func (e encoder) marshalEmpty(protoreflect.Message) error {
e.StartObject()
e.EndObject()
return nil
}

-func (d decoder) unmarshalEmpty(pref.Message) error {
+func (d decoder) unmarshalEmpty(protoreflect.Message) error {
tok, err := d.Read()
if err != nil {
return err

@@ -462,12 +462,12 @@ func (d decoder) unmarshalEmpty(pref.Message) error {
// The JSON representation for Struct is a JSON object that contains the encoded
// Struct.fields map and follows the serialization rules for a map.

-func (e encoder) marshalStruct(m pref.Message) error {
+func (e encoder) marshalStruct(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
return e.marshalMap(m.Get(fd).Map(), fd)
}

-func (d decoder) unmarshalStruct(m pref.Message) error {
+func (d decoder) unmarshalStruct(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number)
return d.unmarshalMap(m.Mutable(fd).Map(), fd)
}

@@ -476,12 +476,12 @@ func (d decoder) unmarshalStruct(m pref.Message) error {
// ListValue.values repeated field and follows the serialization rules for a
// repeated field.

-func (e encoder) marshalListValue(m pref.Message) error {
+func (e encoder) marshalListValue(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
return e.marshalList(m.Get(fd).List(), fd)
}

-func (d decoder) unmarshalListValue(m pref.Message) error {
+func (d decoder) unmarshalListValue(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number)
return d.unmarshalList(m.Mutable(fd).List(), fd)
}

@@ -490,7 +490,7 @@ func (d decoder) unmarshalListValue(m pref.Message) error {
// set. Each of the field in the oneof has its own custom serialization rule. A
// Value message needs to be a oneof field set, else it is an error.

-func (e encoder) marshalKnownValue(m pref.Message) error {
+func (e encoder) marshalKnownValue(m protoreflect.Message) error {
od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name)
fd := m.WhichOneof(od)
if fd == nil {

@@ -504,19 +504,19 @@ func (e encoder) marshalKnownValue(m pref.Message) error {
return e.marshalSingular(m.Get(fd), fd)
}

-func (d decoder) unmarshalKnownValue(m pref.Message) error {
+func (d decoder) unmarshalKnownValue(m protoreflect.Message) error {
tok, err := d.Peek()
if err != nil {
return err
}

-var fd pref.FieldDescriptor
+var fd protoreflect.FieldDescriptor
-var val pref.Value
+var val protoreflect.Value
switch tok.Kind() {
case json.Null:
d.Read()
fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number)
-val = pref.ValueOfEnum(0)
+val = protoreflect.ValueOfEnum(0)

case json.Bool:
tok, err := d.Read()

@@ -524,7 +524,7 @@ func (d decoder) unmarshalKnownValue(m pref.Message) error {
return err
}
fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number)
-val = pref.ValueOfBool(tok.Bool())
+val = protoreflect.ValueOfBool(tok.Bool())

case json.Number:
tok, err := d.Read()

@@ -550,7 +550,7 @@ func (d decoder) unmarshalKnownValue(m pref.Message) error {
return err
}
fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number)
-val = pref.ValueOfString(tok.ParsedString())
+val = protoreflect.ValueOfString(tok.ParsedString())

case json.ObjectOpen:
fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number)

@@ -591,7 +591,7 @@ const (
maxSecondsInDuration = 315576000000
)

-func (e encoder) marshalDuration(m pref.Message) error {
+func (e encoder) marshalDuration(m protoreflect.Message) error {
fds := m.Descriptor().Fields()
fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)

@@ -623,7 +623,7 @@ func (e encoder) marshalDuration(m pref.Message) error {
return nil
}

-func (d decoder) unmarshalDuration(m pref.Message) error {
+func (d decoder) unmarshalDuration(m protoreflect.Message) error {
tok, err := d.Read()
if err != nil {
return err

@@ -646,8 +646,8 @@ func (d decoder) unmarshalDuration(m pref.Message) error {
fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number)
fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number)

-m.Set(fdSeconds, pref.ValueOfInt64(secs))
+m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
-m.Set(fdNanos, pref.ValueOfInt32(nanos))
+m.Set(fdNanos, protoreflect.ValueOfInt32(nanos))
return nil
}

@@ -779,7 +779,7 @@ const (
minTimestampSeconds = -62135596800
)

-func (e encoder) marshalTimestamp(m pref.Message) error {
+func (e encoder) marshalTimestamp(m protoreflect.Message) error {
fds := m.Descriptor().Fields()
fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)

@@ -805,7 +805,7 @@ func (e encoder) marshalTimestamp(m pref.Message) error {
return nil
}

-func (d decoder) unmarshalTimestamp(m pref.Message) error {
+func (d decoder) unmarshalTimestamp(m protoreflect.Message) error {
tok, err := d.Read()
if err != nil {
return err

@@ -829,8 +829,8 @@ func (d decoder) unmarshalTimestamp(m pref.Message) error {
fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number)
fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number)

-m.Set(fdSeconds, pref.ValueOfInt64(secs))
+m.Set(fdSeconds, protoreflect.ValueOfInt64(secs))
-m.Set(fdNanos, pref.ValueOfInt32(int32(t.Nanosecond())))
+m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond())))
return nil
}

@@ -839,14 +839,14 @@ func (d decoder) unmarshalTimestamp(m pref.Message) error {
// lower-camel naming conventions. Encoding should fail if the path name would
// end up differently after a round-trip.

-func (e encoder) marshalFieldMask(m pref.Message) error {
+func (e encoder) marshalFieldMask(m protoreflect.Message) error {
fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number)
list := m.Get(fd).List()
paths := make([]string, 0, list.Len())

for i := 0; i < list.Len(); i++ {
s := list.Get(i).String()
-if !pref.FullName(s).IsValid() {
+if !protoreflect.FullName(s).IsValid() {
return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s)
}
// Return error if conversion to camelCase is not reversible.

@@ -861,7 +861,7 @@ func (e encoder) marshalFieldMask(m pref.Message) error {
return nil
}

-func (d decoder) unmarshalFieldMask(m pref.Message) error {
+func (d decoder) unmarshalFieldMask(m protoreflect.Message) error {
tok, err := d.Read()
if err != nil {
return err

@@ -880,10 +880,10 @@ func (d decoder) unmarshalFieldMask(m pref.Message) error {

for _, s0 := range paths {
s := strs.JSONSnakeCase(s0)
-if strings.Contains(s0, "_") || !pref.FullName(s).IsValid() {
+if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() {
return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0)
}
-list.Append(pref.ValueOfString(s))
+list.Append(protoreflect.ValueOfString(s))
}
return nil
}
src/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
generated
vendored
116
src/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
generated
vendored
@ -17,7 +17,7 @@ import (
|
|||||||
"google.golang.org/protobuf/internal/set"
|
"google.golang.org/protobuf/internal/set"
|
||||||
"google.golang.org/protobuf/internal/strs"
|
"google.golang.org/protobuf/internal/strs"
|
||||||
"google.golang.org/protobuf/proto"
|
"google.golang.org/protobuf/proto"
|
||||||
pref "google.golang.org/protobuf/reflect/protoreflect"
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
"google.golang.org/protobuf/reflect/protoregistry"
|
"google.golang.org/protobuf/reflect/protoregistry"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -103,7 +103,7 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// unmarshalMessage unmarshals into the given protoreflect.Message.
|
// unmarshalMessage unmarshals into the given protoreflect.Message.
|
||||||
func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
|
func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) error {
|
||||||
messageDesc := m.Descriptor()
|
messageDesc := m.Descriptor()
|
||||||
if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
|
if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
|
||||||
return errors.New("no support for proto1 MessageSets")
|
return errors.New("no support for proto1 MessageSets")
|
||||||
@ -150,24 +150,24 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Resolve the field descriptor.
|
// Resolve the field descriptor.
|
||||||
var name pref.Name
|
var name protoreflect.Name
|
||||||
var fd pref.FieldDescriptor
|
var fd protoreflect.FieldDescriptor
|
||||||
var xt pref.ExtensionType
|
var xt protoreflect.ExtensionType
|
||||||
var xtErr error
|
var xtErr error
|
||||||
var isFieldNumberName bool
|
var isFieldNumberName bool
|
||||||
|
|
||||||
switch tok.NameKind() {
|
switch tok.NameKind() {
|
||||||
case text.IdentName:
|
case text.IdentName:
|
||||||
name = pref.Name(tok.IdentName())
|
name = protoreflect.Name(tok.IdentName())
|
||||||
fd = fieldDescs.ByTextName(string(name))
|
fd = fieldDescs.ByTextName(string(name))
|
||||||
|
|
||||||
case text.TypeName:
|
case text.TypeName:
|
||||||
// Handle extensions only. This code path is not for Any.
|
// Handle extensions only. This code path is not for Any.
|
||||||
xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName()))
|
xt, xtErr = d.opts.Resolver.FindExtensionByName(protoreflect.FullName(tok.TypeName()))
|
||||||
|
|
||||||
case text.FieldNumber:
|
case text.FieldNumber:
|
||||||
isFieldNumberName = true
|
isFieldNumberName = true
|
||||||
num := pref.FieldNumber(tok.FieldNumber())
|
num := protoreflect.FieldNumber(tok.FieldNumber())
|
||||||
if !num.IsValid() {
|
if !num.IsValid() {
|
||||||
return d.newError(tok.Pos(), "invalid field number: %d", num)
|
return d.newError(tok.Pos(), "invalid field number: %d", num)
|
||||||
}
|
}
|
||||||
@ -215,7 +215,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
|
|||||||
switch {
|
switch {
|
||||||
case fd.IsList():
|
case fd.IsList():
|
||||||
kind := fd.Kind()
|
kind := fd.Kind()
|
||||||
if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() {
|
if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() {
|
||||||
return d.syntaxError(tok.Pos(), "missing field separator :")
|
return d.syntaxError(tok.Pos(), "missing field separator :")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -232,7 +232,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
|
|||||||
|
|
||||||
default:
|
default:
|
||||||
kind := fd.Kind()
|
kind := fd.Kind()
|
||||||
if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() {
|
if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() {
|
||||||
return d.syntaxError(tok.Pos(), "missing field separator :")
|
return d.syntaxError(tok.Pos(), "missing field separator :")
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -262,11 +262,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error {
|
|||||||
|
|
||||||
// unmarshalSingular unmarshals a non-repeated field value specified by the
|
// unmarshalSingular unmarshals a non-repeated field value specified by the
|
||||||
// given FieldDescriptor.
|
// given FieldDescriptor.
|
||||||
func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error {
|
func (d decoder) unmarshalSingular(fd protoreflect.FieldDescriptor, m protoreflect.Message) error {
|
||||||
var val pref.Value
|
var val protoreflect.Value
|
||||||
var err error
|
var err error
|
||||||
switch fd.Kind() {
|
switch fd.Kind() {
|
||||||
case pref.MessageKind, pref.GroupKind:
|
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||||
val = m.NewField(fd)
|
val = m.NewField(fd)
|
||||||
err = d.unmarshalMessage(val.Message(), true)
|
err = d.unmarshalMessage(val.Message(), true)
|
||||||
default:
|
default:
|
||||||
@ -280,94 +280,94 @@ func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) erro
|
|||||||
|
|
||||||
// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the
|
// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the
|
||||||
// given FieldDescriptor.
|
// given FieldDescriptor.
|
||||||
func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) {
|
func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
|
||||||
tok, err := d.Read()
|
tok, err := d.Read()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return pref.Value{}, err
|
return protoreflect.Value{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if tok.Kind() != text.Scalar {
|
if tok.Kind() != text.Scalar {
|
||||||
return pref.Value{}, d.unexpectedTokenError(tok)
|
return protoreflect.Value{}, d.unexpectedTokenError(tok)
|
||||||
}
|
}
|
||||||
|
|
||||||
kind := fd.Kind()
|
kind := fd.Kind()
|
||||||
switch kind {
|
switch kind {
|
||||||
case pref.BoolKind:
|
case protoreflect.BoolKind:
|
||||||
if b, ok := tok.Bool(); ok {
|
if b, ok := tok.Bool(); ok {
|
||||||
return pref.ValueOfBool(b), nil
|
return protoreflect.ValueOfBool(b), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
|
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
|
||||||
if n, ok := tok.Int32(); ok {
|
if n, ok := tok.Int32(); ok {
|
||||||
return pref.ValueOfInt32(n), nil
|
return protoreflect.ValueOfInt32(n), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
|
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
|
||||||
if n, ok := tok.Int64(); ok {
|
if n, ok := tok.Int64(); ok {
|
||||||
return pref.ValueOfInt64(n), nil
|
return protoreflect.ValueOfInt64(n), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
case pref.Uint32Kind, pref.Fixed32Kind:
|
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
|
||||||
if n, ok := tok.Uint32(); ok {
|
if n, ok := tok.Uint32(); ok {
|
||||||
return pref.ValueOfUint32(n), nil
|
return protoreflect.ValueOfUint32(n), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
case pref.Uint64Kind, pref.Fixed64Kind:
|
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
|
||||||
if n, ok := tok.Uint64(); ok {
|
if n, ok := tok.Uint64(); ok {
|
||||||
return pref.ValueOfUint64(n), nil
|
return protoreflect.ValueOfUint64(n), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
case pref.FloatKind:
|
case protoreflect.FloatKind:
|
||||||
if n, ok := tok.Float32(); ok {
|
if n, ok := tok.Float32(); ok {
|
||||||
return pref.ValueOfFloat32(n), nil
|
return protoreflect.ValueOfFloat32(n), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
case pref.DoubleKind:
|
case protoreflect.DoubleKind:
|
||||||
if n, ok := tok.Float64(); ok {
|
if n, ok := tok.Float64(); ok {
|
||||||
return pref.ValueOfFloat64(n), nil
|
return protoreflect.ValueOfFloat64(n), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
case pref.StringKind:
|
case protoreflect.StringKind:
|
||||||
if s, ok := tok.String(); ok {
|
if s, ok := tok.String(); ok {
|
||||||
if strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
|
if strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
|
||||||
return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8")
|
return protoreflect.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8")
|
||||||
}
|
}
|
||||||
return pref.ValueOfString(s), nil
|
return protoreflect.ValueOfString(s), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
case pref.BytesKind:
|
case protoreflect.BytesKind:
|
||||||
if b, ok := tok.String(); ok {
|
if b, ok := tok.String(); ok {
|
||||||
return pref.ValueOfBytes([]byte(b)), nil
|
return protoreflect.ValueOfBytes([]byte(b)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
case pref.EnumKind:
|
case protoreflect.EnumKind:
|
||||||
if lit, ok := tok.Enum(); ok {
|
if lit, ok := tok.Enum(); ok {
|
||||||
// Lookup EnumNumber based on name.
|
// Lookup EnumNumber based on name.
|
||||||
if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil {
|
if enumVal := fd.Enum().Values().ByName(protoreflect.Name(lit)); enumVal != nil {
|
||||||
return pref.ValueOfEnum(enumVal.Number()), nil
|
return protoreflect.ValueOfEnum(enumVal.Number()), nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if num, ok := tok.Int32(); ok {
|
if num, ok := tok.Int32(); ok {
|
||||||
return pref.ValueOfEnum(pref.EnumNumber(num)), nil
|
return protoreflect.ValueOfEnum(protoreflect.EnumNumber(num)), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
default:
|
default:
|
||||||
panic(fmt.Sprintf("invalid scalar kind %v", kind))
|
panic(fmt.Sprintf("invalid scalar kind %v", kind))
|
||||||
}
|
}
|
||||||
|
|
||||||
return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
|
return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
|
||||||
}
|
}
|
||||||
|
|
||||||
// unmarshalList unmarshals into given protoreflect.List. A list value can
|
// unmarshalList unmarshals into given protoreflect.List. A list value can
|
||||||
// either be in [] syntax or simply just a single scalar/message value.
|
// either be in [] syntax or simply just a single scalar/message value.
|
||||||
func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error {
|
func (d decoder) unmarshalList(fd protoreflect.FieldDescriptor, list protoreflect.List) error {
|
||||||
tok, err := d.Peek()
|
tok, err := d.Peek()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
switch fd.Kind() {
|
switch fd.Kind() {
|
||||||
case pref.MessageKind, pref.GroupKind:
|
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||||
switch tok.Kind() {
|
switch tok.Kind() {
|
||||||
case text.ListOpen:
|
case text.ListOpen:
|
||||||
d.Read()
|
d.Read()
|
||||||
@ -441,22 +441,22 @@ func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error {
|
|||||||
|
|
||||||
// unmarshalMap unmarshals into given protoreflect.Map. A map value is a
|
// unmarshalMap unmarshals into given protoreflect.Map. A map value is a
|
||||||
// textproto message containing {key: <kvalue>, value: <mvalue>}.
|
// textproto message containing {key: <kvalue>, value: <mvalue>}.
|
||||||
func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error {
|
func (d decoder) unmarshalMap(fd protoreflect.FieldDescriptor, mmap protoreflect.Map) error {
|
||||||
// Determine ahead whether map entry is a scalar type or a message type in
|
// Determine ahead whether map entry is a scalar type or a message type in
|
||||||
// order to call the appropriate unmarshalMapValue func inside
|
// order to call the appropriate unmarshalMapValue func inside
|
||||||
// unmarshalMapEntry.
|
// unmarshalMapEntry.
|
||||||
var unmarshalMapValue func() (pref.Value, error)
|
var unmarshalMapValue func() (protoreflect.Value, error)
|
||||||
switch fd.MapValue().Kind() {
|
switch fd.MapValue().Kind() {
|
||||||
case pref.MessageKind, pref.GroupKind:
|
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||||
unmarshalMapValue = func() (pref.Value, error) {
|
unmarshalMapValue = func() (protoreflect.Value, error) {
|
||||||
pval := mmap.NewValue()
|
pval := mmap.NewValue()
|
||||||
if err := d.unmarshalMessage(pval.Message(), true); err != nil {
|
if err := d.unmarshalMessage(pval.Message(), true); err != nil {
|
||||||
return pref.Value{}, err
|
return protoreflect.Value{}, err
|
||||||
}
|
}
|
||||||
return pval, nil
|
return pval, nil
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
unmarshalMapValue = func() (pref.Value, error) {
|
unmarshalMapValue = func() (protoreflect.Value, error) {
|
||||||
return d.unmarshalScalar(fd.MapValue())
|
return d.unmarshalScalar(fd.MapValue())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -494,9 +494,9 @@ func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error {
|
|||||||
|
|
||||||
// unmarshalMap unmarshals into given protoreflect.Map. A map value is a
|
// unmarshalMap unmarshals into given protoreflect.Map. A map value is a
|
||||||
// textproto message containing {key: <kvalue>, value: <mvalue>}.
|
// textproto message containing {key: <kvalue>, value: <mvalue>}.
|
||||||
func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error {
|
func (d decoder) unmarshalMapEntry(fd protoreflect.FieldDescriptor, mmap protoreflect.Map, unmarshalMapValue func() (protoreflect.Value, error)) error {
|
||||||
var key pref.MapKey
|
var key protoreflect.MapKey
|
||||||
var pval pref.Value
|
var pval protoreflect.Value
|
||||||
Loop:
|
Loop:
|
||||||
for {
|
for {
|
||||||
// Read field name.
|
// Read field name.
|
||||||
@ -520,7 +520,7 @@ Loop:
|
|||||||
return d.unexpectedTokenError(tok)
|
return d.unexpectedTokenError(tok)
|
||||||
}
|
}
|
||||||
|
|
||||||
switch name := pref.Name(tok.IdentName()); name {
|
switch name := protoreflect.Name(tok.IdentName()); name {
|
||||||
case genid.MapEntry_Key_field_name:
|
case genid.MapEntry_Key_field_name:
|
||||||
if !tok.HasSeparator() {
|
if !tok.HasSeparator() {
|
||||||
return d.syntaxError(tok.Pos(), "missing field separator :")
|
return d.syntaxError(tok.Pos(), "missing field separator :")
|
||||||
@ -535,7 +535,7 @@ Loop:
|
|||||||
key = val.MapKey()
|
key = val.MapKey()
|
||||||
|
|
||||||
case genid.MapEntry_Value_field_name:
|
case genid.MapEntry_Value_field_name:
|
||||||
if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) {
|
if kind := fd.MapValue().Kind(); (kind != protoreflect.MessageKind) && (kind != protoreflect.GroupKind) {
|
||||||
if !tok.HasSeparator() {
|
if !tok.HasSeparator() {
|
||||||
return d.syntaxError(tok.Pos(), "missing field separator :")
|
return d.syntaxError(tok.Pos(), "missing field separator :")
|
||||||
}
|
}
|
||||||
@ -561,7 +561,7 @@ Loop:
|
|||||||
}
|
}
|
||||||
if !pval.IsValid() {
|
if !pval.IsValid() {
|
||||||
switch fd.MapValue().Kind() {
|
switch fd.MapValue().Kind() {
|
||||||
case pref.MessageKind, pref.GroupKind:
|
case protoreflect.MessageKind, protoreflect.GroupKind:
|
||||||
// If value field is not set for message/group types, construct an
|
// If value field is not set for message/group types, construct an
|
||||||
// empty one as default.
|
// empty one as default.
|
||||||
pval = mmap.NewValue()
|
pval = mmap.NewValue()
|
||||||
@ -575,7 +575,7 @@ Loop:
|
|||||||
|
|
||||||
// unmarshalAny unmarshals an Any textproto. It can either be in expanded form
|
// unmarshalAny unmarshals an Any textproto. It can either be in expanded form
|
||||||
// or non-expanded form.
|
// or non-expanded form.
|
||||||
func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error {
|
func (d decoder) unmarshalAny(m protoreflect.Message, checkDelims bool) error {
|
||||||
var typeURL string
|
var typeURL string
|
||||||
var bValue []byte
|
var bValue []byte
|
||||||
var seenTypeUrl bool
|
var seenTypeUrl bool
|
||||||
@ -619,7 +619,7 @@ Loop:
|
|||||||
return d.syntaxError(tok.Pos(), "missing field separator :")
|
return d.syntaxError(tok.Pos(), "missing field separator :")
|
||||||
}
|
}
|
||||||
|
|
||||||
switch name := pref.Name(tok.IdentName()); name {
|
switch name := protoreflect.Name(tok.IdentName()); name {
|
||||||
case genid.Any_TypeUrl_field_name:
|
case genid.Any_TypeUrl_field_name:
|
||||||
if seenTypeUrl {
|
if seenTypeUrl {
|
||||||
return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname)
|
return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname)
|
||||||
@ -686,10 +686,10 @@ Loop:
|
|||||||
|
|
||||||
fds := m.Descriptor().Fields()
|
fds := m.Descriptor().Fields()
|
||||||
if len(typeURL) > 0 {
|
if len(typeURL) > 0 {
|
||||||
m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL))
|
m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), protoreflect.ValueOfString(typeURL))
|
||||||
}
|
}
|
||||||
if len(bValue) > 0 {
|
if len(bValue) > 0 {
|
||||||
m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue))
|
m.Set(fds.ByNumber(genid.Any_Value_field_number), protoreflect.ValueOfBytes(bValue))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
39 src/vendor/google.golang.org/protobuf/encoding/prototext/encode.go generated vendored
@@ -20,7 +20,6 @@ import (
 "google.golang.org/protobuf/internal/strs"
 "google.golang.org/protobuf/proto"
 "google.golang.org/protobuf/reflect/protoreflect"
-pref "google.golang.org/protobuf/reflect/protoreflect"
 "google.golang.org/protobuf/reflect/protoregistry"
 )

@@ -150,7 +149,7 @@ type encoder struct {
 }

 // marshalMessage marshals the given protoreflect.Message.
-func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error {
+func (e encoder) marshalMessage(m protoreflect.Message, inclDelims bool) error {
 messageDesc := m.Descriptor()
 if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) {
 return errors.New("no support for proto1 MessageSets")
@@ -190,7 +189,7 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error {
 }

 // marshalField marshals the given field with protoreflect.Value.
-func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error {
+func (e encoder) marshalField(name string, val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
 switch {
 case fd.IsList():
 return e.marshalList(name, val.List(), fd)
@@ -204,40 +203,40 @@ func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescript

 // marshalSingular marshals the given non-repeated field value. This includes
 // all scalar types, enums, messages, and groups.
-func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error {
+func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error {
 kind := fd.Kind()
 switch kind {
-case pref.BoolKind:
+case protoreflect.BoolKind:
 e.WriteBool(val.Bool())

-case pref.StringKind:
+case protoreflect.StringKind:
 s := val.String()
 if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) {
 return errors.InvalidUTF8(string(fd.FullName()))
 }
 e.WriteString(s)

-case pref.Int32Kind, pref.Int64Kind,
+case protoreflect.Int32Kind, protoreflect.Int64Kind,
-pref.Sint32Kind, pref.Sint64Kind,
+protoreflect.Sint32Kind, protoreflect.Sint64Kind,
-pref.Sfixed32Kind, pref.Sfixed64Kind:
+protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
 e.WriteInt(val.Int())

-case pref.Uint32Kind, pref.Uint64Kind,
+case protoreflect.Uint32Kind, protoreflect.Uint64Kind,
-pref.Fixed32Kind, pref.Fixed64Kind:
+protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
 e.WriteUint(val.Uint())

-case pref.FloatKind:
+case protoreflect.FloatKind:
 // Encoder.WriteFloat handles the special numbers NaN and infinites.
 e.WriteFloat(val.Float(), 32)

-case pref.DoubleKind:
+case protoreflect.DoubleKind:
 // Encoder.WriteFloat handles the special numbers NaN and infinites.
 e.WriteFloat(val.Float(), 64)

-case pref.BytesKind:
+case protoreflect.BytesKind:
 e.WriteString(string(val.Bytes()))

-case pref.EnumKind:
+case protoreflect.EnumKind:
 num := val.Enum()
 if desc := fd.Enum().Values().ByNumber(num); desc != nil {
 e.WriteLiteral(string(desc.Name()))
@@ -246,7 +245,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error
 e.WriteInt(int64(num))
 }

-case pref.MessageKind, pref.GroupKind:
+case protoreflect.MessageKind, protoreflect.GroupKind:
 return e.marshalMessage(val.Message(), true)

 default:
@@ -256,7 +255,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error
 }

 // marshalList marshals the given protoreflect.List as multiple name-value fields.
-func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error {
+func (e encoder) marshalList(name string, list protoreflect.List, fd protoreflect.FieldDescriptor) error {
 size := list.Len()
 for i := 0; i < size; i++ {
 e.WriteName(name)
@@ -268,9 +267,9 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto
 }

 // marshalMap marshals the given protoreflect.Map as multiple name-value fields.
-func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error {
+func (e encoder) marshalMap(name string, mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error {
 var err error
-order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool {
+order.RangeEntries(mmap, order.GenericKeyOrder, func(key protoreflect.MapKey, val protoreflect.Value) bool {
 e.WriteName(name)
 e.StartMessage()
 defer e.EndMessage()
@@ -334,7 +333,7 @@ func (e encoder) marshalUnknown(b []byte) {

 // marshalAny marshals the given google.protobuf.Any message in expanded form.
 // It returns true if it was able to marshal, else false.
-func (e encoder) marshalAny(any pref.Message) bool {
+func (e encoder) marshalAny(any protoreflect.Message) bool {
 // Construct the embedded message.
 fds := any.Descriptor().Fields()
 fdType := fds.ByNumber(genid.Any_TypeUrl_field_number)
4 src/vendor/google.golang.org/protobuf/encoding/protowire/wire.go generated vendored
@@ -516,6 +516,7 @@ func EncodeTag(num Number, typ Type) uint64 {
 }

 // DecodeZigZag decodes a zig-zag-encoded uint64 as an int64.
+//
 // Input: {…, 5, 3, 1, 0, 2, 4, 6, …}
 // Output: {…, -3, -2, -1, 0, +1, +2, +3, …}
 func DecodeZigZag(x uint64) int64 {
@@ -523,6 +524,7 @@ func DecodeZigZag(x uint64) int64 {
 }

 // EncodeZigZag encodes an int64 as a zig-zag-encoded uint64.
+//
 // Input: {…, -3, -2, -1, 0, +1, +2, +3, …}
 // Output: {…, 5, 3, 1, 0, 2, 4, 6, …}
 func EncodeZigZag(x int64) uint64 {
@@ -530,6 +532,7 @@ func EncodeZigZag(x int64) uint64 {
 }

 // DecodeBool decodes a uint64 as a bool.
+//
 // Input: { 0, 1, 2, …}
 // Output: {false, true, true, …}
 func DecodeBool(x uint64) bool {
@@ -537,6 +540,7 @@ func DecodeBool(x uint64) bool {
 }

 // EncodeBool encodes a bool as a uint64.
+//
 // Input: {false, true}
 // Output: { 0, 1}
 func EncodeBool(x bool) uint64 {
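The protowire hunks above only reflow doc comments around the zig-zag helpers; the Input/Output tables they document can be checked with a small standalone sketch. This is an illustration only, not part of the vendored diff: encodeZigZag and decodeZigZag below are local stand-ins (the exported functions are protowire.EncodeZigZag and protowire.DecodeZigZag), written with the standard zig-zag formulas, which I am assuming here rather than quoting from the diff.

// zigzag_sketch.go — illustrative only; names and file are hypothetical.
package main

import "fmt"

// encodeZigZag interleaves signed values onto unsigned ones so that values
// near zero stay small: 0, -1, +1, -2, +2, … -> 0, 1, 2, 3, 4, …
func encodeZigZag(x int64) uint64 {
    return uint64(x<<1) ^ uint64(x>>63)
}

// decodeZigZag inverts encodeZigZag.
func decodeZigZag(x uint64) int64 {
    return int64(x>>1) ^ int64(x)<<63>>63
}

func main() {
    // Reproduces the Input/Output mapping from the protowire doc comments:
    // -3 -> 5, -2 -> 3, -1 -> 1, 0 -> 0, +1 -> 2, +2 -> 4, +3 -> 6.
    for _, v := range []int64{-3, -2, -1, 0, +1, +2, +3} {
        e := encodeZigZag(v)
        fmt.Printf("%+d -> %d -> %+d\n", v, e, decodeZigZag(e))
    }
}

This interleaving is why the sint32/sint64 Go struct tags ("zigzag32"/"zigzag64" in the tag.go hunks further down) stay compact under varint encoding for small negative values.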
66 src/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go generated vendored
@@ -14,7 +14,7 @@ import (

 "google.golang.org/protobuf/internal/detrand"
 "google.golang.org/protobuf/internal/pragma"
-pref "google.golang.org/protobuf/reflect/protoreflect"
+"google.golang.org/protobuf/reflect/protoreflect"
 )

 type list interface {
@@ -30,17 +30,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
 if isRoot {
 var name string
 switch vs.(type) {
-case pref.Names:
+case protoreflect.Names:
 name = "Names"
-case pref.FieldNumbers:
+case protoreflect.FieldNumbers:
 name = "FieldNumbers"
-case pref.FieldRanges:
+case protoreflect.FieldRanges:
 name = "FieldRanges"
-case pref.EnumRanges:
+case protoreflect.EnumRanges:
 name = "EnumRanges"
-case pref.FileImports:
+case protoreflect.FileImports:
 name = "FileImports"
-case pref.Descriptor:
+case protoreflect.Descriptor:
 name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s"
 default:
 name = reflect.ValueOf(vs).Elem().Type().Name()
@@ -50,17 +50,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {

 var ss []string
 switch vs := vs.(type) {
-case pref.Names:
+case protoreflect.Names:
 for i := 0; i < vs.Len(); i++ {
 ss = append(ss, fmt.Sprint(vs.Get(i)))
 }
 return start + joinStrings(ss, false) + end
-case pref.FieldNumbers:
+case protoreflect.FieldNumbers:
 for i := 0; i < vs.Len(); i++ {
 ss = append(ss, fmt.Sprint(vs.Get(i)))
 }
 return start + joinStrings(ss, false) + end
-case pref.FieldRanges:
+case protoreflect.FieldRanges:
 for i := 0; i < vs.Len(); i++ {
 r := vs.Get(i)
 if r[0]+1 == r[1] {
@@ -70,7 +70,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
 }
 }
 return start + joinStrings(ss, false) + end
-case pref.EnumRanges:
+case protoreflect.EnumRanges:
 for i := 0; i < vs.Len(); i++ {
 r := vs.Get(i)
 if r[0] == r[1] {
@@ -80,7 +80,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
 }
 }
 return start + joinStrings(ss, false) + end
-case pref.FileImports:
+case protoreflect.FileImports:
 for i := 0; i < vs.Len(); i++ {
 var rs records
 rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak")
@@ -88,11 +88,11 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
 }
 return start + joinStrings(ss, allowMulti) + end
 default:
-_, isEnumValue := vs.(pref.EnumValueDescriptors)
+_, isEnumValue := vs.(protoreflect.EnumValueDescriptors)
 for i := 0; i < vs.Len(); i++ {
 m := reflect.ValueOf(vs).MethodByName("Get")
 v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface()
-ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue))
+ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue))
 }
 return start + joinStrings(ss, allowMulti && isEnumValue) + end
 }
@@ -106,20 +106,20 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
 //
 // Using a list allows us to print the accessors in a sensible order.
 var descriptorAccessors = map[reflect.Type][]string{
-reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"},
+reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"},
-reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"},
+reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"},
-reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
+reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
-reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt
+reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt
-reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"},
+reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"},
-reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"},
+reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"},
-reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"},
+reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"},
-reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"},
+reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"},
 }

-func FormatDesc(s fmt.State, r rune, t pref.Descriptor) {
+func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) {
 io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
 }
-func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
+func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
 rv := reflect.ValueOf(t)
 rt := rv.MethodByName("ProtoType").Type().In(0)

@@ -128,7 +128,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
 start = rt.Name() + "{"
 }

-_, isFile := t.(pref.FileDescriptor)
+_, isFile := t.(protoreflect.FileDescriptor)
 rs := records{allowMulti: allowMulti}
 if t.IsPlaceholder() {
 if isFile {
@@ -146,7 +146,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
 rs.Append(rv, "Name")
 }
 switch t := t.(type) {
-case pref.FieldDescriptor:
+case protoreflect.FieldDescriptor:
 for _, s := range descriptorAccessors[rt] {
 switch s {
 case "MapKey":
@@ -156,9 +156,9 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
 case "MapValue":
 if v := t.MapValue(); v != nil {
 switch v.Kind() {
-case pref.EnumKind:
+case protoreflect.EnumKind:
 rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())})
-case pref.MessageKind, pref.GroupKind:
+case protoreflect.MessageKind, protoreflect.GroupKind:
 rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())})
 default:
 rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()})
@@ -180,7 +180,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string {
 rs.Append(rv, s)
 }
 }
-case pref.OneofDescriptor:
+case protoreflect.OneofDescriptor:
 var ss []string
 fs := t.Fields()
 for i := 0; i < fs.Len(); i++ {
@@ -216,7 +216,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) {
 if !rv.IsValid() {
 panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a))
 }
-if _, ok := rv.Interface().(pref.Value); ok {
+if _, ok := rv.Interface().(protoreflect.Value); ok {
 rv = rv.MethodByName("Interface").Call(nil)[0]
 if !rv.IsNil() {
 rv = rv.Elem()
@@ -250,9 +250,9 @@ func (rs *records) Append(v reflect.Value, accessors ...string) {
 switch v := v.(type) {
 case list:
 s = formatListOpt(v, false, rs.allowMulti)
-case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor:
+case protoreflect.FieldDescriptor, protoreflect.OneofDescriptor, protoreflect.EnumValueDescriptor, protoreflect.MethodDescriptor:
-s = string(v.(pref.Descriptor).Name())
+s = string(v.(protoreflect.Descriptor).Name())
-case pref.Descriptor:
+case protoreflect.Descriptor:
 s = string(v.FullName())
 case string:
 s = strconv.Quote(v)
78 src/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go generated vendored
@@ -15,8 +15,8 @@ import (
 "strconv"

 ptext "google.golang.org/protobuf/internal/encoding/text"
-errors "google.golang.org/protobuf/internal/errors"
+"google.golang.org/protobuf/internal/errors"
-pref "google.golang.org/protobuf/reflect/protoreflect"
+"google.golang.org/protobuf/reflect/protoreflect"
 )

 // Format is the serialization format used to represent the default value.
@@ -35,56 +35,56 @@ const (

 // Unmarshal deserializes the default string s according to the given kind k.
 // When k is an enum, a list of enum value descriptors must be provided.
-func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) {
+func Unmarshal(s string, k protoreflect.Kind, evs protoreflect.EnumValueDescriptors, f Format) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) {
 switch k {
-case pref.BoolKind:
+case protoreflect.BoolKind:
 if f == GoTag {
 switch s {
 case "1":
-return pref.ValueOfBool(true), nil, nil
+return protoreflect.ValueOfBool(true), nil, nil
 case "0":
-return pref.ValueOfBool(false), nil, nil
+return protoreflect.ValueOfBool(false), nil, nil
 }
 } else {
 switch s {
 case "true":
-return pref.ValueOfBool(true), nil, nil
+return protoreflect.ValueOfBool(true), nil, nil
 case "false":
-return pref.ValueOfBool(false), nil, nil
+return protoreflect.ValueOfBool(false), nil, nil
 }
 }
-case pref.EnumKind:
+case protoreflect.EnumKind:
 if f == GoTag {
 // Go tags use the numeric form of the enum value.
 if n, err := strconv.ParseInt(s, 10, 32); err == nil {
-if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil {
+if ev := evs.ByNumber(protoreflect.EnumNumber(n)); ev != nil {
-return pref.ValueOfEnum(ev.Number()), ev, nil
+return protoreflect.ValueOfEnum(ev.Number()), ev, nil
 }
 }
 } else {
 // Descriptor default_value use the enum identifier.
-ev := evs.ByName(pref.Name(s))
+ev := evs.ByName(protoreflect.Name(s))
 if ev != nil {
-return pref.ValueOfEnum(ev.Number()), ev, nil
+return protoreflect.ValueOfEnum(ev.Number()), ev, nil
 }
 }
-case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind:
+case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
 if v, err := strconv.ParseInt(s, 10, 32); err == nil {
-return pref.ValueOfInt32(int32(v)), nil, nil
+return protoreflect.ValueOfInt32(int32(v)), nil, nil
 }
-case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
 if v, err := strconv.ParseInt(s, 10, 64); err == nil {
-return pref.ValueOfInt64(int64(v)), nil, nil
+return protoreflect.ValueOfInt64(int64(v)), nil, nil
 }
-case pref.Uint32Kind, pref.Fixed32Kind:
+case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
 if v, err := strconv.ParseUint(s, 10, 32); err == nil {
-return pref.ValueOfUint32(uint32(v)), nil, nil
+return protoreflect.ValueOfUint32(uint32(v)), nil, nil
 }
-case pref.Uint64Kind, pref.Fixed64Kind:
+case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
 if v, err := strconv.ParseUint(s, 10, 64); err == nil {
-return pref.ValueOfUint64(uint64(v)), nil, nil
+return protoreflect.ValueOfUint64(uint64(v)), nil, nil
 }
-case pref.FloatKind, pref.DoubleKind:
+case protoreflect.FloatKind, protoreflect.DoubleKind:
 var v float64
 var err error
 switch s {
@@ -98,29 +98,29 @@ func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (
 v, err = strconv.ParseFloat(s, 64)
 }
 if err == nil {
-if k == pref.FloatKind {
+if k == protoreflect.FloatKind {
-return pref.ValueOfFloat32(float32(v)), nil, nil
+return protoreflect.ValueOfFloat32(float32(v)), nil, nil
 } else {
-return pref.ValueOfFloat64(float64(v)), nil, nil
+return protoreflect.ValueOfFloat64(float64(v)), nil, nil
 }
 }
-case pref.StringKind:
+case protoreflect.StringKind:
 // String values are already unescaped and can be used as is.
-return pref.ValueOfString(s), nil, nil
+return protoreflect.ValueOfString(s), nil, nil
-case pref.BytesKind:
+case protoreflect.BytesKind:
 if b, ok := unmarshalBytes(s); ok {
-return pref.ValueOfBytes(b), nil, nil
+return protoreflect.ValueOfBytes(b), nil, nil
 }
 }
-return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s)
+return protoreflect.Value{}, nil, errors.New("could not parse value for %v: %q", k, s)
 }

 // Marshal serializes v as the default string according to the given kind k.
 // When specifying the Descriptor format for an enum kind, the associated
 // enum value descriptor must be provided.
-func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) {
+func Marshal(v protoreflect.Value, ev protoreflect.EnumValueDescriptor, k protoreflect.Kind, f Format) (string, error) {
 switch k {
-case pref.BoolKind:
+case protoreflect.BoolKind:
 if f == GoTag {
 if v.Bool() {
 return "1", nil
@@ -134,17 +134,17 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (
 return "false", nil
 }
 }
-case pref.EnumKind:
+case protoreflect.EnumKind:
 if f == GoTag {
 return strconv.FormatInt(int64(v.Enum()), 10), nil
 } else {
 return string(ev.Name()), nil
 }
-case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind:
+case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
 return strconv.FormatInt(v.Int(), 10), nil
-case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind:
+case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
 return strconv.FormatUint(v.Uint(), 10), nil
-case pref.FloatKind, pref.DoubleKind:
+case protoreflect.FloatKind, protoreflect.DoubleKind:
 f := v.Float()
 switch {
 case math.IsInf(f, -1):
@@ -154,16 +154,16 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (
 case math.IsNaN(f):
 return "nan", nil
 default:
-if k == pref.FloatKind {
+if k == protoreflect.FloatKind {
 return strconv.FormatFloat(f, 'g', -1, 32), nil
 } else {
 return strconv.FormatFloat(f, 'g', -1, 64), nil
 }
 }
-case pref.StringKind:
+case protoreflect.StringKind:
 // String values are serialized as is without any escaping.
 return v.String(), nil
-case pref.BytesKind:
+case protoreflect.BytesKind:
 if s, ok := marshalBytes(v.Bytes()); ok {
 return s, nil
 }
7 src/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go generated vendored
@@ -10,7 +10,7 @@ import (

 "google.golang.org/protobuf/encoding/protowire"
 "google.golang.org/protobuf/internal/errors"
-pref "google.golang.org/protobuf/reflect/protoreflect"
+"google.golang.org/protobuf/reflect/protoreflect"
 )

 // The MessageSet wire format is equivalent to a message defined as follows,
@@ -33,6 +33,7 @@ const (
 // ExtensionName is the field name for extensions of MessageSet.
 //
 // A valid MessageSet extension must be of the form:
+//
 // message MyMessage {
 // extend proto2.bridge.MessageSet {
 // optional MyMessage message_set_extension = 1234;
@@ -42,13 +43,13 @@ const (
 const ExtensionName = "message_set_extension"

 // IsMessageSet returns whether the message uses the MessageSet wire format.
-func IsMessageSet(md pref.MessageDescriptor) bool {
+func IsMessageSet(md protoreflect.MessageDescriptor) bool {
 xmd, ok := md.(interface{ IsMessageSet() bool })
 return ok && xmd.IsMessageSet()
 }

 // IsMessageSetExtension reports this field properly extends a MessageSet.
-func IsMessageSetExtension(fd pref.FieldDescriptor) bool {
+func IsMessageSetExtension(fd protoreflect.FieldDescriptor) bool {
 switch {
 case fd.Name() != ExtensionName:
 return false
96
src/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
generated
vendored
96
src/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
generated
vendored
@ -11,10 +11,10 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
defval "google.golang.org/protobuf/internal/encoding/defval"
|
"google.golang.org/protobuf/internal/encoding/defval"
|
||||||
fdesc "google.golang.org/protobuf/internal/filedesc"
|
"google.golang.org/protobuf/internal/filedesc"
|
||||||
"google.golang.org/protobuf/internal/strs"
|
"google.golang.org/protobuf/internal/strs"
|
||||||
pref "google.golang.org/protobuf/reflect/protoreflect"
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
)
|
)
|
||||||
|
|
||||||
var byteType = reflect.TypeOf(byte(0))
|
var byteType = reflect.TypeOf(byte(0))
|
||||||
@ -29,9 +29,9 @@ var byteType = reflect.TypeOf(byte(0))
|
|||||||
// This does not populate the Enum or Message (except for weak message).
|
// This does not populate the Enum or Message (except for weak message).
|
||||||
//
|
//
|
||||||
// This function is a best effort attempt; parsing errors are ignored.
|
// This function is a best effort attempt; parsing errors are ignored.
|
||||||
func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor {
|
func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
|
||||||
f := new(fdesc.Field)
|
f := new(filedesc.Field)
|
||||||
f.L0.ParentFile = fdesc.SurrogateProto2
|
f.L0.ParentFile = filedesc.SurrogateProto2
|
||||||
for len(tag) > 0 {
|
for len(tag) > 0 {
|
||||||
i := strings.IndexByte(tag, ',')
|
i := strings.IndexByte(tag, ',')
|
||||||
if i < 0 {
|
if i < 0 {
|
||||||
@ -39,68 +39,68 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p
|
|||||||
}
|
}
|
||||||
switch s := tag[:i]; {
|
switch s := tag[:i]; {
|
||||||
case strings.HasPrefix(s, "name="):
|
case strings.HasPrefix(s, "name="):
|
||||||
f.L0.FullName = pref.FullName(s[len("name="):])
|
f.L0.FullName = protoreflect.FullName(s[len("name="):])
|
||||||
case strings.Trim(s, "0123456789") == "":
|
case strings.Trim(s, "0123456789") == "":
|
||||||
n, _ := strconv.ParseUint(s, 10, 32)
|
n, _ := strconv.ParseUint(s, 10, 32)
|
||||||
f.L1.Number = pref.FieldNumber(n)
|
f.L1.Number = protoreflect.FieldNumber(n)
|
||||||
case s == "opt":
|
case s == "opt":
|
||||||
f.L1.Cardinality = pref.Optional
|
f.L1.Cardinality = protoreflect.Optional
|
||||||
case s == "req":
|
case s == "req":
|
||||||
f.L1.Cardinality = pref.Required
|
f.L1.Cardinality = protoreflect.Required
|
||||||
case s == "rep":
|
case s == "rep":
|
||||||
f.L1.Cardinality = pref.Repeated
|
f.L1.Cardinality = protoreflect.Repeated
|
||||||
case s == "varint":
|
case s == "varint":
|
||||||
switch goType.Kind() {
|
switch goType.Kind() {
|
||||||
case reflect.Bool:
|
case reflect.Bool:
|
||||||
f.L1.Kind = pref.BoolKind
|
f.L1.Kind = protoreflect.BoolKind
|
||||||
case reflect.Int32:
|
case reflect.Int32:
|
||||||
f.L1.Kind = pref.Int32Kind
|
f.L1.Kind = protoreflect.Int32Kind
|
||||||
case reflect.Int64:
|
case reflect.Int64:
|
||||||
f.L1.Kind = pref.Int64Kind
|
f.L1.Kind = protoreflect.Int64Kind
|
||||||
case reflect.Uint32:
|
case reflect.Uint32:
|
||||||
f.L1.Kind = pref.Uint32Kind
|
f.L1.Kind = protoreflect.Uint32Kind
|
||||||
case reflect.Uint64:
|
case reflect.Uint64:
|
||||||
f.L1.Kind = pref.Uint64Kind
|
f.L1.Kind = protoreflect.Uint64Kind
|
||||||
}
|
}
|
||||||
case s == "zigzag32":
|
case s == "zigzag32":
|
||||||
if goType.Kind() == reflect.Int32 {
|
if goType.Kind() == reflect.Int32 {
|
||||||
f.L1.Kind = pref.Sint32Kind
|
f.L1.Kind = protoreflect.Sint32Kind
|
||||||
}
|
}
|
||||||
case s == "zigzag64":
|
case s == "zigzag64":
|
||||||
if goType.Kind() == reflect.Int64 {
|
if goType.Kind() == reflect.Int64 {
|
||||||
f.L1.Kind = pref.Sint64Kind
|
f.L1.Kind = protoreflect.Sint64Kind
|
||||||
}
|
}
|
||||||
case s == "fixed32":
|
case s == "fixed32":
|
||||||
switch goType.Kind() {
|
switch goType.Kind() {
|
||||||
case reflect.Int32:
|
case reflect.Int32:
|
||||||
f.L1.Kind = pref.Sfixed32Kind
|
f.L1.Kind = protoreflect.Sfixed32Kind
|
||||||
case reflect.Uint32:
|
case reflect.Uint32:
|
||||||
f.L1.Kind = pref.Fixed32Kind
|
f.L1.Kind = protoreflect.Fixed32Kind
|
||||||
case reflect.Float32:
|
case reflect.Float32:
|
||||||
f.L1.Kind = pref.FloatKind
|
f.L1.Kind = protoreflect.FloatKind
|
||||||
}
|
}
|
||||||
case s == "fixed64":
|
case s == "fixed64":
|
||||||
switch goType.Kind() {
|
switch goType.Kind() {
|
||||||
case reflect.Int64:
|
case reflect.Int64:
|
||||||
f.L1.Kind = pref.Sfixed64Kind
|
f.L1.Kind = protoreflect.Sfixed64Kind
|
||||||
case reflect.Uint64:
|
case reflect.Uint64:
|
||||||
f.L1.Kind = pref.Fixed64Kind
|
f.L1.Kind = protoreflect.Fixed64Kind
|
||||||
case reflect.Float64:
|
case reflect.Float64:
|
||||||
f.L1.Kind = pref.DoubleKind
|
f.L1.Kind = protoreflect.DoubleKind
|
||||||
}
|
}
|
||||||
case s == "bytes":
|
case s == "bytes":
|
||||||
switch {
|
switch {
|
||||||
case goType.Kind() == reflect.String:
|
case goType.Kind() == reflect.String:
|
||||||
f.L1.Kind = pref.StringKind
|
f.L1.Kind = protoreflect.StringKind
|
||||||
case goType.Kind() == reflect.Slice && goType.Elem() == byteType:
|
case goType.Kind() == reflect.Slice && goType.Elem() == byteType:
|
||||||
f.L1.Kind = pref.BytesKind
|
f.L1.Kind = protoreflect.BytesKind
|
||||||
default:
|
default:
|
||||||
f.L1.Kind = pref.MessageKind
|
f.L1.Kind = protoreflect.MessageKind
|
||||||
}
|
}
|
||||||
case s == "group":
|
case s == "group":
|
||||||
f.L1.Kind = pref.GroupKind
|
f.L1.Kind = protoreflect.GroupKind
|
||||||
case strings.HasPrefix(s, "enum="):
|
case strings.HasPrefix(s, "enum="):
|
||||||
f.L1.Kind = pref.EnumKind
|
f.L1.Kind = protoreflect.EnumKind
|
||||||
case strings.HasPrefix(s, "json="):
|
case strings.HasPrefix(s, "json="):
|
||||||
jsonName := s[len("json="):]
|
jsonName := s[len("json="):]
|
||||||
if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) {
|
if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) {
|
||||||
@@ -111,23 +111,23 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p
             f.L1.IsPacked = true
         case strings.HasPrefix(s, "weak="):
             f.L1.IsWeak = true
-            f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):]))
+            f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):]))
         case strings.HasPrefix(s, "def="):
             // The default tag is special in that everything afterwards is the
             // default regardless of the presence of commas.
             s, i = tag[len("def="):], len(tag)
             v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag)
-            f.L1.Default = fdesc.DefaultValue(v, ev)
+            f.L1.Default = filedesc.DefaultValue(v, ev)
         case s == "proto3":
-            f.L0.ParentFile = fdesc.SurrogateProto3
+            f.L0.ParentFile = filedesc.SurrogateProto3
         }
         tag = strings.TrimPrefix(tag[i:], ",")
     }

     // The generator uses the group message name instead of the field name.
     // We obtain the real field name by lowercasing the group name.
-    if f.L1.Kind == pref.GroupKind {
-        f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName)))
+    if f.L1.Kind == protoreflect.GroupKind {
+        f.L0.FullName = protoreflect.FullName(strings.ToLower(string(f.L0.FullName)))
     }
     return f
 }
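For context on what these cases parse: the tag string handled by Unmarshal is the comma-separated `protobuf:"..."` struct tag that protoc-gen-go emits on generated message fields. A minimal sketch of what such tags look like and how they split into the entries matched above; the Example type and its tags are hand-written for illustration, not part of this commit:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// Example stands in for a protoc-gen-go generated struct; the tags follow the
// generated format but are written by hand for illustration.
type Example struct {
	ID   int64  `protobuf:"varint,1,opt,name=id,proto3"`
	Name string `protobuf:"bytes,2,opt,name=name,proto3"`
}

func main() {
	t := reflect.TypeOf(Example{})
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("protobuf")
		// Entries are comma separated: wire encoding ("varint", "bytes", ...),
		// field number, cardinality ("opt", "req", "rep"), then key=value pairs.
		fmt.Println(strings.Split(tag, ","))
	}
}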
@@ -140,38 +140,38 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p
 // Depending on the context on how Marshal is called, there are different ways
 // through which that information is determined. As such it is the caller's
 // responsibility to provide a function to obtain that information.
-func Marshal(fd pref.FieldDescriptor, enumName string) string {
+func Marshal(fd protoreflect.FieldDescriptor, enumName string) string {
     var tag []string
     switch fd.Kind() {
-    case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind:
+    case protoreflect.BoolKind, protoreflect.EnumKind, protoreflect.Int32Kind, protoreflect.Uint32Kind, protoreflect.Int64Kind, protoreflect.Uint64Kind:
         tag = append(tag, "varint")
-    case pref.Sint32Kind:
+    case protoreflect.Sint32Kind:
         tag = append(tag, "zigzag32")
-    case pref.Sint64Kind:
+    case protoreflect.Sint64Kind:
         tag = append(tag, "zigzag64")
-    case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind:
+    case protoreflect.Sfixed32Kind, protoreflect.Fixed32Kind, protoreflect.FloatKind:
         tag = append(tag, "fixed32")
-    case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind:
+    case protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind, protoreflect.DoubleKind:
         tag = append(tag, "fixed64")
-    case pref.StringKind, pref.BytesKind, pref.MessageKind:
+    case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind:
         tag = append(tag, "bytes")
-    case pref.GroupKind:
+    case protoreflect.GroupKind:
         tag = append(tag, "group")
     }
     tag = append(tag, strconv.Itoa(int(fd.Number())))
     switch fd.Cardinality() {
-    case pref.Optional:
+    case protoreflect.Optional:
         tag = append(tag, "opt")
-    case pref.Required:
+    case protoreflect.Required:
         tag = append(tag, "req")
-    case pref.Repeated:
+    case protoreflect.Repeated:
         tag = append(tag, "rep")
     }
     if fd.IsPacked() {
         tag = append(tag, "packed")
     }
     name := string(fd.Name())
-    if fd.Kind() == pref.GroupKind {
+    if fd.Kind() == protoreflect.GroupKind {
         // The name of the FieldDescriptor for a group field is
         // lowercased. To find the original capitalization, we
         // look in the field's MessageType.
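On the Marshal side, the Kind, Number, and Cardinality it reads come from a protoreflect.FieldDescriptor, the package this diff now references by its full name. A small sketch of where those inputs come from, using the well-known durationpb type purely for illustration and mirroring only a slice of the mapping above:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Walk the field descriptors of a generated message; Marshal derives the
	// tag entries from exactly these properties (Kind, Number, Cardinality).
	fields := (&durationpb.Duration{}).ProtoReflect().Descriptor().Fields()
	for i := 0; i < fields.Len(); i++ {
		fd := fields.Get(i)
		wire := "bytes"
		switch fd.Kind() {
		case protoreflect.BoolKind, protoreflect.Int32Kind, protoreflect.Int64Kind,
			protoreflect.Uint32Kind, protoreflect.Uint64Kind, protoreflect.EnumKind:
			wire = "varint"
		}
		fmt.Printf("%s: kind=%v number=%d cardinality=%v wire=%s\n",
			fd.Name(), fd.Kind(), fd.Number(), fd.Cardinality(), wire)
	}
}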
@@ -189,10 +189,10 @@ func Marshal(fd pref.FieldDescriptor, enumName string) string {
     // The previous implementation does not tag extension fields as proto3,
     // even when the field is defined in a proto3 file. Match that behavior
     // for consistency.
-    if fd.Syntax() == pref.Proto3 && !fd.IsExtension() {
+    if fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension() {
         tag = append(tag, "proto3")
     }
-    if fd.Kind() == pref.EnumKind && enumName != "" {
+    if fd.Kind() == protoreflect.EnumKind && enumName != "" {
         tag = append(tag, "enum="+enumName)
     }
     if fd.ContainingOneof() != nil {
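Taken together, the entries appended in these hunks assemble into tag strings like the hypothetical example below; the field name, number, and enum type are made up for illustration:

// Hypothetical output of Marshal for an optional proto3 enum field
// named "status" with field number 3; values are illustrative only.
const exampleEnumTag = "varint,3,opt,name=status,proto3,enum=example.v1.Status"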
Some files were not shown because too many files have changed in this diff.