Browse Source

feature: introduce Prometheus (#976)

pull/2095/head
Gautier DI FOLCO 6 months ago
parent
commit
5c2ceccee6
No account linked to committer's email address
100 changed files with 22522 additions and 2 deletions
  1. 5
    0
      CHANGELOG.md
  2. 8
    0
      default.yaml
  3. 11
    2
      go.mod
  4. 30
    0
      go.sum
  5. 7
    0
      irc/config.go
  6. 30
    0
      irc/server.go
  7. 20
    0
      vendor/github.com/beorn7/perks/LICENSE
  8. 2388
    0
      vendor/github.com/beorn7/perks/quantile/exampledata.txt
  9. 316
    0
      vendor/github.com/beorn7/perks/quantile/stream.go
  10. 22
    0
      vendor/github.com/cespare/xxhash/v2/LICENSE.txt
  11. 72
    0
      vendor/github.com/cespare/xxhash/v2/README.md
  12. 10
    0
      vendor/github.com/cespare/xxhash/v2/testall.sh
  13. 228
    0
      vendor/github.com/cespare/xxhash/v2/xxhash.go
  14. 209
    0
      vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
  15. 183
    0
      vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
  16. 15
    0
      vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
  17. 76
    0
      vendor/github.com/cespare/xxhash/v2/xxhash_other.go
  18. 16
    0
      vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
  19. 58
    0
      vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
  20. 3
    0
      vendor/github.com/golang/protobuf/AUTHORS
  21. 3
    0
      vendor/github.com/golang/protobuf/CONTRIBUTORS
  22. 28
    0
      vendor/github.com/golang/protobuf/LICENSE
  23. 324
    0
      vendor/github.com/golang/protobuf/proto/buffer.go
  24. 63
    0
      vendor/github.com/golang/protobuf/proto/defaults.go
  25. 113
    0
      vendor/github.com/golang/protobuf/proto/deprecated.go
  26. 58
    0
      vendor/github.com/golang/protobuf/proto/discard.go
  27. 356
    0
      vendor/github.com/golang/protobuf/proto/extensions.go
  28. 306
    0
      vendor/github.com/golang/protobuf/proto/properties.go
  29. 167
    0
      vendor/github.com/golang/protobuf/proto/proto.go
  30. 317
    0
      vendor/github.com/golang/protobuf/proto/registry.go
  31. 801
    0
      vendor/github.com/golang/protobuf/proto/text_decode.go
  32. 560
    0
      vendor/github.com/golang/protobuf/proto/text_encode.go
  33. 78
    0
      vendor/github.com/golang/protobuf/proto/wire.go
  34. 34
    0
      vendor/github.com/golang/protobuf/proto/wrappers.go
  35. 201
    0
      vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
  36. 1
    0
      vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
  37. 1
    0
      vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
  38. 7
    0
      vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
  39. 75
    0
      vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
  40. 16
    0
      vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
  41. 46
    0
      vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
  42. 201
    0
      vendor/github.com/prometheus/client_golang/LICENSE
  43. 23
    0
      vendor/github.com/prometheus/client_golang/NOTICE
  44. 1
    0
      vendor/github.com/prometheus/client_golang/prometheus/.gitignore
  45. 1
    0
      vendor/github.com/prometheus/client_golang/prometheus/README.md
  46. 38
    0
      vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
  47. 128
    0
      vendor/github.com/prometheus/client_golang/prometheus/collector.go
  48. 358
    0
      vendor/github.com/prometheus/client_golang/prometheus/counter.go
  49. 207
    0
      vendor/github.com/prometheus/client_golang/prometheus/desc.go
  50. 210
    0
      vendor/github.com/prometheus/client_golang/prometheus/doc.go
  51. 86
    0
      vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
  52. 42
    0
      vendor/github.com/prometheus/client_golang/prometheus/fnv.go
  53. 311
    0
      vendor/github.com/prometheus/client_golang/prometheus/gauge.go
  54. 26
    0
      vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
  55. 23
    0
      vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
  56. 281
    0
      vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
  57. 122
    0
      vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
  58. 567
    0
      vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
  59. 1531
    0
      vendor/github.com/prometheus/client_golang/prometheus/histogram.go
  60. 60
    0
      vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
  61. 654
    0
      vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
  62. 32
    0
      vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
  63. 142
    0
      vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
  64. 101
    0
      vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
  65. 186
    0
      vendor/github.com/prometheus/client_golang/prometheus/labels.go
  66. 257
    0
      vendor/github.com/prometheus/client_golang/prometheus/metric.go
  67. 25
    0
      vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
  68. 22
    0
      vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
  69. 64
    0
      vendor/github.com/prometheus/client_golang/prometheus/observer.go
  70. 164
    0
      vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
  71. 26
    0
      vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go
  72. 66
    0
      vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
  73. 116
    0
      vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
  74. 374
    0
      vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
  75. 408
    0
      vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
  76. 249
    0
      vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
  77. 576
    0
      vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
  78. 84
    0
      vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
  79. 1075
    0
      vendor/github.com/prometheus/client_golang/prometheus/registry.go
  80. 785
    0
      vendor/github.com/prometheus/client_golang/prometheus/summary.go
  81. 81
    0
      vendor/github.com/prometheus/client_golang/prometheus/timer.go
  82. 42
    0
      vendor/github.com/prometheus/client_golang/prometheus/untyped.go
  83. 274
    0
      vendor/github.com/prometheus/client_golang/prometheus/value.go
  84. 709
    0
      vendor/github.com/prometheus/client_golang/prometheus/vec.go
  85. 23
    0
      vendor/github.com/prometheus/client_golang/prometheus/vnext.go
  86. 214
    0
      vendor/github.com/prometheus/client_golang/prometheus/wrap.go
  87. 201
    0
      vendor/github.com/prometheus/client_model/LICENSE
  88. 5
    0
      vendor/github.com/prometheus/client_model/NOTICE
  89. 1373
    0
      vendor/github.com/prometheus/client_model/go/metrics.pb.go
  90. 201
    0
      vendor/github.com/prometheus/common/LICENSE
  91. 5
    0
      vendor/github.com/prometheus/common/NOTICE
  92. 428
    0
      vendor/github.com/prometheus/common/expfmt/decode.go
  93. 165
    0
      vendor/github.com/prometheus/common/expfmt/encode.go
  94. 43
    0
      vendor/github.com/prometheus/common/expfmt/expfmt.go
  95. 37
    0
      vendor/github.com/prometheus/common/expfmt/fuzz.go
  96. 527
    0
      vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
  97. 464
    0
      vendor/github.com/prometheus/common/expfmt/text_create.go
  98. 779
    0
      vendor/github.com/prometheus/common/expfmt/text_parse.go
  99. 67
    0
      vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
  100. 0
    0
      vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go

+ 5
- 0
CHANGELOG.md View File

@@ -1,6 +1,11 @@
1 1
 # Changelog
2 2
 All notable changes to Ergo will be documented in this file.
3 3
 
4
+## [2.13.0] - TODO
5
+
6
+### Config changes
7
+* Add `prometheus` configuration section for exporting metrics
8
+
4 9
 ## [2.12.0] - 2023-10-10
5 10
 
6 11
 We're pleased to be publishing v2.12.0, a new stable release. This is another bugfix release aimed at improving client compatibility and keeping up with the IRCv3 specification process.

+ 8
- 0
default.yaml View File

@@ -877,6 +877,14 @@ fakelag:
877 877
         "MONITOR":     1
878 878
         "WHO":         4
879 879
 
880
+# prometheus: exports metrics
881
+prometheus:
882
+    # whether to enable prometheus
883
+    enabled: true
884
+
885
+    # listen address
886
+    listen: "localhost:2112"
887
+
880 888
 # the roleplay commands are semi-standardized extensions to IRC that allow
881 889
 # sending and receiving messages from pseudo-nicknames. this can be used either
882 890
 # for actual roleplaying, or for bridging IRC with other protocols.

+ 11
- 2
go.mod View File

@@ -23,11 +23,19 @@ require (
23 23
 	github.com/xdg-go/scram v1.0.2
24 24
 	golang.org/x/crypto v0.5.0
25 25
 	golang.org/x/term v0.7.0
26
-	golang.org/x/text v0.6.0
26
+	golang.org/x/text v0.9.0
27 27
 	gopkg.in/yaml.v2 v2.4.0
28 28
 )
29 29
 
30 30
 require (
31
+	github.com/beorn7/perks v1.0.1 // indirect
32
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
33
+	github.com/golang/protobuf v1.5.3 // indirect
34
+	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
35
+	github.com/prometheus/client_golang v1.17.0 // indirect
36
+	github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
37
+	github.com/prometheus/common v0.44.0 // indirect
38
+	github.com/prometheus/procfs v0.11.1 // indirect
31 39
 	github.com/tidwall/btree v1.4.2 // indirect
32 40
 	github.com/tidwall/gjson v1.14.3 // indirect
33 41
 	github.com/tidwall/grect v0.1.4 // indirect
@@ -36,7 +44,8 @@ require (
36 44
 	github.com/tidwall/rtred v0.1.2 // indirect
37 45
 	github.com/tidwall/tinyqueue v0.1.1 // indirect
38 46
 	github.com/xdg-go/pbkdf2 v1.0.0 // indirect
39
-	golang.org/x/sys v0.7.0 // indirect
47
+	golang.org/x/sys v0.11.0 // indirect
48
+	google.golang.org/protobuf v1.31.0 // indirect
40 49
 )
41 50
 
42 51
 replace github.com/gorilla/websocket => github.com/ergochat/websocket v1.4.2-oragono1

+ 30
- 0
go.sum View File

@@ -2,8 +2,13 @@ code.cloudfoundry.org/bytefmt v0.0.0-20200131002437-cf55d5288a48 h1:/EMHruHCFXR9
2 2
 code.cloudfoundry.org/bytefmt v0.0.0-20200131002437-cf55d5288a48/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc=
3 3
 github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962 h1:KeNholpO2xKjgaaSyd+DyQRrsQjhbSeS7qe4nEw8aQw=
4 4
 github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962/go.mod h1:kC29dT1vFpj7py2OvG1khBdQpo3kInWP+6QipLbdngo=
5
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
6
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
7
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
8
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
5 9
 github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
6 10
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
11
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
7 12
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
8 13
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
9 14
 github.com/ergochat/confusables v0.0.0-20201108231250-4ab98ab61fb1 h1:WLHTOodthVyv5NvYLIvWl112kSFv5IInKKrRN2qpons=
@@ -28,8 +33,14 @@ github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14j
28 33
 github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
29 34
 github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
30 35
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
36
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
37
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
38
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
39
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
31 40
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
32 41
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
42
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
43
+github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
33 44
 github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd h1:+iAPaTbi1gZpcpDwe/BW1fx7Xoesv69hLNGPheoyhBs=
34 45
 github.com/okzk/sdnotify v0.0.0-20180710141335-d9becc38acbd/go.mod h1:4soZNh0zW0LtYGdQ416i0jO0EIqMGcbtaspRS4BDvRQ=
35 46
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -40,6 +51,14 @@ github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg=
40 51
 github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
41 52
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
42 53
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
54
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
55
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
56
+github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
57
+github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
58
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
59
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
60
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
61
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
43 62
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
44 63
 github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
45 64
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -72,20 +91,31 @@ golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
72 91
 golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
73 92
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
74 93
 golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
94
+golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
75 95
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
96
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
76 97
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
77 98
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
78 99
 golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
79 100
 golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
101
+golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
102
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
80 103
 golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
81 104
 golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
82 105
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
83 106
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
84 107
 golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
85 108
 golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
109
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
110
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
86 111
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
87 112
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
88 113
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
114
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
115
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
116
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
117
+google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
118
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
89 119
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
90 120
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
91 121
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=

+ 7
- 0
irc/config.go View File

@@ -536,6 +536,11 @@ type TorListenersConfig struct {
536 536
 	MaxConnectionsPerDuration int           `yaml:"max-connections-per-duration"`
537 537
 }
538 538
 
539
+type PrometheusConfig struct {
540
+	Enabled bool
541
+	Listen  string
542
+}
543
+
539 544
 // Config defines the overall configuration.
540 545
 type Config struct {
541 546
 	AllowEnvironmentOverrides bool `yaml:"allow-environment-overrides"`
@@ -667,6 +672,8 @@ type Config struct {
667 672
 
668 673
 	Fakelag FakelagConfig
669 674
 
675
+	Prometheus PrometheusConfig
676
+
670 677
 	History struct {
671 678
 		Enabled          bool
672 679
 		ChannelLength    int              `yaml:"channel-length"`

+ 30
- 0
irc/server.go View File

@@ -22,6 +22,7 @@ import (
22 22
 
23 23
 	"github.com/ergochat/irc-go/ircfmt"
24 24
 	"github.com/okzk/sdnotify"
25
+	"github.com/prometheus/client_golang/prometheus/promhttp"
25 26
 	"github.com/tidwall/buntdb"
26 27
 
27 28
 	"github.com/ergochat/ergo/irc/bunt"
@@ -84,6 +85,7 @@ type Server struct {
84 85
 	rehashMutex       sync.Mutex // tier 4
85 86
 	rehashSignal      chan os.Signal
86 87
 	pprofServer       *http.Server
88
+	prometheusServer  *http.Server
87 89
 	exitSignals       chan os.Signal
88 90
 	tracebackSignal   chan os.Signal
89 91
 	snomasks          SnoManager
@@ -780,6 +782,7 @@ func (server *Server) applyConfig(config *Config) (err error) {
780 782
 
781 783
 	server.setupPprofListener(config)
782 784
 
785
+	server.setupPrometheusListener(config)
783 786
 	// set RPL_ISUPPORT
784 787
 	var newISupportReplies [][]string
785 788
 	if oldConfig != nil {
@@ -842,6 +845,33 @@ func (server *Server) setupPprofListener(config *Config) {
842 845
 	}
843 846
 }
844 847
 
848
+func (server *Server) setupPrometheusListener(config *Config) {
849
+	promConfig := config.Prometheus
850
+	if !promConfig.Enabled {
851
+		return
852
+	}
853
+
854
+	listen := promConfig.Listen
855
+	if listen == "" {
856
+	server.logger.Error("server", "Prometheus listener failed", "Prometheus is enabled, but listen is not specified")
857
+		return
858
+	}
859
+
860
+	promHandler := http.NewServeMux()
861
+	promHandler.Handle("/metrics", promhttp.Handler())
862
+	ps := http.Server{
863
+		Addr:    listen,
864
+		Handler: promHandler,
865
+	}
866
+	go func() {
867
+		if err := ps.ListenAndServe(); err != nil {
868
+			server.logger.Error("server", "Prometheus listener failed", err.Error())
869
+		}
870
+	}()
871
+	server.prometheusServer = &ps
872
+	server.logger.Info("server", "Started Prometheus listener", server.prometheusServer.Addr)
873
+}
874
+
845 875
 func (server *Server) loadDatastore(config *Config) error {
846 876
 	// open the datastore and load server state for which it (rather than config)
847 877
 	// is the source of truth

+ 20
- 0
vendor/github.com/beorn7/perks/LICENSE View File

@@ -0,0 +1,20 @@
1
+Copyright (C) 2013 Blake Mizerany
2
+
3
+Permission is hereby granted, free of charge, to any person obtaining
4
+a copy of this software and associated documentation files (the
5
+"Software"), to deal in the Software without restriction, including
6
+without limitation the rights to use, copy, modify, merge, publish,
7
+distribute, sublicense, and/or sell copies of the Software, and to
8
+permit persons to whom the Software is furnished to do so, subject to
9
+the following conditions:
10
+
11
+The above copyright notice and this permission notice shall be
12
+included in all copies or substantial portions of the Software.
13
+
14
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 2388
- 0
vendor/github.com/beorn7/perks/quantile/exampledata.txt
File diff suppressed because it is too large
View File


+ 316
- 0
vendor/github.com/beorn7/perks/quantile/stream.go View File

@@ -0,0 +1,316 @@
1
+// Package quantile computes approximate quantiles over an unbounded data
2
+// stream within low memory and CPU bounds.
3
+//
4
+// A small amount of accuracy is traded to achieve the above properties.
5
+//
6
+// Multiple streams can be merged before calling Query to generate a single set
7
+// of results. This is meaningful when the streams represent the same type of
8
+// data. See Merge and Samples.
9
+//
10
+// For more detailed information about the algorithm used, see:
11
+//
12
+// Effective Computation of Biased Quantiles over Data Streams
13
+//
14
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
15
+package quantile
16
+
17
+import (
18
+	"math"
19
+	"sort"
20
+)
21
+
22
+// Sample holds an observed value and meta information for compression. JSON
23
+// tags have been added for convenience.
24
+type Sample struct {
25
+	Value float64 `json:",string"`
26
+	Width float64 `json:",string"`
27
+	Delta float64 `json:",string"`
28
+}
29
+
30
+// Samples represents a slice of samples. It implements sort.Interface.
31
+type Samples []Sample
32
+
33
+func (a Samples) Len() int           { return len(a) }
34
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
35
+func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
36
+
37
+type invariant func(s *stream, r float64) float64
38
+
39
+// NewLowBiased returns an initialized Stream for low-biased quantiles
40
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
41
+// error guarantees can still be given even for the lower ranks of the data
42
+// distribution.
43
+//
44
+// The provided epsilon is a relative error, i.e. the true quantile of a value
45
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
46
+//
47
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
48
+// properties.
49
+func NewLowBiased(epsilon float64) *Stream {
50
+	ƒ := func(s *stream, r float64) float64 {
51
+		return 2 * epsilon * r
52
+	}
53
+	return newStream(ƒ)
54
+}
55
+
56
+// NewHighBiased returns an initialized Stream for high-biased quantiles
57
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
58
+// error guarantees can still be given even for the higher ranks of the data
59
+// distribution.
60
+//
61
+// The provided epsilon is a relative error, i.e. the true quantile of a value
62
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
63
+//
64
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
65
+// properties.
66
+func NewHighBiased(epsilon float64) *Stream {
67
+	ƒ := func(s *stream, r float64) float64 {
68
+		return 2 * epsilon * (s.n - r)
69
+	}
70
+	return newStream(ƒ)
71
+}
72
+
73
+// NewTargeted returns an initialized Stream concerned with a particular set of
74
+// quantile values that are supplied a priori. Knowing these a priori reduces
75
+// space and computation time. The targets map maps the desired quantiles to
76
+// their absolute errors, i.e. the true quantile of a value returned by a query
77
+// is guaranteed to be within (Quantile±Epsilon).
78
+//
79
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
80
+func NewTargeted(targetMap map[float64]float64) *Stream {
81
+	// Convert map to slice to avoid slow iterations on a map.
82
+	// ƒ is called on the hot path, so converting the map to a slice
83
+	// beforehand results in significant CPU savings.
84
+	targets := targetMapToSlice(targetMap)
85
+
86
+	ƒ := func(s *stream, r float64) float64 {
87
+		var m = math.MaxFloat64
88
+		var f float64
89
+		for _, t := range targets {
90
+			if t.quantile*s.n <= r {
91
+				f = (2 * t.epsilon * r) / t.quantile
92
+			} else {
93
+				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
94
+			}
95
+			if f < m {
96
+				m = f
97
+			}
98
+		}
99
+		return m
100
+	}
101
+	return newStream(ƒ)
102
+}
103
+
104
+type target struct {
105
+	quantile float64
106
+	epsilon  float64
107
+}
108
+
109
+func targetMapToSlice(targetMap map[float64]float64) []target {
110
+	targets := make([]target, 0, len(targetMap))
111
+
112
+	for quantile, epsilon := range targetMap {
113
+		t := target{
114
+			quantile: quantile,
115
+			epsilon:  epsilon,
116
+		}
117
+		targets = append(targets, t)
118
+	}
119
+
120
+	return targets
121
+}
122
+
123
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
124
+// design. Take care when using across multiple goroutines.
125
+type Stream struct {
126
+	*stream
127
+	b      Samples
128
+	sorted bool
129
+}
130
+
131
+func newStream(ƒ invariant) *Stream {
132
+	x := &stream{ƒ: ƒ}
133
+	return &Stream{x, make(Samples, 0, 500), true}
134
+}
135
+
136
+// Insert inserts v into the stream.
137
+func (s *Stream) Insert(v float64) {
138
+	s.insert(Sample{Value: v, Width: 1})
139
+}
140
+
141
+func (s *Stream) insert(sample Sample) {
142
+	s.b = append(s.b, sample)
143
+	s.sorted = false
144
+	if len(s.b) == cap(s.b) {
145
+		s.flush()
146
+	}
147
+}
148
+
149
+// Query returns the computed qth percentiles value. If s was created with
150
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
151
+// will return an unspecified result.
152
+func (s *Stream) Query(q float64) float64 {
153
+	if !s.flushed() {
154
+		// Fast path when there hasn't been enough data for a flush;
155
+		// this also yields better accuracy for small sets of data.
156
+		l := len(s.b)
157
+		if l == 0 {
158
+			return 0
159
+		}
160
+		i := int(math.Ceil(float64(l) * q))
161
+		if i > 0 {
162
+			i -= 1
163
+		}
164
+		s.maybeSort()
165
+		return s.b[i].Value
166
+	}
167
+	s.flush()
168
+	return s.stream.query(q)
169
+}
170
+
171
+// Merge merges samples into the underlying streams samples. This is handy when
172
+// merging multiple streams from separate threads, database shards, etc.
173
+//
174
+// ATTENTION: This method is broken and does not yield correct results. The
175
+// underlying algorithm is not capable of merging streams correctly.
176
+func (s *Stream) Merge(samples Samples) {
177
+	sort.Sort(samples)
178
+	s.stream.merge(samples)
179
+}
180
+
181
+// Reset reinitializes and clears the list reusing the samples buffer memory.
182
+func (s *Stream) Reset() {
183
+	s.stream.reset()
184
+	s.b = s.b[:0]
185
+}
186
+
187
+// Samples returns stream samples held by s.
188
+func (s *Stream) Samples() Samples {
189
+	if !s.flushed() {
190
+		return s.b
191
+	}
192
+	s.flush()
193
+	return s.stream.samples()
194
+}
195
+
196
+// Count returns the total number of samples observed in the stream
197
+// since initialization.
198
+func (s *Stream) Count() int {
199
+	return len(s.b) + s.stream.count()
200
+}
201
+
202
+func (s *Stream) flush() {
203
+	s.maybeSort()
204
+	s.stream.merge(s.b)
205
+	s.b = s.b[:0]
206
+}
207
+
208
+func (s *Stream) maybeSort() {
209
+	if !s.sorted {
210
+		s.sorted = true
211
+		sort.Sort(s.b)
212
+	}
213
+}
214
+
215
+func (s *Stream) flushed() bool {
216
+	return len(s.stream.l) > 0
217
+}
218
+
219
+type stream struct {
220
+	n float64
221
+	l []Sample
222
+	ƒ invariant
223
+}
224
+
225
+func (s *stream) reset() {
226
+	s.l = s.l[:0]
227
+	s.n = 0
228
+}
229
+
230
+func (s *stream) insert(v float64) {
231
+	s.merge(Samples{{v, 1, 0}})
232
+}
233
+
234
+func (s *stream) merge(samples Samples) {
235
+	// TODO(beorn7): This tries to merge not only individual samples, but
236
+	// whole summaries. The paper doesn't mention merging summaries at
237
+	// all. Unittests show that the merging is inaccurate. Find out how to
238
+	// do merges properly.
239
+	var r float64
240
+	i := 0
241
+	for _, sample := range samples {
242
+		for ; i < len(s.l); i++ {
243
+			c := s.l[i]
244
+			if c.Value > sample.Value {
245
+				// Insert at position i.
246
+				s.l = append(s.l, Sample{})
247
+				copy(s.l[i+1:], s.l[i:])
248
+				s.l[i] = Sample{
249
+					sample.Value,
250
+					sample.Width,
251
+					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
252
+					// TODO(beorn7): How to calculate delta correctly?
253
+				}
254
+				i++
255
+				goto inserted
256
+			}
257
+			r += c.Width
258
+		}
259
+		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
260
+		i++
261
+	inserted:
262
+		s.n += sample.Width
263
+		r += sample.Width
264
+	}
265
+	s.compress()
266
+}
267
+
268
+func (s *stream) count() int {
269
+	return int(s.n)
270
+}
271
+
272
+func (s *stream) query(q float64) float64 {
273
+	t := math.Ceil(q * s.n)
274
+	t += math.Ceil(s.ƒ(s, t) / 2)
275
+	p := s.l[0]
276
+	var r float64
277
+	for _, c := range s.l[1:] {
278
+		r += p.Width
279
+		if r+c.Width+c.Delta > t {
280
+			return p.Value
281
+		}
282
+		p = c
283
+	}
284
+	return p.Value
285
+}
286
+
287
+func (s *stream) compress() {
288
+	if len(s.l) < 2 {
289
+		return
290
+	}
291
+	x := s.l[len(s.l)-1]
292
+	xi := len(s.l) - 1
293
+	r := s.n - 1 - x.Width
294
+
295
+	for i := len(s.l) - 2; i >= 0; i-- {
296
+		c := s.l[i]
297
+		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
298
+			x.Width += c.Width
299
+			s.l[xi] = x
300
+			// Remove element at i.
301
+			copy(s.l[i:], s.l[i+1:])
302
+			s.l = s.l[:len(s.l)-1]
303
+			xi -= 1
304
+		} else {
305
+			x = c
306
+			xi = i
307
+		}
308
+		r -= c.Width
309
+	}
310
+}
311
+
312
+func (s *stream) samples() Samples {
313
+	samples := make(Samples, len(s.l))
314
+	copy(samples, s.l)
315
+	return samples
316
+}

+ 22
- 0
vendor/github.com/cespare/xxhash/v2/LICENSE.txt View File

@@ -0,0 +1,22 @@
1
+Copyright (c) 2016 Caleb Spare
2
+
3
+MIT License
4
+
5
+Permission is hereby granted, free of charge, to any person obtaining
6
+a copy of this software and associated documentation files (the
7
+"Software"), to deal in the Software without restriction, including
8
+without limitation the rights to use, copy, modify, merge, publish,
9
+distribute, sublicense, and/or sell copies of the Software, and to
10
+permit persons to whom the Software is furnished to do so, subject to
11
+the following conditions:
12
+
13
+The above copyright notice and this permission notice shall be
14
+included in all copies or substantial portions of the Software.
15
+
16
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 72
- 0
vendor/github.com/cespare/xxhash/v2/README.md View File

@@ -0,0 +1,72 @@
1
+# xxhash
2
+
3
+[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
4
+[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
5
+
6
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
7
+high-quality hashing algorithm that is much faster than anything in the Go
8
+standard library.
9
+
10
+This package provides a straightforward API:
11
+
12
+```
13
+func Sum64(b []byte) uint64
14
+func Sum64String(s string) uint64
15
+type Digest struct{ ... }
16
+    func New() *Digest
17
+```
18
+
19
+The `Digest` type implements hash.Hash64. Its key methods are:
20
+
21
+```
22
+func (*Digest) Write([]byte) (int, error)
23
+func (*Digest) WriteString(string) (int, error)
24
+func (*Digest) Sum64() uint64
25
+```
26
+
27
+The package is written with optimized pure Go and also contains even faster
28
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
29
+opts into using the Go code even on those architectures.
30
+
31
+[xxHash]: http://cyan4973.github.io/xxHash/
32
+
33
+## Compatibility
34
+
35
+This package is in a module and the latest code is in version 2 of the module.
36
+You need a version of Go with at least "minimal module compatibility" to use
37
+github.com/cespare/xxhash/v2:
38
+
39
+* 1.9.7+ for Go 1.9
40
+* 1.10.3+ for Go 1.10
41
+* Go 1.11 or later
42
+
43
+I recommend using the latest release of Go.
44
+
45
+## Benchmarks
46
+
47
+Here are some quick benchmarks comparing the pure-Go and assembly
48
+implementations of Sum64.
49
+
50
+| input size | purego    | asm       |
51
+| ---------- | --------- | --------- |
52
+| 4 B        |  1.3 GB/s |  1.2 GB/s |
53
+| 16 B       |  2.9 GB/s |  3.5 GB/s |
54
+| 100 B      |  6.9 GB/s |  8.1 GB/s |
55
+| 4 KB       | 11.7 GB/s | 16.7 GB/s |
56
+| 10 MB      | 12.0 GB/s | 17.3 GB/s |
57
+
58
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
59
+CPU using the following commands under Go 1.19.2:
60
+
61
+```
62
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
63
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
64
+```
65
+
66
+## Projects using this package
67
+
68
+- [InfluxDB](https://github.com/influxdata/influxdb)
69
+- [Prometheus](https://github.com/prometheus/prometheus)
70
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
71
+- [FreeCache](https://github.com/coocood/freecache)
72
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)

+ 10
- 0
vendor/github.com/cespare/xxhash/v2/testall.sh View File

@@ -0,0 +1,10 @@
1
+#!/bin/bash
2
+set -eu -o pipefail
3
+
4
+# Small convenience script for running the tests with various combinations of
5
+# arch/tags. This assumes we're running on amd64 and have qemu available.
6
+
7
+go test ./...
8
+go test -tags purego ./...
9
+GOARCH=arm64 go test
10
+GOARCH=arm64 go test -tags purego

+ 228
- 0
vendor/github.com/cespare/xxhash/v2/xxhash.go View File

@@ -0,0 +1,228 @@
1
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
2
+// at http://cyan4973.github.io/xxHash/.
3
+package xxhash
4
+
5
+import (
6
+	"encoding/binary"
7
+	"errors"
8
+	"math/bits"
9
+)
10
+
11
+const (
12
+	prime1 uint64 = 11400714785074694791
13
+	prime2 uint64 = 14029467366897019727
14
+	prime3 uint64 = 1609587929392839161
15
+	prime4 uint64 = 9650029242287828579
16
+	prime5 uint64 = 2870177450012600261
17
+)
18
+
19
+// Store the primes in an array as well.
20
+//
21
+// The consts are used when possible in Go code to avoid MOVs but we need a
22
+// contiguous array of the assembly code.
23
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
24
+
25
+// Digest implements hash.Hash64.
26
+type Digest struct {
27
+	v1    uint64
28
+	v2    uint64
29
+	v3    uint64
30
+	v4    uint64
31
+	total uint64
32
+	mem   [32]byte
33
+	n     int // how much of mem is used
34
+}
35
+
36
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
37
+func New() *Digest {
38
+	var d Digest
39
+	d.Reset()
40
+	return &d
41
+}
42
+
43
+// Reset clears the Digest's state so that it can be reused.
44
+func (d *Digest) Reset() {
45
+	d.v1 = primes[0] + prime2
46
+	d.v2 = prime2
47
+	d.v3 = 0
48
+	d.v4 = -primes[0]
49
+	d.total = 0
50
+	d.n = 0
51
+}
52
+
53
+// Size always returns 8 bytes.
54
+func (d *Digest) Size() int { return 8 }
55
+
56
+// BlockSize always returns 32 bytes.
57
+func (d *Digest) BlockSize() int { return 32 }
58
+
59
+// Write adds more data to d. It always returns len(b), nil.
60
+func (d *Digest) Write(b []byte) (n int, err error) {
61
+	n = len(b)
62
+	d.total += uint64(n)
63
+
64
+	memleft := d.mem[d.n&(len(d.mem)-1):]
65
+
66
+	if d.n+n < 32 {
67
+		// This new data doesn't even fill the current block.
68
+		copy(memleft, b)
69
+		d.n += n
70
+		return
71
+	}
72
+
73
+	if d.n > 0 {
74
+		// Finish off the partial block.
75
+		c := copy(memleft, b)
76
+		d.v1 = round(d.v1, u64(d.mem[0:8]))
77
+		d.v2 = round(d.v2, u64(d.mem[8:16]))
78
+		d.v3 = round(d.v3, u64(d.mem[16:24]))
79
+		d.v4 = round(d.v4, u64(d.mem[24:32]))
80
+		b = b[c:]
81
+		d.n = 0
82
+	}
83
+
84
+	if len(b) >= 32 {
85
+		// One or more full blocks left.
86
+		nw := writeBlocks(d, b)
87
+		b = b[nw:]
88
+	}
89
+
90
+	// Store any remaining partial block.
91
+	copy(d.mem[:], b)
92
+	d.n = len(b)
93
+
94
+	return
95
+}
96
+
97
+// Sum appends the current hash to b and returns the resulting slice.
98
+func (d *Digest) Sum(b []byte) []byte {
99
+	s := d.Sum64()
100
+	return append(
101
+		b,
102
+		byte(s>>56),
103
+		byte(s>>48),
104
+		byte(s>>40),
105
+		byte(s>>32),
106
+		byte(s>>24),
107
+		byte(s>>16),
108
+		byte(s>>8),
109
+		byte(s),
110
+	)
111
+}
112
+
113
+// Sum64 returns the current hash.
114
+func (d *Digest) Sum64() uint64 {
115
+	var h uint64
116
+
117
+	if d.total >= 32 {
118
+		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
119
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
120
+		h = mergeRound(h, v1)
121
+		h = mergeRound(h, v2)
122
+		h = mergeRound(h, v3)
123
+		h = mergeRound(h, v4)
124
+	} else {
125
+		h = d.v3 + prime5
126
+	}
127
+
128
+	h += d.total
129
+
130
+	b := d.mem[:d.n&(len(d.mem)-1)]
131
+	for ; len(b) >= 8; b = b[8:] {
132
+		k1 := round(0, u64(b[:8]))
133
+		h ^= k1
134
+		h = rol27(h)*prime1 + prime4
135
+	}
136
+	if len(b) >= 4 {
137
+		h ^= uint64(u32(b[:4])) * prime1
138
+		h = rol23(h)*prime2 + prime3
139
+		b = b[4:]
140
+	}
141
+	for ; len(b) > 0; b = b[1:] {
142
+		h ^= uint64(b[0]) * prime5
143
+		h = rol11(h) * prime1
144
+	}
145
+
146
+	h ^= h >> 33
147
+	h *= prime2
148
+	h ^= h >> 29
149
+	h *= prime3
150
+	h ^= h >> 32
151
+
152
+	return h
153
+}
154
+
155
+const (
156
+	magic         = "xxh\x06"
157
+	marshaledSize = len(magic) + 8*5 + 32
158
+)
159
+
160
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
161
+func (d *Digest) MarshalBinary() ([]byte, error) {
162
+	b := make([]byte, 0, marshaledSize)
163
+	b = append(b, magic...)
164
+	b = appendUint64(b, d.v1)
165
+	b = appendUint64(b, d.v2)
166
+	b = appendUint64(b, d.v3)
167
+	b = appendUint64(b, d.v4)
168
+	b = appendUint64(b, d.total)
169
+	b = append(b, d.mem[:d.n]...)
170
+	b = b[:len(b)+len(d.mem)-d.n]
171
+	return b, nil
172
+}
173
+
174
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
175
+func (d *Digest) UnmarshalBinary(b []byte) error {
176
+	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
177
+		return errors.New("xxhash: invalid hash state identifier")
178
+	}
179
+	if len(b) != marshaledSize {
180
+		return errors.New("xxhash: invalid hash state size")
181
+	}
182
+	b = b[len(magic):]
183
+	b, d.v1 = consumeUint64(b)
184
+	b, d.v2 = consumeUint64(b)
185
+	b, d.v3 = consumeUint64(b)
186
+	b, d.v4 = consumeUint64(b)
187
+	b, d.total = consumeUint64(b)
188
+	copy(d.mem[:], b)
189
+	d.n = int(d.total % uint64(len(d.mem)))
190
+	return nil
191
+}
192
+
193
+func appendUint64(b []byte, x uint64) []byte {
194
+	var a [8]byte
195
+	binary.LittleEndian.PutUint64(a[:], x)
196
+	return append(b, a[:]...)
197
+}
198
+
199
+func consumeUint64(b []byte) ([]byte, uint64) {
200
+	x := u64(b)
201
+	return b[8:], x
202
+}
203
+
204
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
205
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
206
+
207
+func round(acc, input uint64) uint64 {
208
+	acc += input * prime2
209
+	acc = rol31(acc)
210
+	acc *= prime1
211
+	return acc
212
+}
213
+
214
+func mergeRound(acc, val uint64) uint64 {
215
+	val = round(0, val)
216
+	acc ^= val
217
+	acc = acc*prime1 + prime4
218
+	return acc
219
+}
220
+
221
+func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
222
+func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
223
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
224
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
225
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
226
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
227
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
228
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }

+ 209
- 0
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s View File

@@ -0,0 +1,209 @@
1
+//go:build !appengine && gc && !purego
2
+// +build !appengine
3
+// +build gc
4
+// +build !purego
5
+
6
+#include "textflag.h"
7
+
8
+// Registers:
9
+#define h      AX
10
+#define d      AX
11
+#define p      SI // pointer to advance through b
12
+#define n      DX
13
+#define end    BX // loop end
14
+#define v1     R8
15
+#define v2     R9
16
+#define v3     R10
17
+#define v4     R11
18
+#define x      R12
19
+#define prime1 R13
20
+#define prime2 R14
21
+#define prime4 DI
22
+
23
+#define round(acc, x) \
24
+	IMULQ prime2, x   \
25
+	ADDQ  x, acc      \
26
+	ROLQ  $31, acc    \
27
+	IMULQ prime1, acc
28
+
29
+// round0 performs the operation x = round(0, x).
30
+#define round0(x) \
31
+	IMULQ prime2, x \
32
+	ROLQ  $31, x    \
33
+	IMULQ prime1, x
34
+
35
+// mergeRound applies a merge round on the two registers acc and x.
36
+// It assumes that prime1, prime2, and prime4 have been loaded.
37
+#define mergeRound(acc, x) \
38
+	round0(x)         \
39
+	XORQ  x, acc      \
40
+	IMULQ prime1, acc \
41
+	ADDQ  prime4, acc
42
+
43
+// blockLoop processes as many 32-byte blocks as possible,
44
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
45
+// to process.
46
+#define blockLoop() \
47
+loop:  \
48
+	MOVQ +0(p), x  \
49
+	round(v1, x)   \
50
+	MOVQ +8(p), x  \
51
+	round(v2, x)   \
52
+	MOVQ +16(p), x \
53
+	round(v3, x)   \
54
+	MOVQ +24(p), x \
55
+	round(v4, x)   \
56
+	ADDQ $32, p    \
57
+	CMPQ p, end    \
58
+	JLE  loop
59
+
60
+// func Sum64(b []byte) uint64
61
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
62
+	// Load fixed primes.
63
+	MOVQ ·primes+0(SB), prime1
64
+	MOVQ ·primes+8(SB), prime2
65
+	MOVQ ·primes+24(SB), prime4
66
+
67
+	// Load slice.
68
+	MOVQ b_base+0(FP), p
69
+	MOVQ b_len+8(FP), n
70
+	LEAQ (p)(n*1), end
71
+
72
+	// The first loop limit will be len(b)-32.
73
+	SUBQ $32, end
74
+
75
+	// Check whether we have at least one block.
76
+	CMPQ n, $32
77
+	JLT  noBlocks
78
+
79
+	// Set up initial state (v1, v2, v3, v4).
80
+	MOVQ prime1, v1
81
+	ADDQ prime2, v1
82
+	MOVQ prime2, v2
83
+	XORQ v3, v3
84
+	XORQ v4, v4
85
+	SUBQ prime1, v4
86
+
87
+	blockLoop()
88
+
89
+	MOVQ v1, h
90
+	ROLQ $1, h
91
+	MOVQ v2, x
92
+	ROLQ $7, x
93
+	ADDQ x, h
94
+	MOVQ v3, x
95
+	ROLQ $12, x
96
+	ADDQ x, h
97
+	MOVQ v4, x
98
+	ROLQ $18, x
99
+	ADDQ x, h
100
+
101
+	mergeRound(h, v1)
102
+	mergeRound(h, v2)
103
+	mergeRound(h, v3)
104
+	mergeRound(h, v4)
105
+
106
+	JMP afterBlocks
107
+
108
+noBlocks:
109
+	MOVQ ·primes+32(SB), h
110
+
111
+afterBlocks:
112
+	ADDQ n, h
113
+
114
+	ADDQ $24, end
115
+	CMPQ p, end
116
+	JG   try4
117
+
118
+loop8:
119
+	MOVQ  (p), x
120
+	ADDQ  $8, p
121
+	round0(x)
122
+	XORQ  x, h
123
+	ROLQ  $27, h
124
+	IMULQ prime1, h
125
+	ADDQ  prime4, h
126
+
127
+	CMPQ p, end
128
+	JLE  loop8
129
+
130
+try4:
131
+	ADDQ $4, end
132
+	CMPQ p, end
133
+	JG   try1
134
+
135
+	MOVL  (p), x
136
+	ADDQ  $4, p
137
+	IMULQ prime1, x
138
+	XORQ  x, h
139
+
140
+	ROLQ  $23, h
141
+	IMULQ prime2, h
142
+	ADDQ  ·primes+16(SB), h
143
+
144
+try1:
145
+	ADDQ $4, end
146
+	CMPQ p, end
147
+	JGE  finalize
148
+
149
+loop1:
150
+	MOVBQZX (p), x
151
+	ADDQ    $1, p
152
+	IMULQ   ·primes+32(SB), x
153
+	XORQ    x, h
154
+	ROLQ    $11, h
155
+	IMULQ   prime1, h
156
+
157
+	CMPQ p, end
158
+	JL   loop1
159
+
160
+finalize:
161
+	MOVQ  h, x
162
+	SHRQ  $33, x
163
+	XORQ  x, h
164
+	IMULQ prime2, h
165
+	MOVQ  h, x
166
+	SHRQ  $29, x
167
+	XORQ  x, h
168
+	IMULQ ·primes+16(SB), h
169
+	MOVQ  h, x
170
+	SHRQ  $32, x
171
+	XORQ  x, h
172
+
173
+	MOVQ h, ret+24(FP)
174
+	RET
175
+
176
+// func writeBlocks(d *Digest, b []byte) int
177
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
178
+	// Load fixed primes needed for round.
179
+	MOVQ ·primes+0(SB), prime1
180
+	MOVQ ·primes+8(SB), prime2
181
+
182
+	// Load slice.
183
+	MOVQ b_base+8(FP), p
184
+	MOVQ b_len+16(FP), n
185
+	LEAQ (p)(n*1), end
186
+	SUBQ $32, end
187
+
188
+	// Load vN from d.
189
+	MOVQ s+0(FP), d
190
+	MOVQ 0(d), v1
191
+	MOVQ 8(d), v2
192
+	MOVQ 16(d), v3
193
+	MOVQ 24(d), v4
194
+
195
+	// We don't need to check the loop condition here; this function is
196
+	// always called with at least one block of data to process.
197
+	blockLoop()
198
+
199
+	// Copy vN back to d.
200
+	MOVQ v1, 0(d)
201
+	MOVQ v2, 8(d)
202
+	MOVQ v3, 16(d)
203
+	MOVQ v4, 24(d)
204
+
205
+	// The number of bytes written is p minus the old base pointer.
206
+	SUBQ b_base+8(FP), p
207
+	MOVQ p, ret+32(FP)
208
+
209
+	RET

+ 183
- 0
vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s View File

@@ -0,0 +1,183 @@
1
+//go:build !appengine && gc && !purego
2
+// +build !appengine
3
+// +build gc
4
+// +build !purego
5
+
6
+#include "textflag.h"
7
+
8
+// Registers:
9
+#define digest	R1
10
+#define h	R2 // return value
11
+#define p	R3 // input pointer
12
+#define n	R4 // input length
13
+#define nblocks	R5 // n / 32
14
+#define prime1	R7
15
+#define prime2	R8
16
+#define prime3	R9
17
+#define prime4	R10
18
+#define prime5	R11
19
+#define v1	R12
20
+#define v2	R13
21
+#define v3	R14
22
+#define v4	R15
23
+#define x1	R20
24
+#define x2	R21
25
+#define x3	R22
26
+#define x4	R23
27
+
28
+#define round(acc, x) \
29
+	MADD prime2, acc, x, acc \
30
+	ROR  $64-31, acc         \
31
+	MUL  prime1, acc
32
+
33
+// round0 performs the operation x = round(0, x).
34
+#define round0(x) \
35
+	MUL prime2, x \
36
+	ROR $64-31, x \
37
+	MUL prime1, x
38
+
39
+#define mergeRound(acc, x) \
40
+	round0(x)                     \
41
+	EOR  x, acc                   \
42
+	MADD acc, prime4, prime1, acc
43
+
44
+// blockLoop processes as many 32-byte blocks as possible,
45
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
46
+#define blockLoop() \
47
+	LSR     $5, n, nblocks  \
48
+	PCALIGN $16             \
49
+	loop:                   \
50
+	LDP.P   16(p), (x1, x2) \
51
+	LDP.P   16(p), (x3, x4) \
52
+	round(v1, x1)           \
53
+	round(v2, x2)           \
54
+	round(v3, x3)           \
55
+	round(v4, x4)           \
56
+	SUB     $1, nblocks     \
57
+	CBNZ    nblocks, loop
58
+
59
+// func Sum64(b []byte) uint64
60
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
61
+	LDP b_base+0(FP), (p, n)
62
+
63
+	LDP  ·primes+0(SB), (prime1, prime2)
64
+	LDP  ·primes+16(SB), (prime3, prime4)
65
+	MOVD ·primes+32(SB), prime5
66
+
67
+	CMP  $32, n
68
+	CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
69
+	BLT  afterLoop
70
+
71
+	ADD  prime1, prime2, v1
72
+	MOVD prime2, v2
73
+	MOVD $0, v3
74
+	NEG  prime1, v4
75
+
76
+	blockLoop()
77
+
78
+	ROR $64-1, v1, x1
79
+	ROR $64-7, v2, x2
80
+	ADD x1, x2
81
+	ROR $64-12, v3, x3
82
+	ROR $64-18, v4, x4
83
+	ADD x3, x4
84
+	ADD x2, x4, h
85
+
86
+	mergeRound(h, v1)
87
+	mergeRound(h, v2)
88
+	mergeRound(h, v3)
89
+	mergeRound(h, v4)
90
+
91
+afterLoop:
92
+	ADD n, h
93
+
94
+	TBZ   $4, n, try8
95
+	LDP.P 16(p), (x1, x2)
96
+
97
+	round0(x1)
98
+
99
+	// NOTE: here and below, sequencing the EOR after the ROR (using a
100
+	// rotated register) is worth a small but measurable speedup for small
101
+	// inputs.
102
+	ROR  $64-27, h
103
+	EOR  x1 @> 64-27, h, h
104
+	MADD h, prime4, prime1, h
105
+
106
+	round0(x2)
107
+	ROR  $64-27, h
108
+	EOR  x2 @> 64-27, h, h
109
+	MADD h, prime4, prime1, h
110
+
111
+try8:
112
+	TBZ    $3, n, try4
113
+	MOVD.P 8(p), x1
114
+
115
+	round0(x1)
116
+	ROR  $64-27, h
117
+	EOR  x1 @> 64-27, h, h
118
+	MADD h, prime4, prime1, h
119
+
120
+try4:
121
+	TBZ     $2, n, try2
122
+	MOVWU.P 4(p), x2
123
+
124
+	MUL  prime1, x2
125
+	ROR  $64-23, h
126
+	EOR  x2 @> 64-23, h, h
127
+	MADD h, prime3, prime2, h
128
+
129
+try2:
130
+	TBZ     $1, n, try1
131
+	MOVHU.P 2(p), x3
132
+	AND     $255, x3, x1
133
+	LSR     $8, x3, x2
134
+
135
+	MUL prime5, x1
136
+	ROR $64-11, h
137
+	EOR x1 @> 64-11, h, h
138
+	MUL prime1, h
139
+
140
+	MUL prime5, x2
141
+	ROR $64-11, h
142
+	EOR x2 @> 64-11, h, h
143
+	MUL prime1, h
144
+
145
+try1:
146
+	TBZ   $0, n, finalize
147
+	MOVBU (p), x4
148
+
149
+	MUL prime5, x4
150
+	ROR $64-11, h
151
+	EOR x4 @> 64-11, h, h
152
+	MUL prime1, h
153
+
154
+finalize:
155
+	EOR h >> 33, h
156
+	MUL prime2, h
157
+	EOR h >> 29, h
158
+	MUL prime3, h
159
+	EOR h >> 32, h
160
+
161
+	MOVD h, ret+24(FP)
162
+	RET
163
+
164
+// func writeBlocks(d *Digest, b []byte) int
165
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
166
+	LDP ·primes+0(SB), (prime1, prime2)
167
+
168
+	// Load state. Assume v[1-4] are stored contiguously.
169
+	MOVD d+0(FP), digest
170
+	LDP  0(digest), (v1, v2)
171
+	LDP  16(digest), (v3, v4)
172
+
173
+	LDP b_base+8(FP), (p, n)
174
+
175
+	blockLoop()
176
+
177
+	// Store updated state.
178
+	STP (v1, v2), 0(digest)
179
+	STP (v3, v4), 16(digest)
180
+
181
+	BIC  $31, n
182
+	MOVD n, ret+32(FP)
183
+	RET

+ 15
- 0
vendor/github.com/cespare/xxhash/v2/xxhash_asm.go View File

@@ -0,0 +1,15 @@
1
+//go:build (amd64 || arm64) && !appengine && gc && !purego
2
+// +build amd64 arm64
3
+// +build !appengine
4
+// +build gc
5
+// +build !purego
6
+
7
+package xxhash
8
+
9
+// Sum64 computes the 64-bit xxHash digest of b.
10
+//
11
+//go:noescape
12
+func Sum64(b []byte) uint64
13
+
14
+//go:noescape
15
+func writeBlocks(d *Digest, b []byte) int

+ 76
- 0
vendor/github.com/cespare/xxhash/v2/xxhash_other.go View File

@@ -0,0 +1,76 @@
1
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
2
+// +build !amd64,!arm64 appengine !gc purego
3
+
4
+package xxhash
5
+
6
+// Sum64 computes the 64-bit xxHash digest of b.
7
+func Sum64(b []byte) uint64 {
8
+	// A simpler version would be
9
+	//   d := New()
10
+	//   d.Write(b)
11
+	//   return d.Sum64()
12
+	// but this is faster, particularly for small inputs.
13
+
14
+	n := len(b)
15
+	var h uint64
16
+
17
+	if n >= 32 {
18
+		v1 := primes[0] + prime2
19
+		v2 := prime2
20
+		v3 := uint64(0)
21
+		v4 := -primes[0]
22
+		for len(b) >= 32 {
23
+			v1 = round(v1, u64(b[0:8:len(b)]))
24
+			v2 = round(v2, u64(b[8:16:len(b)]))
25
+			v3 = round(v3, u64(b[16:24:len(b)]))
26
+			v4 = round(v4, u64(b[24:32:len(b)]))
27
+			b = b[32:len(b):len(b)]
28
+		}
29
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
30
+		h = mergeRound(h, v1)
31
+		h = mergeRound(h, v2)
32
+		h = mergeRound(h, v3)
33
+		h = mergeRound(h, v4)
34
+	} else {
35
+		h = prime5
36
+	}
37
+
38
+	h += uint64(n)
39
+
40
+	for ; len(b) >= 8; b = b[8:] {
41
+		k1 := round(0, u64(b[:8]))
42
+		h ^= k1
43
+		h = rol27(h)*prime1 + prime4
44
+	}
45
+	if len(b) >= 4 {
46
+		h ^= uint64(u32(b[:4])) * prime1
47
+		h = rol23(h)*prime2 + prime3
48
+		b = b[4:]
49
+	}
50
+	for ; len(b) > 0; b = b[1:] {
51
+		h ^= uint64(b[0]) * prime5
52
+		h = rol11(h) * prime1
53
+	}
54
+
55
+	h ^= h >> 33
56
+	h *= prime2
57
+	h ^= h >> 29
58
+	h *= prime3
59
+	h ^= h >> 32
60
+
61
+	return h
62
+}
63
+
64
+func writeBlocks(d *Digest, b []byte) int {
65
+	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
66
+	n := len(b)
67
+	for len(b) >= 32 {
68
+		v1 = round(v1, u64(b[0:8:len(b)]))
69
+		v2 = round(v2, u64(b[8:16:len(b)]))
70
+		v3 = round(v3, u64(b[16:24:len(b)]))
71
+		v4 = round(v4, u64(b[24:32:len(b)]))
72
+		b = b[32:len(b):len(b)]
73
+	}
74
+	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
75
+	return n - len(b)
76
+}

+ 16
- 0
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go View File

@@ -0,0 +1,16 @@
1
+//go:build appengine
2
+// +build appengine
3
+
4
+// This file contains the safe implementations of otherwise unsafe-using code.
5
+
6
+package xxhash
7
+
8
+// Sum64String computes the 64-bit xxHash digest of s.
9
+func Sum64String(s string) uint64 {
10
+	return Sum64([]byte(s))
11
+}
12
+
13
+// WriteString adds more data to d. It always returns len(s), nil.
14
+func (d *Digest) WriteString(s string) (n int, err error) {
15
+	return d.Write([]byte(s))
16
+}

+ 58
- 0
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go View File

@@ -0,0 +1,58 @@
1
+//go:build !appengine
2
+// +build !appengine
3
+
4
+// This file encapsulates usage of unsafe.
5
+// xxhash_safe.go contains the safe implementations.
6
+
7
+package xxhash
8
+
9
+import (
10
+	"unsafe"
11
+)
12
+
13
+// In the future it's possible that compiler optimizations will make these
14
+// XxxString functions unnecessary by realizing that calls such as
15
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
16
+// If that happens, even if we keep these functions they can be replaced with
17
+// the trivial safe code.
18
+
19
+// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
20
+//
21
+//   var b []byte
22
+//   bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
23
+//   bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
24
+//   bh.Len = len(s)
25
+//   bh.Cap = len(s)
26
+//
27
+// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
28
+// weight to this sequence of expressions that any function that uses it will
29
+// not be inlined. Instead, the functions below use a different unsafe
30
+// conversion designed to minimize the inliner weight and allow both to be
31
+// inlined. There is also a test (TestInlining) which verifies that these are
32
+// inlined.
33
+//
34
+// See https://github.com/golang/go/issues/42739 for discussion.
35
+
36
+// Sum64String computes the 64-bit xxHash digest of s.
37
+// It may be faster than Sum64([]byte(s)) by avoiding a copy.
38
+func Sum64String(s string) uint64 {
39
+	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
40
+	return Sum64(b)
41
+}
42
+
43
+// WriteString adds more data to d. It always returns len(s), nil.
44
+// It may be faster than Write([]byte(s)) by avoiding a copy.
45
+func (d *Digest) WriteString(s string) (n int, err error) {
46
+	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
47
+	// d.Write always returns len(s), nil.
48
+	// Ignoring the return output and returning these fixed values buys a
49
+	// savings of 6 in the inliner's cost model.
50
+	return len(s), nil
51
+}
52
+
53
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
54
+// of the first two words is the same as the layout of a string.
55
+type sliceHeader struct {
56
+	s   string
57
+	cap int
58
+}

+ 3
- 0
vendor/github.com/golang/protobuf/AUTHORS View File

@@ -0,0 +1,3 @@
1
+# This source code refers to The Go Authors for copyright purposes.
2
+# The master list of authors is in the main Go distribution,
3
+# visible at http://tip.golang.org/AUTHORS.

+ 3
- 0
vendor/github.com/golang/protobuf/CONTRIBUTORS View File

@@ -0,0 +1,3 @@
1
+# This source code was written by the Go contributors.
2
+# The master list of contributors is in the main Go distribution,
3
+# visible at http://tip.golang.org/CONTRIBUTORS.

+ 28
- 0
vendor/github.com/golang/protobuf/LICENSE View File

@@ -0,0 +1,28 @@
1
+Copyright 2010 The Go Authors.  All rights reserved.
2
+
3
+Redistribution and use in source and binary forms, with or without
4
+modification, are permitted provided that the following conditions are
5
+met:
6
+
7
+    * Redistributions of source code must retain the above copyright
8
+notice, this list of conditions and the following disclaimer.
9
+    * Redistributions in binary form must reproduce the above
10
+copyright notice, this list of conditions and the following disclaimer
11
+in the documentation and/or other materials provided with the
12
+distribution.
13
+    * Neither the name of Google Inc. nor the names of its
14
+contributors may be used to endorse or promote products derived from
15
+this software without specific prior written permission.
16
+
17
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
+

+ 324
- 0
vendor/github.com/golang/protobuf/proto/buffer.go View File

@@ -0,0 +1,324 @@
1
+// Copyright 2019 The Go Authors. All rights reserved.
2
+// Use of this source code is governed by a BSD-style
3
+// license that can be found in the LICENSE file.
4
+
5
+package proto
6
+
7
+import (
8
+	"errors"
9
+	"fmt"
10
+
11
+	"google.golang.org/protobuf/encoding/prototext"
12
+	"google.golang.org/protobuf/encoding/protowire"
13
+	"google.golang.org/protobuf/runtime/protoimpl"
14
+)
15
+
16
+const (
17
+	WireVarint     = 0
18
+	WireFixed32    = 5
19
+	WireFixed64    = 1
20
+	WireBytes      = 2
21
+	WireStartGroup = 3
22
+	WireEndGroup   = 4
23
+)
24
+
25
+// EncodeVarint returns the varint encoded bytes of v.
26
+func EncodeVarint(v uint64) []byte {
27
+	return protowire.AppendVarint(nil, v)
28
+}
29
+
30
+// SizeVarint returns the length of the varint encoded bytes of v.
31
+// This is equal to len(EncodeVarint(v)).
32
+func SizeVarint(v uint64) int {
33
+	return protowire.SizeVarint(v)
34
+}
35
+
36
+// DecodeVarint parses a varint encoded integer from b,
37
+// returning the integer value and the length of the varint.
38
+// It returns (0, 0) if there is a parse error.
39
+func DecodeVarint(b []byte) (uint64, int) {
40
+	v, n := protowire.ConsumeVarint(b)
41
+	if n < 0 {
42
+		return 0, 0
43
+	}
44
+	return v, n
45
+}
46
+
47
+// Buffer is a buffer for encoding and decoding the protobuf wire format.
48
+// It may be reused between invocations to reduce memory usage.
49
+type Buffer struct {
50
+	buf           []byte
51
+	idx           int
52
+	deterministic bool
53
+}
54
+
55
+// NewBuffer allocates a new Buffer initialized with buf,
56
+// where the contents of buf are considered the unread portion of the buffer.
57
+func NewBuffer(buf []byte) *Buffer {
58
+	return &Buffer{buf: buf}
59
+}
60
+
61
+// SetDeterministic specifies whether to use deterministic serialization.
62
+//
63
+// Deterministic serialization guarantees that for a given binary, equal
64
+// messages will always be serialized to the same bytes. This implies:
65
+//
66
+//   - Repeated serialization of a message will return the same bytes.
67
+//   - Different processes of the same binary (which may be executing on
68
+//     different machines) will serialize equal messages to the same bytes.
69
+//
70
+// Note that the deterministic serialization is NOT canonical across
71
+// languages. It is not guaranteed to remain stable over time. It is unstable
72
+// across different builds with schema changes due to unknown fields.
73
+// Users who need canonical serialization (e.g., persistent storage in a
74
+// canonical form, fingerprinting, etc.) should define their own
75
+// canonicalization specification and implement their own serializer rather
76
+// than relying on this API.
77
+//
78
+// If deterministic serialization is requested, map entries will be sorted
79
+// by keys in lexographical order. This is an implementation detail and
80
+// subject to change.
81
+func (b *Buffer) SetDeterministic(deterministic bool) {
82
+	b.deterministic = deterministic
83
+}
84
+
85
+// SetBuf sets buf as the internal buffer,
86
+// where the contents of buf are considered the unread portion of the buffer.
87
+func (b *Buffer) SetBuf(buf []byte) {
88
+	b.buf = buf
89
+	b.idx = 0
90
+}
91
+
92
+// Reset clears the internal buffer of all written and unread data.
93
+func (b *Buffer) Reset() {
94
+	b.buf = b.buf[:0]
95
+	b.idx = 0
96
+}
97
+
98
+// Bytes returns the internal buffer.
99
+func (b *Buffer) Bytes() []byte {
100
+	return b.buf
101
+}
102
+
103
+// Unread returns the unread portion of the buffer.
104
+func (b *Buffer) Unread() []byte {
105
+	return b.buf[b.idx:]
106
+}
107
+
108
+// Marshal appends the wire-format encoding of m to the buffer.
109
+func (b *Buffer) Marshal(m Message) error {
110
+	var err error
111
+	b.buf, err = marshalAppend(b.buf, m, b.deterministic)
112
+	return err
113
+}
114
+
115
+// Unmarshal parses the wire-format message in the buffer and
116
+// places the decoded results in m.
117
+// It does not reset m before unmarshaling.
118
+func (b *Buffer) Unmarshal(m Message) error {
119
+	err := UnmarshalMerge(b.Unread(), m)
120
+	b.idx = len(b.buf)
121
+	return err
122
+}
123
+
124
+type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
125
+
126
+func (m *unknownFields) String() string { panic("not implemented") }
127
+func (m *unknownFields) Reset()         { panic("not implemented") }
128
+func (m *unknownFields) ProtoMessage()  { panic("not implemented") }
129
+
130
+// DebugPrint dumps the encoded bytes of b with a header and footer including s
131
+// to stdout. This is only intended for debugging.
132
+func (*Buffer) DebugPrint(s string, b []byte) {
133
+	m := MessageReflect(new(unknownFields))
134
+	m.SetUnknown(b)
135
+	b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
136
+	fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
137
+}
138
+
139
+// EncodeVarint appends an unsigned varint encoding to the buffer.
140
+func (b *Buffer) EncodeVarint(v uint64) error {
141
+	b.buf = protowire.AppendVarint(b.buf, v)
142
+	return nil
143
+}
144
+
145
+// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
146
+func (b *Buffer) EncodeZigzag32(v uint64) error {
147
+	return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
148
+}
149
+
150
+// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
151
+func (b *Buffer) EncodeZigzag64(v uint64) error {
152
+	return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
153
+}
154
+
155
+// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
156
+func (b *Buffer) EncodeFixed32(v uint64) error {
157
+	b.buf = protowire.AppendFixed32(b.buf, uint32(v))
158
+	return nil
159
+}
160
+
161
+// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
162
+func (b *Buffer) EncodeFixed64(v uint64) error {
163
+	b.buf = protowire.AppendFixed64(b.buf, uint64(v))
164
+	return nil
165
+}
166
+
167
+// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
168
+func (b *Buffer) EncodeRawBytes(v []byte) error {
169
+	b.buf = protowire.AppendBytes(b.buf, v)
170
+	return nil
171
+}
172
+
173
+// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
174
+// It does not validate whether v contains valid UTF-8.
175
+func (b *Buffer) EncodeStringBytes(v string) error {
176
+	b.buf = protowire.AppendString(b.buf, v)
177
+	return nil
178
+}
179
+
180
+// EncodeMessage appends a length-prefixed encoded message to the buffer.
181
+func (b *Buffer) EncodeMessage(m Message) error {
182
+	var err error
183
+	b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
184
+	b.buf, err = marshalAppend(b.buf, m, b.deterministic)
185
+	return err
186
+}
187
+
188
+// DecodeVarint consumes an encoded unsigned varint from the buffer.
189
+func (b *Buffer) DecodeVarint() (uint64, error) {
190
+	v, n := protowire.ConsumeVarint(b.buf[b.idx:])
191
+	if n < 0 {
192
+		return 0, protowire.ParseError(n)
193
+	}
194
+	b.idx += n
195
+	return uint64(v), nil
196
+}
197
+
198
+// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
199
+func (b *Buffer) DecodeZigzag32() (uint64, error) {
200
+	v, err := b.DecodeVarint()
201
+	if err != nil {
202
+		return 0, err
203
+	}
204
+	return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
205
+}
206
+
207
+// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
208
+func (b *Buffer) DecodeZigzag64() (uint64, error) {
209
+	v, err := b.DecodeVarint()
210
+	if err != nil {
211
+		return 0, err
212
+	}
213
+	return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
214
+}
215
+
216
+// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
217
+func (b *Buffer) DecodeFixed32() (uint64, error) {
218
+	v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
219
+	if n < 0 {
220
+		return 0, protowire.ParseError(n)
221
+	}
222
+	b.idx += n
223
+	return uint64(v), nil
224
+}
225
+
226
+// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
227
+func (b *Buffer) DecodeFixed64() (uint64, error) {
228
+	v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
229
+	if n < 0 {
230
+		return 0, protowire.ParseError(n)
231
+	}
232
+	b.idx += n
233
+	return uint64(v), nil
234
+}
235
+
236
+// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
237
+// If alloc is specified, it returns a copy the raw bytes
238
+// rather than a sub-slice of the buffer.
239
+func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
240
+	v, n := protowire.ConsumeBytes(b.buf[b.idx:])
241
+	if n < 0 {
242
+		return nil, protowire.ParseError(n)
243
+	}
244
+	b.idx += n
245
+	if alloc {
246
+		v = append([]byte(nil), v...)
247
+	}
248
+	return v, nil
249
+}
250
+
251
+// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
252
+// It does not validate whether the raw bytes contain valid UTF-8.
253
+func (b *Buffer) DecodeStringBytes() (string, error) {
254
+	v, n := protowire.ConsumeString(b.buf[b.idx:])
255
+	if n < 0 {
256
+		return "", protowire.ParseError(n)
257
+	}
258
+	b.idx += n
259
+	return v, nil
260
+}
261
+
262
+// DecodeMessage consumes a length-prefixed message from the buffer.
263
+// It does not reset m before unmarshaling.
264
+func (b *Buffer) DecodeMessage(m Message) error {
265
+	v, err := b.DecodeRawBytes(false)
266
+	if err != nil {
267
+		return err
268
+	}
269
+	return UnmarshalMerge(v, m)
270
+}
271
+
272
+// DecodeGroup consumes a message group from the buffer.
273
+// It assumes that the start group marker has already been consumed and
274
+// consumes all bytes until (and including the end group marker).
275
+// It does not reset m before unmarshaling.
276
+func (b *Buffer) DecodeGroup(m Message) error {
277
+	v, n, err := consumeGroup(b.buf[b.idx:])
278
+	if err != nil {
279
+		return err
280
+	}
281
+	b.idx += n
282
+	return UnmarshalMerge(v, m)
283
+}
284
+
285
+// consumeGroup parses b until it finds an end group marker, returning
286
+// the raw bytes of the message (excluding the end group marker) and the
287
+// the total length of the message (including the end group marker).
288
+func consumeGroup(b []byte) ([]byte, int, error) {
289
+	b0 := b
290
+	depth := 1 // assume this follows a start group marker
291
+	for {
292
+		_, wtyp, tagLen := protowire.ConsumeTag(b)
293
+		if tagLen < 0 {
294
+			return nil, 0, protowire.ParseError(tagLen)
295
+		}
296
+		b = b[tagLen:]
297
+
298
+		var valLen int
299
+		switch wtyp {
300
+		case protowire.VarintType:
301
+			_, valLen = protowire.ConsumeVarint(b)
302
+		case protowire.Fixed32Type:
303
+			_, valLen = protowire.ConsumeFixed32(b)
304
+		case protowire.Fixed64Type:
305
+			_, valLen = protowire.ConsumeFixed64(b)
306
+		case protowire.BytesType:
307
+			_, valLen = protowire.ConsumeBytes(b)
308
+		case protowire.StartGroupType:
309
+			depth++
310
+		case protowire.EndGroupType:
311
+			depth--
312
+		default:
313
+			return nil, 0, errors.New("proto: cannot parse reserved wire type")
314
+		}
315
+		if valLen < 0 {
316
+			return nil, 0, protowire.ParseError(valLen)
317
+		}
318
+		b = b[valLen:]
319
+
320
+		if depth == 0 {
321
+			return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
322
+		}
323
+	}
324
+}

+ 63
- 0
vendor/github.com/golang/protobuf/proto/defaults.go View File

@@ -0,0 +1,63 @@
1
+// Copyright 2019 The Go Authors. All rights reserved.
2
+// Use of this source code is governed by a BSD-style
3
+// license that can be found in the LICENSE file.
4
+
5
+package proto
6
+
7
+import (
8
+	"google.golang.org/protobuf/reflect/protoreflect"
9
+)
10
+
11
+// SetDefaults sets unpopulated scalar fields to their default values.
12
+// Fields within a oneof are not set even if they have a default value.
13
+// SetDefaults is recursively called upon any populated message fields.
14
+func SetDefaults(m Message) {
15
+	if m != nil {
16
+		setDefaults(MessageReflect(m))
17
+	}
18
+}
19
+
20
+func setDefaults(m protoreflect.Message) {
21
+	fds := m.Descriptor().Fields()
22
+	for i := 0; i < fds.Len(); i++ {
23
+		fd := fds.Get(i)
24
+		if !m.Has(fd) {
25
+			if fd.HasDefault() && fd.ContainingOneof() == nil {
26
+				v := fd.Default()
27
+				if fd.Kind() == protoreflect.BytesKind {
28
+					v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
29
+				}
30
+				m.Set(fd, v)
31
+			}
32
+			continue
33
+		}
34
+	}
35
+
36
+	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
37
+		switch {
38
+		// Handle singular message.
39
+		case fd.Cardinality() != protoreflect.Repeated:
40
+			if fd.Message() != nil {
41
+				setDefaults(m.Get(fd).Message())
42
+			}
43
+		// Handle list of messages.
44
+		case fd.IsList():
45
+			if fd.Message() != nil {
46
+				ls := m.Get(fd).List()
47
+				for i := 0; i < ls.Len(); i++ {
48
+					setDefaults(ls.Get(i).Message())
49
+				}
50
+			}
51
+		// Handle map of messages.
52
+		case fd.IsMap():
53
+			if fd.MapValue().Message() != nil {
54
+				ms := m.Get(fd).Map()
55
+				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
56
+					setDefaults(v.Message())
57
+					return true
58
+				})
59
+			}
60
+		}
61
+		return true
62
+	})
63
+}

+ 113
- 0
vendor/github.com/golang/protobuf/proto/deprecated.go View File

@@ -0,0 +1,113 @@
1
+// Copyright 2018 The Go Authors. All rights reserved.
2
+// Use of this source code is governed by a BSD-style
3
+// license that can be found in the LICENSE file.
4
+
5
+package proto
6
+
7
+import (
8
+	"encoding/json"
9
+	"errors"
10
+	"fmt"
11
+	"strconv"
12
+
13
+	protoV2 "google.golang.org/protobuf/proto"
14
+)
15
+
16
+var (
17
+	// Deprecated: No longer returned.
18
+	ErrNil = errors.New("proto: Marshal called with nil")
19
+
20
+	// Deprecated: No longer returned.
21
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
22
+
23
+	// Deprecated: No longer returned.
24
+	ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
25
+)
26
+
27
+// Deprecated: Do not use.
28
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
29
+
30
+// Deprecated: Do not use.
31
+func GetStats() Stats { return Stats{} }
32
+
33
+// Deprecated: Do not use.
34
+func MarshalMessageSet(interface{}) ([]byte, error) {
35
+	return nil, errors.New("proto: not implemented")
36
+}
37
+
38
+// Deprecated: Do not use.
39
+func UnmarshalMessageSet([]byte, interface{}) error {
40
+	return errors.New("proto: not implemented")
41
+}
42
+
43
+// Deprecated: Do not use.
44
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
45
+	return nil, errors.New("proto: not implemented")
46
+}
47
+
48
+// Deprecated: Do not use.
49
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
50
+	return errors.New("proto: not implemented")
51
+}
52
+
53
+// Deprecated: Do not use.
54
+func RegisterMessageSetType(Message, int32, string) {}
55
+
56
+// Deprecated: Do not use.
57
+func EnumName(m map[int32]string, v int32) string {
58
+	s, ok := m[v]
59
+	if ok {
60
+		return s
61
+	}
62
+	return strconv.Itoa(int(v))
63
+}
64
+
65
+// Deprecated: Do not use.
66
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
67
+	if data[0] == '"' {
68
+		// New style: enums are strings.
69
+		var repr string
70
+		if err := json.Unmarshal(data, &repr); err != nil {
71
+			return -1, err
72
+		}
73
+		val, ok := m[repr]
74
+		if !ok {
75
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
76
+		}
77
+		return val, nil
78
+	}
79
+	// Old style: enums are ints.
80
+	var val int32
81
+	if err := json.Unmarshal(data, &val); err != nil {
82
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
83
+	}
84
+	return val, nil
85
+}
86
+
87
+// Deprecated: Do not use; this type existed for intenal-use only.
88
+type InternalMessageInfo struct{}
89
+
90
+// Deprecated: Do not use; this method existed for intenal-use only.
91
+func (*InternalMessageInfo) DiscardUnknown(m Message) {
92
+	DiscardUnknown(m)
93
+}
94
+
95
+// Deprecated: Do not use; this method existed for intenal-use only.
96
+func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
97
+	return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
98
+}
99
+
100
+// Deprecated: Do not use; this method existed for intenal-use only.
101
+func (*InternalMessageInfo) Merge(dst, src Message) {
102
+	protoV2.Merge(MessageV2(dst), MessageV2(src))
103
+}
104
+
105
+// Deprecated: Do not use; this method existed for intenal-use only.
106
+func (*InternalMessageInfo) Size(m Message) int {
107
+	return protoV2.Size(MessageV2(m))
108
+}
109
+
110
+// Deprecated: Do not use; this method existed for intenal-use only.
111
+func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
112
+	return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
113
+}

+ 58
- 0
vendor/github.com/golang/protobuf/proto/discard.go View File

@@ -0,0 +1,58 @@
1
+// Copyright 2019 The Go Authors. All rights reserved.
2
+// Use of this source code is governed by a BSD-style
3
+// license that can be found in the LICENSE file.
4
+
5
+package proto
6
+
7
+import (
8
+	"google.golang.org/protobuf/reflect/protoreflect"
9
+)
10
+
11
+// DiscardUnknown recursively discards all unknown fields from this message
12
+// and all embedded messages.
13
+//
14
+// When unmarshaling a message with unrecognized fields, the tags and values
15
+// of such fields are preserved in the Message. This allows a later call to
16
+// marshal to be able to produce a message that continues to have those
17
+// unrecognized fields. To avoid this, DiscardUnknown is used to
18
+// explicitly clear the unknown fields after unmarshaling.
19
+func DiscardUnknown(m Message) {
20
+	if m != nil {
21
+		discardUnknown(MessageReflect(m))
22
+	}
23
+}
24
+
25
+func discardUnknown(m protoreflect.Message) {
26
+	m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
27
+		switch {
28
+		// Handle singular message.
29
+		case fd.Cardinality() != protoreflect.Repeated:
30
+			if fd.Message() != nil {
31
+				discardUnknown(m.Get(fd).Message())
32
+			}
33
+		// Handle list of messages.
34
+		case fd.IsList():
35
+			if fd.Message() != nil {
36
+				ls := m.Get(fd).List()
37
+				for i := 0; i < ls.Len(); i++ {
38
+					discardUnknown(ls.Get(i).Message())
39
+				}
40
+			}
41
+		// Handle map of messages.
42
+		case fd.IsMap():
43
+			if fd.MapValue().Message() != nil {
44
+				ms := m.Get(fd).Map()
45
+				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
46
+					discardUnknown(v.Message())
47
+					return true
48
+				})
49
+			}
50
+		}
51
+		return true
52
+	})
53
+
54
+	// Discard unknown fields.
55
+	if len(m.GetUnknown()) > 0 {
56
+		m.SetUnknown(nil)
57
+	}
58
+}

+ 356
- 0
vendor/github.com/golang/protobuf/proto/extensions.go View File

@@ -0,0 +1,356 @@
1
+// Copyright 2010 The Go Authors. All rights reserved.
2
+// Use of this source code is governed by a BSD-style
3
+// license that can be found in the LICENSE file.
4
+
5
+package proto
6
+
7
+import (
8
+	"errors"
9
+	"fmt"
10
+	"reflect"
11
+
12
+	"google.golang.org/protobuf/encoding/protowire"
13
+	"google.golang.org/protobuf/proto"
14
+	"google.golang.org/protobuf/reflect/protoreflect"
15
+	"google.golang.org/protobuf/reflect/protoregistry"
16
+	"google.golang.org/protobuf/runtime/protoiface"
17
+	"google.golang.org/protobuf/runtime/protoimpl"
18
+)
19
+
20
+type (
21
+	// ExtensionDesc represents an extension descriptor and
22
+	// is used to interact with an extension field in a message.
23
+	//
24
+	// Variables of this type are generated in code by protoc-gen-go.
25
+	ExtensionDesc = protoimpl.ExtensionInfo
26
+
27
+	// ExtensionRange represents a range of message extensions.
28
+	// Used in code generated by protoc-gen-go.
29
+	ExtensionRange = protoiface.ExtensionRangeV1
30
+
31
+	// Deprecated: Do not use; this is an internal type.
32
+	Extension = protoimpl.ExtensionFieldV1
33
+
34
+	// Deprecated: Do not use; this is an internal type.
35
+	XXX_InternalExtensions = protoimpl.ExtensionFields
36
+)
37
+
38
+// ErrMissingExtension reports whether the extension was not present.
39
+var ErrMissingExtension = errors.New("proto: missing extension")
40
+
41
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
42
+
43
+// HasExtension reports whether the extension field is present in m
44
+// either as an explicitly populated field or as an unknown field.
45
+func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
46
+	mr := MessageReflect(m)
47
+	if mr == nil || !mr.IsValid() {
48
+		return false
49
+	}
50
+
51
+	// Check whether any populated known field matches the field number.
52
+	xtd := xt.TypeDescriptor()
53
+	if isValidExtension(mr.Descriptor(), xtd) {
54
+		has = mr.Has(xtd)
55
+	} else {
56
+		mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
57
+			has = int32(fd.Number()) == xt.Field
58
+			return !has
59
+		})
60
+	}
61
+
62
+	// Check whether any unknown field matches the field number.
63
+	for b := mr.GetUnknown(); !has && len(b) > 0; {
64
+		num, _, n := protowire.ConsumeField(b)
65
+		has = int32(num) == xt.Field
66
+		b = b[n:]
67
+	}
68
+	return has
69
+}
70
+
71
+// ClearExtension removes the extension field from m
72
+// either as an explicitly populated field or as an unknown field.
73
+func ClearExtension(m Message, xt *ExtensionDesc) {
74
+	mr := MessageReflect(m)
75
+	if mr == nil || !mr.IsValid() {
76
+		return
77
+	}
78
+
79
+	xtd := xt.TypeDescriptor()
80
+	if isValidExtension(mr.Descriptor(), xtd) {
81
+		mr.Clear(xtd)
82
+	} else {
83
+		mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
84
+			if int32(fd.Number()) == xt.Field {
85
+				mr.Clear(fd)
86
+				return false
87
+			}
88
+			return true
89
+		})
90
+	}
91
+	clearUnknown(mr, fieldNum(xt.Field))
92
+}
93
+
94
+// ClearAllExtensions clears all extensions from m.
95
+// This includes populated fields and unknown fields in the extension range.
96
+func ClearAllExtensions(m Message) {
97
+	mr := MessageReflect(m)
98
+	if mr == nil || !mr.IsValid() {
99
+		return
100
+	}
101
+
102
+	mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
103
+		if fd.IsExtension() {
104
+			mr.Clear(fd)
105
+		}
106
+		return true
107
+	})
108
+	clearUnknown(mr, mr.Descriptor().ExtensionRanges())
109
+}
110
+
111
+// GetExtension retrieves a proto2 extended field from m.
112
+//
113
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
114
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
115
+// If the field is not present, then the default value is returned (if one is specified),
116
+// otherwise ErrMissingExtension is reported.
117
+//
118
+// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
119
+// then GetExtension returns the raw encoded bytes for the extension field.
120
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
121
+	mr := MessageReflect(m)
122
+	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
123
+		return nil, errNotExtendable
124
+	}
125
+
126
+	// Retrieve the unknown fields for this extension field.
127
+	var bo protoreflect.RawFields
128
+	for bi := mr.GetUnknown(); len(bi) > 0; {
129
+		num, _, n := protowire.ConsumeField(bi)
130
+		if int32(num) == xt.Field {
131
+			bo = append(bo, bi[:n]...)
132
+		}
133
+		bi = bi[n:]
134
+	}
135
+
136
+	// For type incomplete descriptors, only retrieve the unknown fields.
137
+	if xt.ExtensionType == nil {
138
+		return []byte(bo), nil
139
+	}
140
+
141
+	// If the extension field only exists as unknown fields, unmarshal it.
142
+	// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
143
+	xtd := xt.TypeDescriptor()
144
+	if !isValidExtension(mr.Descriptor(), xtd) {
145
+		return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
146
+	}
147
+	if !mr.Has(xtd) && len(bo) > 0 {
148
+		m2 := mr.New()
149
+		if err := (proto.UnmarshalOptions{
150
+			Resolver: extensionResolver{xt},
151
+		}.Unmarshal(bo, m2.Interface())); err != nil {
152
+			return nil, err
153
+		}
154
+		if m2.Has(xtd) {
155
+			mr.Set(xtd, m2.Get(xtd))
156
+			clearUnknown(mr, fieldNum(xt.Field))
157
+		}
158
+	}
159
+
160
+	// Check whether the message has the extension field set or a default.
161
+	var pv protoreflect.Value
162
+	switch {
163
+	case mr.Has(xtd):
164
+		pv = mr.Get(xtd)
165
+	case xtd.HasDefault():
166
+		pv = xtd.Default()
167
+	default:
168
+		return nil, ErrMissingExtension
169
+	}
170
+
171
+	v := xt.InterfaceOf(pv)
172
+	rv := reflect.ValueOf(v)
173
+	if isScalarKind(rv.Kind()) {
174
+		rv2 := reflect.New(rv.Type())
175
+		rv2.Elem().Set(rv)
176
+		v = rv2.Interface()
177
+	}
178
+	return v, nil
179
+}
180
+
181
+// extensionResolver is a custom extension resolver that stores a single
182
+// extension type that takes precedence over the global registry.
183
+type extensionResolver struct{ xt protoreflect.ExtensionType }
184
+
185
+func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
186
+	if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
187
+		return r.xt, nil
188
+	}
189
+	return protoregistry.GlobalTypes.FindExtensionByName(field)
190
+}
191
+
192
+func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
193
+	if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
194
+		return r.xt, nil
195
+	}
196
+	return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
197
+}
198
+
199
+// GetExtensions returns a list of the extensions values present in m,
200
+// corresponding with the provided list of extension descriptors, xts.
201
+// If an extension is missing in m, the corresponding value is nil.
202
+func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
203
+	mr := MessageReflect(m)
204
+	if mr == nil || !mr.IsValid() {
205
+		return nil, errNotExtendable
206
+	}
207
+
208
+	vs := make([]interface{}, len(xts))
209
+	for i, xt := range xts {
210
+		v, err := GetExtension(m, xt)
211
+		if err != nil {
212
+			if err == ErrMissingExtension {
213
+				continue
214
+			}
215
+			return vs, err
216
+		}
217
+		vs[i] = v
218
+	}
219
+	return vs, nil
220
+}
221
+
222
+// SetExtension sets an extension field in m to the provided value.
223
+func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
224
+	mr := MessageReflect(m)
225
+	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
226
+		return errNotExtendable
227
+	}
228
+
229
+	rv := reflect.ValueOf(v)
230
+	if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
231
+		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
232
+	}
233
+	if rv.Kind() == reflect.Ptr {
234
+		if rv.IsNil() {
235
+			return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
236
+		}
237
+		if isScalarKind(rv.Elem().Kind()) {
238
+			v = rv.Elem().Interface()
239
+		}
240
+	}
241
+
242
+	xtd := xt.TypeDescriptor()
243
+	if !isValidExtension(mr.Descriptor(), xtd) {
244
+		return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
245
+	}
246
+	mr.Set(xtd, xt.ValueOf(v))
247
+	clearUnknown(mr, fieldNum(xt.Field))
248
+	return nil
249
+}
250
+
251
+// SetRawExtension inserts b into the unknown fields of m.
252
+//
253
+// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
254
+func SetRawExtension(m Message, fnum int32, b []byte) {
255
+	mr := MessageReflect(m)
256
+	if mr == nil || !mr.IsValid() {
257
+		return
258
+	}
259
+
260
+	// Verify that the raw field is valid.
261
+	for b0 := b; len(b0) > 0; {
262
+		num, _, n := protowire.ConsumeField(b0)
263
+		if int32(num) != fnum {
264
+			panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
265
+		}
266
+		b0 = b0[n:]
267
+	}
268
+
269
+	ClearExtension(m, &ExtensionDesc{Field: fnum})
270
+	mr.SetUnknown(append(mr.GetUnknown(), b...))
271
+}
272
+
273
+// ExtensionDescs returns a list of extension descriptors found in m,
274
+// containing descriptors for both populated extension fields in m and
275
+// also unknown fields of m that are in the extension range.
276
+// For the later case, an type incomplete descriptor is provided where only
277
+// the ExtensionDesc.Field field is populated.
278
+// The order of the extension descriptors is undefined.
279
+func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
280
+	mr := MessageReflect(m)
281
+	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
282
+		return nil, errNotExtendable
283
+	}
284
+
285
+	// Collect a set of known extension descriptors.
286
+	extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
287
+	mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
288
+		if fd.IsExtension() {
289
+			xt := fd.(protoreflect.ExtensionTypeDescriptor)
290
+			if xd, ok := xt.Type().(*ExtensionDesc); ok {
291
+				extDescs[fd.Number()] = xd
292
+			}
293
+		}
294
+		return true
295
+	})
296
+
297
+	// Collect a set of unknown extension descriptors.
298
+	extRanges := mr.Descriptor().ExtensionRanges()
299
+	for b := mr.GetUnknown(); len(b) > 0; {
300
+		num, _, n := protowire.ConsumeField(b)
301
+		if extRanges.Has(num) && extDescs[num] == nil {
302
+			extDescs[num] = nil
303
+		}
304
+		b = b[n:]
305
+	}
306
+
307
+	// Transpose the set of descriptors into a list.
308
+	var xts []*ExtensionDesc
309
+	for num, xt := range extDescs {
310
+		if xt == nil {
311
+			xt = &ExtensionDesc{Field: int32(num)}
312
+		}
313
+		xts = append(xts, xt)
314
+	}
315
+	return xts, nil
316
+}
317
+
318
+// isValidExtension reports whether xtd is a valid extension descriptor for md.
319
+func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
320
+	return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
321
+}
322
+
323
+// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
324
+// This function exists for historical reasons since the representation of
325
+// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
326
+func isScalarKind(k reflect.Kind) bool {
327
+	switch k {
328
+	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
329
+		return true
330
+	default:
331
+		return false
332
+	}
333
+}
334
+
335
+// clearUnknown removes unknown fields from m where remover.Has reports true.
336
+func clearUnknown(m protoreflect.Message, remover interface {
337
+	Has(protoreflect.FieldNumber) bool
338
+}) {
339
+	var bo protoreflect.RawFields
340
+	for bi := m.GetUnknown(); len(bi) > 0; {
341
+		num, _, n := protowire.ConsumeField(bi)
342
+		if !remover.Has(num) {
343
+			bo = append(bo, bi[:n]...)
344
+		}
345
+		bi = bi[n:]
346
+	}
347
+	if bi := m.GetUnknown(); len(bi) != len(bo) {
348
+		m.SetUnknown(bo)
349
+	}
350
+}
351
+
352
+type fieldNum protoreflect.FieldNumber
353
+
354
+func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
355
+	return protoreflect.FieldNumber(n1) == n2
356
+}

+ 306
- 0
vendor/github.com/golang/protobuf/proto/properties.go View File

@@ -0,0 +1,306 @@
1
+// Copyright 2010 The Go Authors. All rights reserved.
2
+// Use of this source code is governed by a BSD-style
3
+// license that can be found in the LICENSE file.
4
+
5
+package proto
6
+
7
+import (
8
+	"fmt"
9
+	"reflect"
10
+	"strconv"
11
+	"strings"
12
+	"sync"
13
+
14
+	"google.golang.org/protobuf/reflect/protoreflect"
15
+	"google.golang.org/protobuf/runtime/protoimpl"
16
+)
17
+
18
+// StructProperties represents protocol buffer type information for a
19
+// generated protobuf message in the open-struct API.
20
+//
21
+// Deprecated: Do not use.
22
+type StructProperties struct {
23
+	// Prop are the properties for each field.
24
+	//
25
+	// Fields belonging to a oneof are stored in OneofTypes instead, with a
26
+	// single Properties representing the parent oneof held here.
27
+	//
28
+	// The order of Prop matches the order of fields in the Go struct.
29
+	// Struct fields that are not related to protobufs have a "XXX_" prefix
30
+	// in the Properties.Name and must be ignored by the user.
31
+	Prop []*Properties
32
+
33
+	// OneofTypes contains information about the oneof fields in this message.
34
+	// It is keyed by the protobuf field name.
35
+	OneofTypes map[string]*OneofProperties
36
+}
37
+
38
+// Properties represents the type information for a protobuf message field.
39
+//
40
+// Deprecated: Do not use.
41
+type Properties struct {
42
+	// Name is a placeholder name with little meaningful semantic value.
43
+	// If the name has an "XXX_" prefix, the entire Properties must be ignored.
44
+	Name string
45
+	// OrigName is the protobuf field name or oneof name.
46
+	OrigName string
47
+	// JSONName is the JSON name for the protobuf field.
48
+	JSONName string
49
+	// Enum is a placeholder name for enums.
50
+	// For historical reasons, this is neither the Go name for the enum,
51
+	// nor the protobuf name for the enum.
52
+	Enum string // Deprecated: Do not use.
53
+	// Weak contains the full name of the weakly referenced message.
54
+	Weak string
55
+	// Wire is a string representation of the wire type.
56
+	Wire string
57
+	// WireType is the protobuf wire type for the field.
58
+	WireType int
59
+	// Tag is the protobuf field number.
60
+	Tag int
61
+	// Required reports whether this is a required field.
62
+	Required bool
63
+	// Optional reports whether this is an optional field.
64
+	Optional bool
65
+	// Repeated reports whether this is a repeated field.
66
+	Repeated bool
67
+	// Packed reports whether this is a packed repeated field of scalars.
68
+	Packed bool
69
+	// Proto3 reports whether this field operates under the proto3 syntax.
70
+	Proto3 bool
71
+	// Oneof reports whether this field belongs within a oneof.
72
+	Oneof bool
73
+
74
+	// Default is the default value in string form.
75
+	Default string
76
+	// HasDefault reports whether the field has a default value.
77
+	HasDefault bool
78
+
79
+	// MapKeyProp is the properties for the key field for a map field.
80
+	MapKeyProp *Properties
81
+	// MapValProp is the properties for the value field for a map field.
82
+	MapValProp *Properties
83
+}
84
+
85
+// OneofProperties represents the type information for a protobuf oneof.
86
+//
87
+// Deprecated: Do not use.
88
+type OneofProperties struct {
89
+	// Type is a pointer to the generated wrapper type for the field value.
90
+	// This is nil for messages that are not in the open-struct API.
91
+	Type reflect.Type
92
+	// Field is the index into StructProperties.Prop for the containing oneof.
93
+	Field int
94
+	// Prop is the properties for the field.
95
+	Prop *Properties
96
+}
97
+
98
+// String formats the properties in the protobuf struct field tag style.
99
+func (p *Properties) String() string {
100
+	s := p.Wire
101
+	s += "," + strconv.Itoa(p.Tag)
102
+	if p.Required {
103
+		s += ",req"
104
+	}
105
+	if p.Optional {
106
+		s += ",opt"
107
+	}
108
+	if p.Repeated {
109
+		s += ",rep"
110
+	}
111
+	if p.Packed {
112
+		s += ",packed"
113
+	}
114
+	s += ",name=" + p.OrigName
115
+	if p.JSONName != "" {
116
+		s += ",json=" + p.JSONName
117
+	}
118
+	if len(p.Enum) > 0 {
119
+		s += ",enum=" + p.Enum
120
+	}
121
+	if len(p.Weak) > 0 {
122
+		s += ",weak=" + p.Weak
123
+	}
124
+	if p.Proto3 {
125
+		s += ",proto3"
126
+	}
127
+	if p.Oneof {
128
+		s += ",oneof"
129
+	}
130
+	if p.HasDefault {
131
+		s += ",def=" + p.Default
132
+	}
133
+	return s
134
+}
135
+
136
+// Parse populates p by parsing a string in the protobuf struct field tag style.
137
+func (p *Properties) Parse(tag string) {
138
+	// For example: "bytes,49,opt,name=foo,def=hello!"
139
+	for len(tag) > 0 {
140
+		i := strings.IndexByte(tag, ',')
141
+		if i < 0 {
142
+			i = len(tag)
143
+		}
144
+		switch s := tag[:i]; {
145
+		case strings.HasPrefix(s, "name="):
146
+			p.OrigName = s[len("name="):]
147
+		case strings.HasPrefix(s, "json="):
148
+			p.JSONName = s[len("json="):]
149
+		case strings.HasPrefix(s, "enum="):
150
+			p.Enum = s[len("enum="):]
151
+		case strings.HasPrefix(s, "weak="):
152
+			p.Weak = s[len("weak="):]
153
+		case strings.Trim(s, "0123456789") == "":
154
+			n, _ := strconv.ParseUint(s, 10, 32)
155
+			p.Tag = int(n)
156
+		case s == "opt":
157
+			p.Optional = true
158
+		case s == "req":
159
+			p.Required = true
160
+		case s == "rep":
161
+			p.Repeated = true
162
+		case s == "varint" || s == "zigzag32" || s == "zigzag64":
163
+			p.Wire = s
164
+			p.WireType = WireVarint
165
+		case s == "fixed32":
166
+			p.Wire = s
167
+			p.WireType = WireFixed32
168
+		case s == "fixed64":
169
+			p.Wire = s
170
+			p.WireType = WireFixed64
171
+		case s == "bytes":
172
+			p.Wire = s
173
+			p.WireType = WireBytes
174
+		case s == "group":
175
+			p.Wire = s
176
+			p.WireType = WireStartGroup
177
+		case s == "packed":
178
+			p.Packed = true
179
+		case s == "proto3":
180
+			p.Proto3 = true
181
+		case s == "oneof":
182
+			p.Oneof = true
183
+		case strings.HasPrefix(s, "def="):
184
+			// The default tag is special in that everything afterwards is the
185
+			// default regardless of the presence of commas.
186
+			p.HasDefault = true
187
+			p.Default, i = tag[len("def="):], len(tag)
188
+		}
189
+		tag = strings.TrimPrefix(tag[i:], ",")
190
+	}
191
+}
192
+
193
+// Init populates the properties from a protocol buffer struct tag.
194
+//
195
+// Deprecated: Do not use.
196
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
197
+	p.Name = name
198
+	p.OrigName = name
199
+	if tag == "" {
200
+		return
201
+	}
202
+	p.Parse(tag)
203
+
204
+	if typ != nil && typ.Kind() == reflect.Map {
205
+		p.MapKeyProp = new(Properties)
206
+		p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
207
+		p.MapValProp = new(Properties)
208
+		p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
209
+	}
210
+}
211
+
212
+var propertiesCache sync.Map // map[reflect.Type]*StructProperties
213
+
214
+// GetProperties returns the list of properties for the type represented by t,
215
+// which must be a generated protocol buffer message in the open-struct API,
216
+// where protobuf message fields are represented by exported Go struct fields.
217
+//
218
+// Deprecated: Use protobuf reflection instead.
219
+func GetProperties(t reflect.Type) *StructProperties {
220
+	if p, ok := propertiesCache.Load(t); ok {
221
+		return p.(*StructProperties)
222
+	}
223
+	p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
224
+	return p.(*StructProperties)
225
+}
226
+
227
+func newProperties(t reflect.Type) *StructProperties {
228
+	if t.Kind() != reflect.Struct {
229
+		panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
230
+	}
231
+
232
+	var hasOneof bool
233
+	prop := new(StructProperties)
234
+
235
+	// Construct a list of properties for each field in the struct.
236
+	for i := 0; i < t.NumField(); i++ {
237
+		p := new(Properties)
238
+		f := t.Field(i)
239
+		tagField := f.Tag.Get("protobuf")
240
+		p.Init(f.Type, f.Name, tagField, &f)
241
+
242
+		tagOneof := f.Tag.Get("protobuf_oneof")
243
+		if tagOneof != "" {
244
+			hasOneof = true
245
+			p.OrigName = tagOneof
246
+		}
247
+
248
+		// Rename unrelated struct fields with the "XXX_" prefix since so much
249
+		// user code simply checks for this to exclude special fields.
250
+		if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
251
+			p.Name = "XXX_" + p.Name
252
+			p.OrigName = "XXX_" + p.OrigName
253
+		} else if p.Weak != "" {
254
+			p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
255
+		}
256
+
257
+		prop.Prop = append(prop.Prop, p)
258
+	}
259
+
260
+	// Construct a mapping of oneof field names to properties.
261
+	if hasOneof {
262
+		var oneofWrappers []interface{}
263
+		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
264
+			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
265
+		}
266
+		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
267
+			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
268
+		}
269
+		if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
270
+			if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
271
+				oneofWrappers = m.ProtoMessageInfo().OneofWrappers
272
+			}
273
+		}
274
+
275
+		prop.OneofTypes = make(map[string]*OneofProperties)
276
+		for _, wrapper := range oneofWrappers {
277
+			p := &OneofProperties{
278
+				Type: reflect.ValueOf(wrapper).Type(), // *T
279
+				Prop: new(Properties),
280
+			}
281
+			f := p.Type.Elem().Field(0)
282
+			p.Prop.Name = f.Name
283
+			p.Prop.Parse(f.Tag.Get("protobuf"))
284
+
285
+			// Determine the struct field that contains this oneof.
286
+			// Each wrapper is assignable to exactly one parent field.
287
+			var foundOneof bool
288
+			for i := 0; i < t.NumField() && !foundOneof; i++ {
289
+				if p.Type.AssignableTo(t.Field(i).Type) {
290
+					p.Field = i
291
+					foundOneof = true
292
+				}
293
+			}
294
+			if !foundOneof {
295
+				panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
296
+			}
297
+			prop.OneofTypes[p.Prop.OrigName] = p
298
+		}
299
+	}
300
+
301
+	return prop
302
+}
303
+
304
+func (sp *StructProperties) Len() int           { return len(sp.Prop) }
305
+func (sp *StructProperties) Less(i, j int) bool { return false }
306
+func (sp *StructProperties) Swap(i, j int)      { return }

+ 167
- 0
vendor/github.com/golang/protobuf/proto/proto.go View File

@@ -0,0 +1,167 @@
1
+// Copyright 2019 The Go Authors. All rights reserved.
2
+// Use of this source code is governed by a BSD-style
3
+// license that can be found in the LICENSE file.
4
+
5
+// Package proto provides functionality for handling protocol buffer messages.
6
+// In particular, it provides marshaling and unmarshaling between a protobuf
7
+// message and the binary wire format.
8
+//
9
+// See https://developers.google.com/protocol-buffers/docs/gotutorial for
10
+// more information.
11
+//
12
+// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
13
+package proto
14
+
15
+import (
16
+	protoV2 "google.golang.org/protobuf/proto"
17
+	"google.golang.org/protobuf/reflect/protoreflect"
18
+	"google.golang.org/protobuf/runtime/protoiface"
19
+	"google.golang.org/protobuf/runtime/protoimpl"
20
+)
21
+
22
+const (
23
+	ProtoPackageIsVersion1 = true
24
+	ProtoPackageIsVersion2 = true
25
+	ProtoPackageIsVersion3 = true
26
+	ProtoPackageIsVersion4 = true
27
+)
28
+
29
+// GeneratedEnum is any enum type generated by protoc-gen-go
30
+// which is a named int32 kind.
31
+// This type exists for documentation purposes.
32
+type GeneratedEnum interface{}
33
+
34
+// GeneratedMessage is any message type generated by protoc-gen-go
35
+// which is a pointer to a named struct kind.
36
+// This type exists for documentation purposes.
37
+type GeneratedMessage interface{}
38
+
39
+// Message is a protocol buffer message.
40
+//
41
+// This is the v1 version of the message interface and is marginally better
42
+than an empty interface as it lacks any method to programmatically interact
43
+// with the contents of the message.
44
+//
45
+// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
46
+// exposes protobuf reflection as a first-class feature of the interface.
47
+//
48
+// To convert a v1 message to a v2 message, use the MessageV2 function.
49
+// To convert a v2 message to a v1 message, use the MessageV1 function.
50
+type Message = protoiface.MessageV1
51
+
52
+// MessageV1 converts either a v1 or v2 message to a v1 message.
53
+// It returns nil if m is nil.
54
+func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
55
+	return protoimpl.X.ProtoMessageV1Of(m)
56
+}
57
+
58
+// MessageV2 converts either a v1 or v2 message to a v2 message.
59
+// It returns nil if m is nil.
60
+func MessageV2(m GeneratedMessage) protoV2.Message {
61
+	return protoimpl.X.ProtoMessageV2Of(m)
62
+}
63
+
64
+// MessageReflect returns a reflective view for a message.
65
+// It returns nil if m is nil.
66
+func MessageReflect(m Message) protoreflect.Message {
67
+	return protoimpl.X.MessageOf(m)
68
+}
69
+
70
+// Marshaler is implemented by messages that can marshal themselves.
71
+// This interface is used by the following functions: Size, Marshal,
72
+// Buffer.Marshal, and Buffer.EncodeMessage.
73
+//
74
+// Deprecated: Do not implement.
75
+type Marshaler interface {
76
+	// Marshal formats the encoded bytes of the message.
77
+	// It should be deterministic and emit valid protobuf wire data.
78
+	// The caller takes ownership of the returned buffer.
79
+	Marshal() ([]byte, error)
80
+}
81
+
82
+// Unmarshaler is implemented by messages that can unmarshal themselves.
83
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
84
+// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
85
+//
86
+// Deprecated: Do not implement.
87
+type Unmarshaler interface {
88
+	// Unmarshal parses the encoded bytes of the protobuf wire input.
89
+	// The provided buffer is only valid for the duration of the method call.
90
+	// It should not reset the receiver message.
91
+	Unmarshal([]byte) error
92
+}
93
+
94
+// Merger is implemented by messages that can merge themselves.
95
+// This interface is used by the following functions: Clone and Merge.
96
+//
97
+// Deprecated: Do not implement.
98
+type Merger interface {
99
+	// Merge merges the contents of src into the receiver message.
100
+	// It clones all data structures in src such that it aliases no mutable
101
+	// memory referenced by src.
102
+	Merge(src Message)
103
+}
104
+
105
+// RequiredNotSetError is an error type returned when
106
+// marshaling or unmarshaling a message with missing required fields.
107
+type RequiredNotSetError struct {
108
+	err error
109
+}
110
+
111
+func (e *RequiredNotSetError) Error() string {
112
+	if e.err != nil {
113
+		return e.err.Error()
114
+	}
115
+	return "proto: required field not set"
116
+}
117
+func (e *RequiredNotSetError) RequiredNotSet() bool {
118
+	return true
119
+}
120
+
121
+func checkRequiredNotSet(m protoV2.Message) error {
122
+	if err := protoV2.CheckInitialized(m); err != nil {
123
+		return &RequiredNotSetError{err: err}
124
+	}
125
+	return nil
126
+}
127
+
128
+// Clone returns a deep copy of src.
129
+func Clone(src Message) Message {
130
+	return MessageV1(protoV2.Clone(MessageV2(src)))
131
+}
132
+
133
+// Merge merges src into dst, which must be messages of the same type.
134
+//
135
+// Populated scalar fields in src are copied to dst, while populated
136
+// singular messages in src are merged into dst by recursively calling Merge.
137
+// The elements of every list field in src are appended to the corresponding
138
+// list fields in dst. The entries of every map field in src are copied into
139
+// the corresponding map field in dst, possibly replacing existing entries.
140
+// The unknown fields of src are appended to the unknown fields of dst.
141
+func Merge(dst, src Message) {
142
+	protoV2.Merge(MessageV2(dst), MessageV2(src))
143
+}
144
+
145
+// Equal reports whether two messages are equal.
146
+// If two messages marshal to the same bytes under deterministic serialization,
147
+// then Equal is guaranteed to report true.
148
+//
149
+// Two messages are equal if they are the same protobuf message type,
150
+// have the same set of populated known and extension field values,
151
+// and the same set of unknown fields values.
152
+//
153
+// Scalar values are compared with the equivalent of the == operator in Go,
154
+// except bytes values which are compared using bytes.Equal and
155
+// floating point values which specially treat NaNs as equal.
156
+// Message values are compared by recursively calling Equal.
157
+// Lists are equal if each element value is also equal.
158
+// Maps are equal if they have the same set of keys, where the pair of values
159
+// for each key is also equal.
160
+func Equal(x, y Message) bool {
161
+	return protoV2.Equal(MessageV2(x), MessageV2(y))
162
+}
163
+
164
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
165
+	ms, ok := md.(interface{ IsMessageSet() bool })
166
+	return ok && ms.IsMessageSet()
167
+}

+ 317
- 0
vendor/github.com/golang/protobuf/proto/registry.go View File

@@ -0,0 +1,317 @@
1
+// Copyright 2019 The Go Authors. All rights reserved.
2
+// Use of this source code is governed by a BSD-style
3
+// license that can be found in the LICENSE file.
4
+
5
+package proto
6
+
7
+import (
8
+	"bytes"
9
+	"compress/gzip"
10
+	"fmt"
11
+	"io/ioutil"
12
+	"reflect"
13
+	"strings"
14
+	"sync"
15
+
16
+	"google.golang.org/protobuf/reflect/protodesc"
17
+	"google.golang.org/protobuf/reflect/protoreflect"
18
+	"google.golang.org/protobuf/reflect/protoregistry"
19
+	"google.golang.org/protobuf/runtime/protoimpl"
20
+)
21
+
22
+// filePath is the path to the proto source file.
23
+type filePath = string // e.g., "google/protobuf/descriptor.proto"
24
+
25
+// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
26
+type fileDescGZIP = []byte
27
+
28
+var fileCache sync.Map // map[filePath]fileDescGZIP
29
+
30
+// RegisterFile is called from generated code to register the compressed
31
+// FileDescriptorProto with the file path for a proto source file.
32
+//
33
+// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
34
+func RegisterFile(s filePath, d fileDescGZIP) {
35
+	// Decompress the descriptor.
36
+	zr, err := gzip.NewReader(bytes.NewReader(d))
37
+	if err != nil {
38
+		panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
39
+	}
40
+	b, err := ioutil.ReadAll(zr)
41
+	if err != nil {
42
+		panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
43
+	}
44
+
45
+	// Construct a protoreflect.FileDescriptor from the raw descriptor.
46
+	// Note that DescBuilder.Build automatically registers the constructed
47
+	// file descriptor with the v2 registry.
48
+	protoimpl.DescBuilder{RawDescriptor: b}.Build()
49
+
50
+	// Locally cache the raw descriptor form for the file.
51
+	fileCache.Store(s, d)
52
+}
53
+
54
+// FileDescriptor returns the compressed FileDescriptorProto given the file path
55
+// for a proto source file. It returns nil if not found.
56
+//
57
+// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
58
+func FileDescriptor(s filePath) fileDescGZIP {
59
+	if v, ok := fileCache.Load(s); ok {
60
+		return v.(fileDescGZIP)
61
+	}
62
+
63
+	// Find the descriptor in the v2 registry.
64
+	var b []byte
65
+	if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
66
+		b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
67
+	}
68
+
69
+	// Locally cache the raw descriptor form for the file.
70
+	if len(b) > 0 {
71
+		v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
72
+		return v.(fileDescGZIP)
73
+	}
74
+	return nil
75
+}
76
+
77
+// enumName is the name of an enum. For historical reasons, the enum name is
78
+// neither the full Go name nor the full protobuf name of the enum.
79
+// The name is the dot-separated combination of just the proto package that the
80
+// enum is declared within followed by the Go type name of the generated enum.
81
+type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
82
+
83
+// enumsByName maps enum values by name to their numeric counterpart.
84
+type enumsByName = map[string]int32
85
+
86
+// enumsByNumber maps enum values by number to their name counterpart.
87
+type enumsByNumber = map[int32]string
88
+
89
+var enumCache sync.Map     // map[enumName]enumsByName
90
+var numFilesCache sync.Map // map[protoreflect.FullName]int
91
+
92
+// RegisterEnum is called from the generated code to register the mapping of
93
+// enum value names to enum numbers for the enum identified by s.
94
+//
95
+// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
96
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
97
+	if _, ok := enumCache.Load(s); ok {
98
+		panic("proto: duplicate enum registered: " + s)
99
+	}
100
+	enumCache.Store(s, m)
101
+
102
+	// This does not forward registration to the v2 registry since this API
103
+	// lacks sufficient information to construct a complete v2 enum descriptor.
104
+}
105
+
106
+// EnumValueMap returns the mapping from enum value names to enum numbers for
107
+// the enum of the given name. It returns nil if not found.
108
+//
109
+// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
110
+func EnumValueMap(s enumName) enumsByName {
111
+	if v, ok := enumCache.Load(s); ok {
112
+		return v.(enumsByName)
113
+	}
114
+
115
+	// Check whether the cache is stale. If the number of files in the current
116
+	// package differs, then it means that some enums may have been recently
117
+	// registered upstream that we do not know about.
118
+	var protoPkg protoreflect.FullName
119
+	if i := strings.LastIndexByte(s, '.'); i >= 0 {
120
+		protoPkg = protoreflect.FullName(s[:i])
121
+	}
122
+	v, _ := numFilesCache.Load(protoPkg)
123
+	numFiles, _ := v.(int)
124
+	if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
125
+		return nil // cache is up-to-date; was not found earlier
126
+	}
127
+
128
+	// Update the enum cache for all enums declared in the given proto package.
129
+	numFiles = 0
130
+	protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
131
+		walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
132
+			name := protoimpl.X.LegacyEnumName(ed)
133
+			if _, ok := enumCache.Load(name); !ok {
134
+				m := make(enumsByName)
135
+				evs := ed.Values()
136
+				for i := evs.Len() - 1; i >= 0; i-- {
137
+					ev := evs.Get(i)
138
+					m[string(ev.Name())] = int32(ev.Number())
139
+				}
140
+				enumCache.LoadOrStore(name, m)
141
+			}
142
+		})
143
+		numFiles++
144
+		return true
145
+	})
146
+	numFilesCache.Store(protoPkg, numFiles)
147
+
148
+	// Check cache again for enum map.
149
+	if v, ok := enumCache.Load(s); ok {
150
+		return v.(enumsByName)
151
+	}
152
+	return nil
153
+}
154
+
155
+// walkEnums recursively walks all enums declared in d.
156
+func walkEnums(d interface {
157
+	Enums() protoreflect.EnumDescriptors
158
+	Messages() protoreflect.MessageDescriptors
159
+}, f func(protoreflect.EnumDescriptor)) {
160
+	eds := d.Enums()
161
+	for i := eds.Len() - 1; i >= 0; i-- {
162
+		f(eds.Get(i))
163
+	}
164
+	mds := d.Messages()
165
+	for i := mds.Len() - 1; i >= 0; i-- {
166
+		walkEnums(mds.Get(i), f)
167
+	}
168
+}
169
+
170
+// messageName is the full name of protobuf message.
171
+type messageName = string
172
+
173
+var messageTypeCache sync.Map // map[messageName]reflect.Type
174
+
175
+// RegisterType is called from generated code to register the message Go type
176
+// for a message of the given name.
177
+//
178
+// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
179
+func RegisterType(m Message, s messageName) {
180
+	mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
181
+	if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
182
+		panic(err)
183
+	}
184
+	messageTypeCache.Store(s, reflect.TypeOf(m))
185
+}
186
+
187
+// RegisterMapType is called from generated code to register the Go map type
188
+// for a protobuf message representing a map entry.
189
+//
190
+// Deprecated: Do not use.
191
+func RegisterMapType(m interface{}, s messageName) {
192
+	t := reflect.TypeOf(m)
193
+	if t.Kind() != reflect.Map {
194
+		panic(fmt.Sprintf("invalid map kind: %v", t))
195
+	}
196
+	if _, ok := messageTypeCache.Load(s); ok {
197
+		panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
198
+	}
199
+	messageTypeCache.Store(s, t)
200
+}
201
+
202
+// MessageType returns the message type for a named message.
203
+// It returns nil if not found.
204
+//
205
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
206
+func MessageType(s messageName) reflect.Type {
207
+	if v, ok := messageTypeCache.Load(s); ok {
208
+		return v.(reflect.Type)
209
+	}
210
+
211
+	// Derive the message type from the v2 registry.
212
+	var t reflect.Type
213
+	if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
214
+		t = messageGoType(mt)
215
+	}
216
+
217
+	// If we could not get a concrete type, it is possible that it is a
218
+	// pseudo-message for a map entry.
219
+	if t == nil {
220
+		d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
221
+		if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
222
+			kt := goTypeForField(md.Fields().ByNumber(1))
223
+			vt := goTypeForField(md.Fields().ByNumber(2))
224
+			t = reflect.MapOf(kt, vt)
225
+		}
226
+	}
227
+
228
+	// Locally cache the message type for the given name.
229
+	if t != nil {
230
+		v, _ := messageTypeCache.LoadOrStore(s, t)
231
+		return v.(reflect.Type)
232
+	}
233
+	return nil
234
+}
235
+
236
+func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
237
+	switch k := fd.Kind(); k {
238
+	case protoreflect.EnumKind:
239
+		if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
240
+			return enumGoType(et)
241
+		}
242
+		return reflect.TypeOf(protoreflect.EnumNumber(0))
243
+	case protoreflect.MessageKind, protoreflect.GroupKind:
244
+		if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
245
+			return messageGoType(mt)
246
+		}
247
+		return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
248
+	default:
249
+		return reflect.TypeOf(fd.Default().Interface())
250
+	}
251
+}
252
+
253
+func enumGoType(et protoreflect.EnumType) reflect.Type {
254
+	return reflect.TypeOf(et.New(0))
255
+}
256
+
257
+func messageGoType(mt protoreflect.MessageType) reflect.Type {
258
+	return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
259
+}
260
+
261
+// MessageName returns the full protobuf name for the given message type.
262
+//
263
+// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
264
+func MessageName(m Message) messageName {
265
+	if m == nil {
266
+		return ""
267
+	}
268
+	if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
269
+		return m.XXX_MessageName()
270
+	}
271
+	return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
272
+}
273
+
274
+// RegisterExtension is called from the generated code to register
275
+// the extension descriptor.
276
+//
277
+// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
278
+func RegisterExtension(d *ExtensionDesc) {
279
+	if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
280
+		panic(err)
281
+	}
282
+}
283
+
284
+type extensionsByNumber = map[int32]*ExtensionDesc
285
+
286
+var extensionCache sync.Map // map[messageName]extensionsByNumber
287
+
288
+// RegisteredExtensions returns a map of the registered extensions for the
289
+// provided protobuf message, indexed by the extension field number.
290
+//
291
+// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
292
+func RegisteredExtensions(m Message) extensionsByNumber {
293
+	// Check whether the cache is stale. If the number of extensions for
294
+	// the given message differs, then it means that some extensions were
295
+	// recently registered upstream that we do not know about.
296
+	s := MessageName(m)
297
+	v, _ := extensionCache.Load(s)
298
+	xs, _ := v.(extensionsByNumber)
299
+	if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
300
+		return xs // cache is up-to-date
301
+	}
302
+
303
+	// Cache is stale, re-compute the extensions map.
304
+	xs = make(extensionsByNumber)
305
+	protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
306
+		if xd, ok := xt.(*ExtensionDesc); ok {
307
+			xs[int32(xt.TypeDescriptor().Number())] = xd
308
+		} else {
309
+			// TODO: This implies that the protoreflect.ExtensionType is a
310
+			// custom type not generated by protoc-gen-go. We could try and
311
+			// convert the type to an ExtensionDesc.
312
+		}
313
+		return true
314
+	})
315
+	extensionCache.Store(s, xs)
316
+	return xs
317
+}

+ 801
- 0
vendor/github.com/golang/protobuf/proto/text_decode.go View File

@@ -0,0 +1,801 @@
1
+// Copyright 2010 The Go Authors. All rights reserved.
2
+// Use of this source code is governed by a BSD-style
3
+// license that can be found in the LICENSE file.
4
+
5
+package proto
6
+
7
+import (
8
+	"encoding"
9
+	"errors"
10
+	"fmt"
11
+	"reflect"
12
+	"strconv"
13
+	"strings"
14
+	"unicode/utf8"
15
+
16
+	"google.golang.org/protobuf/encoding/prototext"
17
+	protoV2 "google.golang.org/protobuf/proto"
18
+	"google.golang.org/protobuf/reflect/protoreflect"
19
+	"google.golang.org/protobuf/reflect/protoregistry"
20
+)
21
+
22
const wrapTextUnmarshalV2 = false

// ParseError is returned by UnmarshalText.
type ParseError struct {
	Message string

	// Deprecated: Do not use.
	Line, Offset int
}

// Error renders the parse failure. When the v2 unmarshaler is in use it
// already embeds position information in Message; otherwise the legacy
// line (and, on line 1, byte-offset) prefix is added.
func (e *ParseError) Error() string {
	switch {
	case wrapTextUnmarshalV2:
		return e.Message
	case e.Line == 1:
		// The byte offset is only meaningful (and only reported) on line 1.
		return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
	default:
		return fmt.Sprintf("line %d: %v", e.Line, e.Message)
	}
}
41
+
42
// UnmarshalText parses a proto text formatted string into m.
//
// If m implements encoding.TextUnmarshaler, parsing is delegated to it
// entirely; otherwise m is reset and parsed by the legacy text parser
// (wrapTextUnmarshalV2 is a compile-time false, so the prototext branch
// is dead code kept for reference).
func UnmarshalText(s string, m Message) error {
	if u, ok := m.(encoding.TextUnmarshaler); ok {
		return u.UnmarshalText([]byte(s))
	}

	m.Reset()
	mi := MessageV2(m)

	if wrapTextUnmarshalV2 {
		err := prototext.UnmarshalOptions{
			AllowPartial: true,
		}.Unmarshal([]byte(s), mi)
		if err != nil {
			return &ParseError{Message: err.Error()}
		}
		return checkRequiredNotSet(mi)
	} else {
		// The empty terminator means "parse until end of input".
		if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
			return err
		}
		return checkRequiredNotSet(mi)
	}
}
66
+
67
// textParser is a hand-rolled tokenizer/parser for the legacy proto text
// format. It consumes its input string destructively (p.s shrinks as
// tokens are read) and supports a single token of pushback via back().
type textParser struct {
	s            string // remaining input
	done         bool   // whether the parsing is finished (success or error)
	backed       bool   // whether back() was called
	offset, line int
	cur          token
}

// token is the parser's current lexical unit.
type token struct {
	value    string
	err      *ParseError
	line     int    // line number
	offset   int    // byte number from start of input, not start of line
	unquoted string // the unquoted version of value, if it was a quoted string
}

// newTextParser returns a parser positioned at the start of s,
// with line counting starting at 1.
func newTextParser(s string) *textParser {
	p := new(textParser)
	p.s = s
	p.line = 1
	p.cur.line = 1
	return p
}
90
+
91
// unmarshalMessage parses a sequence of "name: value" entries into m until
// the given terminator token ('>', '}', or "" for end of input) is reached.
func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
	md := m.Descriptor()
	fds := md.Fields()

	// A struct is a sequence of "name: value", terminated by one of
	// '>' or '}', or the end of the input.  A name may also be
	// "[extension]" or "[type/url]".
	//
	// The whole struct can also be an expanded Any message, like:
	// [type/url] < ... struct contents ... >
	seen := make(map[protoreflect.FieldNumber]bool)
	for {
		tok := p.next()
		if tok.err != nil {
			return tok.err
		}
		if tok.value == terminator {
			break
		}
		if tok.value == "[" {
			// Extension field or expanded Any message.
			if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
				return err
			}
			continue
		}

		// This is a normal, non-extension field.
		name := protoreflect.Name(tok.value)
		fd := fds.ByName(name)
		switch {
		case fd == nil:
			// Group fields appear in text under their message type name;
			// the descriptor registers them under the lowercased name.
			gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
			if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
				fd = gd
			}
		case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
			fd = nil
		case fd.IsWeak() && fd.Message().IsPlaceholder():
			fd = nil
		}
		if fd == nil {
			// Prefer the Go type name in the error when available.
			typeName := string(md.FullName())
			if m, ok := m.Interface().(Message); ok {
				t := reflect.TypeOf(m)
				if t.Kind() == reflect.Ptr {
					typeName = t.Elem().String()
				}
			}
			return p.errorf("unknown field name %q in %v", name, typeName)
		}
		if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
			return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
		}
		if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
			return p.errorf("non-repeated field %q was repeated", fd.Name())
		}
		seen[fd.Number()] = true

		// Consume any colon.
		if err := p.checkForColon(fd); err != nil {
			return err
		}

		// Parse into the field. Composite fields (list/map/message) must be
		// made mutable so the parsed content lands in storage owned by m.
		v := m.Get(fd)
		if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
			v = m.Mutable(fd)
		}
		if v, err = p.unmarshalValue(v, fd); err != nil {
			return err
		}
		m.Set(fd, v)

		if err := p.consumeOptionalSeparator(); err != nil {
			return err
		}
	}
	return nil
}
170
+
171
// unmarshalExtensionOrAny handles a "[...]" entry: either an extension field
// name or, if the bracketed name contains a '/', an expanded
// google.protobuf.Any type URL whose message body follows.
// The opening '[' has already been consumed by the caller.
func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
	name, err := p.consumeExtensionOrAnyName()
	if err != nil {
		return err
	}

	// If it contains a slash, it's an Any type URL.
	if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
		tok := p.next()
		if tok.err != nil {
			return tok.err
		}
		// consume an optional colon
		if tok.value == ":" {
			tok = p.next()
			if tok.err != nil {
				return tok.err
			}
		}

		var terminator string
		switch tok.value {
		case "<":
			terminator = ">"
		case "{":
			terminator = "}"
		default:
			return p.errorf("expected '{' or '<', found %q", tok.value)
		}

		mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
		if err != nil {
			return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
		}
		// Parse the embedded message, then re-encode it into the Any's
		// value bytes.
		m2 := mt.New()
		if err := p.unmarshalMessage(m2, terminator); err != nil {
			return err
		}
		b, err := protoV2.Marshal(m2.Interface())
		if err != nil {
			return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
		}

		urlFD := m.Descriptor().Fields().ByName("type_url")
		valFD := m.Descriptor().Fields().ByName("value")
		if seen[urlFD.Number()] {
			return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
		}
		if seen[valFD.Number()] {
			return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
		}
		m.Set(urlFD, protoreflect.ValueOfString(name))
		m.Set(valFD, protoreflect.ValueOfBytes(b))
		seen[urlFD.Number()] = true
		seen[valFD.Number()] = true
		return nil
	}

	// No slash: treat the bracketed name as a fully-qualified extension name.
	xname := protoreflect.FullName(name)
	xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
	if xt == nil && isMessageSet(m.Descriptor()) {
		// MessageSet extensions are registered under a well-known suffix.
		xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
	}
	if xt == nil {
		return p.errorf("unrecognized extension %q", name)
	}
	fd := xt.TypeDescriptor()
	if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
		return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
	}

	if err := p.checkForColon(fd); err != nil {
		return err
	}

	v := m.Get(fd)
	if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
		v = m.Mutable(fd)
	}
	v, err = p.unmarshalValue(v, fd)
	if err != nil {
		return err
	}
	m.Set(fd, v)
	return p.consumeOptionalSeparator()
}
257
+
258
// unmarshalValue parses the value for field fd into v, dispatching on the
// field's cardinality: repeated fields accept both "[a, b, c]" list notation
// and single-element form, map fields parse "< key: K value: V >" entries,
// and everything else defers to unmarshalSingularValue.
func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	tok := p.next()
	if tok.err != nil {
		return v, tok.err
	}
	if tok.value == "" {
		return v, p.errorf("unexpected EOF")
	}

	switch {
	case fd.IsList():
		lv := v.List()
		var err error
		if tok.value == "[" {
			// Repeated field with list notation, like [1,2,3].
			for {
				vv := lv.NewElement()
				vv, err = p.unmarshalSingularValue(vv, fd)
				if err != nil {
					return v, err
				}
				lv.Append(vv)

				tok := p.next()
				if tok.err != nil {
					return v, tok.err
				}
				if tok.value == "]" {
					break
				}
				if tok.value != "," {
					return v, p.errorf("Expected ']' or ',' found %q", tok.value)
				}
			}
			return v, nil
		}

		// One value of the repeated field.
		p.back()
		vv := lv.NewElement()
		vv, err = p.unmarshalSingularValue(vv, fd)
		if err != nil {
			return v, err
		}
		lv.Append(vv)
		return v, nil
	case fd.IsMap():
		// The map entry should be this sequence of tokens:
		//	< key : KEY value : VALUE >
		// However, implementations may omit key or value, and technically
		// we should support them in any order.
		var terminator string
		switch tok.value {
		case "<":
			terminator = ">"
		case "{":
			terminator = "}"
		default:
			return v, p.errorf("expected '{' or '<', found %q", tok.value)
		}

		keyFD := fd.MapKey()
		valFD := fd.MapValue()

		// kv/vv start at their defaults so an omitted key or value still
		// produces a valid entry.
		mv := v.Map()
		kv := keyFD.Default()
		vv := mv.NewValue()
		for {
			tok := p.next()
			if tok.err != nil {
				return v, tok.err
			}
			if tok.value == terminator {
				break
			}
			var err error
			switch tok.value {
			case "key":
				if err := p.consumeToken(":"); err != nil {
					return v, err
				}
				if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
					return v, err
				}
				if err := p.consumeOptionalSeparator(); err != nil {
					return v, err
				}
			case "value":
				// checkForColon: the colon is optional when the value is
				// itself a message.
				if err := p.checkForColon(valFD); err != nil {
					return v, err
				}
				if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
					return v, err
				}
				if err := p.consumeOptionalSeparator(); err != nil {
					return v, err
				}
			default:
				p.back()
				return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
			}
		}
		mv.Set(kv.MapKey(), vv)
		return v, nil
	default:
		// Singular field: push the token back and parse it as one value.
		p.back()
		return p.unmarshalSingularValue(v, fd)
	}
}
367
+
368
// unmarshalSingularValue parses one scalar, enum, or embedded-message value
// according to fd's kind. Falling off the end of any case (i.e. the token did
// not match the kind's accepted forms) produces the "invalid <kind>" error at
// the bottom.
func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	tok := p.next()
	if tok.err != nil {
		return v, tok.err
	}
	if tok.value == "" {
		return v, p.errorf("unexpected EOF")
	}

	switch fd.Kind() {
	case protoreflect.BoolKind:
		switch tok.value {
		case "true", "1", "t", "True":
			return protoreflect.ValueOfBool(true), nil
		case "false", "0", "f", "False":
			return protoreflect.ValueOfBool(false), nil
		}
	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
		// Base 0: accepts decimal, 0x hex, and 0 octal prefixes.
		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
			return protoreflect.ValueOfInt32(int32(x)), nil
		}

		// The C++ parser accepts large positive hex numbers that uses
		// two's complement arithmetic to represent negative numbers.
		// This feature is here for backwards compatibility with C++.
		if strings.HasPrefix(tok.value, "0x") {
			if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
				return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
			}
		}
	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
			return protoreflect.ValueOfInt64(int64(x)), nil
		}

		// The C++ parser accepts large positive hex numbers that uses
		// two's complement arithmetic to represent negative numbers.
		// This feature is here for backwards compatibility with C++.
		if strings.HasPrefix(tok.value, "0x") {
			if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
				return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
			}
		}
	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
			return protoreflect.ValueOfUint32(uint32(x)), nil
		}
	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
			return protoreflect.ValueOfUint64(uint64(x)), nil
		}
	case protoreflect.FloatKind:
		// Ignore 'f' for compatibility with output generated by C++,
		// but don't remove 'f' when the value is "-inf" or "inf".
		v := tok.value
		if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
			v = v[:len(v)-len("f")]
		}
		if x, err := strconv.ParseFloat(v, 32); err == nil {
			return protoreflect.ValueOfFloat32(float32(x)), nil
		}
	case protoreflect.DoubleKind:
		// Ignore 'f' for compatibility with output generated by C++,
		// but don't remove 'f' when the value is "-inf" or "inf".
		v := tok.value
		if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
			v = v[:len(v)-len("f")]
		}
		if x, err := strconv.ParseFloat(v, 64); err == nil {
			return protoreflect.ValueOfFloat64(float64(x)), nil
		}
	case protoreflect.StringKind:
		if isQuote(tok.value[0]) {
			return protoreflect.ValueOfString(tok.unquoted), nil
		}
	case protoreflect.BytesKind:
		if isQuote(tok.value[0]) {
			return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
		}
	case protoreflect.EnumKind:
		// Enums accept either a number or a value name.
		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
			return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
		}
		vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
		if vd != nil {
			return protoreflect.ValueOfEnum(vd.Number()), nil
		}
	case protoreflect.MessageKind, protoreflect.GroupKind:
		var terminator string
		switch tok.value {
		case "{":
			terminator = "}"
		case "<":
			terminator = ">"
		default:
			return v, p.errorf("expected '{' or '<', found %q", tok.value)
		}
		err := p.unmarshalMessage(v.Message(), terminator)
		return v, err
	default:
		panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
	}
	return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
}
472
+
473
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
// The colon is optional only for message-typed fields, whose values
// open directly with '{' or '<'.
func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value != ":" {
		if fd.Message() == nil {
			return p.errorf("expected ':', found %q", tok.value)
		}
		// Not a colon, but acceptable for a message field: push it back.
		p.back()
	}
	return nil
}
488
+
489
// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
// the following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtensionOrAnyName() (string, error) {
	tok := p.next()
	if tok.err != nil {
		return "", tok.err
	}

	// If extension name or type url is quoted, it's a single token.
	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
		if err != nil {
			return "", err
		}
		return name, p.consumeToken("]")
	}

	// Consume everything up to "]", concatenating tokens (e.g. the
	// dot-separated parts of a fully-qualified name and any '/' of a URL).
	var parts []string
	for tok.value != "]" {
		parts = append(parts, tok.value)
		tok = p.next()
		if tok.err != nil {
			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
		}
		if p.done && tok.value != "]" {
			return "", p.errorf("unclosed type_url or extension name")
		}
	}
	return strings.Join(parts, ""), nil
}
520
+
521
+// consumeOptionalSeparator consumes an optional semicolon or comma.
522
+// It is used in unmarshalMessage to provide backward compatibility.
523
+func (p *textParser) consumeOptionalSeparator() error {
524
+	tok := p.next()
525
+	if tok.err != nil {
526
+		return tok.err
527
+	}
528
+	if tok.value != ";" && tok.value != "," {
529
+		p.back()
530
+	}
531
+	return nil
532
+}
533
+
534
// errorf records a ParseError at the current token's position, marks the
// parser as done (so subsequent next() calls return the same token/error),
// and returns the error.
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
	p.cur.err = pe
	p.done = true
	return pe
}
540
+
541
// skipWhitespace advances past whitespace and '#' line comments, updating
// the line counter and byte offset. It sets p.done when the input is
// exhausted.
func (p *textParser) skipWhitespace() {
	i := 0
	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
		if p.s[i] == '#' {
			// comment; skip to end of line or input
			for i < len(p.s) && p.s[i] != '\n' {
				i++
			}
			if i == len(p.s) {
				break
			}
		}
		if p.s[i] == '\n' {
			p.line++
		}
		i++
	}
	p.offset += i
	p.s = p.s[i:len(p.s)]
	if len(p.s) == 0 {
		p.done = true
	}
}
564
+
565
// advance scans the next token into p.cur. Tokens are: a single punctuation
// symbol, a quoted string (value keeps the quotes; unquoted holds the
// decoded text), or a run of identifier/number characters. Lexing errors
// are recorded via p.errorf, which sets p.cur.err and p.done.
func (p *textParser) advance() {
	// Skip whitespace
	p.skipWhitespace()
	if p.done {
		return
	}

	// Start of non-whitespace
	p.cur.err = nil
	p.cur.offset, p.cur.line = p.offset, p.line
	p.cur.unquoted = ""
	switch p.s[0] {
	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
		// Single symbol
		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
	case '"', '\'':
		// Quoted string
		i := 1
		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
			if p.s[i] == '\\' && i+1 < len(p.s) {
				// skip escaped char
				i++
			}
			i++
		}
		if i >= len(p.s) || p.s[i] != p.s[0] {
			p.errorf("unmatched quote")
			return
		}
		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
		if err != nil {
			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
			return
		}
		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
		p.cur.unquoted = unq
	default:
		i := 0
		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
			i++
		}
		if i == 0 {
			p.errorf("unexpected byte %#x", p.s[0])
			return
		}
		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
	}
	p.offset += len(p.cur.value)
}
614
+
615
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }

// Advances the parser and returns the new current token.
// Adjacent quoted strings are concatenated into a single token
// (C-style string literal concatenation).
func (p *textParser) next() *token {
	if p.backed || p.done {
		// Re-deliver the current token after back(), or keep returning the
		// terminal token/error once done.
		p.backed = false
		return &p.cur
	}
	p.advance()
	if p.done {
		p.cur.value = ""
	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
		// Look for multiple quoted strings separated by whitespace,
		// and concatenate them.
		cat := p.cur
		for {
			p.skipWhitespace()
			if p.done || !isQuote(p.s[0]) {
				break
			}
			p.advance()
			if p.cur.err != nil {
				return &p.cur
			}
			// value keeps the raw quoted pieces; unquoted is the decoded join.
			cat.value += " " + p.cur.value
			cat.unquoted += p.cur.unquoted
		}
		p.done = false // parser may have seen EOF, but we want to return cat
		p.cur = cat
	}
	return &p.cur
}
649
+
650
+func (p *textParser) consumeToken(s string) error {
651
+	tok := p.next()
652
+	if tok.err != nil {
653
+		return tok.err
654
+	}
655
+	if tok.value != s {
656
+		p.back()
657
+		return p.errorf("expected %q, found %q", s, tok.value)
658
+	}
659
+	return nil
660
+}
661
+
662
// errBadUTF8 is returned when a quoted string contains invalid UTF-8.
var errBadUTF8 = errors.New("proto: bad UTF-8")

// unquoteC decodes the body of a quoted string (without its surrounding
// quotes), expanding backslash escapes.
func unquoteC(s string, quote rune) (string, error) {
	// This is based on C++'s tokenizer.cc.
	// Despite its name, this is *not* parsing C syntax.
	// For instance, "\0" is an invalid quoted string.

	// Avoid allocation in trivial cases.
	simple := true
	for _, r := range s {
		if r == '\\' || r == quote {
			simple = false
			break
		}
	}
	if simple {
		return s, nil
	}

	buf := make([]byte, 0, 3*len(s)/2)
	for len(s) > 0 {
		r, n := utf8.DecodeRuneInString(s)
		if r == utf8.RuneError && n == 1 {
			return "", errBadUTF8
		}
		s = s[n:]
		if r != '\\' {
			if r < utf8.RuneSelf {
				buf = append(buf, byte(r))
			} else {
				buf = append(buf, string(r)...)
			}
			continue
		}

		ch, tail, err := unescape(s)
		if err != nil {
			return "", err
		}
		buf = append(buf, ch...)
		s = tail
	}
	return string(buf), nil
}

// unescape decodes one escape sequence (s starts just after the backslash),
// returning the decoded bytes and the remaining input. Supported forms:
// single-character escapes, \ooo octal (exactly 3 digits), \xHH / \XHH hex
// bytes, and \uHHHH / \UHHHHHHHH Unicode code points.
func unescape(s string) (ch string, tail string, err error) {
	r, n := utf8.DecodeRuneInString(s)
	if r == utf8.RuneError && n == 1 {
		return "", "", errBadUTF8
	}
	s = s[n:]
	switch r {
	case 'a':
		return "\a", s, nil
	case 'b':
		return "\b", s, nil
	case 'f':
		return "\f", s, nil
	case 'n':
		return "\n", s, nil
	case 'r':
		return "\r", s, nil
	case 't':
		return "\t", s, nil
	case 'v':
		return "\v", s, nil
	case '?':
		return "?", s, nil // trigraph workaround
	case '\'', '"', '\\':
		return string(r), s, nil
	case '0', '1', '2', '3', '4', '5', '6', '7':
		if len(s) < 2 {
			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
		}
		ss := string(r) + s[:2]
		s = s[2:]
		// ParseUint with bitSize 8 also rejects octal values > 0377.
		i, err := strconv.ParseUint(ss, 8, 8)
		if err != nil {
			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
		}
		return string([]byte{byte(i)}), s, nil
	case 'x', 'X', 'u', 'U':
		// n: number of hex digits expected (intentionally shadows the rune
		// width above, which is no longer needed).
		var n int
		switch r {
		case 'x', 'X':
			n = 2
		case 'u':
			n = 4
		case 'U':
			n = 8
		}
		if len(s) < n {
			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
		}
		ss := s[:n]
		s = s[n:]
		i, err := strconv.ParseUint(ss, 16, 64)
		if err != nil {
			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
		}
		if r == 'x' || r == 'X' {
			// \x escapes produce a single raw byte, not a code point.
			return string([]byte{byte(i)}), s, nil
		}
		if i > utf8.MaxRune {
			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
		}
		return string(rune(i)), s, nil
	}
	return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
772
+
773
// isIdentOrNumberChar reports whether c may appear in an identifier or
// numeric literal token: ASCII letters, digits, and the characters
// '-', '+', '.', '_'.
func isIdentOrNumberChar(c byte) bool {
	switch {
	case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9':
		return true
	case c == '-' || c == '+' || c == '.' || c == '_':
		return true
	default:
		return false
	}
}
786
+
787
// isWhitespace reports whether c is one of the four whitespace bytes
// the text format recognizes (note: '\v' and '\f' are NOT whitespace here).
func isWhitespace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
}
794
+
795
// isQuote reports whether c opens a quoted string (double or single quote).
func isQuote(c byte) bool {
	return c == '"' || c == '\''
}

+ 560
- 0
vendor/github.com/golang/protobuf/proto/text_encode.go View File

@@ -0,0 +1,560 @@
1
+// Copyright 2010 The Go Authors. All rights reserved.
2
+// Use of this source code is governed by a BSD-style
3
+// license that can be found in the LICENSE file.
4
+
5
+package proto
6
+
7
+import (
8
+	"bytes"
9
+	"encoding"
10
+	"fmt"
11
+	"io"
12
+	"math"
13
+	"sort"
14
+	"strings"
15
+
16
+	"google.golang.org/protobuf/encoding/prototext"
17
+	"google.golang.org/protobuf/encoding/protowire"
18
+	"google.golang.org/protobuf/proto"
19
+	"google.golang.org/protobuf/reflect/protoreflect"
20
+	"google.golang.org/protobuf/reflect/protoregistry"
21
+)
22
+
23
// wrapTextMarshalV2 selects the prototext-based marshaling path; it is a
// compile-time false, keeping the legacy writer's exact output.
const wrapTextMarshalV2 = false

// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
	Compact   bool // use compact text format (one line)
	ExpandAny bool // expand google.protobuf.Any messages of known types
}
30
+
31
// Marshal writes the proto text format of m to w.
//
// Note that whatever bytes were produced are written even when marshaling
// failed; a Write failure takes precedence over the marshaling error.
func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
	b, err := tm.marshal(m)
	if len(b) > 0 {
		if _, err := w.Write(b); err != nil {
			return err
		}
	}
	return err
}
41
+
42
+// Text returns a proto text formatted string of m.
43
+func (tm *TextMarshaler) Text(m Message) string {
44
+	b, _ := tm.marshal(m)
45
+	return string(b)
46
+}
47
+
48
// marshal produces the text encoding of m. Invalid/nil messages render as
// "<nil>". A message implementing encoding.TextMarshaler is delegated to;
// otherwise the legacy textWriter is used (wrapTextMarshalV2 is a
// compile-time false, so the prototext branch is dead code kept for
// reference).
func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
	mr := MessageReflect(m)
	if mr == nil || !mr.IsValid() {
		return []byte("<nil>"), nil
	}

	if wrapTextMarshalV2 {
		if m, ok := m.(encoding.TextMarshaler); ok {
			return m.MarshalText()
		}

		opts := prototext.MarshalOptions{
			AllowPartial: true,
			EmitUnknown:  true,
		}
		if !tm.Compact {
			opts.Indent = "  "
		}
		if !tm.ExpandAny {
			// A nil resolver disables Any expansion.
			opts.Resolver = (*protoregistry.Types)(nil)
		}
		return opts.Marshal(mr.Interface())
	} else {
		w := &textWriter{
			compact:   tm.Compact,
			expandAny: tm.ExpandAny,
			complete:  true,
		}

		if m, ok := m.(encoding.TextMarshaler); ok {
			b, err := m.MarshalText()
			if err != nil {
				return nil, err
			}
			w.Write(b)
			return w.buf, nil
		}

		err := w.writeMessage(mr)
		return w.buf, err
	}
}
90
+
91
// Shared marshaler instances backing the package-level convenience functions.
var (
	defaultTextMarshaler = TextMarshaler{}
	compactTextMarshaler = TextMarshaler{Compact: true}
)

// MarshalText writes the proto text format of m to w.
func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }

// MarshalTextString returns a proto text formatted string of m.
func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }

// CompactText writes the compact proto text format of m to w.
func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }

// CompactTextString returns a compact proto text formatted string of m.
func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }

// Byte sequences reused by the text writer.
var (
	newline         = []byte("\n")
	endBraceNewline = []byte("}\n")
	posInf          = []byte("inf")
	negInf          = []byte("-inf")
	nan             = []byte("nan")
)
115
+
116
// textWriter is an io.Writer that tracks its indentation level.
// In compact mode, newlines are rendered as single spaces.
type textWriter struct {
	compact   bool // same as TextMarshaler.Compact
	expandAny bool // same as TextMarshaler.ExpandAny
	complete  bool // whether the current position is a complete line
	indent    int  // indentation level; never negative
	buf       []byte
}
124
+
125
// Write appends p to the buffer, inserting indentation at the start of each
// line in non-compact mode and replacing newlines with spaces in compact
// mode. It never returns a non-nil error.
func (w *textWriter) Write(p []byte) (n int, _ error) {
	newlines := bytes.Count(p, newline)
	if newlines == 0 {
		// Fast path: no newlines, append after indenting if at line start.
		if !w.compact && w.complete {
			w.writeIndent()
		}
		w.buf = append(w.buf, p...)
		w.complete = false
		return len(p), nil
	}

	frags := bytes.SplitN(p, newline, newlines+1)
	if w.compact {
		// Compact mode: join the fragments with single spaces.
		for i, frag := range frags {
			if i > 0 {
				w.buf = append(w.buf, ' ')
				n++
			}
			w.buf = append(w.buf, frag...)
			n += len(frag)
		}
		return n, nil
	}

	for i, frag := range frags {
		if w.complete {
			w.writeIndent()
		}
		w.buf = append(w.buf, frag...)
		n += len(frag)
		if i+1 < len(frags) {
			w.buf = append(w.buf, '\n')
			n++
		}
	}
	// A trailing empty fragment means p ended with '\n', so the next write
	// starts a fresh (indentable) line.
	w.complete = len(frags[len(frags)-1]) == 0
	return n, nil
}
163
+
164
// WriteByte appends a single byte, applying the same compact-mode newline
// substitution and indentation rules as Write. It never returns a non-nil
// error.
func (w *textWriter) WriteByte(c byte) error {
	if w.compact && c == '\n' {
		c = ' '
	}
	if !w.compact && w.complete {
		w.writeIndent()
	}
	w.buf = append(w.buf, c)
	w.complete = c == '\n'
	return nil
}
175
+
176
// writeName emits a field's name followed by ':' (groups are written by
// their message type name with no colon), plus a trailing space in
// non-compact mode.
func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
	if !w.compact && w.complete {
		w.writeIndent()
	}
	w.complete = false

	if fd.Kind() != protoreflect.GroupKind {
		w.buf = append(w.buf, fd.Name()...)
		w.WriteByte(':')
	} else {
		// Use message type name for group field name.
		w.buf = append(w.buf, fd.Message().Name()...)
	}

	if !w.compact {
		w.WriteByte(' ')
	}
}
194
+
195
// requiresQuotes reports whether a type URL must be quoted in text output.
// Any rune outside the set [0-9A-Za-z./_] forces quoting; note in
// particular that '-' is not in the allowed set and triggers quoting.
func requiresQuotes(u string) bool {
	for _, ch := range u {
		allowed := ch == '.' || ch == '/' || ch == '_' ||
			('0' <= ch && ch <= '9') ||
			('A' <= ch && ch <= 'Z') ||
			('a' <= ch && ch <= 'z')
		if !allowed {
			return true
		}
	}
	return false
}
213
+
214
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an
// error was encountered.
func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
	md := m.Descriptor()
	fdURL := md.Fields().ByName("type_url")
	fdVal := md.Fields().ByName("value")

	url := m.Get(fdURL).String()
	mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
	if err != nil {
		// Unknown type: let the caller fall back to unexpanded output.
		return false, nil
	}

	b := m.Get(fdVal).Bytes()
	m2 := mt.New()
	if err := proto.Unmarshal(b, m2.Interface()); err != nil {
		// Undecodable payload: also fall back to unexpanded output.
		return false, nil
	}
	w.Write([]byte("["))
	if requiresQuotes(url) {
		w.writeQuotedString(url)
	} else {
		w.Write([]byte(url))
	}
	if w.compact {
		w.Write([]byte("]:<"))
	} else {
		w.Write([]byte("]: <\n"))
		w.indent++
	}
	if err := w.writeMessage(m2); err != nil {
		return true, err
	}
	if w.compact {
		w.Write([]byte("> "))
	} else {
		w.indent--
		w.Write([]byte(">\n"))
	}
	return true, nil
}
260
+
261
// writeMessage writes the text form of message m, one field per line.
// When expandAny is set, google.protobuf.Any messages are rendered in
// expanded form via writeProto3Any; unknown fields and extensions are
// appended after the known fields.
func (w *textWriter) writeMessage(m protoreflect.Message) error {
	md := m.Descriptor()
	if w.expandAny && md.FullName() == "google.protobuf.Any" {
		if canExpand, err := w.writeProto3Any(m); canExpand {
			return err
		}
	}

	fds := md.Fields()
	for i := 0; i < fds.Len(); {
		fd := fds.Get(i)
		if od := fd.ContainingOneof(); od != nil {
			// For a oneof, emit only the member that is set and skip
			// past the whole group of member fields in one step.
			fd = m.WhichOneof(od)
			i += od.Fields().Len()
		} else {
			i++
		}
		if fd == nil || !m.Has(fd) {
			continue
		}

		switch {
		case fd.IsList():
			lv := m.Get(fd).List()
			for j := 0; j < lv.Len(); j++ {
				w.writeName(fd)
				v := lv.Get(j)
				if err := w.writeSingularValue(v, fd); err != nil {
					return err
				}
				w.WriteByte('\n')
			}
		case fd.IsMap():
			kfd := fd.MapKey()
			vfd := fd.MapValue()
			mv := m.Get(fd).Map()

			// Map iteration order is unspecified; collect the entries
			// and sort them by key so the output is deterministic.
			type entry struct{ key, val protoreflect.Value }
			var entries []entry
			mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
				entries = append(entries, entry{k.Value(), v})
				return true
			})
			sort.Slice(entries, func(i, j int) bool {
				switch kfd.Kind() {
				case protoreflect.BoolKind:
					return !entries[i].key.Bool() && entries[j].key.Bool()
				case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
					return entries[i].key.Int() < entries[j].key.Int()
				case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
					return entries[i].key.Uint() < entries[j].key.Uint()
				case protoreflect.StringKind:
					return entries[i].key.String() < entries[j].key.String()
				default:
					// Map keys are restricted to scalar kinds by the
					// protobuf language; anything else is a bug.
					panic("invalid kind")
				}
			})
			for _, entry := range entries {
				// Each map entry is rendered as a nested block:
				// name:<key:... value:...>.
				w.writeName(fd)
				w.WriteByte('<')
				if !w.compact {
					w.WriteByte('\n')
				}
				w.indent++
				w.writeName(kfd)
				if err := w.writeSingularValue(entry.key, kfd); err != nil {
					return err
				}
				w.WriteByte('\n')
				w.writeName(vfd)
				if err := w.writeSingularValue(entry.val, vfd); err != nil {
					return err
				}
				w.WriteByte('\n')
				w.indent--
				w.WriteByte('>')
				w.WriteByte('\n')
			}
		default:
			w.writeName(fd)
			if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
				return err
			}
			w.WriteByte('\n')
		}
	}

	if b := m.GetUnknown(); len(b) > 0 {
		w.writeUnknownFields(b)
	}
	return w.writeExtensions(m)
}
353
+
354
// writeSingularValue writes a single scalar, string, bytes, enum, or
// nested-message value v in text form, formatted according to the kind
// of field descriptor fd.
func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
	switch fd.Kind() {
	case protoreflect.FloatKind, protoreflect.DoubleKind:
		// Special float values use the text-format spellings (inf/nan)
		// rather than fmt's default rendering.
		switch vf := v.Float(); {
		case math.IsInf(vf, +1):
			w.Write(posInf)
		case math.IsInf(vf, -1):
			w.Write(negInf)
		case math.IsNaN(vf):
			w.Write(nan)
		default:
			fmt.Fprint(w, v.Interface())
		}
	case protoreflect.StringKind:
		// NOTE: This does not validate UTF-8 for historical reasons.
		w.writeQuotedString(string(v.String()))
	case protoreflect.BytesKind:
		w.writeQuotedString(string(v.Bytes()))
	case protoreflect.MessageKind, protoreflect.GroupKind:
		// Messages are delimited with <...>, groups with {...}.
		var bra, ket byte = '<', '>'
		if fd.Kind() == protoreflect.GroupKind {
			bra, ket = '{', '}'
		}
		w.WriteByte(bra)
		if !w.compact {
			w.WriteByte('\n')
		}
		w.indent++
		m := v.Message()
		// A message that implements encoding.TextMarshaler renders
		// itself; otherwise recurse into the generic writer.
		if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
			b, err := m2.MarshalText()
			if err != nil {
				return err
			}
			w.Write(b)
		} else {
			w.writeMessage(m)
		}
		w.indent--
		w.WriteByte(ket)
	case protoreflect.EnumKind:
		// Prefer the symbolic enum name; fall back to the raw number
		// for values unknown to the descriptor.
		if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
			fmt.Fprint(w, ev.Name())
		} else {
			fmt.Fprint(w, v.Enum())
		}
	default:
		fmt.Fprint(w, v.Interface())
	}
	return nil
}
405
+
406
+// writeQuotedString writes a quoted string in the protocol buffer text format.
407
+func (w *textWriter) writeQuotedString(s string) {
408
+	w.WriteByte('"')
409
+	for i := 0; i < len(s); i++ {
410
+		switch c := s[i]; c {
411
+		case '\n':
412
+			w.buf = append(w.buf, `\n`...)
413
+		case '\r':
414
+			w.buf = append(w.buf, `\r`...)
415
+		case '\t':
416
+			w.buf = append(w.buf, `\t`...)
417
+		case '"':
418
+			w.buf = append(w.buf, `\"`...)
419
+		case '\\':
420
+			w.buf = append(w.buf, `\\`...)
421
+		default:
422
+			if isPrint := c >= 0x20 && c < 0x7f; isPrint {
423
+				w.buf = append(w.buf, c)
424
+			} else {
425
+				w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
426
+			}
427
+		}
428
+	}
429
+	w.WriteByte('"')
430
+}
431
+
432
// writeUnknownFields writes the raw unknown fields in b as best-effort
// "number: value" lines, descending into groups. Malformed input is not
// an error: parsing simply stops at the first truncated element.
func (w *textWriter) writeUnknownFields(b []byte) {
	if !w.compact {
		fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
	}

	for len(b) > 0 {
		num, wtyp, n := protowire.ConsumeTag(b)
		if n < 0 {
			// Truncated/invalid tag: silently stop emitting.
			return
		}
		b = b[n:]

		if wtyp == protowire.EndGroupType {
			// Close the enclosing group opened by StartGroupType below.
			w.indent--
			w.Write(endBraceNewline)
			continue
		}
		fmt.Fprint(w, num)
		if wtyp != protowire.StartGroupType {
			w.WriteByte(':')
		}
		if !w.compact || wtyp == protowire.StartGroupType {
			w.WriteByte(' ')
		}
		switch wtyp {
		case protowire.VarintType:
			v, n := protowire.ConsumeVarint(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.Fixed32Type:
			v, n := protowire.ConsumeFixed32(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.Fixed64Type:
			v, n := protowire.ConsumeFixed64(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprint(w, v)
		case protowire.BytesType:
			v, n := protowire.ConsumeBytes(b)
			if n < 0 {
				return
			}
			b = b[n:]
			fmt.Fprintf(w, "%q", v)
		case protowire.StartGroupType:
			w.WriteByte('{')
			w.indent++
		default:
			fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
		}
		w.WriteByte('\n')
	}
}
494
+
495
+// writeExtensions writes all the extensions in m.
496
+func (w *textWriter) writeExtensions(m protoreflect.Message) error {
497
+	md := m.Descriptor()
498
+	if md.ExtensionRanges().Len() == 0 {
499
+		return nil
500
+	}
501
+
502
+	type ext struct {
503
+		desc protoreflect.FieldDescriptor
504
+		val  protoreflect.Value
505
+	}
506
+	var exts []ext
507
+	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
508
+		if fd.IsExtension() {
509
+			exts = append(exts, ext{fd, v})
510
+		}
511
+		return true
512
+	})
513
+	sort.Slice(exts, func(i, j int) bool {
514
+		return exts[i].desc.Number() < exts[j].desc.Number()
515
+	})
516
+
517
+	for _, ext := range exts {
518
+		// For message set, use the name of the message as the extension name.
519
+		name := string(ext.desc.FullName())
520
+		if isMessageSet(ext.desc.ContainingMessage()) {
521
+			name = strings.TrimSuffix(name, ".message_set_extension")
522
+		}
523
+
524
+		if !ext.desc.IsList() {
525
+			if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
526
+				return err
527
+			}
528
+		} else {
529
+			lv := ext.val.List()
530
+			for i := 0; i < lv.Len(); i++ {
531
+				if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
532
+					return err
533
+				}
534
+			}
535
+		}
536
+	}
537
+	return nil
538
+}
539
+
540
+func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
541
+	fmt.Fprintf(w, "[%s]:", name)
542
+	if !w.compact {
543
+		w.WriteByte(' ')
544
+	}
545
+	if err := w.writeSingularValue(v, fd); err != nil {
546
+		return err
547
+	}
548
+	w.WriteByte('\n')
549
+	return nil
550
+}
551
+
552
+func (w *textWriter) writeIndent() {
553
+	if !w.complete {
554
+		return
555
+	}
556
+	for i := 0; i < w.indent*2; i++ {
557
+		w.buf = append(w.buf, ' ')
558
+	}
559
+	w.complete = false
560
+}

+ 78
- 0
vendor/github.com/golang/protobuf/proto/wire.go View File

@@ -0,0 +1,78 @@
1
+// Copyright 2019 The Go Authors. All rights reserved.
2
+// Use of this source code is governed by a BSD-style
3
+// license that can be found in the LICENSE file.
4
+
5
+package proto
6
+
7
+import (
8
+	protoV2 "google.golang.org/protobuf/proto"
9
+	"google.golang.org/protobuf/runtime/protoiface"
10
+)
11
+
12
+// Size returns the size in bytes of the wire-format encoding of m.
13
+func Size(m Message) int {
14
+	if m == nil {
15
+		return 0
16
+	}
17
+	mi := MessageV2(m)
18
+	return protoV2.Size(mi)
19
+}
20
+
21
+// Marshal returns the wire-format encoding of m.
22
+func Marshal(m Message) ([]byte, error) {
23
+	b, err := marshalAppend(nil, m, false)
24
+	if b == nil {
25
+		b = zeroBytes
26
+	}
27
+	return b, err
28
+}
29
+
30
+var zeroBytes = make([]byte, 0, 0)
31
+
32
// marshalAppend appends the wire-format encoding of m to buf, returning
// the extended slice. A nil m yields ErrNil; deterministic selects
// deterministic ordering for map fields. Required-field violations are
// reported via checkRequiredNotSet after a successful encode.
func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
	if m == nil {
		return nil, ErrNil
	}
	mi := MessageV2(m)
	nbuf, err := protoV2.MarshalOptions{
		Deterministic: deterministic,
		AllowPartial:  true,
	}.MarshalAppend(buf, mi)
	if err != nil {
		return buf, err
	}
	// Nothing was appended: distinguish a genuinely empty message from an
	// invalid one (e.g. a typed-nil pointer), which is reported as ErrNil.
	if len(buf) == len(nbuf) {
		if !mi.ProtoReflect().IsValid() {
			return buf, ErrNil
		}
	}
	return nbuf, checkRequiredNotSet(mi)
}
51
+
52
// Unmarshal parses a wire-format message in b and places the decoded results in m.
//
// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
// removed. Use UnmarshalMerge to preserve and append to existing data.
func Unmarshal(b []byte, m Message) error {
	// Clearing first makes the parse a pure replace rather than a merge.
	m.Reset()
	return UnmarshalMerge(b, m)
}
60
+
61
+// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
62
+func UnmarshalMerge(b []byte, m Message) error {
63
+	mi := MessageV2(m)
64
+	out, err := protoV2.UnmarshalOptions{
65
+		AllowPartial: true,
66
+		Merge:        true,
67
+	}.UnmarshalState(protoiface.UnmarshalInput{
68
+		Buf:     b,
69
+		Message: mi.ProtoReflect(),
70
+	})
71
+	if err != nil {
72
+		return err
73
+	}
74
+	if out.Flags&protoiface.UnmarshalInitialized > 0 {
75
+		return nil
76
+	}
77
+	return checkRequiredNotSet(mi)
78
+}

+ 34
- 0
vendor/github.com/golang/protobuf/proto/wrappers.go View File

@@ -0,0 +1,34 @@
1
+// Copyright 2019 The Go Authors. All rights reserved.
2
+// Use of this source code is governed by a BSD-style
3
+// license that can be found in the LICENSE file.
4
+
5
+package proto
6
+
7
// Bool stores v in a new bool value and returns a pointer to it.
func Bool(v bool) *bool { p := v; return &p }

// Int stores v in a new int32 value and returns a pointer to it.
//
// Deprecated: Use Int32 instead.
func Int(v int) *int32 { return Int32(int32(v)) }

// Int32 stores v in a new int32 value and returns a pointer to it.
func Int32(v int32) *int32 { p := v; return &p }

// Int64 stores v in a new int64 value and returns a pointer to it.
func Int64(v int64) *int64 { p := v; return &p }

// Uint32 stores v in a new uint32 value and returns a pointer to it.
func Uint32(v uint32) *uint32 { p := v; return &p }

// Uint64 stores v in a new uint64 value and returns a pointer to it.
func Uint64(v uint64) *uint64 { p := v; return &p }

// Float32 stores v in a new float32 value and returns a pointer to it.
func Float32(v float32) *float32 { p := v; return &p }

// Float64 stores v in a new float64 value and returns a pointer to it.
func Float64(v float64) *float64 { p := v; return &p }

// String stores v in a new string value and returns a pointer to it.
func String(v string) *string { p := v; return &p }

+ 201
- 0
vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE View File

@@ -0,0 +1,201 @@
1
+                                 Apache License
2
+                           Version 2.0, January 2004
3
+                        http://www.apache.org/licenses/
4
+
5
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+   1. Definitions.
8
+
9
+      "License" shall mean the terms and conditions for use, reproduction,
10
+      and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+      "Licensor" shall mean the copyright owner or entity authorized by
13
+      the copyright owner that is granting the License.
14
+
15
+      "Legal Entity" shall mean the union of the acting entity and all
16
+      other entities that control, are controlled by, or are under common
17
+      control with that entity. For the purposes of this definition,
18
+      "control" means (i) the power, direct or indirect, to cause the
19
+      direction or management of such entity, whether by contract or
20
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+      outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+      "You" (or "Your") shall mean an individual or Legal Entity
24
+      exercising permissions granted by this License.
25
+
26
+      "Source" form shall mean the preferred form for making modifications,
27
+      including but not limited to software source code, documentation
28
+      source, and configuration files.
29
+
30
+      "Object" form shall mean any form resulting from mechanical
31
+      transformation or translation of a Source form, including but
32
+      not limited to compiled object code, generated documentation,
33
+      and conversions to other media types.
34
+
35
+      "Work" shall mean the work of authorship, whether in Source or
36
+      Object form, made available under the License, as indicated by a
37
+      copyright notice that is included in or attached to the work
38
+      (an example is provided in the Appendix below).
39
+
40
+      "Derivative Works" shall mean any work, whether in Source or Object
41
+      form, that is based on (or derived from) the Work and for which the
42
+      editorial revisions, annotations, elaborations, or other modifications
43
+      represent, as a whole, an original work of authorship. For the purposes
44
+      of this License, Derivative Works shall not include works that remain
45
+      separable from, or merely link (or bind by name) to the interfaces of,
46
+      the Work and Derivative Works thereof.
47
+
48
+      "Contribution" shall mean any work of authorship, including
49
+      the original version of the Work and any modifications or additions
50
+      to that Work or Derivative Works thereof, that is intentionally
51
+      submitted to Licensor for inclusion in the Work by the copyright owner
52
+      or by an individual or Legal Entity authorized to submit on behalf of
53
+      the copyright owner. For the purposes of this definition, "submitted"
54
+      means any form of electronic, verbal, or written communication sent
55
+      to the Licensor or its representatives, including but not limited to
56
+      communication on electronic mailing lists, source code control systems,
57
+      and issue tracking systems that are managed by, or on behalf of, the
58
+      Licensor for the purpose of discussing and improving the Work, but
59
+      excluding communication that is conspicuously marked or otherwise
60
+      designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+      "Contributor" shall mean Licensor and any individual or Legal Entity
63
+      on behalf of whom a Contribution has been received by Licensor and
64
+      subsequently incorporated within the Work.
65
+
66
+   2. Grant of Copyright License. Subject to the terms and conditions of
67
+      this License, each Contributor hereby grants to You a perpetual,
68
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+      copyright license to reproduce, prepare Derivative Works of,
70
+      publicly display, publicly perform, sublicense, and distribute the
71
+      Work and such Derivative Works in Source or Object form.
72
+
73
+   3. Grant of Patent License. Subject to the terms and conditions of
74
+      this License, each Contributor hereby grants to You a perpetual,
75
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+      (except as stated in this section) patent license to make, have made,
77
+      use, offer to sell, sell, import, and otherwise transfer the Work,
78
+      where such license applies only to those patent claims licensable
79
+      by such Contributor that are necessarily infringed by their
80
+      Contribution(s) alone or by combination of their Contribution(s)
81
+      with the Work to which such Contribution(s) was submitted. If You
82
+      institute patent litigation against any entity (including a
83
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+      or a Contribution incorporated within the Work constitutes direct
85
+      or contributory patent infringement, then any patent licenses
86
+      granted to You under this License for that Work shall terminate
87
+      as of the date such litigation is filed.
88
+
89
+   4. Redistribution. You may reproduce and distribute copies of the
90
+      Work or Derivative Works thereof in any medium, with or without
91
+      modifications, and in Source or Object form, provided that You
92
+      meet the following conditions:
93
+
94
+      (a) You must give any other recipients of the Work or
95
+          Derivative Works a copy of this License; and
96
+
97
+      (b) You must cause any modified files to carry prominent notices
98
+          stating that You changed the files; and
99
+
100
+      (c) You must retain, in the Source form of any Derivative Works
101
+          that You distribute, all copyright, patent, trademark, and
102
+          attribution notices from the Source form of the Work,
103
+          excluding those notices that do not pertain to any part of
104
+          the Derivative Works; and
105
+
106
+      (d) If the Work includes a "NOTICE" text file as part of its
107
+          distribution, then any Derivative Works that You distribute must
108
+          include a readable copy of the attribution notices contained
109
+          within such NOTICE file, excluding those notices that do not
110
+          pertain to any part of the Derivative Works, in at least one
111
+          of the following places: within a NOTICE text file distributed
112
+          as part of the Derivative Works; within the Source form or
113
+          documentation, if provided along with the Derivative Works; or,
114
+          within a display generated by the Derivative Works, if and
115
+          wherever such third-party notices normally appear. The contents
116
+          of the NOTICE file are for informational purposes only and
117
+          do not modify the License. You may add Your own attribution
118
+          notices within Derivative Works that You distribute, alongside
119
+          or as an addendum to the NOTICE text from the Work, provided
120
+          that such additional attribution notices cannot be construed
121
+          as modifying the License.
122
+
123
+      You may add Your own copyright statement to Your modifications and
124
+      may provide additional or different license terms and conditions
125
+      for use, reproduction, or distribution of Your modifications, or
126
+      for any such Derivative Works as a whole, provided Your use,
127
+      reproduction, and distribution of the Work otherwise complies with
128
+      the conditions stated in this License.
129
+
130
+   5. Submission of Contributions. Unless You explicitly state otherwise,
131
+      any Contribution intentionally submitted for inclusion in the Work
132
+      by You to the Licensor shall be under the terms and conditions of
133
+      this License, without any additional terms or conditions.
134
+      Notwithstanding the above, nothing herein shall supersede or modify
135
+      the terms of any separate license agreement you may have executed
136
+      with Licensor regarding such Contributions.
137
+
138
+   6. Trademarks. This License does not grant permission to use the trade
139
+      names, trademarks, service marks, or product names of the Licensor,
140
+      except as required for reasonable and customary use in describing the
141
+      origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+   7. Disclaimer of Warranty. Unless required by applicable law or
144
+      agreed to in writing, Licensor provides the Work (and each
145
+      Contributor provides its Contributions) on an "AS IS" BASIS,
146
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+      implied, including, without limitation, any warranties or conditions
148
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+      PARTICULAR PURPOSE. You are solely responsible for determining the
150
+      appropriateness of using or redistributing the Work and assume any
151
+      risks associated with Your exercise of permissions under this License.
152
+
153
+   8. Limitation of Liability. In no event and under no legal theory,
154
+      whether in tort (including negligence), contract, or otherwise,
155
+      unless required by applicable law (such as deliberate and grossly
156
+      negligent acts) or agreed to in writing, shall any Contributor be
157
+      liable to You for damages, including any direct, indirect, special,
158
+      incidental, or consequential damages of any character arising as a
159
+      result of this License or out of the use or inability to use the
160
+      Work (including but not limited to damages for loss of goodwill,
161
+      work stoppage, computer failure or malfunction, or any and all
162
+      other commercial damages or losses), even if such Contributor
163
+      has been advised of the possibility of such damages.
164
+
165
+   9. Accepting Warranty or Additional Liability. While redistributing
166
+      the Work or Derivative Works thereof, You may choose to offer,
167
+      and charge a fee for, acceptance of support, warranty, indemnity,
168
+      or other liability obligations and/or rights consistent with this
169
+      License. However, in accepting such obligations, You may act only
170
+      on Your own behalf and on Your sole responsibility, not on behalf
171
+      of any other Contributor, and only if You agree to indemnify,
172
+      defend, and hold each Contributor harmless for any liability
173
+      incurred by, or claims asserted against, such Contributor by reason
174
+      of your accepting any such warranty or additional liability.
175
+
176
+   END OF TERMS AND CONDITIONS
177
+
178
+   APPENDIX: How to apply the Apache License to your work.
179
+
180
+      To apply the Apache License to your work, attach the following
181
+      boilerplate notice, with the fields enclosed by brackets "{}"
182
+      replaced with your own identifying information. (Don't include
183
+      the brackets!)  The text should be enclosed in the appropriate
184
+      comment syntax for the file format. We also recommend that a
185
+      file or class name and description of purpose be included on the
186
+      same "printed page" as the copyright notice for easier
187
+      identification within third-party archives.
188
+
189
+   Copyright {yyyy} {name of copyright owner}
190
+
191
+   Licensed under the Apache License, Version 2.0 (the "License");
192
+   you may not use this file except in compliance with the License.
193
+   You may obtain a copy of the License at
194
+
195
+       http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+   Unless required by applicable law or agreed to in writing, software
198
+   distributed under the License is distributed on an "AS IS" BASIS,
199
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+   See the License for the specific language governing permissions and
201
+   limitations under the License.

+ 1
- 0
vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE View File

@@ -0,0 +1 @@
1
+Copyright 2012 Matt T. Proud (matt.proud@gmail.com)

+ 1
- 0
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore View File

@@ -0,0 +1 @@
1
+cover.dat

+ 7
- 0
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile View File

@@ -0,0 +1,7 @@
1
+all:
2
+
3
+cover:
4
+	go test -cover -v -coverprofile=cover.dat ./...
5
+	go tool cover -func cover.dat
6
+
7
+.PHONY: cover

+ 75
- 0
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go View File

@@ -0,0 +1,75 @@
1
+// Copyright 2013 Matt T. Proud
2
+//
3
+// Licensed under the Apache License, Version 2.0 (the "License");
4
+// you may not use this file except in compliance with the License.
5
+// You may obtain a copy of the License at
6
+//
7
+//     http://www.apache.org/licenses/LICENSE-2.0
8
+//
9
+// Unless required by applicable law or agreed to in writing, software
10
+// distributed under the License is distributed on an "AS IS" BASIS,
11
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+// See the License for the specific language governing permissions and
13
+// limitations under the License.
14
+
15
+package pbutil
16
+
17
+import (
18
+	"encoding/binary"
19
+	"errors"
20
+	"io"
21
+
22
+	"github.com/golang/protobuf/proto"
23
+)
24
+
25
+var errInvalidVarint = errors.New("invalid varint32 encountered")
26
+
27
+// ReadDelimited decodes a message from the provided length-delimited stream,
28
+// where the length is encoded as 32-bit varint prefix to the message body.
29
+// It returns the total number of bytes read and any applicable error.  This is
30
+// roughly equivalent to the companion Java API's
31
+// MessageLite#parseDelimitedFrom.  As per the reader contract, this function
32
+// calls r.Read repeatedly as required until exactly one message including its
33
+// prefix is read and decoded (or an error has occurred).  The function never
34
+// reads more bytes from the stream than required.  The function never returns
35
+// an error if a message has been read and decoded correctly, even if the end
36
+// of the stream has been reached in doing so.  In that case, any subsequent
37
+// calls return (0, io.EOF).
38
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
39
+	// Per AbstractParser#parsePartialDelimitedFrom with
40
+	// CodedInputStream#readRawVarint32.
41
+	var headerBuf [binary.MaxVarintLen32]byte
42
+	var bytesRead, varIntBytes int
43
+	var messageLength uint64
44
+	for varIntBytes == 0 { // i.e. no varint has been decoded yet.
45
+		if bytesRead >= len(headerBuf) {
46
+			return bytesRead, errInvalidVarint
47
+		}
48
+		// We have to read byte by byte here to avoid reading more bytes
49
+		// than required. Each read byte is appended to what we have
50
+		// read before.
51
+		newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
52
+		if newBytesRead == 0 {
53
+			if err != nil {
54
+				return bytesRead, err
55
+			}
56
+			// A Reader should not return (0, nil), but if it does,
57
+			// it should be treated as no-op (according to the
58
+			// Reader contract). So let's go on...
59
+			continue
60
+		}
61
+		bytesRead += newBytesRead
62
+		// Now present everything read so far to the varint decoder and
63
+		// see if a varint can be decoded already.
64
+		messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
65
+	}
66
+
67
+	messageBuf := make([]byte, messageLength)
68
+	newBytesRead, err := io.ReadFull(r, messageBuf)
69
+	bytesRead += newBytesRead
70
+	if err != nil {
71
+		return bytesRead, err
72
+	}
73
+
74
+	return bytesRead, proto.Unmarshal(messageBuf, m)
75
+}

+ 16
- 0
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go View File

@@ -0,0 +1,16 @@
1
+// Copyright 2013 Matt T. Proud
2
+//
3
+// Licensed under the Apache License, Version 2.0 (the "License");
4
+// you may not use this file except in compliance with the License.
5
+// You may obtain a copy of the License at
6
+//
7
+//     http://www.apache.org/licenses/LICENSE-2.0
8
+//
9
+// Unless required by applicable law or agreed to in writing, software
10
+// distributed under the License is distributed on an "AS IS" BASIS,
11
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+// See the License for the specific language governing permissions and
13
+// limitations under the License.
14
+
15
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
16
+package pbutil

+ 46
- 0
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go View File

@@ -0,0 +1,46 @@
1
+// Copyright 2013 Matt T. Proud
2
+//
3
+// Licensed under the Apache License, Version 2.0 (the "License");
4
+// you may not use this file except in compliance with the License.
5
+// You may obtain a copy of the License at
6
+//
7
+//     http://www.apache.org/licenses/LICENSE-2.0
8
+//
9
+// Unless required by applicable law or agreed to in writing, software
10
+// distributed under the License is distributed on an "AS IS" BASIS,
11
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+// See the License for the specific language governing permissions and
13
+// limitations under the License.
14
+
15
+package pbutil
16
+
17
+import (
18
+	"encoding/binary"
19
+	"io"
20
+
21
+	"github.com/golang/protobuf/proto"
22
+)
23
+
24
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
25
+// with a 32-bit varint indicating the length of the encoded message, producing
26
+// a length-delimited record stream, which can be used to chain together
27
+// encoded messages of the same type together in a file.  It returns the total
28
+// number of bytes written and any applicable error.  This is roughly
29
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
30
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
31
+	buffer, err := proto.Marshal(m)
32
+	if err != nil {
33
+		return 0, err
34
+	}
35
+
36
+	var buf [binary.MaxVarintLen32]byte
37
+	encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
38
+
39
+	sync, err := w.Write(buf[:encodedLength])
40
+	if err != nil {
41
+		return sync, err
42
+	}
43
+
44
+	n, err = w.Write(buffer)
45
+	return n + sync, err
46
+}

+ 201
- 0
vendor/github.com/prometheus/client_golang/LICENSE View File

@@ -0,0 +1,201 @@
1
+                                 Apache License
2
+                           Version 2.0, January 2004
3
+                        http://www.apache.org/licenses/
4
+
5
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+   1. Definitions.
8
+
9
+      "License" shall mean the terms and conditions for use, reproduction,
10
+      and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+      "Licensor" shall mean the copyright owner or entity authorized by
13
+      the copyright owner that is granting the License.
14
+
15
+      "Legal Entity" shall mean the union of the acting entity and all
16
+      other entities that control, are controlled by, or are under common
17
+      control with that entity. For the purposes of this definition,
18
+      "control" means (i) the power, direct or indirect, to cause the
19
+      direction or management of such entity, whether by contract or
20
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+      outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+      "You" (or "Your") shall mean an individual or Legal Entity
24
+      exercising permissions granted by this License.
25
+
26
+      "Source" form shall mean the preferred form for making modifications,
27
+      including but not limited to software source code, documentation
28
+      source, and configuration files.
29
+
30
+      "Object" form shall mean any form resulting from mechanical
31
+      transformation or translation of a Source form, including but
32
+      not limited to compiled object code, generated documentation,
33
+      and conversions to other media types.
34
+
35
+      "Work" shall mean the work of authorship, whether in Source or
36
+      Object form, made available under the License, as indicated by a
37
+      copyright notice that is included in or attached to the work
38
+      (an example is provided in the Appendix below).
39
+
40
+      "Derivative Works" shall mean any work, whether in Source or Object
41
+      form, that is based on (or derived from) the Work and for which the
42
+      editorial revisions, annotations, elaborations, or other modifications
43
+      represent, as a whole, an original work of authorship. For the purposes
44
+      of this License, Derivative Works shall not include works that remain
45
+      separable from, or merely link (or bind by name) to the interfaces of,
46
+      the Work and Derivative Works thereof.
47
+
48
+      "Contribution" shall mean any work of authorship, including
49
+      the original version of the Work and any modifications or additions
50
+      to that Work or Derivative Works thereof, that is intentionally
51
+      submitted to Licensor for inclusion in the Work by the copyright owner
52
+      or by an individual or Legal Entity authorized to submit on behalf of
53
+      the copyright owner. For the purposes of this definition, "submitted"
54
+      means any form of electronic, verbal, or written communication sent
55
+      to the Licensor or its representatives, including but not limited to
56
+      communication on electronic mailing lists, source code control systems,
57
+      and issue tracking systems that are managed by, or on behalf of, the
58
+      Licensor for the purpose of discussing and improving the Work, but
59
+      excluding communication that is conspicuously marked or otherwise
60
+      designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+      "Contributor" shall mean Licensor and any individual or Legal Entity
63
+      on behalf of whom a Contribution has been received by Licensor and
64
+      subsequently incorporated within the Work.
65
+
66
+   2. Grant of Copyright License. Subject to the terms and conditions of
67
+      this License, each Contributor hereby grants to You a perpetual,
68
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+      copyright license to reproduce, prepare Derivative Works of,
70
+      publicly display, publicly perform, sublicense, and distribute the
71
+      Work and such Derivative Works in Source or Object form.
72
+
73
+   3. Grant of Patent License. Subject to the terms and conditions of
74
+      this License, each Contributor hereby grants to You a perpetual,
75
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+      (except as stated in this section) patent license to make, have made,
77
+      use, offer to sell, sell, import, and otherwise transfer the Work,
78
+      where such license applies only to those patent claims licensable
79
+      by such Contributor that are necessarily infringed by their
80
+      Contribution(s) alone or by combination of their Contribution(s)
81
+      with the Work to which such Contribution(s) was submitted. If You
82
+      institute patent litigation against any entity (including a
83
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+      or a Contribution incorporated within the Work constitutes direct
85
+      or contributory patent infringement, then any patent licenses
86
+      granted to You under this License for that Work shall terminate
87
+      as of the date such litigation is filed.
88
+
89
+   4. Redistribution. You may reproduce and distribute copies of the
90
+      Work or Derivative Works thereof in any medium, with or without
91
+      modifications, and in Source or Object form, provided that You
92
+      meet the following conditions:
93
+
94
+      (a) You must give any other recipients of the Work or
95
+          Derivative Works a copy of this License; and
96
+
97
+      (b) You must cause any modified files to carry prominent notices
98
+          stating that You changed the files; and
99
+
100
+      (c) You must retain, in the Source form of any Derivative Works
101
+          that You distribute, all copyright, patent, trademark, and
102
+          attribution notices from the Source form of the Work,
103
+          excluding those notices that do not pertain to any part of
104
+          the Derivative Works; and
105
+
106
+      (d) If the Work includes a "NOTICE" text file as part of its
107
+          distribution, then any Derivative Works that You distribute must
108
+          include a readable copy of the attribution notices contained
109
+          within such NOTICE file, excluding those notices that do not
110
+          pertain to any part of the Derivative Works, in at least one
111
+          of the following places: within a NOTICE text file distributed
112
+          as part of the Derivative Works; within the Source form or
113
+          documentation, if provided along with the Derivative Works; or,
114
+          within a display generated by the Derivative Works, if and
115
+          wherever such third-party notices normally appear. The contents
116
+          of the NOTICE file are for informational purposes only and
117
+          do not modify the License. You may add Your own attribution
118
+          notices within Derivative Works that You distribute, alongside
119
+          or as an addendum to the NOTICE text from the Work, provided
120
+          that such additional attribution notices cannot be construed
121
+          as modifying the License.
122
+
123
+      You may add Your own copyright statement to Your modifications and
124
+      may provide additional or different license terms and conditions
125
+      for use, reproduction, or distribution of Your modifications, or
126
+      for any such Derivative Works as a whole, provided Your use,
127
+      reproduction, and distribution of the Work otherwise complies with
128
+      the conditions stated in this License.
129
+
130
+   5. Submission of Contributions. Unless You explicitly state otherwise,
131
+      any Contribution intentionally submitted for inclusion in the Work
132
+      by You to the Licensor shall be under the terms and conditions of
133
+      this License, without any additional terms or conditions.
134
+      Notwithstanding the above, nothing herein shall supersede or modify
135
+      the terms of any separate license agreement you may have executed
136
+      with Licensor regarding such Contributions.
137
+
138
+   6. Trademarks. This License does not grant permission to use the trade
139
+      names, trademarks, service marks, or product names of the Licensor,
140
+      except as required for reasonable and customary use in describing the
141
+      origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+   7. Disclaimer of Warranty. Unless required by applicable law or
144
+      agreed to in writing, Licensor provides the Work (and each
145
+      Contributor provides its Contributions) on an "AS IS" BASIS,
146
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+      implied, including, without limitation, any warranties or conditions
148
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+      PARTICULAR PURPOSE. You are solely responsible for determining the
150
+      appropriateness of using or redistributing the Work and assume any
151
+      risks associated with Your exercise of permissions under this License.
152
+
153
+   8. Limitation of Liability. In no event and under no legal theory,
154
+      whether in tort (including negligence), contract, or otherwise,
155
+      unless required by applicable law (such as deliberate and grossly
156
+      negligent acts) or agreed to in writing, shall any Contributor be
157
+      liable to You for damages, including any direct, indirect, special,
158
+      incidental, or consequential damages of any character arising as a
159
+      result of this License or out of the use or inability to use the
160
+      Work (including but not limited to damages for loss of goodwill,
161
+      work stoppage, computer failure or malfunction, or any and all
162
+      other commercial damages or losses), even if such Contributor
163
+      has been advised of the possibility of such damages.
164
+
165
+   9. Accepting Warranty or Additional Liability. While redistributing
166
+      the Work or Derivative Works thereof, You may choose to offer,
167
+      and charge a fee for, acceptance of support, warranty, indemnity,
168
+      or other liability obligations and/or rights consistent with this
169
+      License. However, in accepting such obligations, You may act only
170
+      on Your own behalf and on Your sole responsibility, not on behalf
171
+      of any other Contributor, and only if You agree to indemnify,
172
+      defend, and hold each Contributor harmless for any liability
173
+      incurred by, or claims asserted against, such Contributor by reason
174
+      of your accepting any such warranty or additional liability.
175
+
176
+   END OF TERMS AND CONDITIONS
177
+
178
+   APPENDIX: How to apply the Apache License to your work.
179
+
180
+      To apply the Apache License to your work, attach the following
181
+      boilerplate notice, with the fields enclosed by brackets "[]"
182
+      replaced with your own identifying information. (Don't include
183
+      the brackets!)  The text should be enclosed in the appropriate
184
+      comment syntax for the file format. We also recommend that a
185
+      file or class name and description of purpose be included on the
186
+      same "printed page" as the copyright notice for easier
187
+      identification within third-party archives.
188
+
189
+   Copyright [yyyy] [name of copyright owner]
190
+
191
+   Licensed under the Apache License, Version 2.0 (the "License");
192
+   you may not use this file except in compliance with the License.
193
+   You may obtain a copy of the License at
194
+
195
+       http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+   Unless required by applicable law or agreed to in writing, software
198
+   distributed under the License is distributed on an "AS IS" BASIS,
199
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+   See the License for the specific language governing permissions and
201
+   limitations under the License.

+ 23
- 0
vendor/github.com/prometheus/client_golang/NOTICE View File

@@ -0,0 +1,23 @@
1
+Prometheus instrumentation library for Go applications
2
+Copyright 2012-2015 The Prometheus Authors
3
+
4
+This product includes software developed at
5
+SoundCloud Ltd. (http://soundcloud.com/).
6
+
7
+
8
+The following components are included in this product:
9
+
10
+perks - a fork of https://github.com/bmizerany/perks
11
+https://github.com/beorn7/perks
12
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
13
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
14
+
15
+Go support for Protocol Buffers - Google's data interchange format
16
+http://github.com/golang/protobuf/
17
+Copyright 2010 The Go Authors
18
+See source code for license details.
19
+
20
+Support for streaming Protocol Buffer messages for the Go language (golang).
21
+https://github.com/matttproud/golang_protobuf_extensions
22
+Copyright 2013 Matt T. Proud
23
+Licensed under the Apache License, Version 2.0

+ 1
- 0
vendor/github.com/prometheus/client_golang/prometheus/.gitignore View File

@@ -0,0 +1 @@
1
+command-line-arguments.test

+ 1
- 0
vendor/github.com/prometheus/client_golang/prometheus/README.md View File

@@ -0,0 +1 @@
1
+See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).

+ 38
- 0
vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go View File

@@ -0,0 +1,38 @@
1
+// Copyright 2021 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import "runtime/debug"
17
+
18
+// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
19
+// See there for documentation.
20
+//
21
+// Deprecated: Use collectors.NewBuildInfoCollector instead.
22
+func NewBuildInfoCollector() Collector {
23
+	path, version, sum := "unknown", "unknown", "unknown"
24
+	if bi, ok := debug.ReadBuildInfo(); ok {
25
+		path = bi.Main.Path
26
+		version = bi.Main.Version
27
+		sum = bi.Main.Sum
28
+	}
29
+	c := &selfCollector{MustNewConstMetric(
30
+		NewDesc(
31
+			"go_build_info",
32
+			"Build information about the main Go module.",
33
+			nil, Labels{"path": path, "version": version, "checksum": sum},
34
+		),
35
+		GaugeValue, 1)}
36
+	c.init(c.self)
37
+	return c
38
+}

+ 128
- 0
vendor/github.com/prometheus/client_golang/prometheus/collector.go View File

@@ -0,0 +1,128 @@
1
+// Copyright 2014 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
// Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for
// collection. See Registerer.Register.
//
// The stock metrics provided by this package (Gauge, Counter, Summary,
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
// namely itself). An implementer of Collector may, however, collect multiple
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
// for collectors already implemented in this library are the metric vectors
// (i.e. collection of multiple instances of the same Metric but with different
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
type Collector interface {
	// Describe sends the super-set of all possible descriptors of metrics
	// collected by this Collector to the provided channel and returns once
	// the last descriptor has been sent. The sent descriptors fulfill the
	// consistency and uniqueness requirements described in the Desc
	// documentation.
	//
	// It is valid if one and the same Collector sends duplicate
	// descriptors. Those duplicates are simply ignored. However, two
	// different Collectors must not send duplicate descriptors.
	//
	// Sending no descriptor at all marks the Collector as “unchecked”,
	// i.e. no checks will be performed at registration time, and the
	// Collector may yield any Metric it sees fit in its Collect method.
	//
	// This method idempotently sends the same descriptors throughout the
	// lifetime of the Collector. It may be called concurrently and
	// therefore must be implemented in a concurrency safe way.
	//
	// If a Collector encounters an error while executing this method, it
	// must send an invalid descriptor (created with NewInvalidDesc) to
	// signal the error to the registry.
	Describe(chan<- *Desc)
	// Collect is called by the Prometheus registry when collecting
	// metrics. The implementation sends each collected metric via the
	// provided channel and returns once the last metric has been sent. The
	// descriptor of each sent metric is one of those returned by Describe
	// (unless the Collector is unchecked, see above). Returned metrics that
	// share the same descriptor must differ in their variable label
	// values.
	//
	// This method may be called concurrently and must therefore be
	// implemented in a concurrency safe way. Blocking occurs at the expense
	// of total performance of rendering all registered metrics. Ideally,
	// Collector implementations support concurrent readers.
	Collect(chan<- Metric)
}
64
+
65
+// DescribeByCollect is a helper to implement the Describe method of a custom
66
+// Collector. It collects the metrics from the provided Collector and sends
67
+// their descriptors to the provided channel.
68
+//
69
+// If a Collector collects the same metrics throughout its lifetime, its
70
+// Describe method can simply be implemented as:
71
+//
72
+//	func (c customCollector) Describe(ch chan<- *Desc) {
73
+//		DescribeByCollect(c, ch)
74
+//	}
75
+//
76
+// However, this will not work if the metrics collected change dynamically over
77
+// the lifetime of the Collector in a way that their combined set of descriptors
78
+// changes as well. The shortcut implementation will then violate the contract
79
+// of the Describe method. If a Collector sometimes collects no metrics at all
80
+// (for example vectors like CounterVec, GaugeVec, etc., which only collect
81
+// metrics after a metric with a fully specified label set has been accessed),
82
+// it might even get registered as an unchecked Collector (cf. the Register
83
+// method of the Registerer interface). Hence, only use this shortcut
84
+// implementation of Describe if you are certain to fulfill the contract.
85
+//
86
+// The Collector example demonstrates a use of DescribeByCollect.
87
+func DescribeByCollect(c Collector, descs chan<- *Desc) {
88
+	metrics := make(chan Metric)
89
+	go func() {
90
+		c.Collect(metrics)
91
+		close(metrics)
92
+	}()
93
+	for m := range metrics {
94
+		descs <- m.Desc()
95
+	}
96
+}
97
+
98
// selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument.
type selfCollector struct {
	self Metric
}

// init provides the selfCollector with a reference to the metric it is supposed
// to collect. It is usually called within the factory function to create a
// metric. See example.
func (c *selfCollector) init(self Metric) {
	c.self = self
}

// Describe implements Collector. It sends the descriptor of the single
// wrapped metric.
func (c *selfCollector) Describe(ch chan<- *Desc) {
	ch <- c.self.Desc()
}

// Collect implements Collector. It sends the single wrapped metric itself.
func (c *selfCollector) Collect(ch chan<- Metric) {
	ch <- c.self
}

// collectorMetric is a metric that is also a collector.
// Because of selfCollector, most (if not all) Metrics in
// this package are also collectors.
type collectorMetric interface {
	Metric
	Collector
}

+ 358
- 0
vendor/github.com/prometheus/client_golang/prometheus/counter.go View File

@@ -0,0 +1,358 @@
1
+// Copyright 2014 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import (
17
+	"errors"
18
+	"math"
19
+	"sync/atomic"
20
+	"time"
21
+
22
+	dto "github.com/prometheus/client_model/go"
23
+	"google.golang.org/protobuf/types/known/timestamppb"
24
+)
25
+
26
// Counter is a Metric that represents a single numerical value that only ever
// goes up. That implies that it cannot be used to count items whose number can
// also go down, e.g. the number of currently running goroutines. Those
// "counters" are represented by Gauges.
//
// A Counter is typically used to count requests served, tasks completed, errors
// occurred, etc.
//
// To create Counter instances, use NewCounter.
type Counter interface {
	Metric
	Collector

	// Inc increments the counter by 1. Use Add to increment it by arbitrary
	// non-negative values.
	Inc()
	// Add adds the given value to the counter. It panics if the value is <
	// 0.
	Add(float64)
}
46
+
47
// ExemplarAdder is implemented by Counters that offer the option of adding a
// value to the Counter together with an exemplar. Its AddWithExemplar method
// works like the Add method of the Counter interface but also replaces the
// currently saved exemplar (if any) with a new one, created from the provided
// value, the current time as timestamp, and the provided labels. Empty Labels
// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
// of the provided labels are invalid, or if the provided labels contain more
// than 128 runes in total.
type ExemplarAdder interface {
	AddWithExemplar(value float64, exemplar Labels)
}
59
+
60
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts

// CounterVecOpts bundles the options to create a CounterVec metric.
// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
// is optional and can safely be left to its default value.
type CounterVecOpts struct {
	CounterOpts

	// VariableLabels are used to partition the metric vector by the given set
	// of labels. Each label value will be constrained with the optional Constraint
	// function, if provided.
	VariableLabels ConstrainableLabels
}
74
+
75
+// NewCounter creates a new Counter based on the provided CounterOpts.
76
+//
77
+// The returned implementation also implements ExemplarAdder. It is safe to
78
+// perform the corresponding type assertion.
79
+//
80
+// The returned implementation tracks the counter value in two separate
81
+// variables, a float64 and a uint64. The latter is used to track calls of the
82
+// Inc method and calls of the Add method with a value that can be represented
83
+// as a uint64. This allows atomic increments of the counter with optimal
84
+// performance. (It is common to have an Inc call in very hot execution paths.)
85
+// Both internal tracking values are added up in the Write method. This has to
86
+// be taken into account when it comes to precision and overflow behavior.
87
+func NewCounter(opts CounterOpts) Counter {
88
+	desc := NewDesc(
89
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
90
+		opts.Help,
91
+		nil,
92
+		opts.ConstLabels,
93
+	)
94
+	if opts.now == nil {
95
+		opts.now = time.Now
96
+	}
97
+	result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
98
+	result.init(result) // Init self-collection.
99
+	result.createdTs = timestamppb.New(opts.now())
100
+	return result
101
+}
102
+
103
// counter is the Counter implementation created by NewCounter and by the
// factory closure in V2.NewCounterVec.
type counter struct {
	// valBits contains the bits of the represented float64 value, while
	// valInt stores values that are exact integers. Both have to go first
	// in the struct to guarantee alignment for atomic operations.
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	valBits uint64
	valInt  uint64

	selfCollector
	desc *Desc

	createdTs  *timestamppb.Timestamp
	labelPairs []*dto.LabelPair
	exemplar   atomic.Value // Containing nil or a *dto.Exemplar.

	// now is for testing purposes, by default it's time.Now.
	now func() time.Time
}
121
+
122
// Desc implements Metric.
func (c *counter) Desc() *Desc {
	return c.desc
}
125
+
126
// Add implements Counter. It panics if v is negative.
func (c *counter) Add(v float64) {
	if v < 0 {
		panic(errors.New("counter cannot decrease in value"))
	}

	// Fast path: values that are exact integers are accumulated in valInt
	// with a single atomic add.
	ival := uint64(v)
	if float64(ival) == v {
		atomic.AddUint64(&c.valInt, ival)
		return
	}

	// Slow path: fold non-integer values into the float64 bit pattern via a
	// compare-and-swap loop, retrying if a concurrent writer got in first.
	for {
		oldBits := atomic.LoadUint64(&c.valBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
			return
		}
	}
}
145
+
146
// AddWithExemplar implements ExemplarAdder. It behaves like Add and
// additionally replaces the saved exemplar (no-op when e is nil, see
// updateExemplar).
func (c *counter) AddWithExemplar(v float64, e Labels) {
	c.Add(v)
	c.updateExemplar(v, e)
}
150
+
151
// Inc implements Counter. It atomically increments the integer tracking
// value by one.
func (c *counter) Inc() {
	atomic.AddUint64(&c.valInt, 1)
}
154
+
155
+func (c *counter) get() float64 {
156
+	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
157
+	ival := atomic.LoadUint64(&c.valInt)
158
+	return fval + float64(ival)
159
+}
160
+
161
// Write implements Metric. It sums the two internal tracking values and
// writes the result, together with any stored exemplar and the creation
// timestamp, into out.
func (c *counter) Write(out *dto.Metric) error {
	// Read the Exemplar first and the value second. This is to avoid a race condition
	// where users see an exemplar for a not-yet-existing observation.
	var exemplar *dto.Exemplar
	if e := c.exemplar.Load(); e != nil {
		exemplar = e.(*dto.Exemplar)
	}
	val := c.get()
	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
}
171
+
172
+func (c *counter) updateExemplar(v float64, l Labels) {
173
+	if l == nil {
174
+		return
175
+	}
176
+	e, err := newExemplar(v, c.now(), l)
177
+	if err != nil {
178
+		panic(err)
179
+	}
180
+	c.exemplar.Store(e)
181
+}
182
+
183
// CounterVec is a Collector that bundles a set of Counters that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec.
type CounterVec struct {
	*MetricVec
}
191
+
192
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
193
+// partitioned by the given label names.
194
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
195
+	return V2.NewCounterVec(CounterVecOpts{
196
+		CounterOpts:    opts,
197
+		VariableLabels: UnconstrainedLabels(labelNames),
198
+	})
199
+}
200
+
201
// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
	desc := V2.NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		opts.VariableLabels,
		opts.ConstLabels,
	)
	// The clock is injectable for tests; fall back to the real one.
	if opts.now == nil {
		opts.now = time.Now
	}
	return &CounterVec{
		// The factory closure creates one counter per unique label-value
		// combination, on first access.
		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
			if len(lvs) != len(desc.variableLabels.names) {
				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
			}
			result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
			result.init(result) // Init self-collection.
			result.createdTs = timestamppb.New(opts.now())
			return result
		}),
	}
}
224
+
225
+// GetMetricWithLabelValues returns the Counter for the given slice of label
226
+// values (same order as the variable labels in Desc). If that combination of
227
+// label values is accessed for the first time, a new Counter is created.
228
+//
229
+// It is possible to call this method without using the returned Counter to only
230
+// create the new Counter but leave it at its starting value 0. See also the
231
+// SummaryVec example.
232
+//
233
+// Keeping the Counter for later use is possible (and should be considered if
234
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
235
+// Delete can be used to delete the Counter from the CounterVec. In that case,
236
+// the Counter will still exist, but it will not be exported anymore, even if a
237
+// Counter with the same label values is created later.
238
+//
239
+// An error is returned if the number of label values is not the same as the
240
+// number of variable labels in Desc (minus any curried labels).
241
+//
242
+// Note that for more than one label value, this method is prone to mistakes
243
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
244
+// an alternative to avoid that type of mistake. For higher label numbers, the
245
+// latter has a much more readable (albeit more verbose) syntax, but it comes
246
+// with a performance overhead (for creating and processing the Labels map).
247
+// See also the GaugeVec example.
248
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
249
+	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
250
+	if metric != nil {
251
+		return metric.(Counter), err
252
+	}
253
+	return nil, err
254
+}
255
+
256
+// GetMetricWith returns the Counter for the given Labels map (the label names
257
+// must match those of the variable labels in Desc). If that label map is
258
+// accessed for the first time, a new Counter is created. Implications of
259
+// creating a Counter without using it and keeping the Counter for later use are
260
+// the same as for GetMetricWithLabelValues.
261
+//
262
+// An error is returned if the number and names of the Labels are inconsistent
263
+// with those of the variable labels in Desc (minus any curried labels).
264
+//
265
+// This method is used for the same purpose as
266
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
267
+// methods.
268
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
269
+	metric, err := v.MetricVec.GetMetricWith(labels)
270
+	if metric != nil {
271
+		return metric.(Counter), err
272
+	}
273
+	return nil, err
274
+}
275
+
276
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
277
+// GetMetricWithLabelValues would have returned an error. Not returning an
278
+// error allows shortcuts like
279
+//
280
+//	myVec.WithLabelValues("404", "GET").Add(42)
281
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
282
+	c, err := v.GetMetricWithLabelValues(lvs...)
283
+	if err != nil {
284
+		panic(err)
285
+	}
286
+	return c
287
+}
288
+
289
+// With works as GetMetricWith, but panics where GetMetricWithLabels would have
290
+// returned an error. Not returning an error allows shortcuts like
291
+//
292
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
293
+func (v *CounterVec) With(labels Labels) Counter {
294
+	c, err := v.GetMetricWith(labels)
295
+	if err != nil {
296
+		panic(err)
297
+	}
298
+	return c
299
+}
300
+
301
+// CurryWith returns a vector curried with the provided labels, i.e. the
302
+// returned vector has those labels pre-set for all labeled operations performed
303
+// on it. The cardinality of the curried vector is reduced accordingly. The
304
+// order of the remaining labels stays the same (just with the curried labels
305
+// taken out of the sequence – which is relevant for the
306
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
307
+// vector, but only with labels not yet used for currying before.
308
+//
309
+// The metrics contained in the CounterVec are shared between the curried and
310
+// uncurried vectors. They are just accessed differently. Curried and uncurried
311
+// vectors behave identically in terms of collection. Only one must be
312
+// registered with a given registry (usually the uncurried version). The Reset
313
+// method deletes all metrics, even if called on a curried vector.
314
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
315
+	vec, err := v.MetricVec.CurryWith(labels)
316
+	if vec != nil {
317
+		return &CounterVec{vec}, err
318
+	}
319
+	return nil, err
320
+}
321
+
322
+// MustCurryWith works as CurryWith but panics where CurryWith would have
323
+// returned an error.
324
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
325
+	vec, err := v.CurryWith(labels)
326
+	if err != nil {
327
+		panic(err)
328
+	}
329
+	return vec
330
+}
331
+
332
// CounterFunc is a Counter whose value is determined at collect time by calling a
// provided function.
//
// To create CounterFunc instances, use NewCounterFunc.
type CounterFunc interface {
	Metric
	Collector
}
340
+
341
+// NewCounterFunc creates a new CounterFunc based on the provided
342
+// CounterOpts. The value reported is determined by calling the given function
343
+// from within the Write method. Take into account that metric collection may
344
+// happen concurrently. If that results in concurrent calls to Write, like in
345
+// the case where a CounterFunc is directly registered with Prometheus, the
346
+// provided function must be concurrency-safe. The function should also honor
347
+// the contract for a Counter (values only go up, not down), but compliance will
348
+// not be checked.
349
+//
350
+// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
351
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
352
+	return newValueFunc(NewDesc(
353
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
354
+		opts.Help,
355
+		nil,
356
+		opts.ConstLabels,
357
+	), CounterValue, function)
358
+}

+ 207
- 0
vendor/github.com/prometheus/client_golang/prometheus/desc.go View File

@@ -0,0 +1,207 @@
1
+// Copyright 2016 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import (
17
+	"fmt"
18
+	"sort"
19
+	"strings"
20
+
21
+	"github.com/cespare/xxhash/v2"
22
+	dto "github.com/prometheus/client_model/go"
23
+	"github.com/prometheus/common/model"
24
+	"google.golang.org/protobuf/proto"
25
+
26
+	"github.com/prometheus/client_golang/prometheus/internal"
27
+)
28
+
29
// Desc is the descriptor used by every Prometheus Metric. It is essentially
// the immutable meta-data of a Metric. The normal Metric implementations
// included in this package manage their Desc under the hood. Users only have to
// deal with Desc if they use advanced features like the ExpvarCollector or
// custom Collectors and Metrics.
//
// Descriptors registered with the same registry have to fulfill certain
// consistency and uniqueness criteria if they share the same fully-qualified
// name: They must have the same help string and the same label names (aka label
// dimensions) in each, constLabels and variableLabels, but they must differ in
// the values of the constLabels.
//
// Descriptors that share the same fully-qualified names and the same label
// values of their constLabels are considered equal.
//
// Use NewDesc to create new Desc instances.
type Desc struct {
	// fqName has been built from Namespace, Subsystem, and Name.
	fqName string
	// help provides some helpful information about this metric.
	help string
	// constLabelPairs contains precalculated DTO label pairs based on
	// the constant labels.
	constLabelPairs []*dto.LabelPair
	// variableLabels contains names of labels and normalization function for
	// which the metric maintains variable values.
	variableLabels *compiledLabels
	// id is a hash of the values of the ConstLabels and fqName. This
	// must be unique among all registered descriptors and can therefore be
	// used as an identifier of the descriptor.
	id uint64
	// dimHash is a hash of the label names (preset and variable) and the
	// Help string. Each Desc with the same fqName must have the same
	// dimHash.
	dimHash uint64
	// err is an error that occurred during construction. It is reported on
	// registration time.
	err error
}
68
+
69
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported on registration time. variableLabels and constLabels can
// be nil if no such labels should be set. fqName must not be empty.
//
// variableLabels only contain the label names. Their label values are variable
// and therefore not part of the Desc. (They are managed within the Metric.)
//
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
//
// This is a thin compatibility wrapper that delegates to the V2 constructor
// with unconstrained (non-normalized) variable labels.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
	return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
}
81
+
82
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported on registration time. variableLabels and constLabels can
// be nil if no such labels should be set. fqName must not be empty.
//
// variableLabels only contain the label names and normalization functions. Their
// label values are variable and therefore not part of the Desc. (They are managed
// within the Metric.)
//
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
	d := &Desc{
		fqName:         fqName,
		help:           help,
		variableLabels: variableLabels.compile(),
	}
	if !model.IsValidMetricName(model.LabelValue(fqName)) {
		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
		return d
	}
	// labelValues contains the label values of const labels (in order of
	// their sorted label names) plus the fqName (at position 0).
	labelValues := make([]string, 1, len(constLabels)+1)
	labelValues[0] = fqName
	labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
	labelNameSet := map[string]struct{}{}
	// First add only the const label names and sort them...
	for labelName := range constLabels {
		if !checkLabelName(labelName) {
			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
			return d
		}
		labelNames = append(labelNames, labelName)
		labelNameSet[labelName] = struct{}{}
	}
	sort.Strings(labelNames)
	// ... so that we can now add const label values in the order of their names.
	for _, labelName := range labelNames {
		labelValues = append(labelValues, constLabels[labelName])
	}
	// Validate the const label values. They can't have a wrong cardinality, so
	// use len(labelValues) as expectedNumberOfValues.
	if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
		d.err = err
		return d
	}
	// Now add the variable label names, but prefix them with something that
	// cannot be in a regular label name. That prevents matching the label
	// dimension with a different mix between preset and variable labels.
	for _, label := range d.variableLabels.names {
		if !checkLabelName(label) {
			d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
			return d
		}
		labelNames = append(labelNames, "$"+label)
		// The set deliberately holds the unprefixed name so that a clash
		// between a const and a variable label is detected below.
		labelNameSet[label] = struct{}{}
	}
	if len(labelNames) != len(labelNameSet) {
		d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
		return d
	}

	// id: hash of the fqName and the const label values, separated by a
	// byte that cannot occur in a label value, so the hash is unambiguous.
	xxh := xxhash.New()
	for _, val := range labelValues {
		xxh.WriteString(val)
		xxh.Write(separatorByteSlice)
	}
	d.id = xxh.Sum64()
	// Sort labelNames so that order doesn't matter for the hash.
	sort.Strings(labelNames)
	// Now hash together (in this order) the help string and the sorted
	// label names.
	xxh.Reset()
	xxh.WriteString(help)
	xxh.Write(separatorByteSlice)
	for _, labelName := range labelNames {
		xxh.WriteString(labelName)
		xxh.Write(separatorByteSlice)
	}
	d.dimHash = xxh.Sum64()

	// Precompute the DTO label pairs for the const labels, sorted for
	// deterministic output at collection time.
	d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
	for n, v := range constLabels {
		d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
			Name:  proto.String(n),
			Value: proto.String(v),
		})
	}
	sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
	return d
}
173
+
174
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
175
+// provided error set. If a collector returning such a descriptor is registered,
176
+// registration will fail with the provided error. NewInvalidDesc can be used by
177
+// a Collector to signal inability to describe itself.
178
+func NewInvalidDesc(err error) *Desc {
179
+	return &Desc{
180
+		err: err,
181
+	}
182
+}
183
+
184
+func (d *Desc) String() string {
185
+	lpStrings := make([]string, 0, len(d.constLabelPairs))
186
+	for _, lp := range d.constLabelPairs {
187
+		lpStrings = append(
188
+			lpStrings,
189
+			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
190
+		)
191
+	}
192
+	vlStrings := make([]string, 0, len(d.variableLabels.names))
193
+	for _, vl := range d.variableLabels.names {
194
+		if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
195
+			vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
196
+		} else {
197
+			vlStrings = append(vlStrings, vl)
198
+		}
199
+	}
200
+	return fmt.Sprintf(
201
+		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
202
+		d.fqName,
203
+		d.help,
204
+		strings.Join(lpStrings, ","),
205
+		strings.Join(vlStrings, ","),
206
+	)
207
+}

+ 210
- 0
vendor/github.com/prometheus/client_golang/prometheus/doc.go View File

@@ -0,0 +1,210 @@
1
+// Copyright 2014 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+// Package prometheus is the core instrumentation package. It provides metrics
15
+// primitives to instrument code for monitoring. It also offers a registry for
16
+// metrics. Sub-packages allow to expose the registered metrics via HTTP
17
+// (package promhttp) or push them to a Pushgateway (package push). There is
18
+// also a sub-package promauto, which provides metrics constructors with
19
+// automatic registration.
20
+//
21
+// All exported functions and methods are safe to be used concurrently unless
22
+// specified otherwise.
23
+//
24
+// # A Basic Example
25
+//
26
+// As a starting point, a very basic usage example:
27
+//
28
+//	package main
29
+//
30
+//	import (
31
+//		"log"
32
+//		"net/http"
33
+//
34
+//		"github.com/prometheus/client_golang/prometheus"
35
+//		"github.com/prometheus/client_golang/prometheus/promhttp"
36
+//	)
37
+//
38
+//	type metrics struct {
39
+//		cpuTemp  prometheus.Gauge
40
+//		hdFailures *prometheus.CounterVec
41
+//	}
42
+//
43
+//	func NewMetrics(reg prometheus.Registerer) *metrics {
44
+//		m := &metrics{
45
+//			cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
46
+//				Name: "cpu_temperature_celsius",
47
+//				Help: "Current temperature of the CPU.",
48
+//			}),
49
+//			hdFailures: prometheus.NewCounterVec(
50
+//				prometheus.CounterOpts{
51
+//					Name: "hd_errors_total",
52
+//					Help: "Number of hard-disk errors.",
53
+//				},
54
+//				[]string{"device"},
55
+//			),
56
+//		}
57
+//		reg.MustRegister(m.cpuTemp)
58
+//		reg.MustRegister(m.hdFailures)
59
+//		return m
60
+//	}
61
+//
62
+//	func main() {
63
+//		// Create a non-global registry.
64
+//		reg := prometheus.NewRegistry()
65
+//
66
+//		// Create new metrics and register them using the custom registry.
67
+//		m := NewMetrics(reg)
68
+//		// Set values for the new created metrics.
69
+//		m.cpuTemp.Set(65.3)
70
+//		m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
71
+//
72
+//		// Expose metrics and custom registry via an HTTP server
73
+//		// using the HandleFor function. "/metrics" is the usual endpoint for that.
74
+//		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
75
+//		log.Fatal(http.ListenAndServe(":8080", nil))
76
+//	}
77
+//
78
+// This is a complete program that exports two metrics, a Gauge and a Counter,
79
+// the latter with a label attached to turn it into a (one-dimensional) vector.
80
+// It registers the metrics using a custom registry and exposes them via an HTTP server
81
+// on the /metrics endpoint.
82
+//
83
+// # Metrics
84
+//
85
+// The number of exported identifiers in this package might appear a bit
86
+// overwhelming. However, in addition to the basic plumbing shown in the example
87
+// above, you only need to understand the different metric types and their
88
+// vector versions for basic usage. Furthermore, if you are not concerned with
89
+// fine-grained control of when and how to register metrics with the registry,
90
+// have a look at the promauto package, which will effectively allow you to
91
+// ignore registration altogether in simple cases.
92
+//
93
+// Above, you have already touched the Counter and the Gauge. There are two more
94
+// advanced metric types: the Summary and Histogram. A more thorough description
95
+// of those four metric types can be found in the Prometheus docs:
96
+// https://prometheus.io/docs/concepts/metric_types/
97
+//
98
+// In addition to the fundamental metric types Gauge, Counter, Summary, and
99
+// Histogram, a very important part of the Prometheus data model is the
100
+// partitioning of samples along dimensions called labels, which results in
101
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
102
+// and HistogramVec.
103
+//
104
+// While only the fundamental metric types implement the Metric interface, both
105
+// the metrics and their vector versions implement the Collector interface. A
106
+// Collector manages the collection of a number of Metrics, but for convenience,
107
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
108
+// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
109
+// and HistogramVec are not.
110
+//
111
+// To create instances of Metrics and their vector versions, you need a suitable
112
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
113
+//
114
+// # Custom Collectors and constant Metrics
115
+//
116
+// While you could create your own implementations of Metric, most likely you
117
+// will only ever implement the Collector interface on your own. At a first
118
+// glance, a custom Collector seems handy to bundle Metrics for common
119
+// registration (with the prime example of the different metric vectors above,
120
+// which bundle all the metrics of the same name but with different labels).
121
+//
122
+// There is a more involved use case, too: If you already have metrics
123
+// available, created outside of the Prometheus context, you don't need the
124
+// interface of the various Metric types. You essentially want to mirror the
125
+// existing numbers into Prometheus Metrics during collection. An own
126
+// implementation of the Collector interface is perfect for that. You can create
127
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
128
+// NewConstSummary (and their respective Must… versions). NewConstMetric is used
129
+// for all metric types with just a float64 as their value: Counter, Gauge, and
130
+// a special “type” called Untyped. Use the latter if you are not sure if the
131
+// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
132
+// happens in the Collect method. The Describe method has to return separate
133
+// Desc instances, representative of the “throw-away” metrics to be created
134
+// later.  NewDesc comes in handy to create those Desc instances. Alternatively,
135
+// you could return no Desc at all, which will mark the Collector “unchecked”.
136
+// No checks are performed at registration time, but metric consistency will
137
+// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
138
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
139
+// metrics that lead to inconsistencies in the total scrape result lies with the
140
+// implementer of the Collector. While this is not a desirable state, it is
141
+// sometimes necessary. The typical use case is a situation where the exact
142
+// metrics to be returned by a Collector cannot be predicted at registration
143
+// time, but the implementer has sufficient knowledge of the whole system to
144
+// guarantee metric consistency.
145
+//
146
+// The Collector example illustrates the use case. You can also look at the
147
+// source code of the processCollector (mirroring process metrics), the
148
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
149
+// metrics) as examples that are used in this package itself.
150
+//
151
+// If you just need to call a function to get a single float value to collect as
152
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
153
+// shortcuts.
154
+//
155
+// # Advanced Uses of the Registry
156
+//
157
+// While MustRegister is the by far most common way of registering a Collector,
158
+// sometimes you might want to handle the errors the registration might cause.
159
+// As suggested by the name, MustRegister panics if an error occurs. With the
160
+// Register function, the error is returned and can be handled.
161
+//
162
+// An error is returned if the registered Collector is incompatible or
163
+// inconsistent with already registered metrics. The registry aims for
164
+// consistency of the collected metrics according to the Prometheus data model.
165
+// Inconsistencies are ideally detected at registration time, not at collect
166
+// time. The former will usually be detected at start-up time of a program,
167
+// while the latter will only happen at scrape time, possibly not even on the
168
+// first scrape if the inconsistency only becomes relevant later. That is the
169
+// main reason why a Collector and a Metric have to describe themselves to the
170
+// registry.
171
+//
172
+// So far, everything we did operated on the so-called default registry, as it
173
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
174
+// can create a custom registry, or you can even implement the Registerer or
175
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
176
+// same way on a custom registry as the global functions Register and Unregister
177
+// on the default registry.
178
+//
179
+// There are a number of uses for custom registries: You can use registries with
180
+// special properties, see NewPedanticRegistry. You can avoid global state, as
181
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
182
+// the same time to expose different metrics in different ways.  You can use
183
+// separate registries for testing purposes.
184
+//
185
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
186
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
187
+// NewProcessCollector). With a custom registry, you are in control and decide
188
+// yourself about the Collectors to register.
189
+//
190
+// # HTTP Exposition
191
+//
192
+// The Registry implements the Gatherer interface. The caller of the Gather
193
+// method can then expose the gathered metrics in some way. Usually, the metrics
194
+// are served via HTTP on the /metrics endpoint. That's happening in the example
195
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
196
+//
197
+// # Pushing to the Pushgateway
198
+//
199
+// Function for pushing to the Pushgateway can be found in the push sub-package.
200
+//
201
+// # Graphite Bridge
202
+//
203
+// Functions and examples to push metrics from a Gatherer to Graphite can be
204
+// found in the graphite sub-package.
205
+//
206
+// # Other Means of Exposition
207
+//
208
+// More ways of exposing metrics can easily be added by following the approaches
209
+// of the existing implementations.
210
+package prometheus

+ 86
- 0
vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go View File

@@ -0,0 +1,86 @@
1
+// Copyright 2014 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import (
17
+	"encoding/json"
18
+	"expvar"
19
+)
20
+
21
// expvarCollector exposes a fixed set of expvar variables (keyed by their
// expvar name) as Prometheus metrics, one Desc per exported variable.
type expvarCollector struct {
	exports map[string]*Desc
}
24
+
25
+// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector.
26
+// See there for documentation.
27
+//
28
+// Deprecated: Use collectors.NewExpvarCollector instead.
29
+func NewExpvarCollector(exports map[string]*Desc) Collector {
30
+	return &expvarCollector{
31
+		exports: exports,
32
+	}
33
+}
34
+
35
+// Describe implements Collector.
36
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
37
+	for _, desc := range e.exports {
38
+		ch <- desc
39
+	}
40
+}
41
+
42
// Collect implements Collector. For each exported variable it decodes the
// expvar's JSON string representation and recursively maps nested JSON
// objects to variable label values, emitting one untyped const metric per
// scalar leaf found at full label depth.
func (e *expvarCollector) Collect(ch chan<- Metric) {
	for name, desc := range e.exports {
		var m Metric
		expVar := expvar.Get(name)
		if expVar == nil {
			// Variable not (yet) published under this name; skip it.
			continue
		}
		var v interface{}
		labels := make([]string, len(desc.variableLabels.names))
		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
			ch <- NewInvalidMetric(desc, err)
			continue
		}
		// processValue walks the decoded JSON tree: nesting level i of a
		// JSON object supplies the value for variable label i; the shared
		// labels slice is filled in along the path to each leaf.
		var processValue func(v interface{}, i int)
		processValue = func(v interface{}, i int) {
			if i >= len(labels) {
				// All variable labels are set. Copy labels, since the
				// slice is mutated by sibling branches of the recursion.
				copiedLabels := append(make([]string, 0, len(labels)), labels...)
				switch v := v.(type) {
				case float64:
					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
				case bool:
					// Booleans are mapped to 1 (true) / 0 (false).
					if v {
						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
					} else {
						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
					}
				default:
					// Unsupported leaf type (string, array, null, ...); drop it.
					return
				}
				ch <- m
				return
			}
			vm, ok := v.(map[string]interface{})
			if !ok {
				// Not enough JSON nesting for the declared labels; drop it.
				return
			}
			for lv, val := range vm {
				labels[i] = lv
				processValue(val, i+1)
			}
		}
		processValue(v, 0)
	}
}

+ 42
- 0
vendor/github.com/prometheus/client_golang/prometheus/fnv.go View File

@@ -0,0 +1,42 @@
1
+// Copyright 2018 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
// Inline and byte-free variant of hash/fnv's fnv64a.

const (
	// offset64 is the FNV-1a 64-bit offset basis.
	offset64 = 14695981039346656037
	// prime64 is the FNV-1a 64-bit prime.
	prime64 = 1099511628211
)

// hashNew returns the initial (offset basis) fnv64a hash value.
func hashNew() uint64 {
	return offset64
}

// hashAdd folds every byte of s into the fnv64a hash h, returning the
// updated hash. Iteration is by byte index, not by rune, on purpose.
func hashAdd(h uint64, s string) uint64 {
	for i := range s {
		h = hashAddByte(h, s[i])
	}
	return h
}

// hashAddByte folds a single byte into the fnv64a hash h, returning the
// updated hash (xor the byte, then multiply by the FNV prime).
func hashAddByte(h uint64, b byte) uint64 {
	h = (h ^ uint64(b)) * prime64
	return h
}

+ 311
- 0
vendor/github.com/prometheus/client_golang/prometheus/gauge.go View File

@@ -0,0 +1,311 @@
1
+// Copyright 2014 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import (
17
+	"math"
18
+	"sync/atomic"
19
+	"time"
20
+
21
+	dto "github.com/prometheus/client_model/go"
22
+)
23
+
24
+// Gauge is a Metric that represents a single numerical value that can
25
+// arbitrarily go up and down.
26
+//
27
+// A Gauge is typically used for measured values like temperatures or current
28
+// memory usage, but also "counts" that can go up and down, like the number of
29
+// running goroutines.
30
+//
31
+// To create Gauge instances, use NewGauge.
32
+type Gauge interface {
33
+	Metric
34
+	Collector
35
+
36
+	// Set sets the Gauge to an arbitrary value.
37
+	Set(float64)
38
+	// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
39
+	// values.
40
+	Inc()
41
+	// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
42
+	// values.
43
+	Dec()
44
+	// Add adds the given value to the Gauge. (The value can be negative,
45
+	// resulting in a decrease of the Gauge.)
46
+	Add(float64)
47
+	// Sub subtracts the given value from the Gauge. (The value can be
48
+	// negative, resulting in an increase of the Gauge.)
49
+	Sub(float64)
50
+
51
+	// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
52
+	SetToCurrentTime()
53
+}
54
+
55
+// GaugeOpts is an alias for Opts. See there for doc comments.
56
+type GaugeOpts Opts
57
+
58
+// GaugeVecOpts bundles the options to create a GaugeVec metric.
59
+// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels
60
+// is optional and can safely be left to its default value.
61
+type GaugeVecOpts struct {
62
+	GaugeOpts
63
+
64
+	// VariableLabels are used to partition the metric vector by the given set
65
+	// of labels. Each label value will be constrained with the optional Constraint
66
+	// function, if provided.
67
+	VariableLabels ConstrainableLabels
68
+}
69
+
70
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
71
+//
72
+// The returned implementation is optimized for a fast Set method. If you have a
73
+// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
74
+// the former. For example, the Inc method of the returned Gauge is slower than
75
+// the Inc method of a Counter returned by NewCounter. This matches the typical
76
+// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
77
+// the latter Inc-heavy.
78
+func NewGauge(opts GaugeOpts) Gauge {
79
+	desc := NewDesc(
80
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
81
+		opts.Help,
82
+		nil,
83
+		opts.ConstLabels,
84
+	)
85
+	result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
86
+	result.init(result) // Init self-collection.
87
+	return result
88
+}
89
+
90
+type gauge struct {
91
+	// valBits contains the bits of the represented float64 value. It has
92
+	// to go first in the struct to guarantee alignment for atomic
93
+	// operations.  http://golang.org/pkg/sync/atomic/#pkg-note-BUG
94
+	valBits uint64
95
+
96
+	selfCollector
97
+
98
+	desc       *Desc
99
+	labelPairs []*dto.LabelPair
100
+}
101
+
102
+func (g *gauge) Desc() *Desc {
103
+	return g.desc
104
+}
105
+
106
+func (g *gauge) Set(val float64) {
107
+	atomic.StoreUint64(&g.valBits, math.Float64bits(val))
108
+}
109
+
110
+func (g *gauge) SetToCurrentTime() {
111
+	g.Set(float64(time.Now().UnixNano()) / 1e9)
112
+}
113
+
114
+func (g *gauge) Inc() {
115
+	g.Add(1)
116
+}
117
+
118
+func (g *gauge) Dec() {
119
+	g.Add(-1)
120
+}
121
+
122
+func (g *gauge) Add(val float64) {
123
+	for {
124
+		oldBits := atomic.LoadUint64(&g.valBits)
125
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
126
+		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
127
+			return
128
+		}
129
+	}
130
+}
131
+
132
+func (g *gauge) Sub(val float64) {
133
+	g.Add(val * -1)
134
+}
135
+
136
+func (g *gauge) Write(out *dto.Metric) error {
137
+	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
138
+	return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
139
+}
140
+
141
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
142
+// Desc, but have different values for their variable labels. This is used if
143
+// you want to count the same thing partitioned by various dimensions
144
+// (e.g. number of operations queued, partitioned by user and operation
145
+// type). Create instances with NewGaugeVec.
146
+type GaugeVec struct {
147
+	*MetricVec
148
+}
149
+
150
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
151
+// partitioned by the given label names.
152
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
153
+	return V2.NewGaugeVec(GaugeVecOpts{
154
+		GaugeOpts:      opts,
155
+		VariableLabels: UnconstrainedLabels(labelNames),
156
+	})
157
+}
158
+
159
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
160
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
161
+	desc := V2.NewDesc(
162
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
163
+		opts.Help,
164
+		opts.VariableLabels,
165
+		opts.ConstLabels,
166
+	)
167
+	return &GaugeVec{
168
+		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
169
+			if len(lvs) != len(desc.variableLabels.names) {
170
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
171
+			}
172
+			result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
173
+			result.init(result) // Init self-collection.
174
+			return result
175
+		}),
176
+	}
177
+}
178
+
179
+// GetMetricWithLabelValues returns the Gauge for the given slice of label
180
+// values (same order as the variable labels in Desc). If that combination of
181
+// label values is accessed for the first time, a new Gauge is created.
182
+//
183
+// It is possible to call this method without using the returned Gauge to only
184
+// create the new Gauge but leave it at its starting value 0. See also the
185
+// SummaryVec example.
186
+//
187
+// Keeping the Gauge for later use is possible (and should be considered if
188
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
189
+// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
190
+// Gauge will still exist, but it will not be exported anymore, even if a
191
+// Gauge with the same label values is created later. See also the CounterVec
192
+// example.
193
+//
194
+// An error is returned if the number of label values is not the same as the
195
+// number of variable labels in Desc (minus any curried labels).
196
+//
197
+// Note that for more than one label value, this method is prone to mistakes
198
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
199
+// an alternative to avoid that type of mistake. For higher label numbers, the
200
+// latter has a much more readable (albeit more verbose) syntax, but it comes
201
+// with a performance overhead (for creating and processing the Labels map).
202
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
203
+	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
204
+	if metric != nil {
205
+		return metric.(Gauge), err
206
+	}
207
+	return nil, err
208
+}
209
+
210
+// GetMetricWith returns the Gauge for the given Labels map (the label names
211
+// must match those of the variable labels in Desc). If that label map is
212
+// accessed for the first time, a new Gauge is created. Implications of
213
+// creating a Gauge without using it and keeping the Gauge for later use are
214
+// the same as for GetMetricWithLabelValues.
215
+//
216
+// An error is returned if the number and names of the Labels are inconsistent
217
+// with those of the variable labels in Desc (minus any curried labels).
218
+//
219
+// This method is used for the same purpose as
220
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
221
+// methods.
222
+func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
223
+	metric, err := v.MetricVec.GetMetricWith(labels)
224
+	if metric != nil {
225
+		return metric.(Gauge), err
226
+	}
227
+	return nil, err
228
+}
229
+
230
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
231
+// GetMetricWithLabelValues would have returned an error. Not returning an
232
+// error allows shortcuts like
233
+//
234
+//	myVec.WithLabelValues("404", "GET").Add(42)
235
+func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
236
+	g, err := v.GetMetricWithLabelValues(lvs...)
237
+	if err != nil {
238
+		panic(err)
239
+	}
240
+	return g
241
+}
242
+
243
+// With works as GetMetricWith, but panics where GetMetricWithLabels would have
244
+// returned an error. Not returning an error allows shortcuts like
245
+//
246
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
247
+func (v *GaugeVec) With(labels Labels) Gauge {
248
+	g, err := v.GetMetricWith(labels)
249
+	if err != nil {
250
+		panic(err)
251
+	}
252
+	return g
253
+}
254
+
255
+// CurryWith returns a vector curried with the provided labels, i.e. the
256
+// returned vector has those labels pre-set for all labeled operations performed
257
+// on it. The cardinality of the curried vector is reduced accordingly. The
258
+// order of the remaining labels stays the same (just with the curried labels
259
+// taken out of the sequence – which is relevant for the
260
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
261
+// vector, but only with labels not yet used for currying before.
262
+//
263
+// The metrics contained in the GaugeVec are shared between the curried and
264
+// uncurried vectors. They are just accessed differently. Curried and uncurried
265
+// vectors behave identically in terms of collection. Only one must be
266
+// registered with a given registry (usually the uncurried version). The Reset
267
+// method deletes all metrics, even if called on a curried vector.
268
+func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
269
+	vec, err := v.MetricVec.CurryWith(labels)
270
+	if vec != nil {
271
+		return &GaugeVec{vec}, err
272
+	}
273
+	return nil, err
274
+}
275
+
276
+// MustCurryWith works as CurryWith but panics where CurryWith would have
277
+// returned an error.
278
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
279
+	vec, err := v.CurryWith(labels)
280
+	if err != nil {
281
+		panic(err)
282
+	}
283
+	return vec
284
+}
285
+
286
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
287
+// provided function.
288
+//
289
+// To create GaugeFunc instances, use NewGaugeFunc.
290
+type GaugeFunc interface {
291
+	Metric
292
+	Collector
293
+}
294
+
295
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
296
+// value reported is determined by calling the given function from within the
297
+// Write method. Take into account that metric collection may happen
298
+// concurrently. Therefore, it must be safe to call the provided function
299
+// concurrently.
300
+//
301
+// NewGaugeFunc is a good way to create an “info” style metric with a constant
302
+// value of 1. Example:
303
+// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
304
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
305
+	return newValueFunc(NewDesc(
306
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
307
+		opts.Help,
308
+		nil,
309
+		opts.ConstLabels,
310
+	), GaugeValue, function)
311
+}

+ 26
- 0
vendor/github.com/prometheus/client_golang/prometheus/get_pid.go View File

@@ -0,0 +1,26 @@
1
+// Copyright 2015 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+//go:build !js || wasm
15
+// +build !js wasm
16
+
17
+package prometheus
18
+
19
+import "os"
20
+
21
+func getPIDFn() func() (int, error) {
22
+	pid := os.Getpid()
23
+	return func() (int, error) {
24
+		return pid, nil
25
+	}
26
+}

+ 23
- 0
vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go View File

@@ -0,0 +1,23 @@
1
+// Copyright 2015 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+//go:build js && !wasm
15
+// +build js,!wasm
16
+
17
+package prometheus
18
+
19
+func getPIDFn() func() (int, error) {
20
+	return func() (int, error) {
21
+		return 1, nil
22
+	}
23
+}

+ 281
- 0
vendor/github.com/prometheus/client_golang/prometheus/go_collector.go View File

@@ -0,0 +1,281 @@
1
+// Copyright 2018 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import (
17
+	"runtime"
18
+	"runtime/debug"
19
+	"time"
20
+)
21
+
22
+// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
23
+// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so
24
+// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is
25
+// populated using runtime/metrics.
26
+func goRuntimeMemStats() memStatsMetrics {
27
+	return memStatsMetrics{
28
+		{
29
+			desc: NewDesc(
30
+				memstatNamespace("alloc_bytes"),
31
+				"Number of bytes allocated and still in use.",
32
+				nil, nil,
33
+			),
34
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
35
+			valType: GaugeValue,
36
+		}, {
37
+			desc: NewDesc(
38
+				memstatNamespace("alloc_bytes_total"),
39
+				"Total number of bytes allocated, even if freed.",
40
+				nil, nil,
41
+			),
42
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
43
+			valType: CounterValue,
44
+		}, {
45
+			desc: NewDesc(
46
+				memstatNamespace("sys_bytes"),
47
+				"Number of bytes obtained from system.",
48
+				nil, nil,
49
+			),
50
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
51
+			valType: GaugeValue,
52
+		}, {
53
+			desc: NewDesc(
54
+				memstatNamespace("lookups_total"),
55
+				"Total number of pointer lookups.",
56
+				nil, nil,
57
+			),
58
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
59
+			valType: CounterValue,
60
+		}, {
61
+			desc: NewDesc(
62
+				memstatNamespace("mallocs_total"),
63
+				"Total number of mallocs.",
64
+				nil, nil,
65
+			),
66
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
67
+			valType: CounterValue,
68
+		}, {
69
+			desc: NewDesc(
70
+				memstatNamespace("frees_total"),
71
+				"Total number of frees.",
72
+				nil, nil,
73
+			),
74
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
75
+			valType: CounterValue,
76
+		}, {
77
+			desc: NewDesc(
78
+				memstatNamespace("heap_alloc_bytes"),
79
+				"Number of heap bytes allocated and still in use.",
80
+				nil, nil,
81
+			),
82
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
83
+			valType: GaugeValue,
84
+		}, {
85
+			desc: NewDesc(
86
+				memstatNamespace("heap_sys_bytes"),
87
+				"Number of heap bytes obtained from system.",
88
+				nil, nil,
89
+			),
90
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
91
+			valType: GaugeValue,
92
+		}, {
93
+			desc: NewDesc(
94
+				memstatNamespace("heap_idle_bytes"),
95
+				"Number of heap bytes waiting to be used.",
96
+				nil, nil,
97
+			),
98
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
99
+			valType: GaugeValue,
100
+		}, {
101
+			desc: NewDesc(
102
+				memstatNamespace("heap_inuse_bytes"),
103
+				"Number of heap bytes that are in use.",
104
+				nil, nil,
105
+			),
106
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
107
+			valType: GaugeValue,
108
+		}, {
109
+			desc: NewDesc(
110
+				memstatNamespace("heap_released_bytes"),
111
+				"Number of heap bytes released to OS.",
112
+				nil, nil,
113
+			),
114
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
115
+			valType: GaugeValue,
116
+		}, {
117
+			desc: NewDesc(
118
+				memstatNamespace("heap_objects"),
119
+				"Number of allocated objects.",
120
+				nil, nil,
121
+			),
122
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
123
+			valType: GaugeValue,
124
+		}, {
125
+			desc: NewDesc(
126
+				memstatNamespace("stack_inuse_bytes"),
127
+				"Number of bytes in use by the stack allocator.",
128
+				nil, nil,
129
+			),
130
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
131
+			valType: GaugeValue,
132
+		}, {
133
+			desc: NewDesc(
134
+				memstatNamespace("stack_sys_bytes"),
135
+				"Number of bytes obtained from system for stack allocator.",
136
+				nil, nil,
137
+			),
138
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
139
+			valType: GaugeValue,
140
+		}, {
141
+			desc: NewDesc(
142
+				memstatNamespace("mspan_inuse_bytes"),
143
+				"Number of bytes in use by mspan structures.",
144
+				nil, nil,
145
+			),
146
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
147
+			valType: GaugeValue,
148
+		}, {
149
+			desc: NewDesc(
150
+				memstatNamespace("mspan_sys_bytes"),
151
+				"Number of bytes used for mspan structures obtained from system.",
152
+				nil, nil,
153
+			),
154
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
155
+			valType: GaugeValue,
156
+		}, {
157
+			desc: NewDesc(
158
+				memstatNamespace("mcache_inuse_bytes"),
159
+				"Number of bytes in use by mcache structures.",
160
+				nil, nil,
161
+			),
162
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
163
+			valType: GaugeValue,
164
+		}, {
165
+			desc: NewDesc(
166
+				memstatNamespace("mcache_sys_bytes"),
167
+				"Number of bytes used for mcache structures obtained from system.",
168
+				nil, nil,
169
+			),
170
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
171
+			valType: GaugeValue,
172
+		}, {
173
+			desc: NewDesc(
174
+				memstatNamespace("buck_hash_sys_bytes"),
175
+				"Number of bytes used by the profiling bucket hash table.",
176
+				nil, nil,
177
+			),
178
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
179
+			valType: GaugeValue,
180
+		}, {
181
+			desc: NewDesc(
182
+				memstatNamespace("gc_sys_bytes"),
183
+				"Number of bytes used for garbage collection system metadata.",
184
+				nil, nil,
185
+			),
186
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
187
+			valType: GaugeValue,
188
+		}, {
189
+			desc: NewDesc(
190
+				memstatNamespace("other_sys_bytes"),
191
+				"Number of bytes used for other system allocations.",
192
+				nil, nil,
193
+			),
194
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
195
+			valType: GaugeValue,
196
+		}, {
197
+			desc: NewDesc(
198
+				memstatNamespace("next_gc_bytes"),
199
+				"Number of heap bytes when next garbage collection will take place.",
200
+				nil, nil,
201
+			),
202
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
203
+			valType: GaugeValue,
204
+		},
205
+	}
206
+}
207
+
208
+type baseGoCollector struct {
209
+	goroutinesDesc *Desc
210
+	threadsDesc    *Desc
211
+	gcDesc         *Desc
212
+	gcLastTimeDesc *Desc
213
+	goInfoDesc     *Desc
214
+}
215
+
216
+func newBaseGoCollector() baseGoCollector {
217
+	return baseGoCollector{
218
+		goroutinesDesc: NewDesc(
219
+			"go_goroutines",
220
+			"Number of goroutines that currently exist.",
221
+			nil, nil),
222
+		threadsDesc: NewDesc(
223
+			"go_threads",
224
+			"Number of OS threads created.",
225
+			nil, nil),
226
+		gcDesc: NewDesc(
227
+			"go_gc_duration_seconds",
228
+			"A summary of the pause duration of garbage collection cycles.",
229
+			nil, nil),
230
+		gcLastTimeDesc: NewDesc(
231
+			"go_memstats_last_gc_time_seconds",
232
+			"Number of seconds since 1970 of last garbage collection.",
233
+			nil, nil),
234
+		goInfoDesc: NewDesc(
235
+			"go_info",
236
+			"Information about the Go environment.",
237
+			nil, Labels{"version": runtime.Version()}),
238
+	}
239
+}
240
+
241
+// Describe returns all descriptions of the collector.
242
+func (c *baseGoCollector) Describe(ch chan<- *Desc) {
243
+	ch <- c.goroutinesDesc
244
+	ch <- c.threadsDesc
245
+	ch <- c.gcDesc
246
+	ch <- c.gcLastTimeDesc
247
+	ch <- c.goInfoDesc
248
+}
249
+
250
+// Collect returns the current state of all metrics of the collector.
251
+func (c *baseGoCollector) Collect(ch chan<- Metric) {
252
+	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
253
+
254
+	n := getRuntimeNumThreads()
255
+	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
256
+
257
+	var stats debug.GCStats
258
+	stats.PauseQuantiles = make([]time.Duration, 5)
259
+	debug.ReadGCStats(&stats)
260
+
261
+	quantiles := make(map[float64]float64)
262
+	for idx, pq := range stats.PauseQuantiles[1:] {
263
+		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
264
+	}
265
+	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
266
+	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
267
+	ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
268
+	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
269
+}
270
+
271
+func memstatNamespace(s string) string {
272
+	return "go_memstats_" + s
273
+}
274
+
275
+// memStatsMetrics provide description, evaluator, runtime/metrics name, and
276
+// value type for memstat metrics.
277
+type memStatsMetrics []struct {
278
+	desc    *Desc
279
+	eval    func(*runtime.MemStats) float64
280
+	valType ValueType
281
+}

+ 122
- 0
vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go View File

@@ -0,0 +1,122 @@
1
+// Copyright 2021 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+//go:build !go1.17
15
+// +build !go1.17
16
+
17
+package prometheus
18
+
19
+import (
20
+	"runtime"
21
+	"sync"
22
+	"time"
23
+)
24
+
25
+type goCollector struct {
26
+	base baseGoCollector
27
+
28
+	// ms... are memstats related.
29
+	msLast          *runtime.MemStats // Previously collected memstats.
30
+	msLastTimestamp time.Time
31
+	msMtx           sync.Mutex // Protects msLast and msLastTimestamp.
32
+	msMetrics       memStatsMetrics
33
+	msRead          func(*runtime.MemStats) // For mocking in tests.
34
+	msMaxWait       time.Duration           // Wait time for fresh memstats.
35
+	msMaxAge        time.Duration           // Maximum allowed age of old memstats.
36
+}
37
+
38
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
39
+// See there for documentation.
40
+//
41
+// Deprecated: Use collectors.NewGoCollector instead.
42
+func NewGoCollector() Collector {
43
+	msMetrics := goRuntimeMemStats()
44
+	msMetrics = append(msMetrics, struct {
45
+		desc    *Desc
46
+		eval    func(*runtime.MemStats) float64
47
+		valType ValueType
48
+	}{
49
+		// This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
50
+		desc: NewDesc(
51
+			memstatNamespace("gc_cpu_fraction"),
52
+			"The fraction of this program's available CPU time used by the GC since the program started.",
53
+			nil, nil,
54
+		),
55
+		eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
56
+		valType: GaugeValue,
57
+	})
58
+	return &goCollector{
59
+		base:      newBaseGoCollector(),
60
+		msLast:    &runtime.MemStats{},
61
+		msRead:    runtime.ReadMemStats,
62
+		msMaxWait: time.Second,
63
+		msMaxAge:  5 * time.Minute,
64
+		msMetrics: msMetrics,
65
+	}
66
+}
67
+
68
+// Describe returns all descriptions of the collector.
69
+func (c *goCollector) Describe(ch chan<- *Desc) {
70
+	c.base.Describe(ch)
71
+	for _, i := range c.msMetrics {
72
+		ch <- i.desc
73
+	}
74
+}
75
+
76
+// Collect returns the current state of all metrics of the collector.
77
+func (c *goCollector) Collect(ch chan<- Metric) {
78
+	var (
79
+		ms   = &runtime.MemStats{}
80
+		done = make(chan struct{})
81
+	)
82
+	// Start reading memstats first as it might take a while.
83
+	go func() {
84
+		c.msRead(ms)
85
+		c.msMtx.Lock()
86
+		c.msLast = ms
87
+		c.msLastTimestamp = time.Now()
88
+		c.msMtx.Unlock()
89
+		close(done)
90
+	}()
91
+
92
+	// Collect base non-memory metrics.
93
+	c.base.Collect(ch)
94
+
95
+	timer := time.NewTimer(c.msMaxWait)
96
+	select {
97
+	case <-done: // Our own ReadMemStats succeeded in time. Use it.
98
+		timer.Stop() // Important for high collection frequencies to not pile up timers.
99
+		c.msCollect(ch, ms)
100
+		return
101
+	case <-timer.C: // Time out, use last memstats if possible. Continue below.
102
+	}
103
+	c.msMtx.Lock()
104
+	if time.Since(c.msLastTimestamp) < c.msMaxAge {
105
+		// Last memstats are recent enough. Collect from them under the lock.
106
+		c.msCollect(ch, c.msLast)
107
+		c.msMtx.Unlock()
108
+		return
109
+	}
110
+	// If we are here, the last memstats are too old or don't exist. We have
111
+	// to wait until our own ReadMemStats finally completes. For that to
112
+	// happen, we have to release the lock.
113
+	c.msMtx.Unlock()
114
+	<-done
115
+	c.msCollect(ch, ms)
116
+}
117
+
118
+func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
119
+	for _, i := range c.msMetrics {
120
+		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
121
+	}
122
+}

+ 567
- 0
vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go View File

@@ -0,0 +1,567 @@
1
+// Copyright 2021 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+//go:build go1.17
15
+// +build go1.17
16
+
17
+package prometheus
18
+
19
+import (
20
+	"math"
21
+	"runtime"
22
+	"runtime/metrics"
23
+	"strings"
24
+	"sync"
25
+
26
+	"github.com/prometheus/client_golang/prometheus/internal"
27
+
28
+	dto "github.com/prometheus/client_model/go"
29
+	"google.golang.org/protobuf/proto"
30
+)
31
+
32
+const (
33
+	// constants for strings referenced more than once.
34
+	goGCHeapTinyAllocsObjects               = "/gc/heap/tiny/allocs:objects"
35
+	goGCHeapAllocsObjects                   = "/gc/heap/allocs:objects"
36
+	goGCHeapFreesObjects                    = "/gc/heap/frees:objects"
37
+	goGCHeapFreesBytes                      = "/gc/heap/frees:bytes"
38
+	goGCHeapAllocsBytes                     = "/gc/heap/allocs:bytes"
39
+	goGCHeapObjects                         = "/gc/heap/objects:objects"
40
+	goGCHeapGoalBytes                       = "/gc/heap/goal:bytes"
41
+	goMemoryClassesTotalBytes               = "/memory/classes/total:bytes"
42
+	goMemoryClassesHeapObjectsBytes         = "/memory/classes/heap/objects:bytes"
43
+	goMemoryClassesHeapUnusedBytes          = "/memory/classes/heap/unused:bytes"
44
+	goMemoryClassesHeapReleasedBytes        = "/memory/classes/heap/released:bytes"
45
+	goMemoryClassesHeapFreeBytes            = "/memory/classes/heap/free:bytes"
46
+	goMemoryClassesHeapStacksBytes          = "/memory/classes/heap/stacks:bytes"
47
+	goMemoryClassesOSStacksBytes            = "/memory/classes/os-stacks:bytes"
48
+	goMemoryClassesMetadataMSpanInuseBytes  = "/memory/classes/metadata/mspan/inuse:bytes"
49
+	goMemoryClassesMetadataMSPanFreeBytes   = "/memory/classes/metadata/mspan/free:bytes"
50
+	goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
51
+	goMemoryClassesMetadataMCacheFreeBytes  = "/memory/classes/metadata/mcache/free:bytes"
52
+	goMemoryClassesProfilingBucketsBytes    = "/memory/classes/profiling/buckets:bytes"
53
+	goMemoryClassesMetadataOtherBytes       = "/memory/classes/metadata/other:bytes"
54
+	goMemoryClassesOtherBytes               = "/memory/classes/other:bytes"
55
+)
56
+
57
+// rmNamesForMemStatsMetrics represents runtime/metrics names required to populate goRuntimeMemStats from like logic.
58
+var rmNamesForMemStatsMetrics = []string{
59
+	goGCHeapTinyAllocsObjects,
60
+	goGCHeapAllocsObjects,
61
+	goGCHeapFreesObjects,
62
+	goGCHeapAllocsBytes,
63
+	goGCHeapObjects,
64
+	goGCHeapGoalBytes,
65
+	goMemoryClassesTotalBytes,
66
+	goMemoryClassesHeapObjectsBytes,
67
+	goMemoryClassesHeapUnusedBytes,
68
+	goMemoryClassesHeapReleasedBytes,
69
+	goMemoryClassesHeapFreeBytes,
70
+	goMemoryClassesHeapStacksBytes,
71
+	goMemoryClassesOSStacksBytes,
72
+	goMemoryClassesMetadataMSpanInuseBytes,
73
+	goMemoryClassesMetadataMSPanFreeBytes,
74
+	goMemoryClassesMetadataMCacheInuseBytes,
75
+	goMemoryClassesMetadataMCacheFreeBytes,
76
+	goMemoryClassesProfilingBucketsBytes,
77
+	goMemoryClassesMetadataOtherBytes,
78
+	goMemoryClassesOtherBytes,
79
+}
80
+
81
// bestEffortLookupRM returns the runtime/metrics descriptions whose names
// appear in lookup. Names the current Go runtime does not provide are
// silently skipped ("best effort"), so the result may be shorter than
// lookup. The result preserves the order of metrics.All().
func bestEffortLookupRM(lookup []string) []metrics.Description {
	// Build a set once so the scan over metrics.All() is O(n+m) instead of
	// the previous nested O(n*m) loop. (lookup is expected to be free of
	// duplicates, as rmNamesForMemStatsMetrics is.)
	wanted := make(map[string]struct{}, len(lookup))
	for _, name := range lookup {
		wanted[name] = struct{}{}
	}
	ret := make([]metrics.Description, 0, len(lookup))
	for _, d := range metrics.All() {
		if _, ok := wanted[d.Name]; ok {
			ret = append(ret, d)
		}
	}
	return ret
}
92
+
93
// goCollector collects Go runtime metrics via the runtime/metrics package
// (Go 1.17+) and, optionally, also re-exposes them under the legacy
// MemStats-derived metric names.
type goCollector struct {
	base baseGoCollector

	// mu protects updates to all fields ensuring a consistent
	// snapshot is always produced by Collect.
	mu sync.Mutex

	// sampleBuf contains all samples that have to be retrieved from
	// runtime/metrics (not all of them will be exposed).
	sampleBuf []metrics.Sample
	// sampleMap allows lookup for MemStats metrics and runtime/metrics
	// histograms for exact sums, keyed by runtime/metrics name. It points
	// into sampleBuf, which therefore must never be reallocated.
	sampleMap map[string]*metrics.Sample

	// rmExposedMetrics represents all runtime/metrics package metrics
	// that were configured to be exposed.
	rmExposedMetrics []collectorMetric
	// rmExactSumMapForHist maps a histogram's runtime/metrics name to the
	// name of the companion metric carrying its exact sum.
	rmExactSumMapForHist map[string]string

	// With Go 1.17, the runtime/metrics package was introduced.
	// From that point on, metric names produced by the runtime/metrics
	// package could be generated from runtime/metrics names. However,
	// these differ from the old names for the same values.
	//
	// This field exists to export the same values under the old names
	// as well.
	msMetrics memStatsMetrics
	// msMetricsEnabled records whether the legacy MemStats-style metrics
	// are exposed (i.e. DisableMemStatsLikeMetrics was not set).
	msMetricsEnabled bool
}
120
+
121
// rmMetricDesc wraps a runtime/metrics Description, allowing this package
// to attach its own behavior without modifying the stdlib type.
type rmMetricDesc struct {
	metrics.Description
}
124
+
125
+func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
126
+	var descs []rmMetricDesc
127
+	for _, d := range metrics.All() {
128
+		var (
129
+			deny = true
130
+			desc rmMetricDesc
131
+		)
132
+
133
+		for _, r := range rules {
134
+			if !r.Matcher.MatchString(d.Name) {
135
+				continue
136
+			}
137
+			deny = r.Deny
138
+		}
139
+		if deny {
140
+			continue
141
+		}
142
+
143
+		desc.Description = d
144
+		descs = append(descs, desc)
145
+	}
146
+	return descs
147
+}
148
+
149
// defaultGoCollectorOptions returns the options used when NewGoCollector is
// called without option functions: the two heap size-class histograms get
// their exact sums from the corresponding byte counters, and the empty rule
// set means no runtime/metrics metric is exposed directly.
func defaultGoCollectorOptions() internal.GoCollectorOptions {
	return internal.GoCollectorOptions{
		RuntimeMetricSumForHist: map[string]string{
			"/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
			"/gc/heap/frees-by-size:bytes":  goGCHeapFreesBytes,
		},
		RuntimeMetricRules: []internal.GoCollectorRule{
			//{Matcher: regexp.MustCompile("")},
		},
	}
}
160
+
161
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
	// Apply caller-supplied option functions on top of the defaults.
	opt := defaultGoCollectorOptions()
	for _, o := range opts {
		o(&opt)
	}

	exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)

	// Collect all histogram samples so that we can get their buckets.
	// The API guarantees that the buckets are always fixed for the lifetime
	// of the process.
	var histograms []metrics.Sample
	for _, d := range exposedDescriptions {
		if d.Kind == metrics.KindFloat64Histogram {
			histograms = append(histograms, metrics.Sample{Name: d.Name})
		}
	}

	if len(histograms) > 0 {
		metrics.Read(histograms)
	}

	// Remember each histogram's (fixed) bucket boundaries by name.
	bucketsMap := make(map[string][]float64)
	for i := range histograms {
		bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
	}

	// Generate a collector for each exposed runtime/metrics metric.
	metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
	// SampleBuf is used for reading from runtime/metrics.
	// We are assuming the largest case to have stable pointers for sampleMap purposes.
	// (sampleMap stores pointers into sampleBuf, so the capacity below must
	// cover every possible append; a reallocation would invalidate the map.)
	sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
	sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
	for _, d := range exposedDescriptions {
		namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
		if !ok {
			// Just ignore this metric; we can't do anything with it here.
			// If a user decides to use the latest version of Go, we don't want
			// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
			continue
		}

		sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
		sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]

		// Pick the collector type matching the runtime/metrics kind:
		// batch histogram, counter (cumulative), or gauge.
		var m collectorMetric
		if d.Kind == metrics.KindFloat64Histogram {
			_, hasSum := opt.RuntimeMetricSumForHist[d.Name]
			// The unit is the part of the runtime/metrics name after ':'.
			unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
			m = newBatchHistogram(
				NewDesc(
					BuildFQName(namespace, subsystem, name),
					d.Description.Description,
					nil,
					nil,
				),
				internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
				hasSum,
			)
		} else if d.Cumulative {
			m = NewCounter(CounterOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      name,
				Help:      d.Description.Description,
			},
			)
		} else {
			m = NewGauge(GaugeOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      name,
				Help:      d.Description.Description,
			})
		}
		metricSet = append(metricSet, m)
	}

	// Add exact sum metrics to sampleBuf if not added before.
	for _, h := range histograms {
		sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
		if !ok {
			continue
		}

		if _, ok := sampleMap[sumMetric]; ok {
			continue
		}
		sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
		sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
	}

	var (
		msMetrics      memStatsMetrics
		msDescriptions []metrics.Description
	)

	if !opt.DisableMemStatsLikeMetrics {
		msMetrics = goRuntimeMemStats()
		msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)

		// Check if metric was not exposed before and if not, add to sampleBuf.
		for _, mdDesc := range msDescriptions {
			if _, ok := sampleMap[mdDesc.Name]; ok {
				continue
			}
			sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
			sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
		}
	}

	return &goCollector{
		base:                 newBaseGoCollector(),
		sampleBuf:            sampleBuf,
		sampleMap:            sampleMap,
		rmExposedMetrics:     metricSet,
		rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
		msMetrics:            msMetrics,
		msMetricsEnabled:     !opt.DisableMemStatsLikeMetrics,
	}
}
286
+
287
+// Describe returns all descriptions of the collector.
288
+func (c *goCollector) Describe(ch chan<- *Desc) {
289
+	c.base.Describe(ch)
290
+	for _, i := range c.msMetrics {
291
+		ch <- i.desc
292
+	}
293
+	for _, m := range c.rmExposedMetrics {
294
+		ch <- m.Desc()
295
+	}
296
+}
297
+
298
+// Collect returns the current state of all metrics of the collector.
299
+func (c *goCollector) Collect(ch chan<- Metric) {
300
+	// Collect base non-memory metrics.
301
+	c.base.Collect(ch)
302
+
303
+	if len(c.sampleBuf) == 0 {
304
+		return
305
+	}
306
+
307
+	// Collect must be thread-safe, so prevent concurrent use of
308
+	// sampleBuf elements. Just read into sampleBuf but write all the data
309
+	// we get into our Metrics or MemStats.
310
+	//
311
+	// This lock also ensures that the Metrics we send out are all from
312
+	// the same updates, ensuring their mutual consistency insofar as
313
+	// is guaranteed by the runtime/metrics package.
314
+	//
315
+	// N.B. This locking is heavy-handed, but Collect is expected to be called
316
+	// relatively infrequently. Also the core operation here, metrics.Read,
317
+	// is fast (O(tens of microseconds)) so contention should certainly be
318
+	// low, though channel operations and any allocations may add to that.
319
+	c.mu.Lock()
320
+	defer c.mu.Unlock()
321
+
322
+	// Populate runtime/metrics sample buffer.
323
+	metrics.Read(c.sampleBuf)
324
+
325
+	// Collect all our runtime/metrics user chose to expose from sampleBuf (if any).
326
+	for i, metric := range c.rmExposedMetrics {
327
+		// We created samples for exposed metrics first in order, so indexes match.
328
+		sample := c.sampleBuf[i]
329
+
330
+		// N.B. switch on concrete type because it's significantly more efficient
331
+		// than checking for the Counter and Gauge interface implementations. In
332
+		// this case, we control all the types here.
333
+		switch m := metric.(type) {
334
+		case *counter:
335
+			// Guard against decreases. This should never happen, but a failure
336
+			// to do so will result in a panic, which is a harsh consequence for
337
+			// a metrics collection bug.
338
+			v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
339
+			if v1 > v0 {
340
+				m.Add(unwrapScalarRMValue(sample.Value) - m.get())
341
+			}
342
+			m.Collect(ch)
343
+		case *gauge:
344
+			m.Set(unwrapScalarRMValue(sample.Value))
345
+			m.Collect(ch)
346
+		case *batchHistogram:
347
+			m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
348
+			m.Collect(ch)
349
+		default:
350
+			panic("unexpected metric type")
351
+		}
352
+	}
353
+
354
+	if c.msMetricsEnabled {
355
+		// ms is a dummy MemStats that we populate ourselves so that we can
356
+		// populate the old metrics from it if goMemStatsCollection is enabled.
357
+		var ms runtime.MemStats
358
+		memStatsFromRM(&ms, c.sampleMap)
359
+		for _, i := range c.msMetrics {
360
+			ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
361
+		}
362
+	}
363
+}
364
+
365
+// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
366
+// to be scalar and returns the equivalent float64 value. Panics if the
367
+// value is not scalar.
368
+func unwrapScalarRMValue(v metrics.Value) float64 {
369
+	switch v.Kind() {
370
+	case metrics.KindUint64:
371
+		return float64(v.Uint64())
372
+	case metrics.KindFloat64:
373
+		return v.Float64()
374
+	case metrics.KindBad:
375
+		// Unsupported metric.
376
+		//
377
+		// This should never happen because we always populate our metric
378
+		// set from the runtime/metrics package.
379
+		panic("unexpected unsupported metric")
380
+	default:
381
+		// Unsupported metric kind.
382
+		//
383
+		// This should never happen because we check for this during initialization
384
+		// and flag and filter metrics whose kinds we don't understand.
385
+		panic("unexpected unsupported metric kind")
386
+	}
387
+}
388
+
389
+// exactSumFor takes a runtime/metrics metric name (that is assumed to
390
+// be of kind KindFloat64Histogram) and returns its exact sum and whether
391
+// its exact sum exists.
392
+//
393
+// The runtime/metrics API for histograms doesn't currently expose exact
394
+// sums, but some of the other metrics are in fact exact sums of histograms.
395
+func (c *goCollector) exactSumFor(rmName string) float64 {
396
+	sumName, ok := c.rmExactSumMapForHist[rmName]
397
+	if !ok {
398
+		return 0
399
+	}
400
+	s, ok := c.sampleMap[sumName]
401
+	if !ok {
402
+		return 0
403
+	}
404
+	return unwrapScalarRMValue(s.Value)
405
+}
406
+
407
+func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
408
+	lookupOrZero := func(name string) uint64 {
409
+		if s, ok := rm[name]; ok {
410
+			return s.Value.Uint64()
411
+		}
412
+		return 0
413
+	}
414
+
415
+	// Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
416
+	// The reason for this is because MemStats couldn't be extended at the time
417
+	// but there was a desire to have Mallocs at least be a little more representative,
418
+	// while having Mallocs - Frees still represent a live object count.
419
+	// Unfortunately, MemStats doesn't actually export a large allocation count,
420
+	// so it's impossible to pull this number out directly.
421
+	tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
422
+	ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
423
+	ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
424
+
425
+	ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
426
+	ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
427
+	ms.Lookups = 0 // Already always zero.
428
+	ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
429
+	ms.Alloc = ms.HeapAlloc
430
+	ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
431
+	ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
432
+	ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
433
+	ms.HeapSys = ms.HeapInuse + ms.HeapIdle
434
+	ms.HeapObjects = lookupOrZero(goGCHeapObjects)
435
+	ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
436
+	ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
437
+	ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
438
+	ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
439
+	ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
440
+	ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
441
+	ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
442
+	ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
443
+	ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
444
+	ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
445
+
446
+	// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
447
+	// and often misleading due to the fact that it's an average over the lifetime
448
+	// of the process.
449
+	// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
450
+	// for more details.
451
+	ms.GCCPUFraction = 0
452
+}
453
+
454
// batchHistogram is a mutable histogram that is updated
// in batches.
type batchHistogram struct {
	selfCollector

	// Static fields updated only once.
	desc   *Desc
	hasSum bool // whether an exact sum is supplied to update

	// Because this histogram operates in batches, it just uses a
	// single mutex for everything. updates are always serialized
	// but Write calls may operate concurrently with updates.
	// Contention between these two sources should be rare.
	mu      sync.Mutex
	buckets []float64 // Inclusive lower bounds, like runtime/metrics.
	counts  []uint64  // One count per bucket: len(buckets)-1 entries.
	sum     float64   // Used if hasSum is true.
}
472
+
473
+// newBatchHistogram creates a new batch histogram value with the given
474
+// Desc, buckets, and whether or not it has an exact sum available.
475
+//
476
+// buckets must always be from the runtime/metrics package, following
477
+// the same conventions.
478
+func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
479
+	// We need to remove -Inf values. runtime/metrics keeps them around.
480
+	// But -Inf bucket should not be allowed for prometheus histograms.
481
+	if buckets[0] == math.Inf(-1) {
482
+		buckets = buckets[1:]
483
+	}
484
+	h := &batchHistogram{
485
+		desc:    desc,
486
+		buckets: buckets,
487
+		// Because buckets follows runtime/metrics conventions, there's
488
+		// 1 more value in the buckets list than there are buckets represented,
489
+		// because in runtime/metrics, the bucket values represent *boundaries*,
490
+		// and non-Inf boundaries are inclusive lower bounds for that bucket.
491
+		counts: make([]uint64, len(buckets)-1),
492
+		hasSum: hasSum,
493
+	}
494
+	h.init(h)
495
+	return h
496
+}
497
+
498
// update updates the batchHistogram from a runtime/metrics histogram.
//
// sum must be provided if the batchHistogram was created to have an exact sum.
// h.buckets must be a strict subset of his.Buckets.
func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
	counts, buckets := his.Counts, his.Buckets

	h.mu.Lock()
	defer h.mu.Unlock()

	// Clear buckets.
	for i := range h.counts {
		h.counts[i] = 0
	}
	// Copy and reduce buckets. Because h.buckets is a subset of his.Buckets,
	// several source buckets may fold into a single one of ours: counts
	// accumulate into h.counts[j], and j advances only when the source's
	// upper boundary coincides with our next boundary.
	var j int
	for i, count := range counts {
		h.counts[j] += count
		if buckets[i+1] == h.buckets[j+1] {
			j++
		}
	}
	if h.hasSum {
		h.sum = sum
	}
}
524
+
525
+func (h *batchHistogram) Desc() *Desc {
526
+	return h.desc
527
+}
528
+
529
// Write serializes the histogram's current state into out under h.mu.
// When no exact sum is tracked, a sum is approximated from the bucket
// lower bounds (necessarily an underestimate).
func (h *batchHistogram) Write(out *dto.Metric) error {
	h.mu.Lock()
	defer h.mu.Unlock()

	sum := float64(0)
	if h.hasSum {
		sum = h.sum
	}
	dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
	totalCount := uint64(0)
	for i, count := range h.counts {
		totalCount += count
		if !h.hasSum {
			if count != 0 {
				// N.B. This computed sum is an underestimate.
				sum += h.buckets[i] * float64(count)
			}
		}

		// Skip the +Inf bucket, but only for the bucket list.
		// It must still count for sum and totalCount.
		if math.IsInf(h.buckets[i+1], 1) {
			break
		}
		// Float64Histogram's upper bound is exclusive, so make it inclusive
		// by obtaining the next float64 value down, in order.
		upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
		dtoBuckets = append(dtoBuckets, &dto.Bucket{
			CumulativeCount: proto.Uint64(totalCount),
			UpperBound:      proto.Float64(upperBound),
		})
	}
	out.Histogram = &dto.Histogram{
		Bucket:      dtoBuckets,
		SampleCount: proto.Uint64(totalCount),
		SampleSum:   proto.Float64(sum),
	}
	return nil
}

+ 1531
- 0
vendor/github.com/prometheus/client_golang/prometheus/histogram.go
File diff suppressed because it is too large
View File


+ 60
- 0
vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go View File

@@ -0,0 +1,60 @@
1
+// Copyright (c) 2015 Björn Rabenstein
2
+//
3
+// Permission is hereby granted, free of charge, to any person obtaining a copy
4
+// of this software and associated documentation files (the "Software"), to deal
5
+// in the Software without restriction, including without limitation the rights
6
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+// copies of the Software, and to permit persons to whom the Software is
8
+// furnished to do so, subject to the following conditions:
9
+//
10
+// The above copyright notice and this permission notice shall be included in all
11
+// copies or substantial portions of the Software.
12
+//
13
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19
+// SOFTWARE.
20
+//
21
+// The code in this package is copy/paste to avoid a dependency. Hence this file
22
+// carries the copyright of the original repo.
23
+// https://github.com/beorn7/floats
24
+package internal
25
+
26
+import (
27
+	"math"
28
+)
29
+
30
// minNormalFloat64 is the smallest positive normal value of type float64.
var minNormalFloat64 = math.Float64frombits(0x0010000000000000)

// AlmostEqualFloat64 returns true if a and b are equal within a relative error
// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
// details of the applied method.
func AlmostEqualFloat64(a, b, epsilon float64) bool {
	if a == b {
		// Shortcut: handles exact equality, including equal infinities.
		return true
	}
	absA, absB := math.Abs(a), math.Abs(b)
	delta := math.Abs(a - b)
	if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
		// One operand is zero or both are subnormal-close to it; relative
		// error is meaningless there, so compare against a fixed scale.
		return delta < epsilon*minNormalFloat64
	}
	// Relative error, with the denominator clamped to avoid overflow.
	return delta/math.Min(absA+absB, math.MaxFloat64) < epsilon
}
48
+
49
+// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
50
+func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
51
+	if len(a) != len(b) {
52
+		return false
53
+	}
54
+	for i := range a {
55
+		if !AlmostEqualFloat64(a[i], b[i], epsilon) {
56
+			return false
57
+		}
58
+	}
59
+	return true
60
+}

+ 654
- 0
vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go View File

@@ -0,0 +1,654 @@
1
+// Copyright 2022 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+//
14
+// It provides tools to compare sequences of strings and generate textual diffs.
15
+//
16
+// Maintaining `GetUnifiedDiffString` here because original repository
17
+// (https://github.com/pmezard/go-difflib) is no longer maintained.
18
+package internal
19
+
20
+import (
21
+	"bufio"
22
+	"bytes"
23
+	"fmt"
24
+	"io"
25
+	"strings"
26
+)
27
+
28
// min returns the smaller of two ints.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
34
+
35
// max returns the larger of two ints.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
41
+
42
// calculateRatio returns 2*matches/length, the similarity measure used by
// SequenceMatcher. A non-positive length yields 1.0 (two empty sequences
// are considered identical).
func calculateRatio(matches, length int) float64 {
	if length <= 0 {
		return 1.0
	}
	return 2.0 * float64(matches) / float64(length)
}
48
+
49
// Match describes a matching block: a[A:A+Size] equals b[B:B+Size].
type Match struct {
	A    int // start index of the match in sequence a
	B    int // start index of the match in sequence b
	Size int // number of matching elements
}
54
+
55
// OpCode describes a single edit operation turning a[I1:I2] into b[J1:J2].
// Tag identifies the operation kind; the concrete byte values are assigned
// by the op-code generator elsewhere in this file (presumably difflib-style
// 'r'/'d'/'i'/'e' — confirm against the GetOpCodes implementation).
type OpCode struct {
	Tag byte
	I1  int
	I2  int
	J1  int
	J2  int
}
62
+
63
// SequenceMatcher compares sequence of strings. The basic
// algorithm predates, and is a little fancier than, an algorithm
// published in the late 1980's by Ratcliff and Obershelp under the
// hyperbolic name "gestalt pattern matching".  The basic idea is to find
// the longest contiguous matching subsequence that contains no "junk"
// elements (R-O doesn't address junk).  The same idea is then applied
// recursively to the pieces of the sequences to the left and to the right
// of the matching subsequence.  This does not yield minimal edit
// sequences, but does tend to yield matches that "look right" to people.
//
// SequenceMatcher tries to compute a "human-friendly diff" between two
// sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
// longest *contiguous* & junk-free matching subsequence.  That's what
// catches peoples' eyes.  The Windows(tm) windiff has another interesting
// notion, pairing up elements that appear uniquely in each sequence.
// That, and the method here, appear to yield more intuitive difference
// reports than does diff.  This method appears to be the least vulnerable
// to synching up on blocks of "junk lines", though (like blank lines in
// ordinary text files, or maybe "<P>" lines in HTML files).  That may be
// because this is the only method of the 3 that has a *concept* of
// "junk" <wink>.
//
// Timing:  Basic R-O is cubic time worst case and quadratic time expected
// case.  SequenceMatcher is quadratic time for the worst case and has
// expected-case behavior dependent in a complicated way on how many
// elements the sequences have in common; best case time is linear.
type SequenceMatcher struct {
	a              []string            // first sequence
	b              []string            // second sequence (the indexed one)
	b2j            map[string][]int    // line content -> occurrence indices in b (junk/popular removed by chainB)
	IsJunk         func(string) bool   // optional caller-supplied junk predicate
	autoJunk       bool                // enables the "popular line" heuristic in chainB
	bJunk          map[string]struct{} // lines of b classified as junk
	matchingBlocks []Match             // cached matches; reset when a or b changes
	fullBCount     map[string]int      // per-line count cache for b; populated elsewhere (not visible in this chunk)
	bPopular       map[string]struct{} // lines purged from b2j as too popular
	opCodes        []OpCode            // cached op codes; reset when a or b changes
}
101
+
102
+func NewMatcher(a, b []string) *SequenceMatcher {
103
+	m := SequenceMatcher{autoJunk: true}
104
+	m.SetSeqs(a, b)
105
+	return &m
106
+}
107
+
108
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
109
+	isJunk func(string) bool,
110
+) *SequenceMatcher {
111
+	m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
112
+	m.SetSeqs(a, b)
113
+	return &m
114
+}
115
+
116
// SetSeqs sets the two sequences to be compared, delegating the cache
// invalidation and index rebuilds to SetSeq1 and SetSeq2.
func (m *SequenceMatcher) SetSeqs(a, b []string) {
	m.SetSeq1(a)
	m.SetSeq2(b)
}
121
+
122
// SetSeq1 sets the first sequence to be compared. The second sequence to be
// compared is not changed.
//
// SequenceMatcher computes and caches detailed information about the second
// sequence, so if you want to compare one sequence S against many sequences,
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
// sequences.
//
// See also SetSeqs() and SetSeq2().
func (m *SequenceMatcher) SetSeq1(a []string) {
	// NOTE(review): this compares the address of the local parameter with
	// the address of the struct field, which can never be equal, so the
	// early return is dead code (the Python original checked `a is self.a`).
	// Harmless, but worth confirming upstream.
	if &a == &m.a {
		return
	}
	m.a = a
	// Invalidate caches derived from the old pair of sequences.
	m.matchingBlocks = nil
	m.opCodes = nil
}
139
+
140
// SetSeq2 sets the second sequence to be compared and rebuilds the indices
// derived from it. The first sequence to be compared is not changed.
func (m *SequenceMatcher) SetSeq2(b []string) {
	// NOTE(review): like SetSeq1, this address comparison is always false
	// (local parameter vs. struct field), so the early return never fires.
	if &b == &m.b {
		return
	}
	m.b = b
	// Invalidate every cache derived from b, then rebuild the b index.
	m.matchingBlocks = nil
	m.opCodes = nil
	m.fullBCount = nil
	m.chainB()
}
152
+
153
// chainB (re)builds the lookup structures derived from sequence b: the b2j
// index from line content to occurrence indices, the junk set, and the
// "popular" set. It must run whenever b changes (see SetSeq2).
func (m *SequenceMatcher) chainB() {
	// Populate line -> index mapping
	b2j := map[string][]int{}
	for i, s := range m.b {
		indices := b2j[s]
		indices = append(indices, i)
		b2j[s] = indices
	}

	// Purge junk elements
	m.bJunk = map[string]struct{}{}
	if m.IsJunk != nil {
		junk := m.bJunk
		for s := range b2j {
			if m.IsJunk(s) {
				junk[s] = struct{}{}
			}
		}
		// Junk lines are remembered in bJunk but removed from the index so
		// findLongestMatch never anchors a match on them.
		for s := range junk {
			delete(b2j, s)
		}
	}

	// Purge remaining popular elements: with autoJunk enabled and b at least
	// 200 lines long, lines occurring in more than ~1% of b are dropped from
	// the index to keep the matching inner loop fast.
	popular := map[string]struct{}{}
	n := len(m.b)
	if m.autoJunk && n >= 200 {
		ntest := n/100 + 1
		for s, indices := range b2j {
			if len(indices) > ntest {
				popular[s] = struct{}{}
			}
		}
		for s := range popular {
			delete(b2j, s)
		}
	}
	m.bPopular = popular
	m.b2j = b2j
}
193
+
194
+func (m *SequenceMatcher) isBJunk(s string) bool {
195
+	_, ok := m.bJunk[s]
196
+	return ok
197
+}
198
+
199
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
//
// If IsJunk is not defined:
//
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
//
//	alo <= i <= i+k <= ahi
//	blo <= j <= j+k <= bhi
//
// and for all (i',j',k') meeting those conditions,
//
//	k >= k'
//	i <= i'
//	and if i == i', j <= j'
//
// In other words, of all maximal matching blocks, return one that
// starts earliest in a, and of all those maximal matching blocks that
// start earliest in a, return the one that starts earliest in b.
//
// If IsJunk is defined, first the longest matching block is
// determined as above, but with the additional restriction that no
// junk element appears in the block.  Then that block is extended as
// far as possible by matching (only) junk elements on both sides.  So
// the resulting block never matches on junk except as identical junk
// happens to be adjacent to an "interesting" match.
//
// If no blocks match, return (alo, blo, 0).
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
	// CAUTION:  stripping common prefix or suffix would be incorrect.
	// E.g.,
	//    ab
	//    acab
	// Longest matching block is "ab", but if common prefix is
	// stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
	// strip, so ends up claiming that ab is changed to acab by
	// inserting "ca" in the middle.  That's minimal but unintuitive:
	// "it's obvious" that someone inserted "ac" at the front.
	// Windiff ends up at the same place as diff, but by pairing up
	// the unique 'b's and then matching the first two 'a's.
	besti, bestj, bestsize := alo, blo, 0

	// find longest junk-free match
	// during an iteration of the loop, j2len[j] = length of longest
	// junk-free match ending with a[i-1] and b[j]
	j2len := map[int]int{}
	for i := alo; i != ahi; i++ {
		// look at all instances of a[i] in b; note that because
		// b2j has no junk keys, the loop is skipped if a[i] is junk
		newj2len := map[int]int{}
		for _, j := range m.b2j[m.a[i]] {
			// a[i] matches b[j]
			if j < blo {
				continue
			}
			// NOTE(review): the early break assumes the index lists in
			// b2j are in ascending order — confirm chainB appends them
			// in order.
			if j >= bhi {
				break
			}
			// Extend the junk-free match ending at (i-1, j-1) by one.
			k := j2len[j-1] + 1
			newj2len[j] = k
			if k > bestsize {
				// New longest match; record its start coordinates.
				besti, bestj, bestsize = i-k+1, j-k+1, k
			}
		}
		j2len = newj2len
	}

	// Extend the best by non-junk elements on each end.  In particular,
	// "popular" non-junk elements aren't in b2j, which greatly speeds
	// the inner loop above, but also means "the best" match so far
	// doesn't contain any junk *or* popular non-junk elements.
	for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
		m.a[besti-1] == m.b[bestj-1] {
		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
	}
	for besti+bestsize < ahi && bestj+bestsize < bhi &&
		!m.isBJunk(m.b[bestj+bestsize]) &&
		m.a[besti+bestsize] == m.b[bestj+bestsize] {
		bestsize++
	}

	// Now that we have a wholly interesting match (albeit possibly
	// empty!), we may as well suck up the matching junk on each
	// side of it too.  Can't think of a good reason not to, and it
	// saves post-processing the (possibly considerable) expense of
	// figuring out what to do with it.  In the case of an empty
	// interesting match, this is clearly the right thing to do,
	// because no other kind of match is possible in the regions.
	for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
		m.a[besti-1] == m.b[bestj-1] {
		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
	}
	for besti+bestsize < ahi && bestj+bestsize < bhi &&
		m.isBJunk(m.b[bestj+bestsize]) &&
		m.a[besti+bestsize] == m.b[bestj+bestsize] {
		bestsize++
	}

	return Match{A: besti, B: bestj, Size: bestsize}
}
298
+
299
// Return list of triples describing matching subsequences.
//
// Each triple is of the form (i, j, n), and means that
// a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
// adjacent triples in the list, and the second is not the last triple in the
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
// adjacent equal blocks.
//
// The last triple is a dummy, (len(a), len(b), 0), and is the only
// triple with n==0.
//
// The result is memoized in m.matchingBlocks; subsequent calls return
// the cached slice.
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
	if m.matchingBlocks != nil {
		return m.matchingBlocks
	}

	// Recursively find the longest match in each region, then recurse
	// into the regions to its left and right.
	var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
	matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
		match := m.findLongestMatch(alo, ahi, blo, bhi)
		i, j, k := match.A, match.B, match.Size
		if match.Size > 0 {
			if alo < i && blo < j {
				matched = matchBlocks(alo, i, blo, j, matched)
			}
			matched = append(matched, match)
			if i+k < ahi && j+k < bhi {
				matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
			}
		}
		return matched
	}
	matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)

	// It's possible that we have adjacent equal blocks in the
	// matching_blocks list now.
	nonAdjacent := []Match{}
	i1, j1, k1 := 0, 0, 0
	for _, b := range matched {
		// Is this block adjacent to i1, j1, k1?
		i2, j2, k2 := b.A, b.B, b.Size
		if i1+k1 == i2 && j1+k1 == j2 {
			// Yes, so collapse them -- this just increases the length of
			// the first block by the length of the second, and the first
			// block so lengthened remains the block to compare against.
			k1 += k2
		} else {
			// Not adjacent.  Remember the first block (k1==0 means it's
			// the dummy we started with), and make the second block the
			// new block to compare against.
			if k1 > 0 {
				nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
			}
			i1, j1, k1 = i2, j2, k2
		}
	}
	// Flush the final pending block, if any.
	if k1 > 0 {
		nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
	}

	// Append the terminating sentinel (len(a), len(b), 0).
	nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
	m.matchingBlocks = nonAdjacent
	return m.matchingBlocks
}
362
+
363
+// Return list of 5-tuples describing how to turn a into b.
364
+//
365
+// Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
366
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
367
+// tuple preceding it, and likewise for j1 == the previous j2.
368
+//
369
+// The tags are characters, with these meanings:
370
+//
371
+// 'r' (replace):  a[i1:i2] should be replaced by b[j1:j2]
372
+//
373
+// 'd' (delete):   a[i1:i2] should be deleted, j1==j2 in this case.
374
+//
375
+// 'i' (insert):   b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
376
+//
377
+// 'e' (equal):    a[i1:i2] == b[j1:j2]
378
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
379
+	if m.opCodes != nil {
380
+		return m.opCodes
381
+	}
382
+	i, j := 0, 0
383
+	matching := m.GetMatchingBlocks()
384
+	opCodes := make([]OpCode, 0, len(matching))
385
+	for _, m := range matching {
386
+		//  invariant:  we've pumped out correct diffs to change
387
+		//  a[:i] into b[:j], and the next matching block is
388
+		//  a[ai:ai+size] == b[bj:bj+size]. So we need to pump
389
+		//  out a diff to change a[i:ai] into b[j:bj], pump out
390
+		//  the matching block, and move (i,j) beyond the match
391
+		ai, bj, size := m.A, m.B, m.Size
392
+		tag := byte(0)
393
+		if i < ai && j < bj {
394
+			tag = 'r'
395
+		} else if i < ai {
396
+			tag = 'd'
397
+		} else if j < bj {
398
+			tag = 'i'
399
+		}
400
+		if tag > 0 {
401
+			opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
402
+		}
403
+		i, j = ai+size, bj+size
404
+		// the list of matching blocks is terminated by a
405
+		// sentinel with size 0
406
+		if size > 0 {
407
+			opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
408
+		}
409
+	}
410
+	m.opCodes = opCodes
411
+	return m.opCodes
412
+}
413
+
414
// Isolate change clusters by eliminating ranges with no changes.
//
// Return a generator of groups with up to n lines of context.
// Each group is in the same format as returned by GetOpCodes().
// A negative n selects the default context of 3 lines.
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
	if n < 0 {
		n = 3 // default amount of context
	}
	codes := m.GetOpCodes()
	if len(codes) == 0 {
		codes = []OpCode{{'e', 0, 1, 0, 1}}
	}
	// Fixup leading and trailing groups if they show no changes.
	// NOTE(review): min/max here are presumably package-local helpers
	// defined elsewhere in this file (pre-Go 1.21 port) — confirm.
	if codes[0].Tag == 'e' {
		c := codes[0]
		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
		codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
	}
	if codes[len(codes)-1].Tag == 'e' {
		c := codes[len(codes)-1]
		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
		codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
	}
	nn := n + n
	groups := [][]OpCode{}
	group := []OpCode{}
	for _, c := range codes {
		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
		// End the current group and start a new one whenever
		// there is a large range with no changes.
		if c.Tag == 'e' && i2-i1 > nn {
			group = append(group, OpCode{
				c.Tag, i1, min(i2, i1+n),
				j1, min(j2, j1+n),
			})
			groups = append(groups, group)
			group = []OpCode{}
			i1, j1 = max(i1, i2-n), max(j1, j2-n)
		}
		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
	}
	// Drop a trailing group that is only unchanged context.
	if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
		groups = append(groups, group)
	}
	return groups
}
460
+
461
+// Return a measure of the sequences' similarity (float in [0,1]).
462
+//
463
+// Where T is the total number of elements in both sequences, and
464
+// M is the number of matches, this is 2.0*M / T.
465
+// Note that this is 1 if the sequences are identical, and 0 if
466
+// they have nothing in common.
467
+//
468
+// .Ratio() is expensive to compute if you haven't already computed
469
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
470
+// want to try .QuickRatio() or .RealQuickRation() first to get an
471
+// upper bound.
472
+func (m *SequenceMatcher) Ratio() float64 {
473
+	matches := 0
474
+	for _, m := range m.GetMatchingBlocks() {
475
+		matches += m.Size
476
+	}
477
+	return calculateRatio(matches, len(m.a)+len(m.b))
478
+}
479
+
480
+// Return an upper bound on ratio() relatively quickly.
481
+//
482
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
483
+// is faster to compute.
484
+func (m *SequenceMatcher) QuickRatio() float64 {
485
+	// viewing a and b as multisets, set matches to the cardinality
486
+	// of their intersection; this counts the number of matches
487
+	// without regard to order, so is clearly an upper bound
488
+	if m.fullBCount == nil {
489
+		m.fullBCount = map[string]int{}
490
+		for _, s := range m.b {
491
+			m.fullBCount[s]++
492
+		}
493
+	}
494
+
495
+	// avail[x] is the number of times x appears in 'b' less the
496
+	// number of times we've seen it in 'a' so far ... kinda
497
+	avail := map[string]int{}
498
+	matches := 0
499
+	for _, s := range m.a {
500
+		n, ok := avail[s]
501
+		if !ok {
502
+			n = m.fullBCount[s]
503
+		}
504
+		avail[s] = n - 1
505
+		if n > 0 {
506
+			matches++
507
+		}
508
+	}
509
+	return calculateRatio(matches, len(m.a)+len(m.b))
510
+}
511
+
512
+// Return an upper bound on ratio() very quickly.
513
+//
514
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
515
+// is faster to compute than either .Ratio() or .QuickRatio().
516
+func (m *SequenceMatcher) RealQuickRatio() float64 {
517
+	la, lb := len(m.a), len(m.b)
518
+	return calculateRatio(min(la, lb), la+lb)
519
+}
520
+
521
// formatRangeUnified converts a half-open line range [start, stop) to the
// "ed"-style range used in unified-diff hunk headers.
// Per the diff spec at http://www.unix.org/single_unix_specification/
func formatRangeUnified(start, stop int) string {
	first := start + 1 // diff line numbering starts at one
	size := stop - start
	switch size {
	case 1:
		// A single-line range is printed as just the line number.
		return fmt.Sprintf("%d", first)
	case 0:
		first-- // empty ranges begin at the line just before the range
	}
	return fmt.Sprintf("%d,%d", first, size)
}
534
+
535
// UnifiedDiff holds the inputs and formatting parameters for producing a
// unified diff between two sequences of lines (see WriteUnifiedDiff).
type UnifiedDiff struct {
	A        []string // First sequence lines
	FromFile string   // First file name
	FromDate string   // First file time
	B        []string // Second sequence lines
	ToFile   string   // Second file name
	ToDate   string   // Second file time
	Eol      string   // Headers end of line, defaults to LF
	Context  int      // Number of context lines
}
546
+
547
// Compare two sequences of lines; generate the delta as a unified diff.
//
// Unified diffs are a compact way of showing line changes and a few
// lines of context.  The number of context lines is set by 'n' which
// defaults to three.
//
// By default, the diff control lines (those with ---, +++, or @@) are
// created with a trailing newline.  This is helpful so that inputs
// created from file.readlines() result in diffs that are suitable for
// file.writelines() since both the inputs and outputs have trailing
// newlines.
//
// For inputs that do not have trailing newlines, set the lineterm
// argument to "" so that the output will be uniformly newline free.
//
// The unidiff format normally has a header for filenames and modification
// times.  Any or all of these may be specified using strings for
// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
// The modification times are normally expressed in the ISO 8601 format.
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
	buf := bufio.NewWriter(writer)
	// NOTE(review): the error from the deferred Flush is discarded, so a
	// failed final write would go unreported — confirm this is acceptable.
	defer buf.Flush()
	// wf writes a formatted control line; ws writes a raw content line.
	wf := func(format string, args ...interface{}) error {
		_, err := buf.WriteString(fmt.Sprintf(format, args...))
		return err
	}
	ws := func(s string) error {
		_, err := buf.WriteString(s)
		return err
	}

	if len(diff.Eol) == 0 {
		diff.Eol = "\n"
	}

	started := false
	m := NewMatcher(diff.A, diff.B)
	for _, g := range m.GetGroupedOpCodes(diff.Context) {
		if !started {
			// Emit the ---/+++ file header once, before the first hunk.
			started = true
			fromDate := ""
			if len(diff.FromDate) > 0 {
				fromDate = "\t" + diff.FromDate
			}
			toDate := ""
			if len(diff.ToDate) > 0 {
				toDate = "\t" + diff.ToDate
			}
			if diff.FromFile != "" || diff.ToFile != "" {
				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
				if err != nil {
					return err
				}
				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
				if err != nil {
					return err
				}
			}
		}
		// Hunk header: @@ -<a range> +<b range> @@
		first, last := g[0], g[len(g)-1]
		range1 := formatRangeUnified(first.I1, last.I2)
		range2 := formatRangeUnified(first.J1, last.J2)
		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
			return err
		}
		for _, c := range g {
			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
			if c.Tag == 'e' {
				// Unchanged context lines, prefixed with a space.
				for _, line := range diff.A[i1:i2] {
					if err := ws(" " + line); err != nil {
						return err
					}
				}
				continue
			}
			if c.Tag == 'r' || c.Tag == 'd' {
				// Lines removed from (or replaced in) a.
				for _, line := range diff.A[i1:i2] {
					if err := ws("-" + line); err != nil {
						return err
					}
				}
			}
			if c.Tag == 'r' || c.Tag == 'i' {
				// Lines inserted from b.
				for _, line := range diff.B[j1:j2] {
					if err := ws("+" + line); err != nil {
						return err
					}
				}
			}
		}
	}
	return nil
}
640
+
641
+// Like WriteUnifiedDiff but returns the diff a string.
642
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
643
+	w := &bytes.Buffer{}
644
+	err := WriteUnifiedDiff(w, diff)
645
+	return w.String(), err
646
+}
647
+
648
// SplitLines splits s after every "\n", preserving the separators, and
// forces the final element to end with "\n" as well. The output can be
// used as input for the UnifiedDiff and ContextDiff structures.
func SplitLines(s string) []string {
	parts := strings.SplitAfter(s, "\n")
	last := len(parts) - 1
	parts[last] = parts[last] + "\n"
	return parts
}

+ 32
- 0
vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go View File

@@ -0,0 +1,32 @@
1
+// Copyright 2021 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package internal
15
+
16
+import "regexp"
17
+
18
// GoCollectorRule pairs a regular expression over runtime/metrics names
// with a Deny flag; it is consumed via GoCollectorOptions.RuntimeMetricRules.
type GoCollectorRule struct {
	Matcher *regexp.Regexp
	Deny    bool
}
22
+
23
// GoCollectorOptions should not be used directly by anything, except the
// `collectors` package. Use it via the collectors package instead. See issue
// https://github.com/prometheus/client_golang/issues/1030.
//
// This is internal, so external users only can use it via `collector.WithGoCollector*` methods
type GoCollectorOptions struct {
	DisableMemStatsLikeMetrics bool
	RuntimeMetricSumForHist    map[string]string
	RuntimeMetricRules         []GoCollectorRule
}

+ 142
- 0
vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go View File

@@ -0,0 +1,142 @@
1
+// Copyright 2021 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+//go:build go1.17
15
+// +build go1.17
16
+
17
+package internal
18
+
19
+import (
20
+	"math"
21
+	"path"
22
+	"runtime/metrics"
23
+	"strings"
24
+
25
+	"github.com/prometheus/common/model"
26
+)
27
+
28
// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
// metric description and validates whether the metric is suitable for integration
// with Prometheus.
//
// Returns (namespace, subsystem, name, ok); ok is false if a name could not be
// produced, or if Prometheus does not understand the runtime/metrics Kind.
//
// Note that the main reason a name couldn't be produced is if the runtime/metrics
// package exports a name with characters outside the valid Prometheus metric name
// character set. This is theoretically possible, but should never happen in practice.
// Still, don't rely on it.
func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
	namespace := "go"

	// Split a name like "/sched/latencies:seconds" into key and unit.
	// NOTE(review): comp[1] assumes the name always contains a ":" —
	// runtime/metrics names carry a unit suffix by convention; confirm.
	comp := strings.SplitN(d.Name, ":", 2)
	key := comp[0]
	unit := comp[1]

	// The last path element in the key is the name,
	// the rest is the subsystem.
	subsystem := path.Dir(key[1:] /* remove leading / */)
	name := path.Base(key)

	// subsystem is translated by replacing all / and - with _.
	subsystem = strings.ReplaceAll(subsystem, "/", "_")
	subsystem = strings.ReplaceAll(subsystem, "-", "_")

	// unit is translated assuming that the unit contains no
	// non-ASCII characters.
	unit = strings.ReplaceAll(unit, "-", "_")
	unit = strings.ReplaceAll(unit, "*", "_")
	unit = strings.ReplaceAll(unit, "/", "_per_")

	// name has - replaced with _ and is concatenated with the unit and
	// other data.
	name = strings.ReplaceAll(name, "-", "_")
	name += "_" + unit
	// Cumulative non-histogram metrics get the conventional _total suffix.
	if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
		name += "_total"
	}

	// The fully qualified name must pass Prometheus' own validation.
	valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
	// Only these three kinds are representable; any other Kind is rejected.
	switch d.Kind {
	case metrics.KindUint64:
	case metrics.KindFloat64:
	case metrics.KindFloat64Histogram:
	default:
		valid = false
	}
	return namespace, subsystem, name, valid
}
79
+
80
// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics
// histogram type (so, lower-bound inclusive) and a unit from a runtime/metrics name,
// and produces a reduced set of buckets. This function always removes any -Inf
// bucket as it's represented as the bottom-most upper-bound inclusive bucket in
// Prometheus.
func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
	if unit == "bytes" {
		// Re-bucket as powers of 2.
		return reBucketExp(buckets, 2)
	}
	if unit != "seconds" {
		// Unknown units pass through unmodified.
		return buckets
	}
	// Re-bucket as powers of 10, then merge every bucket greater than
	// one second into the +Inf bucket.
	rebucketed := reBucketExp(buckets, 10)
	for i, bound := range rebucketed {
		if bound <= 1 {
			continue
		}
		rebucketed[i] = math.Inf(1)
		return rebucketed[:i+1]
	}
	return rebucketed
}
105
+
106
// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
// downsamples the buckets to those a multiple of base apart. The end result
// is a roughly exponential (in many cases, perfectly exponential) bucketing
// scheme.
func reBucketExp(buckets []float64, base float64) []float64 {
	var kept []float64
	current := buckets[0]
	// A leading -Inf must be preserved verbatim to maintain runtime/metrics
	// conventions (it is stripped later by the caller), and it must be kept
	// out of the arithmetic below, which would otherwise produce NaNs.
	if current == math.Inf(-1) {
		kept = append(kept, current)
		buckets = buckets[1:]
		current = buckets[0]
	}
	// From here on, current is finite: infinities only appear at the ends
	// of bucket lists, so multiplying/dividing it is NaN-free.
	for _, next := range buckets[1:] {
		// Skip boundaries that are closer than a factor of base to the
		// boundary we are currently keeping. For negative boundaries,
		// moving toward zero means dividing by base instead.
		tooClose := (current >= 0 && next < current*base) ||
			(current < 0 && next < current/base)
		if tooClose {
			continue
		}
		kept = append(kept, current)
		current = next
	}
	// The final boundary (always reached, including a trailing +Inf) is
	// appended unconditionally.
	return append(kept, current)
}

+ 101
- 0
vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go View File

@@ -0,0 +1,101 @@
1
+// Copyright 2018 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package internal
15
+
16
+import (
17
+	"sort"
18
+
19
+	dto "github.com/prometheus/client_model/go"
20
+)
21
+
22
// LabelPairSorter implements sort.Interface. It is used to sort a slice of
// dto.LabelPair pointers by label name, ascending.
type LabelPairSorter []*dto.LabelPair

func (s LabelPairSorter) Len() int {
	return len(s)
}

func (s LabelPairSorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less orders label pairs lexicographically by name.
func (s LabelPairSorter) Less(i, j int) bool {
	return s[i].GetName() < s[j].GetName()
}
37
+
38
// MetricSorter is a sortable slice of *dto.Metric. Metrics are ordered by
// their label values, with ties broken by timestamp (missing timestamps,
// implying "now", sort last).
type MetricSorter []*dto.Metric

func (s MetricSorter) Len() int {
	return len(s)
}

func (s MetricSorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s MetricSorter) Less(i, j int) bool {
	if len(s[i].Label) != len(s[j].Label) {
		// This should not happen. The metrics are
		// inconsistent. However, we have to deal with the fact, as
		// people might use custom collectors or metric family injection
		// to create inconsistent metrics. So let's simply compare the
		// number of labels in this case. That will still yield
		// reproducible sorting.
		return len(s[i].Label) < len(s[j].Label)
	}
	// Compare label values position by position; the first difference
	// decides the order.
	for n, lp := range s[i].Label {
		vi := lp.GetValue()
		vj := s[j].Label[n].GetValue()
		if vi != vj {
			return vi < vj
		}
	}

	// We should never arrive here. Multiple metrics with the same
	// label set in the same scrape will lead to undefined ingestion
	// behavior. However, as above, we have to provide stable sorting
	// here, even for inconsistent metrics. So sort equal metrics
	// by their timestamp, with missing timestamps (implying "now")
	// coming last.
	if s[i].TimestampMs == nil {
		return false
	}
	if s[j].TimestampMs == nil {
		return true
	}
	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}
81
+
82
// NormalizeMetricFamilies returns a MetricFamily slice with empty
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
// the slice, with the contained Metrics sorted within each MetricFamily.
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
	// Sort each family's metrics in place and remember the names of the
	// non-empty families in a single pass over the map.
	names := make([]string, 0, len(metricFamiliesByName))
	for name, mf := range metricFamiliesByName {
		sort.Sort(MetricSorter(mf.Metric))
		if len(mf.Metric) > 0 {
			names = append(names, name)
		}
	}
	// Map iteration order is random; sort the names for a stable result.
	sort.Strings(names)
	out := make([]*dto.MetricFamily, 0, len(names))
	for _, name := range names {
		out = append(out, metricFamiliesByName[name])
	}
	return out
}

+ 186
- 0
vendor/github.com/prometheus/client_golang/prometheus/labels.go View File

@@ -0,0 +1,186 @@
1
+// Copyright 2018 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import (
17
+	"errors"
18
+	"fmt"
19
+	"strings"
20
+	"unicode/utf8"
21
+
22
+	"github.com/prometheus/common/model"
23
+)
24
+
25
// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
//
//	myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
type Labels map[string]string

// LabelConstraint normalizes label values.
type LabelConstraint func(string) string

// ConstrainedLabel represents a label name and its constrain function
// to normalize label values. This type is commonly used when constructing
// metric vector Collectors.
type ConstrainedLabel struct {
	Name       string
	Constraint LabelConstraint
}

// ConstrainableLabels is an interface that allows creating of labels that can
// be optionally constrained.
//
//	prometheus.V2().NewCounterVec(CounterVecOpts{
//	  CounterOpts: {...}, // Usual CounterOpts fields
//	  VariableLabels: []ConstrainedLabels{
//	    {Name: "A"},
//	    {Name: "B", Constraint: func(v string) string { ... }},
//	  },
//	})
type ConstrainableLabels interface {
	compile() *compiledLabels
	labelNames() []string
}
60
+
61
+// ConstrainedLabels represents a collection of label name -> constrain function
62
+// to normalize label values. This type is commonly used when constructing
63
+// metric vector Collectors.
64
+type ConstrainedLabels []ConstrainedLabel
65
+
66
+func (cls ConstrainedLabels) compile() *compiledLabels {
67
+	compiled := &compiledLabels{
68
+		names:            make([]string, len(cls)),
69
+		labelConstraints: map[string]LabelConstraint{},
70
+	}
71
+
72
+	for i, label := range cls {
73
+		compiled.names[i] = label.Name
74
+		if label.Constraint != nil {
75
+			compiled.labelConstraints[label.Name] = label.Constraint
76
+		}
77
+	}
78
+
79
+	return compiled
80
+}
81
+
82
+func (cls ConstrainedLabels) labelNames() []string {
83
+	names := make([]string, len(cls))
84
+	for i, label := range cls {
85
+		names[i] = label.Name
86
+	}
87
+	return names
88
+}
89
+
90
+// UnconstrainedLabels represents collection of label without any constraint on
91
+// their value. Thus, it is simply a collection of label names.
92
+//
93
+//	UnconstrainedLabels([]string{ "A", "B" })
94
+//
95
+// is equivalent to
96
+//
97
+//	ConstrainedLabels {
98
+//	  { Name: "A" },
99
+//	  { Name: "B" },
100
+//	}
101
+type UnconstrainedLabels []string
102
+
103
+func (uls UnconstrainedLabels) compile() *compiledLabels {
104
+	return &compiledLabels{
105
+		names: uls,
106
+	}
107
+}
108
+
109
+func (uls UnconstrainedLabels) labelNames() []string {
110
+	return uls
111
+}
112
+
113
+type compiledLabels struct {
114
+	names            []string
115
+	labelConstraints map[string]LabelConstraint
116
+}
117
+
118
+func (cls *compiledLabels) compile() *compiledLabels {
119
+	return cls
120
+}
121
+
122
+func (cls *compiledLabels) labelNames() []string {
123
+	return cls.names
124
+}
125
+
126
+func (cls *compiledLabels) constrain(labelName, value string) string {
127
+	if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
128
+		return fn(value)
129
+	}
130
+	return value
131
+}
132
+
133
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"

// errInconsistentCardinality is the sentinel wrapped by the cardinality
// helpers below; callers can test for it with errors.Is.
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
138
+
139
+func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
140
+	return fmt.Errorf(
141
+		"%w: %q has %d variable labels named %q but %d values %q were provided",
142
+		errInconsistentCardinality, fqName,
143
+		len(labels), labels,
144
+		len(labelValues), labelValues,
145
+	)
146
+}
147
+
148
// validateValuesInLabels checks that labels holds exactly
// expectedNumberOfValues entries and that every value is valid UTF-8.
// Returns an error wrapping errInconsistentCardinality on a count
// mismatch, a plain error for an invalid value, and nil otherwise.
func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
	if len(labels) != expectedNumberOfValues {
		return fmt.Errorf(
			"%w: expected %d label values but got %d in %#v",
			errInconsistentCardinality, expectedNumberOfValues,
			len(labels), labels,
		)
	}
	for name, val := range labels {
		if utf8.ValidString(val) {
			continue
		}
		return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
	}
	return nil
}
165
+
166
// validateLabelValues checks that vals holds exactly expectedNumberOfValues
// entries and that each value is valid UTF-8. Returns an error wrapping
// errInconsistentCardinality on a count mismatch, a plain error for an
// invalid value, and nil otherwise.
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
	if len(vals) != expectedNumberOfValues {
		return fmt.Errorf(
			"%w: expected %d label values but got %d in %#v",
			errInconsistentCardinality, expectedNumberOfValues,
			len(vals), vals,
		)
	}
	for _, val := range vals {
		if utf8.ValidString(val) {
			continue
		}
		return fmt.Errorf("label value %q is not valid UTF-8", val)
	}
	return nil
}
183
+
184
// checkLabelName reports whether l is a valid Prometheus label name that
// does not use the reserved "__" prefix.
func checkLabelName(l string) bool {
	return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
}

+ 257
- 0
vendor/github.com/prometheus/client_golang/prometheus/metric.go View File

@@ -0,0 +1,257 @@
1
+// Copyright 2014 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import (
17
+	"errors"
18
+	"math"
19
+	"sort"
20
+	"strings"
21
+	"time"
22
+
23
+	dto "github.com/prometheus/client_model/go"
24
+	"github.com/prometheus/common/model"
25
+	"google.golang.org/protobuf/proto"
26
+)
27
+
28
// separatorByteSlice is the model.SeparatorByte as a one-byte slice,
// for convenient use with xxhash's Write-style API.
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
29
+
30
// A Metric models a single sample value with its meta data being exported to
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
// Histogram, Summary, and Untyped.
type Metric interface {
	// Desc returns the descriptor for the Metric. This method idempotently
	// returns the same descriptor throughout the lifetime of the
	// Metric. The returned descriptor is immutable by contract. A Metric
	// unable to describe itself must return an invalid descriptor (created
	// with NewInvalidDesc).
	Desc() *Desc
	// Write encodes the Metric into a "Metric" Protocol Buffer data
	// transmission object.
	//
	// Metric implementations must observe concurrency safety as reads of
	// this metric may occur at any time, and any blocking occurs at the
	// expense of total performance of rendering all registered
	// metrics. Ideally, Metric implementations should support concurrent
	// readers.
	//
	// While populating dto.Metric, it is the responsibility of the
	// implementation to ensure validity of the Metric protobuf (like valid
	// UTF-8 strings or syntactically valid metric and label names). It is
	// recommended to sort labels lexicographically. Callers of Write should
	// still make sure of sorting if they depend on it.
	Write(*dto.Metric) error
	// TODO(beorn7): The original rationale of passing in a pre-allocated
	// dto.Metric protobuf to save allocations has disappeared. The
	// signature of this method should be changed to "Write() (*dto.Metric,
	// error)".
}
60
+
61
// Opts bundles the options for creating most Metric types. Each metric
// implementation XXX has its own XXXOpts type, but in most cases, it is just
// an alias of this type (which might change when the requirement arises.)
//
// It is mandatory to set Name to a non-empty string. All other fields are
// optional and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type Opts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Metric (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the metric must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string

	// Help provides information about this metric.
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this metric. Metrics
	// with the same fully-qualified name must have the same label names in
	// their ConstLabels.
	//
	// ConstLabels are only used rarely. In particular, do not use them to
	// attach the same labels to all your metrics. Those use cases are
	// better covered by target labels set by the scraping Prometheus
	// server, or by one specific metric (e.g. a build_info or a
	// machine_role metric). See also
	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
	ConstLabels Labels

	// now is for testing purposes, by default it's time.Now.
	now func() time.Time
}
99
+
100
// BuildFQName joins the given three name components by "_". Empty name
// components are ignored. If the name parameter itself is empty, an empty
// string is returned, no matter what. Metric implementations included in this
// library use this function internally to generate the fully-qualified metric
// name from the name component in their Opts. Users of the library will only
// need this function if they implement their own Metric or instantiate a Desc
// (with NewDesc) directly.
func BuildFQName(namespace, subsystem, name string) string {
	if name == "" {
		return ""
	}
	// Collect the non-empty components in order and join them with "_".
	parts := make([]string, 0, 3)
	for _, component := range []string{namespace, subsystem, name} {
		if component != "" {
			parts = append(parts, component)
		}
	}
	return strings.Join(parts, "_")
}
121
+
122
// invalidMetric is a Metric that carries a fixed error; its Write method
// always fails with that error.
type invalidMetric struct {
	desc *Desc
	err  error
}

// NewInvalidMetric returns a metric whose Write method always returns the
// provided error. It is useful if a Collector finds itself unable to collect
// a metric and wishes to report an error to the registry.
func NewInvalidMetric(desc *Desc, err error) Metric {
	return &invalidMetric{desc, err}
}

// Desc returns the descriptor the invalid metric was created with.
func (m *invalidMetric) Desc() *Desc { return m.desc }

// Write always returns the stored error without touching the dto.Metric.
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
137
+
138
// timestampedMetric wraps a Metric and overrides the timestamp of every
// written sample with the fixed time t. See NewMetricWithTimestamp.
type timestampedMetric struct {
	Metric
	t time.Time
}
142
+
143
+func (m timestampedMetric) Write(pb *dto.Metric) error {
144
+	e := m.Metric.Write(pb)
145
+	pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
146
+	return e
147
+}
148
+
149
// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
// way that it has an explicit timestamp set to the provided Time. This is only
// useful in rare cases as the timestamp of a Prometheus metric should usually
// be set by the Prometheus server during scraping. Exceptions include mirroring
// metrics with given timestamps from other metric
// sources.
//
// NewMetricWithTimestamp works best with MustNewConstMetric,
// MustNewConstHistogram, and MustNewConstSummary, see example.
//
// Currently, the exposition formats used by Prometheus are limited to
// millisecond resolution. Thus, the provided time will be rounded down to the
// next full millisecond value.
func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
	return timestampedMetric{Metric: m, t: t}
}
165
+
166
// withExemplarsMetric wraps a Metric and injects the stored exemplars into
// the written protobuf. Created via NewMetricWithExemplars.
type withExemplarsMetric struct {
	Metric

	// exemplars to attach on Write; for histograms, one per matching bucket.
	exemplars []*dto.Exemplar
}
171
+
172
// Write writes the wrapped Metric into pb and then attaches the stored
// exemplars: for a Counter, the last exemplar in the list; for a Histogram,
// each exemplar is placed into the bucket whose upper bound first covers its
// value. Other metric types are rejected with an error.
func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
	if err := m.Metric.Write(pb); err != nil {
		return err
	}

	switch {
	case pb.Counter != nil:
		// Counters carry a single exemplar; only the last one applies.
		pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
	case pb.Histogram != nil:
		for _, e := range m.exemplars {
			// pb.Histogram.Bucket are sorted by UpperBound.
			i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
				return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
			})
			if i < len(pb.Histogram.Bucket) {
				pb.Histogram.Bucket[i].Exemplar = e
			} else {
				// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
				b := &dto.Bucket{
					CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
					UpperBound:      proto.Float64(math.Inf(1)),
					Exemplar:        e,
				}
				pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
			}
		}
	default:
		// TODO(bwplotka): Implement Gauge?
		return errors.New("cannot inject exemplar into Gauge, Summary or Untyped")
	}

	return nil
}
205
+
206
// Exemplar is easier to use, user-facing representation of *dto.Exemplar.
type Exemplar struct {
	Value  float64
	Labels Labels
	// Optional.
	// Default value (time.Time{}) indicates it's empty, which should be
	// understood as time.Now() time at the moment of creation of metric.
	Timestamp time.Time
}
215
+
216
+// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given
217
+// exemplars. Exemplars are validated.
218
+//
219
+// Only last applicable exemplar is injected from the list.
220
+// For example for Counter it means last exemplar is injected.
221
+// For Histogram, it means last applicable exemplar for each bucket is injected.
222
+//
223
+// NewMetricWithExemplars works best with MustNewConstMetric and
224
+// MustNewConstHistogram, see example.
225
+func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
226
+	if len(exemplars) == 0 {
227
+		return nil, errors.New("no exemplar was passed for NewMetricWithExemplars")
228
+	}
229
+
230
+	var (
231
+		now = time.Now()
232
+		exs = make([]*dto.Exemplar, len(exemplars))
233
+		err error
234
+	)
235
+	for i, e := range exemplars {
236
+		ts := e.Timestamp
237
+		if ts == (time.Time{}) {
238
+			ts = now
239
+		}
240
+		exs[i], err = newExemplar(e.Value, ts, e.Labels)
241
+		if err != nil {
242
+			return nil, err
243
+		}
244
+	}
245
+
246
+	return &withExemplarsMetric{Metric: m, exemplars: exs}, nil
247
+}
248
+
249
+// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where
250
+// NewMetricWithExemplars would have returned an error.
251
+func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric {
252
+	ret, err := NewMetricWithExemplars(m, exemplars...)
253
+	if err != nil {
254
+		panic(err)
255
+	}
256
+	return ret
257
+}

+ 25
- 0
vendor/github.com/prometheus/client_golang/prometheus/num_threads.go View File

@@ -0,0 +1,25 @@
1
+// Copyright 2018 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+//go:build !js || wasm
15
+// +build !js wasm
16
+
17
+package prometheus
18
+
19
+import "runtime"
20
+
21
// getRuntimeNumThreads returns the number of open OS threads, as reported by
// the runtime's thread-creation profile.
func getRuntimeNumThreads() float64 {
	threadCount, _ := runtime.ThreadCreateProfile(nil)
	return float64(threadCount)
}

+ 22
- 0
vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go View File

@@ -0,0 +1,22 @@
1
+// Copyright 2018 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+//go:build js && !wasm
15
+// +build js,!wasm
16
+
17
+package prometheus
18
+
19
// getRuntimeNumThreads returns the number of open OS threads. In the
// GopherJS (js, non-wasm) environment there are no OS threads, so it
// always reports one.
func getRuntimeNumThreads() float64 {
	const jsThreadCount = 1 // GopherJS runs single-threaded.
	return jsThreadCount
}

+ 64
- 0
vendor/github.com/prometheus/client_golang/prometheus/observer.go View File

@@ -0,0 +1,64 @@
1
+// Copyright 2017 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
// Observer is the interface that wraps the Observe method, which is used by
// Histogram and Summary to add observations.
type Observer interface {
	// Observe records one observation of the given value.
	Observe(float64)
}
21
+
22
// The ObserverFunc type is an adapter to allow the use of ordinary
// functions as Observers. If f is a function with the appropriate
// signature, ObserverFunc(f) is an Observer that calls f.
//
// This adapter is usually used in connection with the Timer type, and there are
// two general use cases:
//
// The most common one is to use a Gauge as the Observer for a Timer.
// See the "Gauge" Timer example.
//
// The more advanced use case is to create a function that dynamically decides
// which Observer to use for observing the duration. See the "Complex" Timer
// example.
type ObserverFunc func(float64)

// Observe calls f(value). It implements Observer.
func (f ObserverFunc) Observe(value float64) {
	f(value)
}
41
+
42
// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
// It provides labeled access to the contained Observers and embeds Collector.
type ObserverVec interface {
	GetMetricWith(Labels) (Observer, error)
	GetMetricWithLabelValues(lvs ...string) (Observer, error)
	With(Labels) Observer
	WithLabelValues(...string) Observer
	CurryWith(Labels) (ObserverVec, error)
	MustCurryWith(Labels) ObserverVec

	Collector
}
53
+
54
// ExemplarObserver is implemented by Observers that offer the option of
// observing a value together with an exemplar. Its ObserveWithExemplar method
// works like the Observe method of an Observer but also replaces the currently
// saved exemplar (if any) with a new one, created from the provided value, the
// current time as timestamp, and the provided Labels. Empty Labels will lead to
// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
// left in place. ObserveWithExemplar panics if any of the provided labels are
// invalid or if the provided labels contain more than 128 runes in total.
type ExemplarObserver interface {
	ObserveWithExemplar(value float64, exemplar Labels)
}

+ 164
- 0
vendor/github.com/prometheus/client_golang/prometheus/process_collector.go View File

@@ -0,0 +1,164 @@
1
+// Copyright 2015 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import (
17
+	"errors"
18
+	"fmt"
19
+	"os"
20
+	"strconv"
21
+	"strings"
22
+)
23
+
24
// processCollector collects per-process metrics (CPU, memory, FDs, start
// time) via a platform-specific collectFn. See NewProcessCollector.
type processCollector struct {
	collectFn       func(chan<- Metric) // platform-specific collection, or an error reporter
	pidFn           func() (int, error) // yields the PID to collect for
	reportErrors    bool                // whether collection errors become invalid metrics
	cpuTotal        *Desc
	openFDs, maxFDs *Desc
	vsize, maxVsize *Desc
	rss             *Desc
	startTime       *Desc
}
34
+
35
// ProcessCollectorOpts defines the behavior of a process metrics collector
// created with NewProcessCollector.
type ProcessCollectorOpts struct {
	// PidFn returns the PID of the process the collector collects metrics
	// for. It is called upon each collection. By default, the PID of the
	// current process is used, as determined on construction time by
	// calling os.Getpid().
	PidFn func() (int, error)
	// If non-empty, each of the collected metrics is prefixed by the
	// provided string and an underscore ("_").
	Namespace string
	// If true, any error encountered during collection is reported as an
	// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
	// and the collected metrics will be incomplete. (Possibly, no metrics
	// will be collected at all.) While that's usually not desired, it is
	// appropriate for the common "mix-in" of process metrics, where process
	// metrics are nice to have, but failing to collect them should not
	// disrupt the collection of the remaining metrics.
	ReportErrors bool
}
55
+
56
// NewProcessCollector is the obsolete version of collectors.NewProcessCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewProcessCollector instead.
func NewProcessCollector(opts ProcessCollectorOpts) Collector {
	// Prefix every metric name with "<Namespace>_" when a namespace is set.
	ns := ""
	if len(opts.Namespace) > 0 {
		ns = opts.Namespace + "_"
	}

	c := &processCollector{
		reportErrors: opts.ReportErrors,
		cpuTotal: NewDesc(
			ns+"process_cpu_seconds_total",
			"Total user and system CPU time spent in seconds.",
			nil, nil,
		),
		openFDs: NewDesc(
			ns+"process_open_fds",
			"Number of open file descriptors.",
			nil, nil,
		),
		maxFDs: NewDesc(
			ns+"process_max_fds",
			"Maximum number of open file descriptors.",
			nil, nil,
		),
		vsize: NewDesc(
			ns+"process_virtual_memory_bytes",
			"Virtual memory size in bytes.",
			nil, nil,
		),
		maxVsize: NewDesc(
			ns+"process_virtual_memory_max_bytes",
			"Maximum amount of virtual memory available in bytes.",
			nil, nil,
		),
		rss: NewDesc(
			ns+"process_resident_memory_bytes",
			"Resident memory size in bytes.",
			nil, nil,
		),
		startTime: NewDesc(
			ns+"process_start_time_seconds",
			"Start time of the process since unix epoch in seconds.",
			nil, nil,
		),
	}

	// Fall back to the current process's PID when no PidFn is supplied.
	if opts.PidFn == nil {
		c.pidFn = getPIDFn()
	} else {
		c.pidFn = opts.PidFn
	}

	// Set up process metric collection if supported by the runtime.
	if canCollectProcess() {
		c.collectFn = c.processCollect
	} else {
		c.collectFn = func(ch chan<- Metric) {
			c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
		}
	}

	return c
}
122
+
123
+// Describe returns all descriptions of the collector.
124
+func (c *processCollector) Describe(ch chan<- *Desc) {
125
+	ch <- c.cpuTotal
126
+	ch <- c.openFDs
127
+	ch <- c.maxFDs
128
+	ch <- c.vsize
129
+	ch <- c.maxVsize
130
+	ch <- c.rss
131
+	ch <- c.startTime
132
+}
133
+
134
// Collect returns the current state of all metrics of the collector.
// It delegates to the platform-specific collectFn chosen at construction.
func (c *processCollector) Collect(ch chan<- Metric) {
	c.collectFn(ch)
}
138
+
139
+func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
140
+	if !c.reportErrors {
141
+		return
142
+	}
143
+	if desc == nil {
144
+		desc = NewInvalidDesc(err)
145
+	}
146
+	ch <- NewInvalidMetric(desc, err)
147
+}
148
+
149
// NewPidFileFn returns a function that retrieves a pid from the specified file.
// It is meant to be used for the PidFn field in ProcessCollectorOpts.
func NewPidFileFn(pidFilePath string) func() (int, error) {
	readPid := func() (int, error) {
		raw, err := os.ReadFile(pidFilePath)
		if err != nil {
			return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err)
		}
		pid, err := strconv.Atoi(strings.TrimSpace(string(raw)))
		if err != nil {
			return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err)
		}
		return pid, nil
	}
	return readPid
}

+ 26
- 0
vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go View File

@@ -0,0 +1,26 @@
1
+// Copyright 2019 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+//go:build js
15
+// +build js
16
+
17
+package prometheus
18
+
19
// canCollectProcess reports whether process metrics can be gathered on this
// platform; under js they cannot.
func canCollectProcess() bool {
	return false
}
22
+
23
+func (c *processCollector) processCollect(ch chan<- Metric) {
24
+	// noop on this platform
25
+	return
26
+}

+ 66
- 0
vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go View File

@@ -0,0 +1,66 @@
1
+// Copyright 2019 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+//go:build !windows && !js
15
+// +build !windows,!js
16
+
17
+package prometheus
18
+
19
+import (
20
+	"github.com/prometheus/procfs"
21
+)
22
+
23
// canCollectProcess reports whether process metrics can be gathered here,
// i.e. whether the default procfs mount point is accessible.
func canCollectProcess() bool {
	_, err := procfs.NewDefaultFS()
	return err == nil
}
27
+
28
// processCollect gathers process metrics from procfs for the PID returned by
// pidFn and sends them on ch. Each sub-read (stat, FDs, limits) fails
// independently; failures are routed through reportError so the remaining
// metrics are still emitted.
func (c *processCollector) processCollect(ch chan<- Metric) {
	pid, err := c.pidFn()
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}

	p, err := procfs.NewProc(pid)
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}

	if stat, err := p.Stat(); err == nil {
		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
		if startTime, err := stat.StartTime(); err == nil {
			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
		} else {
			c.reportError(ch, c.startTime, err)
		}
	} else {
		c.reportError(ch, nil, err)
	}

	if fds, err := p.FileDescriptorsLen(); err == nil {
		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
	} else {
		c.reportError(ch, c.openFDs, err)
	}

	if limits, err := p.Limits(); err == nil {
		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
	} else {
		c.reportError(ch, nil, err)
	}
}

+ 116
- 0
vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go View File

@@ -0,0 +1,116 @@
1
+// Copyright 2019 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import (
17
+	"syscall"
18
+	"unsafe"
19
+
20
+	"golang.org/x/sys/windows"
21
+)
22
+
23
// canCollectProcess reports whether process metrics can be gathered; on
// Windows they always can, via the Win32 APIs below.
func canCollectProcess() bool {
	return true
}
26
+
27
// Lazily-loaded Win32 entry points used by processCollect.
var (
	modpsapi    = syscall.NewLazyDLL("psapi.dll")
	modkernel32 = syscall.NewLazyDLL("kernel32.dll")

	procGetProcessMemoryInfo  = modpsapi.NewProc("GetProcessMemoryInfo")
	procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
)
34
+
35
// processMemoryCounters mirrors the Win32 PROCESS_MEMORY_COUNTERS_EX layout
// so it can be filled directly by GetProcessMemoryInfo.
type processMemoryCounters struct {
	// System interface description
	// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex

	// Refer to the Golang internal implementation
	// https://golang.org/src/internal/syscall/windows/psapi_windows.go
	_                          uint32 // cb (struct size), filled by the syscall
	PageFaultCount             uint32
	PeakWorkingSetSize         uintptr
	WorkingSetSize             uintptr
	QuotaPeakPagedPoolUsage    uintptr
	QuotaPagedPoolUsage        uintptr
	QuotaPeakNonPagedPoolUsage uintptr
	QuotaNonPagedPoolUsage     uintptr
	PagefileUsage              uintptr
	PeakPagefileUsage          uintptr
	PrivateUsage               uintptr
}
53
+
54
+func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
55
+	mem := processMemoryCounters{}
56
+	r1, _, err := procGetProcessMemoryInfo.Call(
57
+		uintptr(handle),
58
+		uintptr(unsafe.Pointer(&mem)),
59
+		uintptr(unsafe.Sizeof(mem)),
60
+	)
61
+	if r1 != 1 {
62
+		return mem, err
63
+	} else {
64
+		return mem, nil
65
+	}
66
+}
67
+
68
+func getProcessHandleCount(handle windows.Handle) (uint32, error) {
69
+	var count uint32
70
+	r1, _, err := procGetProcessHandleCount.Call(
71
+		uintptr(handle),
72
+		uintptr(unsafe.Pointer(&count)),
73
+	)
74
+	if r1 != 1 {
75
+		return 0, err
76
+	} else {
77
+		return count, nil
78
+	}
79
+}
80
+
81
// processCollect gathers metrics for the current process via Win32 APIs and
// sends them on ch. Unlike the procfs implementation, any failure aborts the
// remainder of the collection after reporting the error.
func (c *processCollector) processCollect(ch chan<- Metric) {
	h, err := windows.GetCurrentProcess()
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}

	var startTime, exitTime, kernelTime, userTime windows.Filetime
	err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}
	ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
	ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))

	mem, err := getProcessMemoryInfo(h)
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}
	ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
	ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))

	// Handles stand in for file descriptors on Windows.
	handles, err := getProcessHandleCount(h)
	if err != nil {
		c.reportError(ch, nil, err)
		return
	}
	ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
	ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
}
113
+
114
+func fileTimeToSeconds(ft windows.Filetime) float64 {
115
+	return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
116
+}

+ 374
- 0
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go View File

@@ -0,0 +1,374 @@
1
+// Copyright 2017 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package promhttp
15
+
16
+import (
17
+	"bufio"
18
+	"io"
19
+	"net"
20
+	"net/http"
21
+)
22
+
23
// Bit flags marking which optional http.ResponseWriter side interfaces the
// wrapped writer implements. Their sum forms the index into pickDelegator.
const (
	closeNotifier = 1 << iota
	flusher
	hijacker
	readerFrom
	pusher
)

// delegator is the interface every wrapped response writer exposes: the
// standard http.ResponseWriter plus accessors for the observed status code
// and the number of body bytes written.
type delegator interface {
	http.ResponseWriter

	Status() int
	Written() int64
}
37
+
38
+type responseWriterDelegator struct {
39
+	http.ResponseWriter
40
+
41
+	status             int
42
+	written            int64
43
+	wroteHeader        bool
44
+	observeWriteHeader func(int)
45
+}
46
+
47
+func (r *responseWriterDelegator) Status() int {
48
+	return r.status
49
+}
50
+
51
+func (r *responseWriterDelegator) Written() int64 {
52
+	return r.written
53
+}
54
+
55
+func (r *responseWriterDelegator) WriteHeader(code int) {
56
+	if r.observeWriteHeader != nil && !r.wroteHeader {
57
+		// Only call observeWriteHeader for the 1st time. It's a bug if
58
+		// WriteHeader is called more than once, but we want to protect
59
+		// against it here. Note that we still delegate the WriteHeader
60
+		// to the original ResponseWriter to not mask the bug from it.
61
+		r.observeWriteHeader(code)
62
+	}
63
+	r.status = code
64
+	r.wroteHeader = true
65
+	r.ResponseWriter.WriteHeader(code)
66
+}
67
+
68
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
69
+	// If applicable, call WriteHeader here so that observeWriteHeader is
70
+	// handled appropriately.
71
+	if !r.wroteHeader {
72
+		r.WriteHeader(http.StatusOK)
73
+	}
74
+	n, err := r.ResponseWriter.Write(b)
75
+	r.written += int64(n)
76
+	return n, err
77
+}
78
+
79
+type (
80
+	closeNotifierDelegator struct{ *responseWriterDelegator }
81
+	flusherDelegator       struct{ *responseWriterDelegator }
82
+	hijackerDelegator      struct{ *responseWriterDelegator }
83
+	readerFromDelegator    struct{ *responseWriterDelegator }
84
+	pusherDelegator        struct{ *responseWriterDelegator }
85
+)
86
+
87
+func (d closeNotifierDelegator) CloseNotify() <-chan bool {
88
+	//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
89
+	return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
90
+}
91
+
92
+func (d flusherDelegator) Flush() {
93
+	// If applicable, call WriteHeader here so that observeWriteHeader is
94
+	// handled appropriately.
95
+	if !d.wroteHeader {
96
+		d.WriteHeader(http.StatusOK)
97
+	}
98
+	d.ResponseWriter.(http.Flusher).Flush()
99
+}
100
+
101
+func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
102
+	return d.ResponseWriter.(http.Hijacker).Hijack()
103
+}
104
+
105
+func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
106
+	// If applicable, call WriteHeader here so that observeWriteHeader is
107
+	// handled appropriately.
108
+	if !d.wroteHeader {
109
+		d.WriteHeader(http.StatusOK)
110
+	}
111
+	n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
112
+	d.written += n
113
+	return n, err
114
+}
115
+
116
+func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
117
+	return d.ResponseWriter.(http.Pusher).Push(target, opts)
118
+}
119
+
120
// pickDelegator maps the bitmask of optional interfaces implemented by the
// underlying ResponseWriter (the closeNotifier..pusher flags above) to a
// constructor returning a delegator that implements exactly that set.
var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)

// init enumerates all 2^5 interface combinations. Each combination embeds
// the base delegator plus one single-purpose wrapper per supported
// interface, so the anonymous struct satisfies precisely the interfaces
// the original writer did — no more, no less.
func init() {
	// TODO(beorn7): Code generation would help here.
	pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
		return d
	}
	pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
		return closeNotifierDelegator{d}
	}
	pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
		return flusherDelegator{d}
	}
	pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
		return struct {
			*responseWriterDelegator
			http.Flusher
			http.CloseNotifier
		}{d, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
		return hijackerDelegator{d}
	}
	pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
		return struct {
			*responseWriterDelegator
			http.Hijacker
			http.CloseNotifier
		}{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
		return struct {
			*responseWriterDelegator
			http.Hijacker
			http.Flusher
		}{d, hijackerDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
		return struct {
			*responseWriterDelegator
			http.Hijacker
			http.Flusher
			http.CloseNotifier
		}{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
		return readerFromDelegator{d}
	}
	pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.CloseNotifier
		}{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.Flusher
		}{d, readerFromDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.Flusher
			http.CloseNotifier
		}{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.Hijacker
		}{d, readerFromDelegator{d}, hijackerDelegator{d}}
	}
	pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.Hijacker
			http.CloseNotifier
		}{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.Hijacker
			http.Flusher
		}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
		return struct {
			*responseWriterDelegator
			io.ReaderFrom
			http.Hijacker
			http.Flusher
			http.CloseNotifier
		}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
		return pusherDelegator{d}
	}
	pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Flusher
		}{d, pusherDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Flusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Hijacker
		}{d, pusherDelegator{d}, hijackerDelegator{d}}
	}
	pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Hijacker
			http.CloseNotifier
		}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Hijacker
			http.Flusher
		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
		return struct {
			*responseWriterDelegator
			http.Pusher
			http.Hijacker
			http.Flusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
		}{d, pusherDelegator{d}, readerFromDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.CloseNotifier
		}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Flusher
		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Flusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Hijacker
		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Hijacker
			http.CloseNotifier
		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Hijacker
			http.Flusher
		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
	}
	pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
		return struct {
			*responseWriterDelegator
			http.Pusher
			io.ReaderFrom
			http.Hijacker
			http.Flusher
			http.CloseNotifier
		}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
	}
}
348
+
349
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
350
+	d := &responseWriterDelegator{
351
+		ResponseWriter:     w,
352
+		observeWriteHeader: observeWriteHeaderFunc,
353
+	}
354
+
355
+	id := 0
356
+	//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
357
+	if _, ok := w.(http.CloseNotifier); ok {
358
+		id += closeNotifier
359
+	}
360
+	if _, ok := w.(http.Flusher); ok {
361
+		id += flusher
362
+	}
363
+	if _, ok := w.(http.Hijacker); ok {
364
+		id += hijacker
365
+	}
366
+	if _, ok := w.(io.ReaderFrom); ok {
367
+		id += readerFrom
368
+	}
369
+	if _, ok := w.(http.Pusher); ok {
370
+		id += pusher
371
+	}
372
+
373
+	return pickDelegator[id](d)
374
+}

+ 408
- 0
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go View File

@@ -0,0 +1,408 @@
1
+// Copyright 2016 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+// Package promhttp provides tooling around HTTP servers and clients.
15
+//
16
+// First, the package allows the creation of http.Handler instances to expose
17
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
18
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
19
+// custom registry or anything that implements the Gatherer interface. It also
20
+// allows the creation of handlers that act differently on errors or allow to
21
+// log errors.
22
+//
23
+// Second, the package provides tooling to instrument instances of http.Handler
24
+// via middleware. Middleware wrappers follow the naming scheme
25
+// InstrumentHandlerX, where X describes the intended use of the middleware.
26
+// See each function's doc comment for specific details.
27
+//
28
+// Finally, the package allows for an http.RoundTripper to be instrumented via
29
+// middleware. Middleware wrappers follow the naming scheme
30
+// InstrumentRoundTripperX, where X describes the intended use of the
31
+// middleware. See each function's doc comment for specific details.
32
+package promhttp
33
+
34
+import (
35
+	"compress/gzip"
36
+	"errors"
37
+	"fmt"
38
+	"io"
39
+	"net/http"
40
+	"strconv"
41
+	"strings"
42
+	"sync"
43
+	"time"
44
+
45
+	"github.com/prometheus/common/expfmt"
46
+
47
+	"github.com/prometheus/client_golang/prometheus"
48
+)
49
+
50
// Names of the HTTP headers read and written by the handler.
const (
	contentTypeHeader      = "Content-Type"
	contentEncodingHeader  = "Content-Encoding"
	acceptEncodingHeader   = "Accept-Encoding"
	processStartTimeHeader = "Process-Start-Time-Unix"
)

// gzipPool reuses gzip.Writers across responses so their internal buffers
// are not re-allocated on every scrape.
var gzipPool = sync.Pool{
	New: func() interface{} {
		return gzip.NewWriter(nil)
	},
}
62
+
63
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
64
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
65
+// no error logging, and it applies compression if requested by the client.
66
+//
67
+// The returned http.Handler is already instrumented using the
68
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
69
+// create multiple http.Handlers by separate calls of the Handler function, the
70
+// metrics used for instrumentation will be shared between them, providing
71
+// global scrape counts.
72
+//
73
+// This function is meant to cover the bulk of basic use cases. If you are doing
74
+// anything that requires more customization (including using a non-default
75
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
76
+// HandlerFor function. See there for details.
77
+func Handler() http.Handler {
78
+	return InstrumentMetricHandler(
79
+		prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
80
+	)
81
+}
82
+
83
// HandlerFor returns an uninstrumented http.Handler for the provided
// Gatherer. The behavior of the Handler is defined by the provided
// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
// instrumentation. Use the InstrumentMetricHandler function to apply the same
// kind of instrumentation as it is used by the Handler function.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
	// Adapt the plain Gatherer to the transactional interface and delegate.
	return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts)
}
92
+
93
// HandlerForTransactional is like HandlerFor, but it uses transactional gather, which
// can safely change in-place returned *dto.MetricFamily before call to `Gather` and after
// call to `done` of that `Gather`.
func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler {
	var (
		// inFlightSem, when non-nil, acts as a counting semaphore
		// bounding concurrent scrapes (see MaxRequestsInFlight).
		inFlightSem chan struct{}
		errCnt      = prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "promhttp_metric_handler_errors_total",
				Help: "Total number of internal errors encountered by the promhttp metric handler.",
			},
			[]string{"cause"},
		)
	)

	if opts.MaxRequestsInFlight > 0 {
		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
	}
	if opts.Registry != nil {
		// Initialize all possibilities that can occur below.
		errCnt.WithLabelValues("gathering")
		errCnt.WithLabelValues("encoding")
		if err := opts.Registry.Register(errCnt); err != nil {
			are := &prometheus.AlreadyRegisteredError{}
			if errors.As(err, are) {
				// Reuse the counter registered by an earlier call.
				errCnt = are.ExistingCollector.(*prometheus.CounterVec)
			} else {
				panic(err)
			}
		}
	}

	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
		if !opts.ProcessStartTime.IsZero() {
			rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
		}
		if inFlightSem != nil {
			// Non-blocking acquire: reject with 503 when the limit of
			// concurrent scrapes has been reached.
			select {
			case inFlightSem <- struct{}{}: // All good, carry on.
				defer func() { <-inFlightSem }()
			default:
				http.Error(rsp, fmt.Sprintf(
					"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
				), http.StatusServiceUnavailable)
				return
			}
		}
		mfs, done, err := reg.Gather()
		// done must run even on error so the transactional gatherer can
		// release whatever it holds for the returned metric families.
		defer done()
		if err != nil {
			if opts.ErrorLog != nil {
				opts.ErrorLog.Println("error gathering metrics:", err)
			}
			errCnt.WithLabelValues("gathering").Inc()
			switch opts.ErrorHandling {
			case PanicOnError:
				panic(err)
			case ContinueOnError:
				if len(mfs) == 0 {
					// Still report the error if no metrics have been gathered.
					httpError(rsp, err)
					return
				}
			case HTTPErrorOnError:
				httpError(rsp, err)
				return
			}
		}

		var contentType expfmt.Format
		if opts.EnableOpenMetrics {
			contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
		} else {
			contentType = expfmt.Negotiate(req.Header)
		}
		header := rsp.Header()
		header.Set(contentTypeHeader, string(contentType))

		w := io.Writer(rsp)
		if !opts.DisableCompression && gzipAccepted(req.Header) {
			header.Set(contentEncodingHeader, "gzip")
			gz := gzipPool.Get().(*gzip.Writer)
			defer gzipPool.Put(gz)

			gz.Reset(w)
			defer gz.Close()

			w = gz
		}

		enc := expfmt.NewEncoder(w, contentType)

		// handleError handles the error according to opts.ErrorHandling
		// and returns true if we have to abort after the handling.
		handleError := func(err error) bool {
			if err == nil {
				return false
			}
			if opts.ErrorLog != nil {
				opts.ErrorLog.Println("error encoding and sending metric family:", err)
			}
			errCnt.WithLabelValues("encoding").Inc()
			switch opts.ErrorHandling {
			case PanicOnError:
				panic(err)
			case HTTPErrorOnError:
				// We cannot really send an HTTP error at this
				// point because we most likely have written
				// something to rsp already. But at least we can
				// stop sending.
				return true
			}
			// Do nothing in all other cases, including ContinueOnError.
			return false
		}

		for _, mf := range mfs {
			if handleError(enc.Encode(mf)) {
				return
			}
		}
		if closer, ok := enc.(expfmt.Closer); ok {
			// This in particular takes care of the final "# EOF\n" line for OpenMetrics.
			if handleError(closer.Close()) {
				return
			}
		}
	})

	if opts.Timeout <= 0 {
		return h
	}
	return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
		"Exceeded configured timeout of %v.\n",
		opts.Timeout,
	))
}
230
+
231
+// InstrumentMetricHandler is usually used with an http.Handler returned by the
232
+// HandlerFor function. It instruments the provided http.Handler with two
233
+// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
234
+// scrapes partitioned by HTTP status code, and a gauge
235
+// "promhttp_metric_handler_requests_in_flight" to track the number of
236
+// simultaneous scrapes. This function idempotently registers collectors for
237
+// both metrics with the provided Registerer. It panics if the registration
238
+// fails. The provided metrics are useful to see how many scrapes hit the
239
+// monitored target (which could be from different Prometheus servers or other
240
+// scrapers), and how often they overlap (which would result in more than one
241
+// scrape in flight at the same time). Note that the scrapes-in-flight gauge
242
+// will contain the scrape by which it is exposed, while the scrape counter will
243
+// only get incremented after the scrape is complete (as only then the status
244
+// code is known). For tracking scrape durations, use the
245
+// "scrape_duration_seconds" gauge created by the Prometheus server upon each
246
+// scrape.
247
+func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
248
+	cnt := prometheus.NewCounterVec(
249
+		prometheus.CounterOpts{
250
+			Name: "promhttp_metric_handler_requests_total",
251
+			Help: "Total number of scrapes by HTTP status code.",
252
+		},
253
+		[]string{"code"},
254
+	)
255
+	// Initialize the most likely HTTP status codes.
256
+	cnt.WithLabelValues("200")
257
+	cnt.WithLabelValues("500")
258
+	cnt.WithLabelValues("503")
259
+	if err := reg.Register(cnt); err != nil {
260
+		are := &prometheus.AlreadyRegisteredError{}
261
+		if errors.As(err, are) {
262
+			cnt = are.ExistingCollector.(*prometheus.CounterVec)
263
+		} else {
264
+			panic(err)
265
+		}
266
+	}
267
+
268
+	gge := prometheus.NewGauge(prometheus.GaugeOpts{
269
+		Name: "promhttp_metric_handler_requests_in_flight",
270
+		Help: "Current number of scrapes being served.",
271
+	})
272
+	if err := reg.Register(gge); err != nil {
273
+		are := &prometheus.AlreadyRegisteredError{}
274
+		if errors.As(err, are) {
275
+			gge = are.ExistingCollector.(prometheus.Gauge)
276
+		} else {
277
+			panic(err)
278
+		}
279
+	}
280
+
281
+	return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
282
+}
283
+
284
// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors.
type HandlerErrorHandling int

// These constants cause handlers serving metrics to behave as described if
// errors are encountered.
const (
	// Serve an HTTP status code 500 upon the first error
	// encountered. Report the error message in the body. Note that HTTP
	// errors cannot be served anymore once the beginning of a regular
	// payload has been sent. Thus, in the (unlikely) case that encoding the
	// payload into the negotiated wire format fails, serving the response
	// will simply be aborted. Set an ErrorLog in HandlerOpts to detect
	// those errors.
	HTTPErrorOnError HandlerErrorHandling = iota
	// Ignore errors and try to serve as many metrics as possible. However,
	// if no metrics can be served, serve an HTTP status code 500 and the
	// last error message in the body. Only use this in deliberate "best
	// effort" metrics collection scenarios. In this case, it is highly
	// recommended to provide other means of detecting errors: By setting an
	// ErrorLog in HandlerOpts, the errors are logged. By providing a
	// Registry in HandlerOpts, the exposed metrics include an error counter
	// "promhttp_metric_handler_errors_total", which can be used for
	// alerts.
	ContinueOnError
	// Panic upon the first error encountered (useful for "crash only" apps).
	PanicOnError
)
312
+
313
// Logger is the minimal interface HandlerOpts needs for logging. Note that
// log.Logger from the standard library implements this interface, and it is
// easy to implement by custom loggers, if they don't do so already anyway.
type Logger interface {
	// Println logs its arguments in the manner of fmt.Println.
	Println(v ...interface{})
}
319
+
320
// HandlerOpts specifies options how to serve metrics via an http.Handler. The
// zero value of HandlerOpts is a reasonable default.
type HandlerOpts struct {
	// ErrorLog specifies an optional Logger for errors collecting and
	// serving metrics. If nil, errors are not logged at all. Note that the
	// type of a reported error is often prometheus.MultiError, which
	// formats into a multi-line error string. If you want to avoid the
	// latter, create a Logger implementation that detects a
	// prometheus.MultiError and formats the contained errors into one line.
	ErrorLog Logger
	// ErrorHandling defines how errors are handled. Note that errors are
	// logged regardless of the configured ErrorHandling provided ErrorLog
	// is not nil.
	ErrorHandling HandlerErrorHandling
	// If Registry is not nil, it is used to register a metric
	// "promhttp_metric_handler_errors_total", partitioned by "cause". A
	// failed registration causes a panic. Note that this error counter is
	// different from the instrumentation you get from the various
	// InstrumentHandler... helpers. It counts errors that don't necessarily
	// result in a non-2xx HTTP status code. There are two typical cases:
	// (1) Encoding errors that only happen after streaming of the HTTP body
	// has already started (and the status code 200 has been sent). This
	// should only happen with custom collectors. (2) Collection errors with
	// no effect on the HTTP status code because ErrorHandling is set to
	// ContinueOnError.
	Registry prometheus.Registerer
	// If DisableCompression is true, the handler will never compress the
	// response, even if requested by the client.
	DisableCompression bool
	// The number of concurrent HTTP requests is limited to
	// MaxRequestsInFlight. Additional requests are responded to with 503
	// Service Unavailable and a suitable message in the body. If
	// MaxRequestsInFlight is 0 or negative, no limit is applied.
	MaxRequestsInFlight int
	// If handling a request takes longer than Timeout, it is responded to
	// with 503 ServiceUnavailable and a suitable Message. No timeout is
	// applied if Timeout is 0 or negative. Note that with the current
	// implementation, reaching the timeout simply ends the HTTP requests as
	// described above (and even that only if sending of the body hasn't
	// started yet), while the bulk work of gathering all the metrics keeps
	// running in the background (with the eventual result to be thrown
	// away). Until the implementation is improved, it is recommended to
	// implement a separate timeout in potentially slow Collectors.
	Timeout time.Duration
	// If true, the experimental OpenMetrics encoding is added to the
	// possible options during content negotiation. Note that Prometheus
	// 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
	// the only way to transmit exemplars. However, the move to OpenMetrics
	// is not completely transparent. Most notably, the values of "quantile"
	// labels of Summaries and "le" labels of Histograms are formatted with
	// a trailing ".0" if they would otherwise look like integer numbers
	// (which changes the identity of the resulting series on the Prometheus
	// server).
	EnableOpenMetrics bool
	// ProcessStartTime allows setting a process start time value that will be
	// exposed with the "Process-Start-Time-Unix" response header along with
	// the metrics payload. This allows callers to have efficient
	// transformations to cumulative counters (e.g. OpenTelemetry) or generally
	// _created timestamp estimation per scrape target.
	// NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
	// exposition format.
	ProcessStartTime time.Time
}
383
+
384
+// gzipAccepted returns whether the client will accept gzip-encoded content.
385
+func gzipAccepted(header http.Header) bool {
386
+	a := header.Get(acceptEncodingHeader)
387
+	parts := strings.Split(a, ",")
388
+	for _, part := range parts {
389
+		part = strings.TrimSpace(part)
390
+		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
391
+			return true
392
+		}
393
+	}
394
+	return false
395
+}
396
+
397
+// httpError removes any content-encoding header and then calls http.Error with
398
+// the provided error and http.StatusInternalServerError. Error contents is
399
+// supposed to be uncompressed plain text. Same as with a plain http.Error, this
400
+// must not be called if the header or any payload has already been sent.
401
+func httpError(rsp http.ResponseWriter, err error) {
402
+	rsp.Header().Del(contentEncodingHeader)
403
+	http.Error(
404
+		rsp,
405
+		"An error has occurred while serving metrics:\n\n"+err.Error(),
406
+		http.StatusInternalServerError,
407
+	)
408
+}

+ 249
- 0
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go View File

@@ -0,0 +1,249 @@
1
+// Copyright 2017 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package promhttp
15
+
16
+import (
17
+	"crypto/tls"
18
+	"net/http"
19
+	"net/http/httptrace"
20
+	"time"
21
+
22
+	"github.com/prometheus/client_golang/prometheus"
23
+)
24
+
25
// The RoundTripperFunc type is an adapter to allow the use of ordinary
// functions as RoundTrippers. If f is a function with the appropriate
// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
type RoundTripperFunc func(req *http.Request) (*http.Response, error)

// RoundTrip implements the RoundTripper interface by calling the function
// itself with the request.
func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
	return rt(r)
}
34
+
35
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
36
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
37
+// requests currently handled by the wrapped http.RoundTripper.
38
+//
39
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
40
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
41
+	return func(r *http.Request) (*http.Response, error) {
42
+		gauge.Inc()
43
+		defer gauge.Dec()
44
+		return next.RoundTrip(r)
45
+	}
46
+}
47
+
48
// InstrumentRoundTripperCounter is a middleware that wraps the provided
// http.RoundTripper to observe the request result with the provided CounterVec.
// The CounterVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. For the "method" label a predefined default label value set
// is used to filter given values. Values besides predefined values will count
// as `unknown` method.`WithExtraMethods` can be used to add more
// methods to the set. Partitioning of the CounterVec happens by HTTP status code
// and/or HTTP method if the respective instance label names are present in the
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
//
// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
// is not incremented.
//
// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests.
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
	rtOpts := defaultOptions()
	for _, o := range opts {
		o.apply(rtOpts)
	}

	// Curry the counter with dynamic labels before checking the remaining labels.
	code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))

	return func(r *http.Request) (*http.Response, error) {
		resp, err := next.RoundTrip(r)
		if err == nil {
			// Only successful round trips are counted (see doc comment above).
			l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
			for label, resolve := range rtOpts.extraLabelsFromCtx {
				// NOTE(review): resolves against resp.Request's context rather
				// than r's — assumes the transport populates resp.Request;
				// confirm this holds for all wrapped RoundTrippers.
				l[label] = resolve(resp.Request.Context())
			}
			addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
		}
		return resp, err
	}
}
86
+
87
// InstrumentRoundTripperDuration is a middleware that wraps the provided
// http.RoundTripper to observe the request duration with the provided
// ObserverVec.  The ObserverVec must have zero, one, or two non-const
// non-curried labels. For those, the only allowed label names are "code" and
// "method". The function panics otherwise. For the "method" label a predefined
// default label value set is used to filter given values. Values besides
// predefined values will count as `unknown` method. `WithExtraMethods`
// can be used to add more methods to the set. The Observe method of the Observer
// in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped RoundTripper panics or returns a non-nil error, no values are
// reported.
//
// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
	rtOpts := defaultOptions()
	for _, o := range opts {
		o.apply(rtOpts)
	}

	// Curry the observer with dynamic labels before checking the remaining labels.
	code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))

	return func(r *http.Request) (*http.Response, error) {
		// time.Since uses the monotonic clock reading embedded in start.
		start := time.Now()
		resp, err := next.RoundTrip(r)
		if err == nil {
			l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
			for label, resolve := range rtOpts.extraLabelsFromCtx {
				// NOTE(review): uses resp.Request's context, not r's — assumes
				// the transport populates resp.Request; verify for all wrapped
				// RoundTrippers.
				l[label] = resolve(resp.Request.Context())
			}
			observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
		}
		return resp, err
	}
}
130
+
131
// InstrumentTrace is used to offer flexibility in instrumenting the available
// httptrace.ClientTrace hook functions. Each function is passed a float64
// representing the time in seconds since the start of the http request. A user
// may choose to use separately buckets Histograms, or implement custom
// instance labels on a per function basis.
//
// Each field corresponds to the identically named hook of
// httptrace.ClientTrace; nil fields are simply not instrumented.
type InstrumentTrace struct {
	GotConn              func(float64)
	PutIdleConn          func(float64)
	GotFirstResponseByte func(float64)
	Got100Continue       func(float64)
	DNSStart             func(float64)
	DNSDone              func(float64)
	ConnectStart         func(float64)
	ConnectDone          func(float64)
	TLSHandshakeStart    func(float64)
	TLSHandshakeDone     func(float64)
	WroteHeaders         func(float64)
	Wait100Continue      func(float64)
	WroteRequest         func(float64)
}
151
+
152
// InstrumentRoundTripperTrace is a middleware that wraps the provided
// RoundTripper and reports times to hook functions provided in the
// InstrumentTrace struct. Hook functions that are not present in the provided
// InstrumentTrace struct are ignored. Times reported to the hook functions are
// time since the start of the request. Only with Go1.9+, those times are
// guaranteed to never be negative. (Earlier Go versions are not using a
// monotonic clock.) Note that partitioning of Histograms is expensive and
// should be used judiciously.
//
// For hook functions that receive an error as an argument, no observations are
// made in the event of a non-nil error value.
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
	return func(r *http.Request) (*http.Response, error) {
		start := time.Now()

		// Every hook below reports seconds elapsed since start; hooks left
		// nil in it are skipped.
		trace := &httptrace.ClientTrace{
			GotConn: func(_ httptrace.GotConnInfo) {
				if it.GotConn != nil {
					it.GotConn(time.Since(start).Seconds())
				}
			},
			PutIdleConn: func(err error) {
				if err != nil {
					return
				}
				if it.PutIdleConn != nil {
					it.PutIdleConn(time.Since(start).Seconds())
				}
			},
			DNSStart: func(_ httptrace.DNSStartInfo) {
				if it.DNSStart != nil {
					it.DNSStart(time.Since(start).Seconds())
				}
			},
			DNSDone: func(_ httptrace.DNSDoneInfo) {
				if it.DNSDone != nil {
					it.DNSDone(time.Since(start).Seconds())
				}
			},
			ConnectStart: func(_, _ string) {
				if it.ConnectStart != nil {
					it.ConnectStart(time.Since(start).Seconds())
				}
			},
			ConnectDone: func(_, _ string, err error) {
				if err != nil {
					return
				}
				if it.ConnectDone != nil {
					it.ConnectDone(time.Since(start).Seconds())
				}
			},
			GotFirstResponseByte: func() {
				if it.GotFirstResponseByte != nil {
					it.GotFirstResponseByte(time.Since(start).Seconds())
				}
			},
			Got100Continue: func() {
				if it.Got100Continue != nil {
					it.Got100Continue(time.Since(start).Seconds())
				}
			},
			TLSHandshakeStart: func() {
				if it.TLSHandshakeStart != nil {
					it.TLSHandshakeStart(time.Since(start).Seconds())
				}
			},
			TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
				if err != nil {
					return
				}
				if it.TLSHandshakeDone != nil {
					it.TLSHandshakeDone(time.Since(start).Seconds())
				}
			},
			WroteHeaders: func() {
				if it.WroteHeaders != nil {
					it.WroteHeaders(time.Since(start).Seconds())
				}
			},
			Wait100Continue: func() {
				if it.Wait100Continue != nil {
					it.Wait100Continue(time.Since(start).Seconds())
				}
			},
			WroteRequest: func(_ httptrace.WroteRequestInfo) {
				if it.WroteRequest != nil {
					it.WroteRequest(time.Since(start).Seconds())
				}
			},
		}
		// Attach the trace to the request context so net/http invokes the hooks.
		r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))

		return next.RoundTrip(r)
	}
}

+ 576
- 0
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go View File

@@ -0,0 +1,576 @@
1
+// Copyright 2017 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package promhttp
15
+
16
+import (
17
+	"errors"
18
+	"net/http"
19
+	"strconv"
20
+	"strings"
21
+	"time"
22
+
23
+	dto "github.com/prometheus/client_model/go"
24
+
25
+	"github.com/prometheus/client_golang/prometheus"
26
+)
27
+
28
+// magicString is used for the hacky label test in checkLabels. Remove once fixed.
29
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
30
+
31
+// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver],
32
+// which falls back to [prometheus.Observer.Observe] if no labels are provided.
33
+func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) {
34
+	if labels == nil {
35
+		obs.Observe(val)
36
+		return
37
+	}
38
+	obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
39
+}
40
+
41
+// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar],
42
+// which falls back to [prometheus.Counter.Add] if no labels are provided.
43
+func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) {
44
+	if labels == nil {
45
+		obs.Add(val)
46
+		return
47
+	}
48
+	obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels)
49
+}
50
+
51
+// InstrumentHandlerInFlight is a middleware that wraps the provided
52
+// http.Handler. It sets the provided prometheus.Gauge to the number of
53
+// requests currently handled by the wrapped http.Handler.
54
+//
55
+// See the example for InstrumentHandlerDuration for example usage.
56
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
57
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
58
+		g.Inc()
59
+		defer g.Dec()
60
+		next.ServeHTTP(w, r)
61
+	})
62
+}
63
+
64
// InstrumentHandlerDuration is a middleware that wraps the provided
// http.Handler to observe the request duration with the provided ObserverVec.
// The ObserverVec must have valid metric and label names and must have zero,
// one, or two non-const non-curried labels. For those, the only allowed label
// names are "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the request duration
// in seconds. Partitioning happens by HTTP status code and/or HTTP method if
// the respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
	hOpts := defaultOptions()
	for _, o := range opts {
		o.apply(hOpts)
	}

	// Curry the observer with dynamic labels before checking the remaining labels.
	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))

	if code {
		// The status code is only observable through a delegating
		// ResponseWriter, so wrap w when the "code" label is in use.
		return func(w http.ResponseWriter, r *http.Request) {
			now := time.Now()
			d := newDelegator(w, nil)
			next.ServeHTTP(d, r)

			l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
			for label, resolve := range hOpts.extraLabelsFromCtx {
				l[label] = resolve(r.Context())
			}
			observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
		}
	}

	// No "code" label: skip the delegator and pass the status 0 sentinel.
	return func(w http.ResponseWriter, r *http.Request) {
		now := time.Now()
		next.ServeHTTP(w, r)
		l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
		for label, resolve := range hOpts.extraLabelsFromCtx {
			l[label] = resolve(r.Context())
		}
		observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
	}
}
117
+
118
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
// to observe the request result with the provided CounterVec. The CounterVec
// must have valid metric and label names and must have zero, one, or two
// non-const non-curried labels. For those, the only allowed label names are
// "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the
// CounterVec happens by HTTP status code and/or HTTP method if the respective
// instance label names are present in the CounterVec. For unpartitioned
// counting, use a CounterVec with zero labels.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, the Counter is not incremented.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
	hOpts := defaultOptions()
	for _, o := range opts {
		o.apply(hOpts)
	}

	// Curry the counter with dynamic labels before checking the remaining labels.
	code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))

	if code {
		// The status code is only observable through a delegating
		// ResponseWriter, so wrap w when the "code" label is in use.
		return func(w http.ResponseWriter, r *http.Request) {
			d := newDelegator(w, nil)
			next.ServeHTTP(d, r)

			l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
			for label, resolve := range hOpts.extraLabelsFromCtx {
				l[label] = resolve(r.Context())
			}
			addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
		}
	}

	// No "code" label: skip the delegator and pass the status 0 sentinel.
	return func(w http.ResponseWriter, r *http.Request) {
		next.ServeHTTP(w, r)

		l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
		for label, resolve := range hOpts.extraLabelsFromCtx {
			l[label] = resolve(r.Context())
		}
		addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
	}
}
167
+
168
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
// http.Handler to observe with the provided ObserverVec the request duration
// until the response headers are written. The ObserverVec must have valid
// metric and label names and must have zero, one, or two non-const non-curried
// labels. For those, the only allowed label names are "code" and "method". The
// function panics otherwise. For the "method" label a predefined default label
// value set is used to filter given values. Values besides predefined values
// will count as `unknown` method.`WithExtraMethods` can be used to add more
// methods to the set. The Observe method of the Observer in the
// ObserverVec is called with the request duration in seconds. Partitioning
// happens by HTTP status code and/or HTTP method if the respective instance
// label names are present in the ObserverVec. For unpartitioned observations,
// use an ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
//
// If the wrapped Handler panics before calling WriteHeader, no value is
// reported.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
	hOpts := defaultOptions()
	for _, o := range opts {
		o.apply(hOpts)
	}

	// Curry the observer with dynamic labels before checking the remaining labels.
	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))

	return func(w http.ResponseWriter, r *http.Request) {
		now := time.Now()
		// The delegator invokes this callback exactly when the wrapped
		// handler first writes the response header.
		d := newDelegator(w, func(status int) {
			l := labels(code, method, r.Method, status, hOpts.extraMethods...)
			for label, resolve := range hOpts.extraLabelsFromCtx {
				l[label] = resolve(r.Context())
			}
			observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
		})
		next.ServeHTTP(d, r)
	}
}
211
+
212
// InstrumentHandlerRequestSize is a middleware that wraps the provided
// http.Handler to observe the request size with the provided ObserverVec. The
// ObserverVec must have valid metric and label names and must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the request size in
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
	hOpts := defaultOptions()
	for _, o := range opts {
		o.apply(hOpts)
	}

	// Curry the observer with dynamic labels before checking the remaining labels.
	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))

	if code {
		// The status code is only observable through a delegating
		// ResponseWriter, so wrap w when the "code" label is in use.
		return func(w http.ResponseWriter, r *http.Request) {
			d := newDelegator(w, nil)
			next.ServeHTTP(d, r)
			size := computeApproximateRequestSize(r)

			l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
			for label, resolve := range hOpts.extraLabelsFromCtx {
				l[label] = resolve(r.Context())
			}
			observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
		}
	}

	// No "code" label: skip the delegator and pass the status 0 sentinel.
	return func(w http.ResponseWriter, r *http.Request) {
		next.ServeHTTP(w, r)
		size := computeApproximateRequestSize(r)

		l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
		for label, resolve := range hOpts.extraLabelsFromCtx {
			l[label] = resolve(r.Context())
		}
		observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
	}
}
265
+
266
// InstrumentHandlerResponseSize is a middleware that wraps the provided
// http.Handler to observe the response size with the provided ObserverVec. The
// ObserverVec must have valid metric and label names and must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the response size in
// bytes. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
	hOpts := defaultOptions()
	for _, o := range opts {
		o.apply(hOpts)
	}

	// Curry the observer with dynamic labels before checking the remaining labels.
	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))

	// The delegator is always needed here: it both tracks the status code
	// and counts the bytes written to the response.
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		d := newDelegator(w, nil)
		next.ServeHTTP(d, r)

		l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
		for label, resolve := range hOpts.extraLabelsFromCtx {
			l[label] = resolve(r.Context())
		}
		observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
	})
}
305
+
306
// checkLabels returns whether the provided Collector has a non-const,
// non-curried label named "code" and/or "method". It panics if the provided
// Collector does not have a Desc or has more than one Desc or its Desc is
// invalid. It also panics if the Collector has any non-const, non-curried
// labels that are not named "code" or "method".
func checkLabels(c prometheus.Collector) (code, method bool) {
	// TODO(beorn7): Remove this hacky way to check for instance labels
	// once Descriptors can have their dimensionality queried.
	var (
		desc *prometheus.Desc
		m    prometheus.Metric
		pm   dto.Metric
		lvs  []string
	)

	// Get the Desc from the Collector. A buffered channel of size 1 plus the
	// non-blocking selects below detect "zero Descs" and "more than one Desc".
	descc := make(chan *prometheus.Desc, 1)
	c.Describe(descc)

	select {
	case desc = <-descc:
	default:
		panic("no description provided by collector")
	}
	select {
	case <-descc:
		panic("more than one description provided by collector")
	default:
	}

	close(descc)

	// Make sure the Collector has a valid Desc by registering it with a
	// temporary registry.
	prometheus.NewRegistry().MustRegister(c)

	// Create a ConstMetric with the Desc. Since we don't know how many
	// variable labels there are, try for as long as it needs.
	// Each failed attempt appends one more magicString label value.
	for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
		m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
	}

	// Write out the metric into a proto message and look at the labels.
	// If the value is not the magicString, it is a constLabel, which doesn't interest us.
	// If the label is curried, it doesn't interest us.
	// In all other cases, only "code" or "method" is allowed.
	if err := m.Write(&pm); err != nil {
		panic("error checking metric for labels")
	}
	for _, label := range pm.Label {
		name, value := label.GetName(), label.GetValue()
		if value != magicString || isLabelCurried(c, name) {
			continue
		}
		switch name {
		case "code":
			code = true
		case "method":
			method = true
		default:
			panic("metric partitioned with non-supported labels")
		}
	}
	return
}
371
+
372
+func isLabelCurried(c prometheus.Collector, label string) bool {
373
+	// This is even hackier than the label test above.
374
+	// We essentially try to curry again and see if it works.
375
+	// But for that, we need to type-convert to the two
376
+	// types we use here, ObserverVec or *CounterVec.
377
+	switch v := c.(type) {
378
+	case *prometheus.CounterVec:
379
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
380
+			return false
381
+		}
382
+	case prometheus.ObserverVec:
383
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
384
+			return false
385
+		}
386
+	default:
387
+		panic("unsupported metric vec type")
388
+	}
389
+	return true
390
+}
391
+
392
+func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
393
+	labels := prometheus.Labels{}
394
+
395
+	if !(code || method) {
396
+		return labels
397
+	}
398
+
399
+	if code {
400
+		labels["code"] = sanitizeCode(status)
401
+	}
402
+	if method {
403
+		labels["method"] = sanitizeMethod(reqMethod, extraMethods...)
404
+	}
405
+
406
+	return labels
407
+}
408
+
409
+func computeApproximateRequestSize(r *http.Request) int {
410
+	s := 0
411
+	if r.URL != nil {
412
+		s += len(r.URL.String())
413
+	}
414
+
415
+	s += len(r.Method)
416
+	s += len(r.Proto)
417
+	for name, values := range r.Header {
418
+		s += len(name)
419
+		for _, value := range values {
420
+			s += len(value)
421
+		}
422
+	}
423
+	s += len(r.Host)
424
+
425
+	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
426
+
427
+	if r.ContentLength != -1 {
428
+		s += int(r.ContentLength)
429
+	}
430
+	return s
431
+}
432
+
433
// If the wrapped http.Handler has a known method, it will be sanitized and returned.
// Otherwise, "unknown" will be returned. The known method list can be extended
// as needed by using extraMethods parameter.
func sanitizeMethod(m string, extraMethods ...string) string {
	// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for
	// the methods chosen as default. Only the exact all-upper or all-lower
	// spelling of a default method matches; mixed case falls through to the
	// case-insensitive extraMethods check below.
	switch m {
	case "GET", "get", "PUT", "put", "HEAD", "head", "POST", "post",
		"DELETE", "delete", "CONNECT", "connect", "OPTIONS", "options",
		"NOTIFY", "notify", "TRACE", "trace", "PATCH", "patch":
		return strings.ToLower(m)
	}
	for _, known := range extraMethods {
		if strings.EqualFold(m, known) {
			return strings.ToLower(m)
		}
	}
	return "unknown"
}
469
+
470
// If the wrapped http.Handler has not set a status code, i.e. the value is
// currently 0, sanitizeCode will return 200, for consistency with behavior in
// the stdlib.
func sanitizeCode(s int) string {
	// See for accepted codes https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
	switch {
	case s == 0:
		// Unset status: net/http would have sent 200.
		return "200"
	case s >= 100 && s <= 599:
		return strconv.Itoa(s)
	default:
		return "unknown"
	}
}

+ 84
- 0
vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go View File

@@ -0,0 +1,84 @@
1
+// Copyright 2022 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package promhttp
15
+
16
+import (
17
+	"context"
18
+
19
+	"github.com/prometheus/client_golang/prometheus"
20
+)
21
+
22
// Option is used to configure either a handler (middleware) or a round tripper.
type Option interface {
	apply(*options)
}

// LabelValueFromCtx is used to compute a label value from the request context.
// The context can be filled with values from the request through middleware.
type LabelValueFromCtx func(ctx context.Context) string

// options stores the configuration shared by handlers and round trippers.
type options struct {
	// extraMethods lists additional HTTP methods accepted as label values.
	extraMethods []string
	// getExemplarFn extracts exemplar labels from the request context.
	getExemplarFn func(requestCtx context.Context) prometheus.Labels
	// extraLabelsFromCtx maps label names to their context-based resolvers.
	extraLabelsFromCtx map[string]LabelValueFromCtx
}
37
+
38
+func defaultOptions() *options {
39
+	return &options{
40
+		getExemplarFn:      func(ctx context.Context) prometheus.Labels { return nil },
41
+		extraLabelsFromCtx: map[string]LabelValueFromCtx{},
42
+	}
43
+}
44
+
45
+func (o *options) emptyDynamicLabels() prometheus.Labels {
46
+	labels := prometheus.Labels{}
47
+
48
+	for label := range o.extraLabelsFromCtx {
49
+		labels[label] = ""
50
+	}
51
+
52
+	return labels
53
+}
54
+
55
// optionApplyFunc adapts a plain function to the Option interface.
type optionApplyFunc func(*options)

func (o optionApplyFunc) apply(opt *options) { o(opt) }
58
+
59
+// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
60
+// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
61
+//
62
+// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage.
63
+func WithExtraMethods(methods ...string) Option {
64
+	return optionApplyFunc(func(o *options) {
65
+		o.extraMethods = methods
66
+	})
67
+}
68
+
69
+// WithExemplarFromContext allows to inject function that will get exemplar from context that will be put to counter and histogram metrics.
70
+// If the function returns nil labels or the metric does not support exemplars, no exemplar will be added (noop), but
71
+// metric will continue to observe/increment.
72
+func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
73
+	return optionApplyFunc(func(o *options) {
74
+		o.getExemplarFn = getExemplarFn
75
+	})
76
+}
77
+
78
+// WithLabelFromCtx registers a label for dynamic resolution with access to context.
79
+// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage
80
+func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
81
+	return optionApplyFunc(func(o *options) {
82
+		o.extraLabelsFromCtx[name] = valueFn
83
+	})
84
+}

+ 1075
- 0
vendor/github.com/prometheus/client_golang/prometheus/registry.go
File diff suppressed because it is too large
View File


+ 785
- 0
vendor/github.com/prometheus/client_golang/prometheus/summary.go View File

@@ -0,0 +1,785 @@
1
+// Copyright 2014 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import (
17
+	"fmt"
18
+	"math"
19
+	"runtime"
20
+	"sort"
21
+	"sync"
22
+	"sync/atomic"
23
+	"time"
24
+
25
+	dto "github.com/prometheus/client_model/go"
26
+
27
+	"github.com/beorn7/perks/quantile"
28
+	"google.golang.org/protobuf/proto"
29
+	"google.golang.org/protobuf/types/known/timestamppb"
30
+)
31
+
32
// quantileLabel is used for the label that defines the quantile in a
// summary.
const quantileLabel = "quantile"

// A Summary captures individual observations from an event or sample stream and
// summarizes them in a manner similar to traditional summary statistics: 1. sum
// of observations, 2. observation count, 3. rank estimations.
//
// A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency
// as rank estimations. However, the default behavior will change in the
// upcoming v1.0.0 of the library. There will be no rank estimations at all by
// default. For a sane transition, it is recommended to set the desired rank
// estimations explicitly.
//
// Note that the rank estimations cannot be aggregated in a meaningful way with
// the Prometheus query language (i.e. you cannot average or add them). If you
// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
// queries served across all instances of a service), consider the Histogram
// metric type. See the Prometheus documentation for more details.
//
// To create Summary instances, use NewSummary.
type Summary interface {
	Metric
	Collector

	// Observe adds a single observation to the summary. Observations are
	// usually positive or zero. Negative observations are accepted but
	// prevent current versions of Prometheus from properly detecting
	// counter resets in the sum of observations. See
	// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
	// for details.
	Observe(float64)
}

// errQuantileLabelNotAllowed is the panic value raised when a user tries to
// declare "quantile" as a label name; that name is reserved for the quantile
// dimension of the summary itself.
var errQuantileLabelNotAllowed = fmt.Errorf(
	"%q is not allowed as label name in summaries", quantileLabel,
)

// Default values for SummaryOpts.
const (
	// DefMaxAge is the default duration for which observations stay
	// relevant.
	DefMaxAge time.Duration = 10 * time.Minute
	// DefAgeBuckets is the default number of buckets used to calculate the
	// age of observations.
	DefAgeBuckets = 5
	// DefBufCap is the standard buffer size for collecting Summary observations.
	DefBufCap = 500
)

// SummaryOpts bundles the options for creating a Summary metric. It is
// mandatory to set Name to a non-empty string. While all other fields are
// optional and can safely be left at their zero value, it is recommended to set
// a help string and to explicitly set the Objectives field to the desired value
// as the default value will change in the upcoming v1.0.0 of the library.
type SummaryOpts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Summary (created by joining these components with
	// "_"). Only Name is mandatory, the others merely help structuring the
	// name. Note that the fully-qualified name of the Summary must be a
	// valid Prometheus metric name.
	Namespace string
	Subsystem string
	Name      string

	// Help provides information about this Summary.
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this metric. Metrics
	// with the same fully-qualified name must have the same label names in
	// their ConstLabels.
	//
	// Due to the way a Summary is represented in the Prometheus text format
	// and how it is handled by the Prometheus server internally, “quantile”
	// is an illegal label name. Construction of a Summary or SummaryVec
	// will panic if this label name is used in ConstLabels.
	//
	// ConstLabels are only used rarely. In particular, do not use them to
	// attach the same labels to all your metrics. Those use cases are
	// better covered by target labels set by the scraping Prometheus
	// server, or by one specific metric (e.g. a build_info or a
	// machine_role metric). See also
	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
	ConstLabels Labels

	// Objectives defines the quantile rank estimates with their respective
	// absolute error. If Objectives[q] = e, then the value reported for q
	// will be the φ-quantile value for some φ between q-e and q+e.  The
	// default value is an empty map, resulting in a summary without
	// quantiles.
	Objectives map[float64]float64

	// MaxAge defines the duration for which an observation stays relevant
	// for the summary. Only applies to pre-calculated quantiles, does not
	// apply to _sum and _count. Must be positive. The default value is
	// DefMaxAge.
	MaxAge time.Duration

	// AgeBuckets is the number of buckets used to exclude observations that
	// are older than MaxAge from the summary. A higher number has a
	// resource penalty, so only increase it if the higher resolution is
	// really required. For very high observation rates, you might want to
	// reduce the number of age buckets. With only one age bucket, you will
	// effectively see a complete reset of the summary each time MaxAge has
	// passed. The default value is DefAgeBuckets.
	AgeBuckets uint32

	// BufCap defines the default sample stream buffer size.  The default
	// value of DefBufCap should suffice for most uses. If there is a need
	// to increase the value, a multiple of 500 is recommended (because that
	// is the internal buffer size of the underlying package
	// "github.com/bmizerany/perks/quantile").
	BufCap uint32

	// now is for testing purposes, by default it's time.Now.
	now func() time.Time
}

// SummaryVecOpts bundles the options to create a SummaryVec metric.
// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels
// is optional and can safely be left to its default value.
type SummaryVecOpts struct {
	SummaryOpts

	// VariableLabels are used to partition the metric vector by the given set
	// of labels. Each label value will be constrained with the optional Constraint
	// function, if provided.
	VariableLabels ConstrainableLabels
}
165
+
166
// Problem with the sliding-window decay algorithm... The Merge method of
// perk/quantile is actually not working as advertised - and it might be
// unfixable, as the underlying algorithm is apparently not capable of merging
// summaries in the first place. To avoid using Merge, we are currently adding
// observations to _each_ age bucket, i.e. the effort to add a sample is
// essentially multiplied by the number of age buckets. When rotating age
// buckets, we empty the previous head stream. On scrape time, we simply take
// the quantiles from the head stream (no merging required). Result: More effort
// on observation time, less effort on scrape time, which is exactly the
// opposite of what we try to accomplish, but at least the results are correct.
//
// The quite elegant previous contraption to merge the age buckets efficiently
// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
// can't be used anymore.

// NewSummary creates a new Summary based on the provided SummaryOpts.
func NewSummary(opts SummaryOpts) Summary {
	return newSummary(
		NewDesc(
			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
			opts.Help,
			nil,
			opts.ConstLabels,
		),
		opts,
	)
}

// newSummary validates desc and opts, fills in defaults, and returns either a
// lock-free noObjectivesSummary (when no quantile objectives are configured)
// or a full sliding-window summary. It panics on a label-cardinality mismatch,
// on use of the reserved "quantile" label, or on a negative MaxAge.
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
	if len(desc.variableLabels.names) != len(labelValues) {
		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
	}

	// "quantile" is reserved for the summary's own quantile dimension.
	for _, n := range desc.variableLabels.names {
		if n == quantileLabel {
			panic(errQuantileLabelNotAllowed)
		}
	}
	for _, lp := range desc.constLabelPairs {
		if lp.GetName() == quantileLabel {
			panic(errQuantileLabelNotAllowed)
		}
	}

	if opts.Objectives == nil {
		opts.Objectives = map[float64]float64{}
	}

	if opts.MaxAge < 0 {
		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
	}
	if opts.MaxAge == 0 {
		opts.MaxAge = DefMaxAge
	}

	if opts.AgeBuckets == 0 {
		opts.AgeBuckets = DefAgeBuckets
	}

	if opts.BufCap == 0 {
		opts.BufCap = DefBufCap
	}

	if opts.now == nil {
		opts.now = time.Now
	}
	if len(opts.Objectives) == 0 {
		// Use the lock-free implementation of a Summary without objectives.
		s := &noObjectivesSummary{
			desc:       desc,
			labelPairs: MakeLabelPairs(desc, labelValues),
			counts:     [2]*summaryCounts{{}, {}},
		}
		s.init(s) // Init self-collection.
		s.createdTs = timestamppb.New(opts.now())
		return s
	}

	s := &summary{
		desc: desc,

		objectives:       opts.Objectives,
		sortedObjectives: make([]float64, 0, len(opts.Objectives)),

		labelPairs: MakeLabelPairs(desc, labelValues),

		hotBuf:         make([]float64, 0, opts.BufCap),
		coldBuf:        make([]float64, 0, opts.BufCap),
		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
	}
	// The head stream and the hot buffer expire together at the end of the
	// first age bucket.
	s.headStreamExpTime = opts.now().Add(s.streamDuration)
	s.hotBufExpTime = s.headStreamExpTime

	for i := uint32(0); i < opts.AgeBuckets; i++ {
		s.streams = append(s.streams, s.newStream())
	}
	s.headStream = s.streams[0]

	for qu := range s.objectives {
		s.sortedObjectives = append(s.sortedObjectives, qu)
	}
	sort.Float64s(s.sortedObjectives)

	s.init(s) // Init self-collection.
	s.createdTs = timestamppb.New(opts.now())
	return s
}
273
+
274
// summary is the sliding-window Summary implementation used when quantile
// objectives are configured. Observations are buffered (hotBuf/coldBuf) and
// periodically flushed into one quantile.Stream per age bucket.
type summary struct {
	selfCollector

	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
	mtx    sync.Mutex // Protects every other moving part.
	// Lock bufMtx before mtx if both are needed.

	desc *Desc

	// objectives maps each quantile rank to its allowed absolute error;
	// sortedObjectives holds the ranks in ascending order for Write.
	objectives       map[float64]float64
	sortedObjectives []float64

	labelPairs []*dto.LabelPair

	// sum and cnt accumulate all observations ever flushed; unlike the
	// quantile streams they are not subject to the MaxAge sliding window.
	sum float64
	cnt uint64

	// hotBuf receives new observations; coldBuf is the buffer currently
	// being flushed into the streams. The two are swapped under both
	// mutexes (see swapBufs).
	hotBuf, coldBuf []float64

	streams                          []*quantile.Stream
	streamDuration                   time.Duration
	headStream                       *quantile.Stream
	headStreamIdx                    int
	headStreamExpTime, hotBufExpTime time.Time

	createdTs *timestamppb.Timestamp
}
301
+
302
// Desc implements Metric.
func (s *summary) Desc() *Desc {
	return s.desc
}

// Observe implements Summary. It appends v to the hot buffer, triggering an
// asynchronous flush when the buffer's expiry time has passed or the buffer
// is full.
func (s *summary) Observe(v float64) {
	s.bufMtx.Lock()
	defer s.bufMtx.Unlock()

	now := time.Now()
	if now.After(s.hotBufExpTime) {
		s.asyncFlush(now)
	}
	s.hotBuf = append(s.hotBuf, v)
	if len(s.hotBuf) == cap(s.hotBuf) {
		s.asyncFlush(now)
	}
}
319
+
320
// Write implements Metric. It flushes any buffered observations and renders
// count, sum, and the configured quantiles (queried from the current head
// stream) into out. Note the lock ordering: bufMtx before mtx.
func (s *summary) Write(out *dto.Metric) error {
	sum := &dto.Summary{
		CreatedTimestamp: s.createdTs,
	}
	qs := make([]*dto.Quantile, 0, len(s.objectives))

	s.bufMtx.Lock()
	s.mtx.Lock()
	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
	s.swapBufs(time.Now())
	s.bufMtx.Unlock()

	s.flushColdBuf()
	sum.SampleCount = proto.Uint64(s.cnt)
	sum.SampleSum = proto.Float64(s.sum)

	for _, rank := range s.sortedObjectives {
		var q float64
		if s.headStream.Count() == 0 {
			// No observations in the current window.
			q = math.NaN()
		} else {
			q = s.headStream.Query(rank)
		}
		qs = append(qs, &dto.Quantile{
			Quantile: proto.Float64(rank),
			Value:    proto.Float64(q),
		})
	}

	s.mtx.Unlock()

	if len(qs) > 0 {
		sort.Sort(quantSort(qs))
	}
	sum.Quantile = qs

	out.Summary = sum
	out.Label = s.labelPairs
	return nil
}
360
+
361
// newStream creates one age-bucket stream targeting the configured objectives.
func (s *summary) newStream() *quantile.Stream {
	return quantile.NewTargeted(s.objectives)
}

// asyncFlush needs bufMtx locked.
func (s *summary) asyncFlush(now time.Time) {
	s.mtx.Lock()
	s.swapBufs(now)

	// Unblock the original goroutine that was responsible for the mutation
	// that triggered the compaction.  But hold onto the global non-buffer
	// state mutex until the operation finishes.
	go func() {
		s.flushColdBuf()
		s.mtx.Unlock()
	}()
}

// rotateStreams needs mtx AND bufMtx locked.
func (s *summary) maybeRotateStreams() {
	// Advance the head stream (resetting each expired bucket) until its
	// expiry time has caught up with the hot buffer's expiry time.
	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
		s.headStream.Reset()
		s.headStreamIdx++
		if s.headStreamIdx >= len(s.streams) {
			s.headStreamIdx = 0
		}
		s.headStream = s.streams[s.headStreamIdx]
		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
	}
}

// flushColdBuf needs mtx locked.
func (s *summary) flushColdBuf() {
	for _, v := range s.coldBuf {
		// Each observation is inserted into every age bucket; see the
		// file-level comment on why Merge cannot be used instead.
		for _, stream := range s.streams {
			stream.Insert(v)
		}
		s.cnt++
		s.sum += v
	}
	s.coldBuf = s.coldBuf[0:0]
	s.maybeRotateStreams()
}

// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
func (s *summary) swapBufs(now time.Time) {
	if len(s.coldBuf) != 0 {
		panic("coldBuf is not empty")
	}
	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
	// hotBuf is now empty and gets new expiration set.
	for now.After(s.hotBufExpTime) {
		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
	}
}
416
+
417
// summaryCounts holds the raw sum and count of one side (hot or cold) of a
// noObjectivesSummary.
type summaryCounts struct {
	// sumBits contains the bits of the float64 representing the sum of all
	// observations. sumBits and count have to go first in the struct to
	// guarantee alignment for atomic operations.
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	sumBits uint64
	count   uint64
}

// noObjectivesSummary is the lock-free Summary implementation used when no
// quantile objectives are configured; it only tracks _sum and _count.
type noObjectivesSummary struct {
	// countAndHotIdx enables lock-free writes with use of atomic updates.
	// The most significant bit is the hot index [0 or 1] of the count field
	// below. Observe calls update the hot one. All remaining bits count the
	// number of Observe calls. Observe starts by incrementing this counter,
	// and finish by incrementing the count field in the respective
	// summaryCounts, as a marker for completion.
	//
	// Calls of the Write method (which are non-mutating reads from the
	// perspective of the summary) swap the hot–cold under the writeMtx
	// lock. A cooldown is awaited (while locked) by comparing the number of
	// observations with the initiation count. Once they match, then the
	// last observation on the now cool one has completed. All cool fields must
	// be merged into the new hot before releasing writeMtx.

	// Fields with atomic access first! See alignment constraint:
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	countAndHotIdx uint64

	selfCollector
	desc     *Desc
	writeMtx sync.Mutex // Only used in the Write method.

	// Two counts, one is "hot" for lock-free observations, the other is
	// "cold" for writing out a dto.Metric. It has to be an array of
	// pointers to guarantee 64bit alignment of the histogramCounts, see
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
	counts [2]*summaryCounts

	labelPairs []*dto.LabelPair

	createdTs *timestamppb.Timestamp
}
459
+
460
// Desc implements Metric.
func (s *noObjectivesSummary) Desc() *Desc {
	return s.desc
}

// Observe implements Summary without taking a lock; see the countAndHotIdx
// struct comment for the full protocol.
func (s *noObjectivesSummary) Observe(v float64) {
	// We increment h.countAndHotIdx so that the counter in the lower
	// 63 bits gets incremented. At the same time, we get the new value
	// back, which we can use to find the currently-hot counts.
	n := atomic.AddUint64(&s.countAndHotIdx, 1)
	hotCounts := s.counts[n>>63]

	// CAS loop: add v to the float64 sum that is stored as raw bits.
	for {
		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
			break
		}
	}
	// Increment count last as we take it as a signal that the observation
	// is complete.
	atomic.AddUint64(&hotCounts.count, 1)
}
482
+
483
// Write implements Metric. It swaps the hot and cold counts, waits for
// in-flight observations on the now-cold side to finish, renders the result,
// and merges the cold counts back into the new hot side.
func (s *noObjectivesSummary) Write(out *dto.Metric) error {
	// For simplicity, we protect this whole method by a mutex. It is not in
	// the hot path, i.e. Observe is called much more often than Write. The
	// complication of making Write lock-free isn't worth it, if possible at
	// all.
	s.writeMtx.Lock()
	defer s.writeMtx.Unlock()

	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
	// without touching the count bits. See the struct comments for a full
	// description of the algorithm.
	n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
	// count is contained unchanged in the lower 63 bits.
	count := n & ((1 << 63) - 1)
	// The most significant bit tells us which counts is hot. The complement
	// is thus the cold one.
	hotCounts := s.counts[n>>63]
	coldCounts := s.counts[(^n)>>63]

	// Await cooldown.
	for count != atomic.LoadUint64(&coldCounts.count) {
		runtime.Gosched() // Let observations get work done.
	}

	sum := &dto.Summary{
		SampleCount:      proto.Uint64(count),
		SampleSum:        proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
		CreatedTimestamp: s.createdTs,
	}

	out.Summary = sum
	out.Label = s.labelPairs

	// Finally add all the cold counts to the new hot counts and reset the cold counts.
	atomic.AddUint64(&hotCounts.count, count)
	atomic.StoreUint64(&coldCounts.count, 0)
	for {
		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
			atomic.StoreUint64(&coldCounts.sumBits, 0)
			break
		}
	}
	return nil
}
529
+
530
+type quantSort []*dto.Quantile
531
+
532
+func (s quantSort) Len() int {
533
+	return len(s)
534
+}
535
+
536
+func (s quantSort) Swap(i, j int) {
537
+	s[i], s[j] = s[j], s[i]
538
+}
539
+
540
+func (s quantSort) Less(i, j int) bool {
541
+	return s[i].GetQuantile() < s[j].GetQuantile()
542
+}
543
+
544
// SummaryVec is a Collector that bundles a set of Summaries that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
	*MetricVec
}

// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
// partitioned by the given label names.
//
// Due to the way a Summary is represented in the Prometheus text format and how
// it is handled by the Prometheus server internally, “quantile” is an illegal
// label name. NewSummaryVec will panic if this label name is used.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
	return V2.NewSummaryVec(SummaryVecOpts{
		SummaryOpts:    opts,
		VariableLabels: UnconstrainedLabels(labelNames),
	})
}

// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
	// Reject the reserved "quantile" label early.
	for _, ln := range opts.VariableLabels.labelNames() {
		if ln == quantileLabel {
			panic(errQuantileLabelNotAllowed)
		}
	}
	desc := V2.NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		opts.VariableLabels,
		opts.ConstLabels,
	)
	return &SummaryVec{
		// Each new label-value combination lazily creates its Summary.
		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
			return newSummary(desc, opts.SummaryOpts, lvs...)
		}),
	}
}
585
+
586
+// GetMetricWithLabelValues returns the Summary for the given slice of label
587
+// values (same order as the variable labels in Desc). If that combination of
588
+// label values is accessed for the first time, a new Summary is created.
589
+//
590
+// It is possible to call this method without using the returned Summary to only
591
+// create the new Summary but leave it at its starting value, a Summary without
592
+// any observations.
593
+//
594
+// Keeping the Summary for later use is possible (and should be considered if
595
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
596
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
597
+// the Summary will still exist, but it will not be exported anymore, even if a
598
+// Summary with the same label values is created later. See also the CounterVec
599
+// example.
600
+//
601
+// An error is returned if the number of label values is not the same as the
602
+// number of variable labels in Desc (minus any curried labels).
603
+//
604
+// Note that for more than one label value, this method is prone to mistakes
605
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
606
+// an alternative to avoid that type of mistake. For higher label numbers, the
607
+// latter has a much more readable (albeit more verbose) syntax, but it comes
608
+// with a performance overhead (for creating and processing the Labels map).
609
+// See also the GaugeVec example.
610
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
611
+	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
612
+	if metric != nil {
613
+		return metric.(Observer), err
614
+	}
615
+	return nil, err
616
+}
617
+
618
+// GetMetricWith returns the Summary for the given Labels map (the label names
619
+// must match those of the variable labels in Desc). If that label map is
620
+// accessed for the first time, a new Summary is created. Implications of
621
+// creating a Summary without using it and keeping the Summary for later use are
622
+// the same as for GetMetricWithLabelValues.
623
+//
624
+// An error is returned if the number and names of the Labels are inconsistent
625
+// with those of the variable labels in Desc (minus any curried labels).
626
+//
627
+// This method is used for the same purpose as
628
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
629
+// methods.
630
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
631
+	metric, err := v.MetricVec.GetMetricWith(labels)
632
+	if metric != nil {
633
+		return metric.(Observer), err
634
+	}
635
+	return nil, err
636
+}
637
+
638
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
639
+// GetMetricWithLabelValues would have returned an error. Not returning an
640
+// error allows shortcuts like
641
+//
642
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
643
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
644
+	s, err := v.GetMetricWithLabelValues(lvs...)
645
+	if err != nil {
646
+		panic(err)
647
+	}
648
+	return s
649
+}
650
+
651
+// With works as GetMetricWith, but panics where GetMetricWithLabels would have
652
+// returned an error. Not returning an error allows shortcuts like
653
+//
654
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
655
+func (v *SummaryVec) With(labels Labels) Observer {
656
+	s, err := v.GetMetricWith(labels)
657
+	if err != nil {
658
+		panic(err)
659
+	}
660
+	return s
661
+}
662
+
663
+// CurryWith returns a vector curried with the provided labels, i.e. the
664
+// returned vector has those labels pre-set for all labeled operations performed
665
+// on it. The cardinality of the curried vector is reduced accordingly. The
666
+// order of the remaining labels stays the same (just with the curried labels
667
+// taken out of the sequence – which is relevant for the
668
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
669
+// vector, but only with labels not yet used for currying before.
670
+//
671
+// The metrics contained in the SummaryVec are shared between the curried and
672
+// uncurried vectors. They are just accessed differently. Curried and uncurried
673
+// vectors behave identically in terms of collection. Only one must be
674
+// registered with a given registry (usually the uncurried version). The Reset
675
+// method deletes all metrics, even if called on a curried vector.
676
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
677
+	vec, err := v.MetricVec.CurryWith(labels)
678
+	if vec != nil {
679
+		return &SummaryVec{vec}, err
680
+	}
681
+	return nil, err
682
+}
683
+
684
+// MustCurryWith works as CurryWith but panics where CurryWith would have
685
+// returned an error.
686
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
687
+	vec, err := v.CurryWith(labels)
688
+	if err != nil {
689
+		panic(err)
690
+	}
691
+	return vec
692
+}
693
+
694
// constSummary is the immutable Metric returned by NewConstSummary: count,
// sum, and quantiles are fixed at construction time. It implements only
// Metric, not Summary.
type constSummary struct {
	desc       *Desc
	count      uint64
	sum        float64
	quantiles  map[float64]float64 // Maps quantile ranks to values.
	labelPairs []*dto.LabelPair
	createdTs  *timestamppb.Timestamp
}

// Desc implements Metric.
func (s *constSummary) Desc() *Desc {
	return s.desc
}

// Write implements Metric, rendering the fixed count, sum, and quantiles.
func (s *constSummary) Write(out *dto.Metric) error {
	sum := &dto.Summary{
		CreatedTimestamp: s.createdTs,
	}
	qs := make([]*dto.Quantile, 0, len(s.quantiles))

	sum.SampleCount = proto.Uint64(s.count)
	sum.SampleSum = proto.Float64(s.sum)

	for rank, q := range s.quantiles {
		qs = append(qs, &dto.Quantile{
			Quantile: proto.Float64(rank),
			Value:    proto.Float64(q),
		})
	}

	// Map iteration order is random; present the quantiles sorted by rank.
	if len(qs) > 0 {
		sort.Sort(quantSort(qs))
	}
	sum.Quantile = qs

	out.Summary = sum
	out.Label = s.labelPairs

	return nil
}
733
+
734
// NewConstSummary returns a metric representing a Prometheus summary with fixed
// values for the count, sum, and quantiles. As those parameters cannot be
// changed, the returned value does not implement the Summary interface (but
// only the Metric interface). Users of this package will not have much use for
// it in regular operations. However, when implementing custom Collectors, it is
// useful as a throw-away metric that is generated on the fly to send it to
// Prometheus in the Collect method.
//
// quantiles maps ranks to quantile values. For example, a median latency of
// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
//
//	map[float64]float64{0.5: 0.23, 0.99: 0.56}
//
// NewConstSummary returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
func NewConstSummary(
	desc *Desc,
	count uint64,
	sum float64,
	quantiles map[float64]float64,
	labelValues ...string,
) (Metric, error) {
	if desc.err != nil {
		return nil, desc.err
	}
	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
		return nil, err
	}
	return &constSummary{
		desc:       desc,
		count:      count,
		sum:        sum,
		quantiles:  quantiles,
		labelPairs: MakeLabelPairs(desc, labelValues),
	}, nil
}

// MustNewConstSummary is a version of NewConstSummary that panics where
// NewConstSummary would have returned an error.
func MustNewConstSummary(
	desc *Desc,
	count uint64,
	sum float64,
	quantiles map[float64]float64,
	labelValues ...string,
) Metric {
	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
	if err != nil {
		panic(err)
	}
	return m
}

+ 81
- 0
vendor/github.com/prometheus/client_golang/prometheus/timer.go View File

@@ -0,0 +1,81 @@
1
+// Copyright 2016 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import "time"
17
+
18
// Timer is a helper type to time functions. Use NewTimer to create new
// instances.
type Timer struct {
	begin    time.Time // instant captured when NewTimer was called
	observer Observer  // receives the elapsed duration in seconds; may be nil
}

// NewTimer creates a new Timer. The provided Observer is used to observe a
// duration in seconds. If the Observer implements ExemplarObserver, passing an
// exemplar later on will also be supported.
// Timer is usually used to time a function call in the
// following way:
//
//	func TimeMe() {
//	    timer := NewTimer(myHistogram)
//	    defer timer.ObserveDuration()
//	    // Do actual work.
//	}
//
// or
//
//	func TimeMeWithExemplar() {
//		    timer := NewTimer(myHistogram)
//		    defer timer.ObserveDurationWithExemplar(exemplar)
//		    // Do actual work.
//		}
func NewTimer(o Observer) *Timer {
	return &Timer{
		begin:    time.Now(),
		observer: o,
	}
}
50
+
51
+// ObserveDuration records the duration passed since the Timer was created with
52
+// NewTimer. It calls the Observe method of the Observer provided during
53
+// construction with the duration in seconds as an argument. The observed
54
+// duration is also returned. ObserveDuration is usually called with a defer
55
+// statement.
56
+//
57
+// Note that this method is only guaranteed to never observe negative durations
58
+// if used with Go1.9+.
59
+func (t *Timer) ObserveDuration() time.Duration {
60
+	d := time.Since(t.begin)
61
+	if t.observer != nil {
62
+		t.observer.Observe(d.Seconds())
63
+	}
64
+	return d
65
+}
66
+
67
+// ObserveDurationWithExemplar is like ObserveDuration, but it will also
68
+// observe exemplar with the duration unless exemplar is nil or provided Observer can't
69
+// be casted to ExemplarObserver.
70
+func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
71
+	d := time.Since(t.begin)
72
+	eo, ok := t.observer.(ExemplarObserver)
73
+	if ok && exemplar != nil {
74
+		eo.ObserveWithExemplar(d.Seconds(), exemplar)
75
+		return d
76
+	}
77
+	if t.observer != nil {
78
+		t.observer.Observe(d.Seconds())
79
+	}
80
+	return d
81
+}

+ 42
- 0
vendor/github.com/prometheus/client_golang/prometheus/untyped.go View File

@@ -0,0 +1,42 @@
1
+// Copyright 2014 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
// UntypedOpts is an alias for Opts. See there for doc comments.
type UntypedOpts Opts

// UntypedFunc works like GaugeFunc but the collected metric is of type
// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
// type.
//
// To create UntypedFunc instances, use NewUntypedFunc.
type UntypedFunc interface {
	Metric
	Collector
}
28
+
29
+// NewUntypedFunc creates a new UntypedFunc based on the provided
30
+// UntypedOpts. The value reported is determined by calling the given function
31
+// from within the Write method. Take into account that metric collection may
32
+// happen concurrently. If that results in concurrent calls to Write, like in
33
+// the case where an UntypedFunc is directly registered with Prometheus, the
34
+// provided function must be concurrency-safe.
35
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
36
+	return newValueFunc(NewDesc(
37
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
38
+		opts.Help,
39
+		nil,
40
+		opts.ConstLabels,
41
+	), UntypedValue, function)
42
+}

+ 274
- 0
vendor/github.com/prometheus/client_golang/prometheus/value.go View File

@@ -0,0 +1,274 @@
1
+// Copyright 2014 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import (
17
+	"errors"
18
+	"fmt"
19
+	"sort"
20
+	"time"
21
+	"unicode/utf8"
22
+
23
+	"github.com/prometheus/client_golang/prometheus/internal"
24
+
25
+	dto "github.com/prometheus/client_model/go"
26
+	"google.golang.org/protobuf/proto"
27
+	"google.golang.org/protobuf/types/known/timestamppb"
28
+)
29
+
30
// ValueType is an enumeration of metric types that represent a simple value.
type ValueType int

// Possible values for the ValueType enum. Use UntypedValue to mark a metric
// with an unknown type.
const (
	_ ValueType = iota // zero value deliberately left invalid
	CounterValue
	GaugeValue
	UntypedValue
)

// Shared pointers to the protobuf metric-type enum values, built once at
// package init so ToDTO can return them without allocating per call.
var (
	CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }()
	GaugeMetricTypePtr   = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }()
	UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }()
)
47
+
48
+func (v ValueType) ToDTO() *dto.MetricType {
49
+	switch v {
50
+	case CounterValue:
51
+		return CounterMetricTypePtr
52
+	case GaugeValue:
53
+		return GaugeMetricTypePtr
54
+	default:
55
+		return UntypedMetricTypePtr
56
+	}
57
+}
58
+
59
// valueFunc is a generic metric for simple values retrieved on collect time
// from a function. It implements Metric and Collector. Its effective type is
// determined by ValueType. This is a low-level building block used by the
// library to back the implementations of CounterFunc, GaugeFunc, and
// UntypedFunc.
type valueFunc struct {
	selfCollector

	desc       *Desc
	valType    ValueType
	function   func() float64 // evaluated on every Write; must be concurrency-safe
	labelPairs []*dto.LabelPair
}

// newValueFunc returns a newly allocated valueFunc with the given Desc and
// ValueType. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where a valueFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe.
func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
	result := &valueFunc{
		desc:       desc,
		valType:    valueType,
		function:   function,
		labelPairs: MakeLabelPairs(desc, nil), // no variable label values: const labels only
	}
	result.init(result) // wire up selfCollector so the valueFunc collects itself
	return result
}

// Desc implements Metric.
func (v *valueFunc) Desc() *Desc {
	return v.desc
}

// Write implements Metric. The wrapped function is invoked here, at
// collection time.
func (v *valueFunc) Write(out *dto.Metric) error {
	return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
}
97
+
98
+// NewConstMetric returns a metric with one fixed value that cannot be
99
+// changed. Users of this package will not have much use for it in regular
100
+// operations. However, when implementing custom Collectors, it is useful as a
101
+// throw-away metric that is generated on the fly to send it to Prometheus in
102
+// the Collect method. NewConstMetric returns an error if the length of
103
+// labelValues is not consistent with the variable labels in Desc or if Desc is
104
+// invalid.
105
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
106
+	if desc.err != nil {
107
+		return nil, desc.err
108
+	}
109
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
110
+		return nil, err
111
+	}
112
+
113
+	metric := &dto.Metric{}
114
+	if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
115
+		return nil, err
116
+	}
117
+
118
+	return &constMetric{
119
+		desc:   desc,
120
+		metric: metric,
121
+	}, nil
122
+}
123
+
124
+// MustNewConstMetric is a version of NewConstMetric that panics where
125
+// NewConstMetric would have returned an error.
126
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
127
+	m, err := NewConstMetric(desc, valueType, value, labelValues...)
128
+	if err != nil {
129
+		panic(err)
130
+	}
131
+	return m
132
+}
133
+
134
+// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
135
+// with created timestamp set and returns an error for other metric types.
136
+func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
137
+	if desc.err != nil {
138
+		return nil, desc.err
139
+	}
140
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
141
+		return nil, err
142
+	}
143
+	switch valueType {
144
+	case CounterValue:
145
+		break
146
+	default:
147
+		return nil, errors.New("created timestamps are only supported for counters")
148
+	}
149
+
150
+	metric := &dto.Metric{}
151
+	if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
152
+		return nil, err
153
+	}
154
+
155
+	return &constMetric{
156
+		desc:   desc,
157
+		metric: metric,
158
+	}, nil
159
+}
160
+
161
+// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
162
+// NewConstMetricWithCreatedTimestamp would have returned an error.
163
+func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
164
+	m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
165
+	if err != nil {
166
+		panic(err)
167
+	}
168
+	return m
169
+}
170
+
171
// constMetric is the immutable Metric returned by NewConstMetric and friends.
// Its protobuf payload is fully built at construction time.
type constMetric struct {
	desc   *Desc
	metric *dto.Metric // pre-populated; copied field-by-field on Write
}

// Desc implements Metric.
func (m *constMetric) Desc() *Desc {
	return m.desc
}

// Write implements Metric by copying the pre-built fields into out. Only one
// of Counter/Gauge/Untyped is non-nil, depending on the ValueType used at
// construction (see populateMetric).
func (m *constMetric) Write(out *dto.Metric) error {
	out.Label = m.metric.Label
	out.Counter = m.metric.Counter
	out.Gauge = m.metric.Gauge
	out.Untyped = m.metric.Untyped
	return nil
}
187
+
188
+func populateMetric(
189
+	t ValueType,
190
+	v float64,
191
+	labelPairs []*dto.LabelPair,
192
+	e *dto.Exemplar,
193
+	m *dto.Metric,
194
+	ct *timestamppb.Timestamp,
195
+) error {
196
+	m.Label = labelPairs
197
+	switch t {
198
+	case CounterValue:
199
+		m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
200
+	case GaugeValue:
201
+		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
202
+	case UntypedValue:
203
+		m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
204
+	default:
205
+		return fmt.Errorf("encountered unknown type %v", t)
206
+	}
207
+	return nil
208
+}
209
+
210
+// MakeLabelPairs is a helper function to create protobuf LabelPairs from the
211
+// variable and constant labels in the provided Desc. The values for the
212
+// variable labels are defined by the labelValues slice, which must be in the
213
+// same order as the corresponding variable labels in the Desc.
214
+//
215
+// This function is only needed for custom Metric implementations. See MetricVec
216
+// example.
217
+func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
218
+	totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
219
+	if totalLen == 0 {
220
+		// Super fast path.
221
+		return nil
222
+	}
223
+	if len(desc.variableLabels.names) == 0 {
224
+		// Moderately fast path.
225
+		return desc.constLabelPairs
226
+	}
227
+	labelPairs := make([]*dto.LabelPair, 0, totalLen)
228
+	for i, l := range desc.variableLabels.names {
229
+		labelPairs = append(labelPairs, &dto.LabelPair{
230
+			Name:  proto.String(l),
231
+			Value: proto.String(labelValues[i]),
232
+		})
233
+	}
234
+	labelPairs = append(labelPairs, desc.constLabelPairs...)
235
+	sort.Sort(internal.LabelPairSorter(labelPairs))
236
+	return labelPairs
237
+}
238
+
239
// ExemplarMaxRunes is the max total number of runes allowed in exemplar
// labels. Both label names and label values count toward the limit (see
// newExemplar).
const ExemplarMaxRunes = 128
241
+
242
+// newExemplar creates a new dto.Exemplar from the provided values. An error is
243
+// returned if any of the label names or values are invalid or if the total
244
+// number of runes in the label names and values exceeds ExemplarMaxRunes.
245
+func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
246
+	e := &dto.Exemplar{}
247
+	e.Value = proto.Float64(value)
248
+	tsProto := timestamppb.New(ts)
249
+	if err := tsProto.CheckValid(); err != nil {
250
+		return nil, err
251
+	}
252
+	e.Timestamp = tsProto
253
+	labelPairs := make([]*dto.LabelPair, 0, len(l))
254
+	var runes int
255
+	for name, value := range l {
256
+		if !checkLabelName(name) {
257
+			return nil, fmt.Errorf("exemplar label name %q is invalid", name)
258
+		}
259
+		runes += utf8.RuneCountInString(name)
260
+		if !utf8.ValidString(value) {
261
+			return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
262
+		}
263
+		runes += utf8.RuneCountInString(value)
264
+		labelPairs = append(labelPairs, &dto.LabelPair{
265
+			Name:  proto.String(name),
266
+			Value: proto.String(value),
267
+		})
268
+	}
269
+	if runes > ExemplarMaxRunes {
270
+		return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
271
+	}
272
+	e.Label = labelPairs
273
+	return e, nil
274
+}

+ 709
- 0
vendor/github.com/prometheus/client_golang/prometheus/vec.go View File

@@ -0,0 +1,709 @@
1
+// Copyright 2014 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import (
17
+	"fmt"
18
+	"sync"
19
+
20
+	"github.com/prometheus/common/model"
21
+)
22
+
23
// MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec,
// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be
// used for custom Metric implementations.
//
// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in
// FooVec and initialize it with NewMetricVec. Implement wrappers for
// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather
// than (Metric, error). Similarly, create a wrapper for CurryWith that returns
// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also
// add the convenience methods WithLabelValues, With, and MustCurryWith, which
// panic instead of returning errors. See also the MetricVec example.
type MetricVec struct {
	// metricMap holds the actual metrics and is shared between a vector and
	// all vectors curried from it.
	*metricMap

	// curry records the label values fixed by CurryWith, sorted by index.
	curry []curriedLabelValue

	// hashAdd and hashAddByte can be replaced for testing collision handling.
	hashAdd     func(h uint64, s string) uint64
	hashAddByte func(h uint64, b byte) uint64
}
45
+
46
+// NewMetricVec returns an initialized metricVec.
47
+func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
48
+	return &MetricVec{
49
+		metricMap: &metricMap{
50
+			metrics:   map[uint64][]metricWithLabelValues{},
51
+			desc:      desc,
52
+			newMetric: newMetric,
53
+		},
54
+		hashAdd:     hashAdd,
55
+		hashAddByte: hashAddByte,
56
+	}
57
+}
58
+
59
+// DeleteLabelValues removes the metric where the variable labels are the same
60
+// as those passed in as labels (same order as the VariableLabels in Desc). It
61
+// returns true if a metric was deleted.
62
+//
63
+// It is not an error if the number of label values is not the same as the
64
+// number of VariableLabels in Desc. However, such inconsistent label count can
65
+// never match an actual metric, so the method will always return false in that
66
+// case.
67
+//
68
+// Note that for more than one label value, this method is prone to mistakes
69
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
70
+// alternative to avoid that type of mistake. For higher label numbers, the
71
+// latter has a much more readable (albeit more verbose) syntax, but it comes
72
+// with a performance overhead (for creating and processing the Labels map).
73
+// See also the CounterVec example.
74
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
75
+	lvs = constrainLabelValues(m.desc, lvs, m.curry)
76
+
77
+	h, err := m.hashLabelValues(lvs)
78
+	if err != nil {
79
+		return false
80
+	}
81
+
82
+	return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
83
+}
84
+
85
+// Delete deletes the metric where the variable labels are the same as those
86
+// passed in as labels. It returns true if a metric was deleted.
87
+//
88
+// It is not an error if the number and names of the Labels are inconsistent
89
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
90
+// can never match an actual metric, so the method will always return false in
91
+// that case.
92
+//
93
+// This method is used for the same purpose as DeleteLabelValues(...string). See
94
+// there for pros and cons of the two methods.
95
+func (m *MetricVec) Delete(labels Labels) bool {
96
+	labels, closer := constrainLabels(m.desc, labels)
97
+	defer closer()
98
+
99
+	h, err := m.hashLabels(labels)
100
+	if err != nil {
101
+		return false
102
+	}
103
+
104
+	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
105
+}
106
+
107
+// DeletePartialMatch deletes all metrics where the variable labels contain all of those
108
+// passed in as labels. The order of the labels does not matter.
109
+// It returns the number of metrics deleted.
110
+//
111
+// Note that curried labels will never be matched if deleting from the curried vector.
112
+// To match curried labels with DeletePartialMatch, it must be called on the base vector.
113
+func (m *MetricVec) DeletePartialMatch(labels Labels) int {
114
+	labels, closer := constrainLabels(m.desc, labels)
115
+	defer closer()
116
+
117
+	return m.metricMap.deleteByLabels(labels, m.curry)
118
+}
119
+
120
// Without explicit forwarding of Describe, Collect, Reset, those methods won't
// show up in GoDoc.

// Describe implements Collector.
func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }

// Collect implements Collector.
func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }

// Reset deletes all metrics in this vector. Since the metricMap is shared
// with any curried vectors, the reset is visible through all of them.
func (m *MetricVec) Reset() { m.metricMap.Reset() }
131
+
132
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence – which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the MetricVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
//
// Note that CurryWith is usually not called directly but through a wrapper
// around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
	var (
		newCurry []curriedLabelValue
		oldCurry = m.curry
		iCurry   int // cursor into oldCurry, which is sorted by index
	)
	// Walk all variable labels in order, merging the existing curry entries
	// with the newly requested ones.
	for i, labelName := range m.desc.variableLabels.names {
		val, ok := labels[labelName]
		if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
			// This label is already curried; re-currying it is an error.
			if ok {
				return nil, fmt.Errorf("label name %q is already curried", labelName)
			}
			newCurry = append(newCurry, oldCurry[iCurry])
			iCurry++
		} else {
			if !ok {
				continue // Label stays uncurried.
			}
			newCurry = append(newCurry, curriedLabelValue{
				i,
				m.desc.variableLabels.constrain(labelName, val),
			})
		}
	}
	// Every entry in labels must have been consumed; any surplus means an
	// unknown label name was supplied.
	if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
		return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
	}

	// The metricMap is shared; only the curry differs between the vectors.
	return &MetricVec{
		metricMap:   m.metricMap,
		curry:       newCurry,
		hashAdd:     m.hashAdd,
		hashAddByte: m.hashAddByte,
	}, nil
}
184
+
185
+// GetMetricWithLabelValues returns the Metric for the given slice of label
186
+// values (same order as the variable labels in Desc). If that combination of
187
+// label values is accessed for the first time, a new Metric is created (by
188
+// calling the newMetric function provided during construction of the
189
+// MetricVec).
190
+//
191
+// It is possible to call this method without using the returned Metric to only
192
+// create the new Metric but leave it in its initial state.
193
+//
194
+// Keeping the Metric for later use is possible (and should be considered if
195
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
196
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
197
+// Metric will still exist, but it will not be exported anymore, even if a
198
+// Metric with the same label values is created later.
199
+//
200
+// An error is returned if the number of label values is not the same as the
201
+// number of variable labels in Desc (minus any curried labels).
202
+//
203
+// Note that for more than one label value, this method is prone to mistakes
204
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
205
+// an alternative to avoid that type of mistake. For higher label numbers, the
206
+// latter has a much more readable (albeit more verbose) syntax, but it comes
207
+// with a performance overhead (for creating and processing the Labels map).
208
+//
209
+// Note that GetMetricWithLabelValues is usually not called directly but through
210
+// a wrapper around MetricVec, implementing a vector for a specific Metric
211
+// implementation, for example GaugeVec.
212
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
213
+	lvs = constrainLabelValues(m.desc, lvs, m.curry)
214
+	h, err := m.hashLabelValues(lvs)
215
+	if err != nil {
216
+		return nil, err
217
+	}
218
+
219
+	return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
220
+}
221
+
222
+// GetMetricWith returns the Metric for the given Labels map (the label names
223
+// must match those of the variable labels in Desc). If that label map is
224
+// accessed for the first time, a new Metric is created. Implications of
225
+// creating a Metric without using it and keeping the Metric for later use
226
+// are the same as for GetMetricWithLabelValues.
227
+//
228
+// An error is returned if the number and names of the Labels are inconsistent
229
+// with those of the variable labels in Desc (minus any curried labels).
230
+//
231
+// This method is used for the same purpose as
232
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
233
+// methods.
234
+//
235
+// Note that GetMetricWith is usually not called directly but through a wrapper
236
+// around MetricVec, implementing a vector for a specific Metric implementation,
237
+// for example GaugeVec.
238
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
239
+	labels, closer := constrainLabels(m.desc, labels)
240
+	defer closer()
241
+
242
+	h, err := m.hashLabels(labels)
243
+	if err != nil {
244
+		return nil, err
245
+	}
246
+
247
+	return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
248
+}
249
+
250
// hashLabelValues hashes the full ordered sequence of label values — curried
// values interleaved at their original indices with the supplied vals — so
// that curried and uncurried access of the same metric produce the same hash.
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
	if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
		return 0, err
	}

	var (
		h             = hashNew()
		curry         = m.curry
		iVals, iCurry int // cursors into vals and curry respectively
	)
	for i := 0; i < len(m.desc.variableLabels.names); i++ {
		if iCurry < len(curry) && curry[iCurry].index == i {
			// Position i is curried: take the pre-set value.
			h = m.hashAdd(h, curry[iCurry].value)
			iCurry++
		} else {
			h = m.hashAdd(h, vals[iVals])
			iVals++
		}
		// Separator prevents ambiguity between adjacent values.
		h = m.hashAddByte(h, model.SeparatorByte)
	}
	return h, nil
}

// hashLabels is the Labels-map analogue of hashLabelValues. It walks the
// variable labels in Desc order, taking each value either from the curry or
// from the supplied map, and errors on curried or missing label names.
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
	if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
		return 0, err
	}

	var (
		h      = hashNew()
		curry  = m.curry
		iCurry int // cursor into curry, sorted by index
	)
	for i, labelName := range m.desc.variableLabels.names {
		val, ok := labels[labelName]
		if iCurry < len(curry) && curry[iCurry].index == i {
			// A curried label must not be supplied again.
			if ok {
				return 0, fmt.Errorf("label name %q is already curried", labelName)
			}
			h = m.hashAdd(h, curry[iCurry].value)
			iCurry++
		} else {
			if !ok {
				return 0, fmt.Errorf("label name %q missing in label map", labelName)
			}
			h = m.hashAdd(h, val)
		}
		// Separator prevents ambiguity between adjacent values.
		h = m.hashAddByte(h, model.SeparatorByte)
	}
	return h, nil
}
301
+
302
// metricWithLabelValues provides the metric and its label values for
// disambiguation on hash collision.
type metricWithLabelValues struct {
	values []string // full ordered label values, curried values included
	metric Metric
}

// curriedLabelValue sets the curried value for a label at the given index.
type curriedLabelValue struct {
	index int // position within Desc's variable labels
	value string
}

// metricMap is a helper for metricVec and shared between differently curried
// metricVecs.
type metricMap struct {
	mtx sync.RWMutex // Protects metrics.
	// metrics maps a label-value hash to the metrics with that hash; the
	// slice holds more than one entry only on hash collision.
	metrics   map[uint64][]metricWithLabelValues
	desc      *Desc
	newMetric func(labelValues ...string) Metric
}
323
+
324
// Describe implements Collector. It will send exactly one Desc to the provided
// channel.
func (m *metricMap) Describe(ch chan<- *Desc) {
	ch <- m.desc
}

// Collect implements Collector.
func (m *metricMap) Collect(ch chan<- Metric) {
	m.mtx.RLock()
	defer m.mtx.RUnlock()

	// Each hash bucket may contain several metrics (hash collisions), so
	// iterate both levels.
	for _, metrics := range m.metrics {
		for _, metric := range metrics {
			ch <- metric.metric
		}
	}
}

// Reset deletes all metrics in this vector.
func (m *metricMap) Reset() {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	for h := range m.metrics {
		delete(m.metrics, h)
	}
}
351
+
352
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
// there are multiple matches in the bucket, use lvs to select a metric and
// remove only that metric.
func (m *metricMap) deleteByHashWithLabelValues(
	h uint64, lvs []string, curry []curriedLabelValue,
) bool {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	metrics, ok := m.metrics[h]
	if !ok {
		return false
	}

	i := findMetricWithLabelValues(metrics, lvs, curry)
	if i >= len(metrics) {
		// Sentinel index len(metrics) means "not found".
		return false
	}

	if len(metrics) > 1 {
		old := metrics
		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
		// The append above shifted entries left within the shared backing
		// array; zero the now-stale last slot so the removed Metric can be
		// garbage collected.
		old[len(old)-1] = metricWithLabelValues{}
	} else {
		delete(m.metrics, h)
	}
	return true
}

// deleteByHashWithLabels removes the metric from the hash bucket h. If there
// are multiple matches in the bucket, use lvs to select a metric and remove
// only that metric.
func (m *metricMap) deleteByHashWithLabels(
	h uint64, labels Labels, curry []curriedLabelValue,
) bool {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	metrics, ok := m.metrics[h]
	if !ok {
		return false
	}
	i := findMetricWithLabels(m.desc, metrics, labels, curry)
	if i >= len(metrics) {
		// Sentinel index len(metrics) means "not found".
		return false
	}

	if len(metrics) > 1 {
		old := metrics
		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
		// Zero the stale trailing slot of the shared backing array so the
		// removed Metric can be garbage collected.
		old[len(old)-1] = metricWithLabelValues{}
	} else {
		delete(m.metrics, h)
	}
	return true
}
408
+
409
+// deleteByLabels deletes a metric if the given labels are present in the metric.
410
+func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
411
+	m.mtx.Lock()
412
+	defer m.mtx.Unlock()
413
+
414
+	var numDeleted int
415
+
416
+	for h, metrics := range m.metrics {
417
+		i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
418
+		if i >= len(metrics) {
419
+			// Didn't find matching labels in this metric slice.
420
+			continue
421
+		}
422
+		delete(m.metrics, h)
423
+		numDeleted++
424
+	}
425
+
426
+	return numDeleted
427
+}
428
+
429
+// findMetricWithPartialLabel returns the index of the matching metric or
430
+// len(metrics) if not found.
431
+func findMetricWithPartialLabels(
432
+	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
433
+) int {
434
+	for i, metric := range metrics {
435
+		if matchPartialLabels(desc, metric.values, labels, curry) {
436
+			return i
437
+		}
438
+	}
439
+	return len(metrics)
440
+}
441
+
442
// indexOf reports the position of target within items along with a flag
// saying whether it was found; on a miss the index equals len(items).
func indexOf(target string, items []string) (int, bool) {
	for i := range items {
		if items[i] == target {
			return i, true
		}
	}
	return len(items), false
}
452
+
453
+// valueMatchesVariableOrCurriedValue determines if a value was previously curried,
454
+// and returns whether it matches either the "base" value or the curried value accordingly.
455
+// It also indicates whether the match is against a curried or uncurried value.
456
+func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
457
+	for _, curriedValue := range curry {
458
+		if curriedValue.index == index {
459
+			// This label was curried. See if the curried value matches our target.
460
+			return curriedValue.value == targetValue, true
461
+		}
462
+	}
463
+	// This label was not curried. See if the current value matches our target label.
464
+	return values[index] == targetValue, false
465
+}
466
+
467
+// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
468
+func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
469
+	for l, v := range labels {
470
+		// Check if the target label exists in our metrics and get the index.
471
+		varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
472
+		if validLabel {
473
+			// Check the value of that label against the target value.
474
+			// We don't consider curried values in partial matches.
475
+			matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
476
+			if matches && !curried {
477
+				continue
478
+			}
479
+		}
480
+		return false
481
+	}
482
+	return true
483
+}
484
+
485
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
486
+// or creates it and returns the new one.
487
+//
488
+// This function holds the mutex.
489
+func (m *metricMap) getOrCreateMetricWithLabelValues(
490
+	hash uint64, lvs []string, curry []curriedLabelValue,
491
+) Metric {
492
+	m.mtx.RLock()
493
+	metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
494
+	m.mtx.RUnlock()
495
+	if ok {
496
+		return metric
497
+	}
498
+
499
+	m.mtx.Lock()
500
+	defer m.mtx.Unlock()
501
+	metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
502
+	if !ok {
503
+		inlinedLVs := inlineLabelValues(lvs, curry)
504
+		metric = m.newMetric(inlinedLVs...)
505
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
506
+	}
507
+	return metric
508
+}
509
+
510
// getOrCreateMetricWithLabels retrieves the metric by hash and labels or
// creates it and returns the new one.
//
// This function holds the mutex.
func (m *metricMap) getOrCreateMetricWithLabels(
	hash uint64, labels Labels, curry []curriedLabelValue,
) Metric {
	// Optimistic fast path under the read lock.
	m.mtx.RLock()
	metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
	m.mtx.RUnlock()
	if ok {
		return metric
	}

	m.mtx.Lock()
	defer m.mtx.Unlock()
	// Re-check under the write lock: another goroutine may have created the
	// metric between the RUnlock above and the Lock here.
	metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
	if !ok {
		lvs := extractLabelValues(m.desc, labels, curry)
		metric = m.newMetric(lvs...)
		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
	}
	return metric
}
534
+
535
+// getMetricWithHashAndLabelValues gets a metric while handling possible
536
+// collisions in the hash space. Must be called while holding the read mutex.
537
+func (m *metricMap) getMetricWithHashAndLabelValues(
538
+	h uint64, lvs []string, curry []curriedLabelValue,
539
+) (Metric, bool) {
540
+	metrics, ok := m.metrics[h]
541
+	if ok {
542
+		if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
543
+			return metrics[i].metric, true
544
+		}
545
+	}
546
+	return nil, false
547
+}
548
+
549
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
550
+// the hash space. Must be called while holding read mutex.
551
+func (m *metricMap) getMetricWithHashAndLabels(
552
+	h uint64, labels Labels, curry []curriedLabelValue,
553
+) (Metric, bool) {
554
+	metrics, ok := m.metrics[h]
555
+	if ok {
556
+		if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
557
+			return metrics[i].metric, true
558
+		}
559
+	}
560
+	return nil, false
561
+}
562
+
563
+// findMetricWithLabelValues returns the index of the matching metric or
564
+// len(metrics) if not found.
565
+func findMetricWithLabelValues(
566
+	metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
567
+) int {
568
+	for i, metric := range metrics {
569
+		if matchLabelValues(metric.values, lvs, curry) {
570
+			return i
571
+		}
572
+	}
573
+	return len(metrics)
574
+}
575
+
576
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
577
+// if not found.
578
+func findMetricWithLabels(
579
+	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
580
+) int {
581
+	for i, metric := range metrics {
582
+		if matchLabels(desc, metric.values, labels, curry) {
583
+			return i
584
+		}
585
+	}
586
+	return len(metrics)
587
+}
588
+
589
+func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
590
+	if len(values) != len(lvs)+len(curry) {
591
+		return false
592
+	}
593
+	var iLVs, iCurry int
594
+	for i, v := range values {
595
+		if iCurry < len(curry) && curry[iCurry].index == i {
596
+			if v != curry[iCurry].value {
597
+				return false
598
+			}
599
+			iCurry++
600
+			continue
601
+		}
602
+		if v != lvs[iLVs] {
603
+			return false
604
+		}
605
+		iLVs++
606
+	}
607
+	return true
608
+}
609
+
610
+func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
611
+	if len(values) != len(labels)+len(curry) {
612
+		return false
613
+	}
614
+	iCurry := 0
615
+	for i, k := range desc.variableLabels.names {
616
+		if iCurry < len(curry) && curry[iCurry].index == i {
617
+			if values[i] != curry[iCurry].value {
618
+				return false
619
+			}
620
+			iCurry++
621
+			continue
622
+		}
623
+		if values[i] != labels[k] {
624
+			return false
625
+		}
626
+	}
627
+	return true
628
+}
629
+
630
+func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
631
+	labelValues := make([]string, len(labels)+len(curry))
632
+	iCurry := 0
633
+	for i, k := range desc.variableLabels.names {
634
+		if iCurry < len(curry) && curry[iCurry].index == i {
635
+			labelValues[i] = curry[iCurry].value
636
+			iCurry++
637
+			continue
638
+		}
639
+		labelValues[i] = labels[k]
640
+	}
641
+	return labelValues
642
+}
643
+
644
+func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
645
+	labelValues := make([]string, len(lvs)+len(curry))
646
+	var iCurry, iLVs int
647
+	for i := range labelValues {
648
+		if iCurry < len(curry) && curry[iCurry].index == i {
649
+			labelValues[i] = curry[iCurry].value
650
+			iCurry++
651
+			continue
652
+		}
653
+		labelValues[i] = lvs[iLVs]
654
+		iLVs++
655
+	}
656
+	return labelValues
657
+}
658
+
659
// labelsPool recycles Labels maps used as scratch space by constrainLabels,
// avoiding an allocation per call on the constrained path.
var labelsPool = &sync.Pool{
	New: func() interface{} {
		return make(Labels)
	},
}
664
+
665
+func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
666
+	if len(desc.variableLabels.labelConstraints) == 0 {
667
+		// Fast path when there's no constraints
668
+		return labels, func() {}
669
+	}
670
+
671
+	constrainedLabels := labelsPool.Get().(Labels)
672
+	for l, v := range labels {
673
+		constrainedLabels[l] = desc.variableLabels.constrain(l, v)
674
+	}
675
+
676
+	return constrainedLabels, func() {
677
+		for k := range constrainedLabels {
678
+			delete(constrainedLabels, k)
679
+		}
680
+		labelsPool.Put(constrainedLabels)
681
+	}
682
+}
683
+
684
// constrainLabelValues applies desc's label constraints to the given uncurried
// label values and returns the constrained copy. Curried values are skipped
// here (the loop walks the combined index space of len(lvs)+len(curry) and
// only advances iLVs on uncurried positions). When desc has no constraints,
// lvs is returned as-is.
func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
	if len(desc.variableLabels.labelConstraints) == 0 {
		// Fast path when there's no constraints
		return lvs
	}

	constrainedValues := make([]string, len(lvs))
	var iCurry, iLVs int
	for i := 0; i < len(lvs)+len(curry); i++ {
		if iCurry < len(curry) && curry[iCurry].index == i {
			// Position i is curried; nothing to copy or constrain here.
			iCurry++
			continue
		}

		if i < len(desc.variableLabels.names) {
			constrainedValues[iLVs] = desc.variableLabels.constrain(
				desc.variableLabels.names[i],
				lvs[iLVs],
			)
		} else {
			// Position beyond the known variable label names: pass the
			// value through unmodified.
			constrainedValues[iLVs] = lvs[iLVs]
		}
		iLVs++
	}
	return constrainedValues
}

+ 23
- 0
vendor/github.com/prometheus/client_golang/prometheus/vnext.go View File

@@ -0,0 +1,23 @@
1
+// Copyright 2022 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
// v2 is the private type behind the V2 accessor below; it carries no state.
type v2 struct{}

// V2 is a struct that can be referenced to access experimental API that might
// be present in v2 of client golang someday. It offers extended functionality
// of v1 with slightly changed API. It is acceptable to mix pieces from v1
// (e.g. `prometheus.NewGauge`) and from v2 (e.g. `prometheus.V2.NewDesc`) in
// the same codebase.
var V2 = v2{}

+ 214
- 0
vendor/github.com/prometheus/client_golang/prometheus/wrap.go View File

@@ -0,0 +1,214 @@
1
+// Copyright 2018 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package prometheus
15
+
16
+import (
17
+	"fmt"
18
+	"sort"
19
+
20
+	"github.com/prometheus/client_golang/prometheus/internal"
21
+
22
+	dto "github.com/prometheus/client_model/go"
23
+	"google.golang.org/protobuf/proto"
24
+)
25
+
26
+// WrapRegistererWith returns a Registerer wrapping the provided
27
+// Registerer. Collectors registered with the returned Registerer will be
28
+// registered with the wrapped Registerer in a modified way. The modified
29
+// Collector adds the provided Labels to all Metrics it collects (as
30
+// ConstLabels). The Metrics collected by the unmodified Collector must not
31
+// duplicate any of those labels. Wrapping a nil value is valid, resulting
32
+// in a no-op Registerer.
33
+//
34
+// WrapRegistererWith provides a way to add fixed labels to a subset of
35
+// Collectors. It should not be used to add fixed labels to all metrics
36
+// exposed. See also
37
+// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
38
+//
39
+// Conflicts between Collectors registered through the original Registerer with
40
+// Collectors registered through the wrapping Registerer will still be
41
+// detected. Any AlreadyRegisteredError returned by the Register method of
42
+// either Registerer will contain the ExistingCollector in the form it was
43
+// provided to the respective registry.
44
+//
45
+// The Collector example demonstrates a use of WrapRegistererWith.
46
+func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
47
+	return &wrappingRegisterer{
48
+		wrappedRegisterer: reg,
49
+		labels:            labels,
50
+	}
51
+}
52
+
53
+// WrapRegistererWithPrefix returns a Registerer wrapping the provided
54
+// Registerer. Collectors registered with the returned Registerer will be
55
+// registered with the wrapped Registerer in a modified way. The modified
56
+// Collector adds the provided prefix to the name of all Metrics it collects.
57
+// Wrapping a nil value is valid, resulting in a no-op Registerer.
58
+//
59
+// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
60
+// a sub-system. To make this work, register metrics of the sub-system with the
61
+// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
62
+// to use the same prefix for all metrics exposed. In particular, do not prefix
63
+// metric names that are standardized across applications, as that would break
64
+// horizontal monitoring, for example the metrics provided by the Go collector
65
+// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
66
+// fact, those metrics are already prefixed with “go_” or “process_”,
67
+// respectively.)
68
+//
69
+// Conflicts between Collectors registered through the original Registerer with
70
+// Collectors registered through the wrapping Registerer will still be
71
+// detected. Any AlreadyRegisteredError returned by the Register method of
72
+// either Registerer will contain the ExistingCollector in the form it was
73
+// provided to the respective registry.
74
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
75
+	return &wrappingRegisterer{
76
+		wrappedRegisterer: reg,
77
+		prefix:            prefix,
78
+	}
79
+}
80
+
81
// wrappingRegisterer decorates another Registerer, rewriting every registered
// Collector via wrappingCollector with the configured prefix and/or labels.
type wrappingRegisterer struct {
	wrappedRegisterer Registerer // may be nil, making all methods no-ops
	prefix            string     // prepended to metric names; empty means none
	labels            Labels     // added as ConstLabels; nil means none
}
86
+
87
+func (r *wrappingRegisterer) Register(c Collector) error {
88
+	if r.wrappedRegisterer == nil {
89
+		return nil
90
+	}
91
+	return r.wrappedRegisterer.Register(&wrappingCollector{
92
+		wrappedCollector: c,
93
+		prefix:           r.prefix,
94
+		labels:           r.labels,
95
+	})
96
+}
97
+
98
+func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
99
+	if r.wrappedRegisterer == nil {
100
+		return
101
+	}
102
+	for _, c := range cs {
103
+		if err := r.Register(c); err != nil {
104
+			panic(err)
105
+		}
106
+	}
107
+}
108
+
109
+func (r *wrappingRegisterer) Unregister(c Collector) bool {
110
+	if r.wrappedRegisterer == nil {
111
+		return false
112
+	}
113
+	return r.wrappedRegisterer.Unregister(&wrappingCollector{
114
+		wrappedCollector: c,
115
+		prefix:           r.prefix,
116
+		labels:           r.labels,
117
+	})
118
+}
119
+
120
// wrappingCollector decorates another Collector, wrapping every collected
// Metric and every described Desc with the configured prefix and labels.
type wrappingCollector struct {
	wrappedCollector Collector
	prefix           string
	labels           Labels
}
125
+
126
+func (c *wrappingCollector) Collect(ch chan<- Metric) {
127
+	wrappedCh := make(chan Metric)
128
+	go func() {
129
+		c.wrappedCollector.Collect(wrappedCh)
130
+		close(wrappedCh)
131
+	}()
132
+	for m := range wrappedCh {
133
+		ch <- &wrappingMetric{
134
+			wrappedMetric: m,
135
+			prefix:        c.prefix,
136
+			labels:        c.labels,
137
+		}
138
+	}
139
+}
140
+
141
+func (c *wrappingCollector) Describe(ch chan<- *Desc) {
142
+	wrappedCh := make(chan *Desc)
143
+	go func() {
144
+		c.wrappedCollector.Describe(wrappedCh)
145
+		close(wrappedCh)
146
+	}()
147
+	for desc := range wrappedCh {
148
+		ch <- wrapDesc(desc, c.prefix, c.labels)
149
+	}
150
+}
151
+
152
+func (c *wrappingCollector) unwrapRecursively() Collector {
153
+	switch wc := c.wrappedCollector.(type) {
154
+	case *wrappingCollector:
155
+		return wc.unwrapRecursively()
156
+	default:
157
+		return wc
158
+	}
159
+}
160
+
161
// wrappingMetric decorates another Metric, adding the configured labels to
// its Write output and applying the prefix/labels to its Desc.
type wrappingMetric struct {
	wrappedMetric Metric
	prefix        string
	labels        Labels
}
166
+
167
+func (m *wrappingMetric) Desc() *Desc {
168
+	return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
169
+}
170
+
171
+func (m *wrappingMetric) Write(out *dto.Metric) error {
172
+	if err := m.wrappedMetric.Write(out); err != nil {
173
+		return err
174
+	}
175
+	if len(m.labels) == 0 {
176
+		// No wrapping labels.
177
+		return nil
178
+	}
179
+	for ln, lv := range m.labels {
180
+		out.Label = append(out.Label, &dto.LabelPair{
181
+			Name:  proto.String(ln),
182
+			Value: proto.String(lv),
183
+		})
184
+	}
185
+	sort.Sort(internal.LabelPairSorter(out.Label))
186
+	return nil
187
+}
188
+
189
// wrapDesc returns a copy of desc with prefix prepended to its name and
// labels merged into its ConstLabels. If a wrapping label name collides with
// an existing ConstLabel, a Desc carrying an error is returned instead.
func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
	constLabels := Labels{}
	for _, lp := range desc.constLabelPairs {
		constLabels[*lp.Name] = *lp.Value
	}
	for ln, lv := range labels {
		if _, alreadyUsed := constLabels[ln]; alreadyUsed {
			return &Desc{
				fqName:          desc.fqName,
				help:            desc.help,
				variableLabels:  desc.variableLabels,
				constLabelPairs: desc.constLabelPairs,
				err:             fmt.Errorf("attempted wrapping with already existing label name %q", ln),
			}
		}
		constLabels[ln] = lv
	}
	// NewDesc will do remaining validations.
	newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
	// Propagate errors if there was any. This will override any error
	// created by NewDesc above, i.e. earlier errors get precedence.
	if desc.err != nil {
		newDesc.err = desc.err
	}
	return newDesc
}

+ 201
- 0
vendor/github.com/prometheus/client_model/LICENSE View File

@@ -0,0 +1,201 @@
1
+                                 Apache License
2
+                           Version 2.0, January 2004
3
+                        http://www.apache.org/licenses/
4
+
5
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+   1. Definitions.
8
+
9
+      "License" shall mean the terms and conditions for use, reproduction,
10
+      and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+      "Licensor" shall mean the copyright owner or entity authorized by
13
+      the copyright owner that is granting the License.
14
+
15
+      "Legal Entity" shall mean the union of the acting entity and all
16
+      other entities that control, are controlled by, or are under common
17
+      control with that entity. For the purposes of this definition,
18
+      "control" means (i) the power, direct or indirect, to cause the
19
+      direction or management of such entity, whether by contract or
20
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+      outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+      "You" (or "Your") shall mean an individual or Legal Entity
24
+      exercising permissions granted by this License.
25
+
26
+      "Source" form shall mean the preferred form for making modifications,
27
+      including but not limited to software source code, documentation
28
+      source, and configuration files.
29
+
30
+      "Object" form shall mean any form resulting from mechanical
31
+      transformation or translation of a Source form, including but
32
+      not limited to compiled object code, generated documentation,
33
+      and conversions to other media types.
34
+
35
+      "Work" shall mean the work of authorship, whether in Source or
36
+      Object form, made available under the License, as indicated by a
37
+      copyright notice that is included in or attached to the work
38
+      (an example is provided in the Appendix below).
39
+
40
+      "Derivative Works" shall mean any work, whether in Source or Object
41
+      form, that is based on (or derived from) the Work and for which the
42
+      editorial revisions, annotations, elaborations, or other modifications
43
+      represent, as a whole, an original work of authorship. For the purposes
44
+      of this License, Derivative Works shall not include works that remain
45
+      separable from, or merely link (or bind by name) to the interfaces of,
46
+      the Work and Derivative Works thereof.
47
+
48
+      "Contribution" shall mean any work of authorship, including
49
+      the original version of the Work and any modifications or additions
50
+      to that Work or Derivative Works thereof, that is intentionally
51
+      submitted to Licensor for inclusion in the Work by the copyright owner
52
+      or by an individual or Legal Entity authorized to submit on behalf of
53
+      the copyright owner. For the purposes of this definition, "submitted"
54
+      means any form of electronic, verbal, or written communication sent
55
+      to the Licensor or its representatives, including but not limited to
56
+      communication on electronic mailing lists, source code control systems,
57
+      and issue tracking systems that are managed by, or on behalf of, the
58
+      Licensor for the purpose of discussing and improving the Work, but
59
+      excluding communication that is conspicuously marked or otherwise
60
+      designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+      "Contributor" shall mean Licensor and any individual or Legal Entity
63
+      on behalf of whom a Contribution has been received by Licensor and
64
+      subsequently incorporated within the Work.
65
+
66
+   2. Grant of Copyright License. Subject to the terms and conditions of
67
+      this License, each Contributor hereby grants to You a perpetual,
68
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+      copyright license to reproduce, prepare Derivative Works of,
70
+      publicly display, publicly perform, sublicense, and distribute the
71
+      Work and such Derivative Works in Source or Object form.
72
+
73
+   3. Grant of Patent License. Subject to the terms and conditions of
74
+      this License, each Contributor hereby grants to You a perpetual,
75
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+      (except as stated in this section) patent license to make, have made,
77
+      use, offer to sell, sell, import, and otherwise transfer the Work,
78
+      where such license applies only to those patent claims licensable
79
+      by such Contributor that are necessarily infringed by their
80
+      Contribution(s) alone or by combination of their Contribution(s)
81
+      with the Work to which such Contribution(s) was submitted. If You
82
+      institute patent litigation against any entity (including a
83
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+      or a Contribution incorporated within the Work constitutes direct
85
+      or contributory patent infringement, then any patent licenses
86
+      granted to You under this License for that Work shall terminate
87
+      as of the date such litigation is filed.
88
+
89
+   4. Redistribution. You may reproduce and distribute copies of the
90
+      Work or Derivative Works thereof in any medium, with or without
91
+      modifications, and in Source or Object form, provided that You
92
+      meet the following conditions:
93
+
94
+      (a) You must give any other recipients of the Work or
95
+          Derivative Works a copy of this License; and
96
+
97
+      (b) You must cause any modified files to carry prominent notices
98
+          stating that You changed the files; and
99
+
100
+      (c) You must retain, in the Source form of any Derivative Works
101
+          that You distribute, all copyright, patent, trademark, and
102
+          attribution notices from the Source form of the Work,
103
+          excluding those notices that do not pertain to any part of
104
+          the Derivative Works; and
105
+
106
+      (d) If the Work includes a "NOTICE" text file as part of its
107
+          distribution, then any Derivative Works that You distribute must
108
+          include a readable copy of the attribution notices contained
109
+          within such NOTICE file, excluding those notices that do not
110
+          pertain to any part of the Derivative Works, in at least one
111
+          of the following places: within a NOTICE text file distributed
112
+          as part of the Derivative Works; within the Source form or
113
+          documentation, if provided along with the Derivative Works; or,
114
+          within a display generated by the Derivative Works, if and
115
+          wherever such third-party notices normally appear. The contents
116
+          of the NOTICE file are for informational purposes only and
117
+          do not modify the License. You may add Your own attribution
118
+          notices within Derivative Works that You distribute, alongside
119
+          or as an addendum to the NOTICE text from the Work, provided
120
+          that such additional attribution notices cannot be construed
121
+          as modifying the License.
122
+
123
+      You may add Your own copyright statement to Your modifications and
124
+      may provide additional or different license terms and conditions
125
+      for use, reproduction, or distribution of Your modifications, or
126
+      for any such Derivative Works as a whole, provided Your use,
127
+      reproduction, and distribution of the Work otherwise complies with
128
+      the conditions stated in this License.
129
+
130
+   5. Submission of Contributions. Unless You explicitly state otherwise,
131
+      any Contribution intentionally submitted for inclusion in the Work
132
+      by You to the Licensor shall be under the terms and conditions of
133
+      this License, without any additional terms or conditions.
134
+      Notwithstanding the above, nothing herein shall supersede or modify
135
+      the terms of any separate license agreement you may have executed
136
+      with Licensor regarding such Contributions.
137
+
138
+   6. Trademarks. This License does not grant permission to use the trade
139
+      names, trademarks, service marks, or product names of the Licensor,
140
+      except as required for reasonable and customary use in describing the
141
+      origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+   7. Disclaimer of Warranty. Unless required by applicable law or
144
+      agreed to in writing, Licensor provides the Work (and each
145
+      Contributor provides its Contributions) on an "AS IS" BASIS,
146
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+      implied, including, without limitation, any warranties or conditions
148
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+      PARTICULAR PURPOSE. You are solely responsible for determining the
150
+      appropriateness of using or redistributing the Work and assume any
151
+      risks associated with Your exercise of permissions under this License.
152
+
153
+   8. Limitation of Liability. In no event and under no legal theory,
154
+      whether in tort (including negligence), contract, or otherwise,
155
+      unless required by applicable law (such as deliberate and grossly
156
+      negligent acts) or agreed to in writing, shall any Contributor be
157
+      liable to You for damages, including any direct, indirect, special,
158
+      incidental, or consequential damages of any character arising as a
159
+      result of this License or out of the use or inability to use the
160
+      Work (including but not limited to damages for loss of goodwill,
161
+      work stoppage, computer failure or malfunction, or any and all
162
+      other commercial damages or losses), even if such Contributor
163
+      has been advised of the possibility of such damages.
164
+
165
+   9. Accepting Warranty or Additional Liability. While redistributing
166
+      the Work or Derivative Works thereof, You may choose to offer,
167
+      and charge a fee for, acceptance of support, warranty, indemnity,
168
+      or other liability obligations and/or rights consistent with this
169
+      License. However, in accepting such obligations, You may act only
170
+      on Your own behalf and on Your sole responsibility, not on behalf
171
+      of any other Contributor, and only if You agree to indemnify,
172
+      defend, and hold each Contributor harmless for any liability
173
+      incurred by, or claims asserted against, such Contributor by reason
174
+      of your accepting any such warranty or additional liability.
175
+
176
+   END OF TERMS AND CONDITIONS
177
+
178
+   APPENDIX: How to apply the Apache License to your work.
179
+
180
+      To apply the Apache License to your work, attach the following
181
+      boilerplate notice, with the fields enclosed by brackets "[]"
182
+      replaced with your own identifying information. (Don't include
183
+      the brackets!)  The text should be enclosed in the appropriate
184
+      comment syntax for the file format. We also recommend that a
185
+      file or class name and description of purpose be included on the
186
+      same "printed page" as the copyright notice for easier
187
+      identification within third-party archives.
188
+
189
+   Copyright [yyyy] [name of copyright owner]
190
+
191
+   Licensed under the Apache License, Version 2.0 (the "License");
192
+   you may not use this file except in compliance with the License.
193
+   You may obtain a copy of the License at
194
+
195
+       http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+   Unless required by applicable law or agreed to in writing, software
198
+   distributed under the License is distributed on an "AS IS" BASIS,
199
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+   See the License for the specific language governing permissions and
201
+   limitations under the License.

+ 5
- 0
vendor/github.com/prometheus/client_model/NOTICE View File

@@ -0,0 +1,5 @@
1
+Data model artifacts for Prometheus.
2
+Copyright 2012-2015 The Prometheus Authors
3
+
4
+This product includes software developed at
5
+SoundCloud Ltd. (http://soundcloud.com/).

+ 1373
- 0
vendor/github.com/prometheus/client_model/go/metrics.pb.go
File diff suppressed because it is too large
View File


+ 201
- 0
vendor/github.com/prometheus/common/LICENSE View File

@@ -0,0 +1,201 @@
1
+                                 Apache License
2
+                           Version 2.0, January 2004
3
+                        http://www.apache.org/licenses/
4
+
5
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+   1. Definitions.
8
+
9
+      "License" shall mean the terms and conditions for use, reproduction,
10
+      and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+      "Licensor" shall mean the copyright owner or entity authorized by
13
+      the copyright owner that is granting the License.
14
+
15
+      "Legal Entity" shall mean the union of the acting entity and all
16
+      other entities that control, are controlled by, or are under common
17
+      control with that entity. For the purposes of this definition,
18
+      "control" means (i) the power, direct or indirect, to cause the
19
+      direction or management of such entity, whether by contract or
20
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+      outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+      "You" (or "Your") shall mean an individual or Legal Entity
24
+      exercising permissions granted by this License.
25
+
26
+      "Source" form shall mean the preferred form for making modifications,
27
+      including but not limited to software source code, documentation
28
+      source, and configuration files.
29
+
30
+      "Object" form shall mean any form resulting from mechanical
31
+      transformation or translation of a Source form, including but
32
+      not limited to compiled object code, generated documentation,
33
+      and conversions to other media types.
34
+
35
+      "Work" shall mean the work of authorship, whether in Source or
36
+      Object form, made available under the License, as indicated by a
37
+      copyright notice that is included in or attached to the work
38
+      (an example is provided in the Appendix below).
39
+
40
+      "Derivative Works" shall mean any work, whether in Source or Object
41
+      form, that is based on (or derived from) the Work and for which the
42
+      editorial revisions, annotations, elaborations, or other modifications
43
+      represent, as a whole, an original work of authorship. For the purposes
44
+      of this License, Derivative Works shall not include works that remain
45
+      separable from, or merely link (or bind by name) to the interfaces of,
46
+      the Work and Derivative Works thereof.
47
+
48
+      "Contribution" shall mean any work of authorship, including
49
+      the original version of the Work and any modifications or additions
50
+      to that Work or Derivative Works thereof, that is intentionally
51
+      submitted to Licensor for inclusion in the Work by the copyright owner
52
+      or by an individual or Legal Entity authorized to submit on behalf of
53
+      the copyright owner. For the purposes of this definition, "submitted"
54
+      means any form of electronic, verbal, or written communication sent
55
+      to the Licensor or its representatives, including but not limited to
56
+      communication on electronic mailing lists, source code control systems,
57
+      and issue tracking systems that are managed by, or on behalf of, the
58
+      Licensor for the purpose of discussing and improving the Work, but
59
+      excluding communication that is conspicuously marked or otherwise
60
+      designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+      "Contributor" shall mean Licensor and any individual or Legal Entity
63
+      on behalf of whom a Contribution has been received by Licensor and
64
+      subsequently incorporated within the Work.
65
+
66
+   2. Grant of Copyright License. Subject to the terms and conditions of
67
+      this License, each Contributor hereby grants to You a perpetual,
68
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+      copyright license to reproduce, prepare Derivative Works of,
70
+      publicly display, publicly perform, sublicense, and distribute the
71
+      Work and such Derivative Works in Source or Object form.
72
+
73
+   3. Grant of Patent License. Subject to the terms and conditions of
74
+      this License, each Contributor hereby grants to You a perpetual,
75
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+      (except as stated in this section) patent license to make, have made,
77
+      use, offer to sell, sell, import, and otherwise transfer the Work,
78
+      where such license applies only to those patent claims licensable
79
+      by such Contributor that are necessarily infringed by their
80
+      Contribution(s) alone or by combination of their Contribution(s)
81
+      with the Work to which such Contribution(s) was submitted. If You
82
+      institute patent litigation against any entity (including a
83
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+      or a Contribution incorporated within the Work constitutes direct
85
+      or contributory patent infringement, then any patent licenses
86
+      granted to You under this License for that Work shall terminate
87
+      as of the date such litigation is filed.
88
+
89
+   4. Redistribution. You may reproduce and distribute copies of the
90
+      Work or Derivative Works thereof in any medium, with or without
91
+      modifications, and in Source or Object form, provided that You
92
+      meet the following conditions:
93
+
94
+      (a) You must give any other recipients of the Work or
95
+          Derivative Works a copy of this License; and
96
+
97
+      (b) You must cause any modified files to carry prominent notices
98
+          stating that You changed the files; and
99
+
100
+      (c) You must retain, in the Source form of any Derivative Works
101
+          that You distribute, all copyright, patent, trademark, and
102
+          attribution notices from the Source form of the Work,
103
+          excluding those notices that do not pertain to any part of
104
+          the Derivative Works; and
105
+
106
+      (d) If the Work includes a "NOTICE" text file as part of its
107
+          distribution, then any Derivative Works that You distribute must
108
+          include a readable copy of the attribution notices contained
109
+          within such NOTICE file, excluding those notices that do not
110
+          pertain to any part of the Derivative Works, in at least one
111
+          of the following places: within a NOTICE text file distributed
112
+          as part of the Derivative Works; within the Source form or
113
+          documentation, if provided along with the Derivative Works; or,
114
+          within a display generated by the Derivative Works, if and
115
+          wherever such third-party notices normally appear. The contents
116
+          of the NOTICE file are for informational purposes only and
117
+          do not modify the License. You may add Your own attribution
118
+          notices within Derivative Works that You distribute, alongside
119
+          or as an addendum to the NOTICE text from the Work, provided
120
+          that such additional attribution notices cannot be construed
121
+          as modifying the License.
122
+
123
+      You may add Your own copyright statement to Your modifications and
124
+      may provide additional or different license terms and conditions
125
+      for use, reproduction, or distribution of Your modifications, or
126
+      for any such Derivative Works as a whole, provided Your use,
127
+      reproduction, and distribution of the Work otherwise complies with
128
+      the conditions stated in this License.
129
+
130
+   5. Submission of Contributions. Unless You explicitly state otherwise,
131
+      any Contribution intentionally submitted for inclusion in the Work
132
+      by You to the Licensor shall be under the terms and conditions of
133
+      this License, without any additional terms or conditions.
134
+      Notwithstanding the above, nothing herein shall supersede or modify
135
+      the terms of any separate license agreement you may have executed
136
+      with Licensor regarding such Contributions.
137
+
138
+   6. Trademarks. This License does not grant permission to use the trade
139
+      names, trademarks, service marks, or product names of the Licensor,
140
+      except as required for reasonable and customary use in describing the
141
+      origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+   7. Disclaimer of Warranty. Unless required by applicable law or
144
+      agreed to in writing, Licensor provides the Work (and each
145
+      Contributor provides its Contributions) on an "AS IS" BASIS,
146
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+      implied, including, without limitation, any warranties or conditions
148
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+      PARTICULAR PURPOSE. You are solely responsible for determining the
150
+      appropriateness of using or redistributing the Work and assume any
151
+      risks associated with Your exercise of permissions under this License.
152
+
153
+   8. Limitation of Liability. In no event and under no legal theory,
154
+      whether in tort (including negligence), contract, or otherwise,
155
+      unless required by applicable law (such as deliberate and grossly
156
+      negligent acts) or agreed to in writing, shall any Contributor be
157
+      liable to You for damages, including any direct, indirect, special,
158
+      incidental, or consequential damages of any character arising as a
159
+      result of this License or out of the use or inability to use the
160
+      Work (including but not limited to damages for loss of goodwill,
161
+      work stoppage, computer failure or malfunction, or any and all
162
+      other commercial damages or losses), even if such Contributor
163
+      has been advised of the possibility of such damages.
164
+
165
+   9. Accepting Warranty or Additional Liability. While redistributing
166
+      the Work or Derivative Works thereof, You may choose to offer,
167
+      and charge a fee for, acceptance of support, warranty, indemnity,
168
+      or other liability obligations and/or rights consistent with this
169
+      License. However, in accepting such obligations, You may act only
170
+      on Your own behalf and on Your sole responsibility, not on behalf
171
+      of any other Contributor, and only if You agree to indemnify,
172
+      defend, and hold each Contributor harmless for any liability
173
+      incurred by, or claims asserted against, such Contributor by reason
174
+      of your accepting any such warranty or additional liability.
175
+
176
+   END OF TERMS AND CONDITIONS
177
+
178
+   APPENDIX: How to apply the Apache License to your work.
179
+
180
+      To apply the Apache License to your work, attach the following
181
+      boilerplate notice, with the fields enclosed by brackets "[]"
182
+      replaced with your own identifying information. (Don't include
183
+      the brackets!)  The text should be enclosed in the appropriate
184
+      comment syntax for the file format. We also recommend that a
185
+      file or class name and description of purpose be included on the
186
+      same "printed page" as the copyright notice for easier
187
+      identification within third-party archives.
188
+
189
+   Copyright [yyyy] [name of copyright owner]
190
+
191
+   Licensed under the Apache License, Version 2.0 (the "License");
192
+   you may not use this file except in compliance with the License.
193
+   You may obtain a copy of the License at
194
+
195
+       http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+   Unless required by applicable law or agreed to in writing, software
198
+   distributed under the License is distributed on an "AS IS" BASIS,
199
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+   See the License for the specific language governing permissions and
201
+   limitations under the License.

+ 5
- 0
vendor/github.com/prometheus/common/NOTICE View File

@@ -0,0 +1,5 @@
1
+Common libraries shared by Prometheus Go components.
2
+Copyright 2015 The Prometheus Authors
3
+
4
+This product includes software developed at
5
+SoundCloud Ltd. (http://soundcloud.com/).

+ 428
- 0
vendor/github.com/prometheus/common/expfmt/decode.go View File

@@ -0,0 +1,428 @@
1
+// Copyright 2015 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package expfmt
15
+
16
+import (
17
+	"fmt"
18
+	"io"
19
+	"math"
20
+	"mime"
21
+	"net/http"
22
+
23
+	dto "github.com/prometheus/client_model/go"
24
+
25
+	"github.com/matttproud/golang_protobuf_extensions/pbutil"
26
+	"github.com/prometheus/common/model"
27
+)
28
+
29
// Decoder types decode an input stream into metric families.
type Decoder interface {
	// Decode reads the next metric family from the underlying stream into v.
	Decode(*dto.MetricFamily) error
}
33
+
34
// DecodeOptions contains options used by the Decoder and in sample extraction.
type DecodeOptions struct {
	// Timestamp is added to each value from the stream that has no explicit timestamp set.
	Timestamp model.Time
}
39
+
40
+// ResponseFormat extracts the correct format from a HTTP response header.
41
+// If no matching format can be found FormatUnknown is returned.
42
+func ResponseFormat(h http.Header) Format {
43
+	ct := h.Get(hdrContentType)
44
+
45
+	mediatype, params, err := mime.ParseMediaType(ct)
46
+	if err != nil {
47
+		return FmtUnknown
48
+	}
49
+
50
+	const textType = "text/plain"
51
+
52
+	switch mediatype {
53
+	case ProtoType:
54
+		if p, ok := params["proto"]; ok && p != ProtoProtocol {
55
+			return FmtUnknown
56
+		}
57
+		if e, ok := params["encoding"]; ok && e != "delimited" {
58
+			return FmtUnknown
59
+		}
60
+		return FmtProtoDelim
61
+
62
+	case textType:
63
+		if v, ok := params["version"]; ok && v != TextVersion {
64
+			return FmtUnknown
65
+		}
66
+		return FmtText
67
+	}
68
+
69
+	return FmtUnknown
70
+}
71
+
72
+// NewDecoder returns a new decoder based on the given input format.
73
+// If the input format does not imply otherwise, a text format decoder is returned.
74
+func NewDecoder(r io.Reader, format Format) Decoder {
75
+	switch format {
76
+	case FmtProtoDelim:
77
+		return &protoDecoder{r: r}
78
+	}
79
+	return &textDecoder{r: r}
80
+}
81
+
82
// protoDecoder implements the Decoder interface for protocol buffers.
type protoDecoder struct {
	// r is the stream of length-delimited MetricFamily protobuf messages.
	r io.Reader
}
86
+
87
+// Decode implements the Decoder interface.
88
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
89
+	_, err := pbutil.ReadDelimited(d.r, v)
90
+	if err != nil {
91
+		return err
92
+	}
93
+	if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
94
+		return fmt.Errorf("invalid metric name %q", v.GetName())
95
+	}
96
+	for _, m := range v.GetMetric() {
97
+		if m == nil {
98
+			continue
99
+		}
100
+		for _, l := range m.GetLabel() {
101
+			if l == nil {
102
+				continue
103
+			}
104
+			if !model.LabelValue(l.GetValue()).IsValid() {
105
+				return fmt.Errorf("invalid label value %q", l.GetValue())
106
+			}
107
+			if !model.LabelName(l.GetName()).IsValid() {
108
+				return fmt.Errorf("invalid label name %q", l.GetName())
109
+			}
110
+		}
111
+	}
112
+	return nil
113
+}
114
+
115
// textDecoder implements the Decoder interface for the text protocol.
type textDecoder struct {
	// r is the text-format input stream.
	r    io.Reader
	// fams holds the metric families parsed from r, handed out one per
	// Decode call and deleted as they are consumed.
	fams map[string]*dto.MetricFamily
	// err caches the parse error (or io.EOF once parsing succeeded) so the
	// input is only read once.
	err  error
}
121
+
122
// Decode implements the Decoder interface. The first call parses the entire
// input; subsequent calls return one stored MetricFamily each until the map
// is exhausted, after which the cached error (io.EOF on success) is returned.
func (d *textDecoder) Decode(v *dto.MetricFamily) error {
	if d.err == nil {
		// Read all metrics in one shot.
		var p TextParser
		d.fams, d.err = p.TextToMetricFamilies(d.r)
		// If we don't get an error, store io.EOF for the end.
		if d.err == nil {
			d.err = io.EOF
		}
	}
	// Pick off one MetricFamily per Decode until there's nothing left.
	// The range loop returns after the first (arbitrary) entry; which family
	// comes out first is unspecified because map iteration order is random.
	for key, fam := range d.fams {
		v.Name = fam.Name
		v.Help = fam.Help
		v.Type = fam.Type
		v.Metric = fam.Metric
		delete(d.fams, key)
		return nil
	}
	return d.err
}
144
+
145
// SampleDecoder wraps a Decoder to extract samples from the metric families
// decoded by the wrapped Decoder.
type SampleDecoder struct {
	// Dec is the wrapped decoder producing metric families.
	Dec  Decoder
	// Opts are passed to sample extraction (e.g. the default timestamp).
	Opts *DecodeOptions

	// f is scratch space reused across Decode calls to avoid reallocating
	// a MetricFamily per call.
	f dto.MetricFamily
}
153
+
154
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
155
+// samples from the decoded MetricFamily into the provided model.Vector.
156
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
157
+	err := sd.Dec.Decode(&sd.f)
158
+	if err != nil {
159
+		return err
160
+	}
161
+	*s, err = extractSamples(&sd.f, sd.Opts)
162
+	return err
163
+}
164
+
165
+// ExtractSamples builds a slice of samples from the provided metric
166
+// families. If an error occurs during sample extraction, it continues to
167
+// extract from the remaining metric families. The returned error is the last
168
+// error that has occurred.
169
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
170
+	var (
171
+		all     model.Vector
172
+		lastErr error
173
+	)
174
+	for _, f := range fams {
175
+		some, err := extractSamples(f, o)
176
+		if err != nil {
177
+			lastErr = err
178
+			continue
179
+		}
180
+		all = append(all, some...)
181
+	}
182
+	return all, lastErr
183
+}
184
+
185
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
186
+	switch f.GetType() {
187
+	case dto.MetricType_COUNTER:
188
+		return extractCounter(o, f), nil
189
+	case dto.MetricType_GAUGE:
190
+		return extractGauge(o, f), nil
191
+	case dto.MetricType_SUMMARY:
192
+		return extractSummary(o, f), nil
193
+	case dto.MetricType_UNTYPED:
194
+		return extractUntyped(o, f), nil
195
+	case dto.MetricType_HISTOGRAM:
196
+		return extractHistogram(o, f), nil
197
+	}
198
+	return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
199
+}
200
+
201
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
202
+	samples := make(model.Vector, 0, len(f.Metric))
203
+
204
+	for _, m := range f.Metric {
205
+		if m.Counter == nil {
206
+			continue
207
+		}
208
+
209
+		lset := make(model.LabelSet, len(m.Label)+1)
210
+		for _, p := range m.Label {
211
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
212
+		}
213
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
214
+
215
+		smpl := &model.Sample{
216
+			Metric: model.Metric(lset),
217
+			Value:  model.SampleValue(m.Counter.GetValue()),
218
+		}
219
+
220
+		if m.TimestampMs != nil {
221
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
222
+		} else {
223
+			smpl.Timestamp = o.Timestamp
224
+		}
225
+
226
+		samples = append(samples, smpl)
227
+	}
228
+
229
+	return samples
230
+}
231
+
232
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
233
+	samples := make(model.Vector, 0, len(f.Metric))
234
+
235
+	for _, m := range f.Metric {
236
+		if m.Gauge == nil {
237
+			continue
238
+		}
239
+
240
+		lset := make(model.LabelSet, len(m.Label)+1)
241
+		for _, p := range m.Label {
242
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
243
+		}
244
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
245
+
246
+		smpl := &model.Sample{
247
+			Metric: model.Metric(lset),
248
+			Value:  model.SampleValue(m.Gauge.GetValue()),
249
+		}
250
+
251
+		if m.TimestampMs != nil {
252
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
253
+		} else {
254
+			smpl.Timestamp = o.Timestamp
255
+		}
256
+
257
+		samples = append(samples, smpl)
258
+	}
259
+
260
+	return samples
261
+}
262
+
263
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
264
+	samples := make(model.Vector, 0, len(f.Metric))
265
+
266
+	for _, m := range f.Metric {
267
+		if m.Untyped == nil {
268
+			continue
269
+		}
270
+
271
+		lset := make(model.LabelSet, len(m.Label)+1)
272
+		for _, p := range m.Label {
273
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
274
+		}
275
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
276
+
277
+		smpl := &model.Sample{
278
+			Metric: model.Metric(lset),
279
+			Value:  model.SampleValue(m.Untyped.GetValue()),
280
+		}
281
+
282
+		if m.TimestampMs != nil {
283
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
284
+		} else {
285
+			smpl.Timestamp = o.Timestamp
286
+		}
287
+
288
+		samples = append(samples, smpl)
289
+	}
290
+
291
+	return samples
292
+}
293
+
294
// extractSummary converts every summary metric in f into samples: one sample
// per quantile (labeled with the quantile label), plus a "<name>_sum" and a
// "<name>_count" sample per metric.
func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
	samples := make(model.Vector, 0, len(f.Metric))

	for _, m := range f.Metric {
		if m.Summary == nil {
			continue
		}

		// Metric-level timestamp overrides the option default for every
		// sample derived from this metric.
		timestamp := o.Timestamp
		if m.TimestampMs != nil {
			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
		}

		// One sample per configured quantile.
		for _, q := range m.Summary.Quantile {
			lset := make(model.LabelSet, len(m.Label)+2)
			for _, p := range m.Label {
				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
			}
			// BUG(matt): Update other names to "quantile".
			lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
			lset[model.MetricNameLabel] = model.LabelValue(f.GetName())

			samples = append(samples, &model.Sample{
				Metric:    model.Metric(lset),
				Value:     model.SampleValue(q.GetValue()),
				Timestamp: timestamp,
			})
		}

		// "<name>_sum" sample carrying the sum of all observations.
		lset := make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")

		samples = append(samples, &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Summary.GetSampleSum()),
			Timestamp: timestamp,
		})

		// "<name>_count" sample carrying the observation count.
		lset = make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")

		samples = append(samples, &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Summary.GetSampleCount()),
			Timestamp: timestamp,
		})
	}

	return samples
}
350
+
351
// extractHistogram converts every histogram metric in f into samples: one
// "<name>_bucket" sample per bucket (labeled with its upper bound), plus
// "<name>_sum" and "<name>_count" samples. If no +Inf bucket was present, a
// synthetic one is appended whose value equals the total count.
func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
	samples := make(model.Vector, 0, len(f.Metric))

	for _, m := range f.Metric {
		if m.Histogram == nil {
			continue
		}

		// Metric-level timestamp overrides the option default for every
		// sample derived from this metric.
		timestamp := o.Timestamp
		if m.TimestampMs != nil {
			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
		}

		// Tracks whether the metric already exposes a +Inf bucket.
		infSeen := false

		for _, q := range m.Histogram.Bucket {
			lset := make(model.LabelSet, len(m.Label)+2)
			for _, p := range m.Label {
				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
			}
			lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")

			if math.IsInf(q.GetUpperBound(), +1) {
				infSeen = true
			}

			samples = append(samples, &model.Sample{
				Metric:    model.Metric(lset),
				Value:     model.SampleValue(q.GetCumulativeCount()),
				Timestamp: timestamp,
			})
		}

		// "<name>_sum" sample carrying the sum of all observations.
		lset := make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")

		samples = append(samples, &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Histogram.GetSampleSum()),
			Timestamp: timestamp,
		})

		// "<name>_count" sample; kept in a variable because its value is
		// reused for the synthetic +Inf bucket below.
		lset = make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")

		count := &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Histogram.GetSampleCount()),
			Timestamp: timestamp,
		}
		samples = append(samples, count)

		if !infSeen {
			// Append an infinity bucket sample.
			lset := make(model.LabelSet, len(m.Label)+2)
			for _, p := range m.Label {
				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
			}
			lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")

			samples = append(samples, &model.Sample{
				Metric:    model.Metric(lset),
				Value:     count.Value,
				Timestamp: timestamp,
			})
		}
	}

	return samples
}

+ 165
- 0
vendor/github.com/prometheus/common/expfmt/encode.go View File

@@ -0,0 +1,165 @@
1
+// Copyright 2015 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package expfmt
15
+
16
+import (
17
+	"fmt"
18
+	"io"
19
+	"net/http"
20
+
21
+	"github.com/matttproud/golang_protobuf_extensions/pbutil"
22
+	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
23
+	"google.golang.org/protobuf/encoding/prototext"
24
+
25
+	dto "github.com/prometheus/client_model/go"
26
+)
27
+
28
// Encoder types encode metric families into an underlying wire protocol.
type Encoder interface {
	// Encode writes one MetricFamily to the underlying writer.
	Encode(*dto.MetricFamily) error
}
32
+
33
// Closer is implemented by Encoders that need to be closed to finalize
// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
//
// Note that all Encoder implementations returned from this package implement
// Closer, too, even if the Close call is a no-op. This happens in preparation
// for adding a Close method to the Encoder interface directly in a (mildly
// breaking) release in the future.
type Closer interface {
	Close() error
}
43
+
44
// encoderCloser bundles an encode and a close function so every encoder
// returned by NewEncoder satisfies both Encoder and Closer.
type encoderCloser struct {
	encode func(*dto.MetricFamily) error
	close  func() error
}
48
+
49
// Encode implements the Encoder interface by delegating to the wrapped
// encode function.
func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
	return ec.encode(v)
}
52
+
53
// Close implements the Closer interface by delegating to the wrapped
// close function.
func (ec encoderCloser) Close() error {
	return ec.close()
}
56
+
57
+// Negotiate returns the Content-Type based on the given Accept header. If no
58
+// appropriate accepted type is found, FmtText is returned (which is the
59
+// Prometheus text format). This function will never negotiate FmtOpenMetrics,
60
+// as the support is still experimental. To include the option to negotiate
61
+// FmtOpenMetrics, use NegotiateOpenMetrics.
62
+func Negotiate(h http.Header) Format {
63
+	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
64
+		ver := ac.Params["version"]
65
+		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
66
+			switch ac.Params["encoding"] {
67
+			case "delimited":
68
+				return FmtProtoDelim
69
+			case "text":
70
+				return FmtProtoText
71
+			case "compact-text":
72
+				return FmtProtoCompact
73
+			}
74
+		}
75
+		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
76
+			return FmtText
77
+		}
78
+	}
79
+	return FmtText
80
+}
81
+
82
+// NegotiateIncludingOpenMetrics works like Negotiate but includes
83
+// FmtOpenMetrics as an option for the result. Note that this function is
84
+// temporary and will disappear once FmtOpenMetrics is fully supported and as
85
+// such may be negotiated by the normal Negotiate function.
86
+func NegotiateIncludingOpenMetrics(h http.Header) Format {
87
+	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
88
+		ver := ac.Params["version"]
89
+		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
90
+			switch ac.Params["encoding"] {
91
+			case "delimited":
92
+				return FmtProtoDelim
93
+			case "text":
94
+				return FmtProtoText
95
+			case "compact-text":
96
+				return FmtProtoCompact
97
+			}
98
+		}
99
+		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
100
+			return FmtText
101
+		}
102
+		if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
103
+			if ver == OpenMetricsVersion_1_0_0 {
104
+				return FmtOpenMetrics_1_0_0
105
+			}
106
+			return FmtOpenMetrics_0_0_1
107
+		}
108
+	}
109
+	return FmtText
110
+}
111
+
112
// NewEncoder returns a new encoder based on content type negotiation. All
// Encoder implementations returned by NewEncoder also implement Closer, and
// callers should always call the Close method. It is currently only required
// for FmtOpenMetrics, but a future (breaking) release will add the Close method
// to the Encoder interface directly. The current version of the Encoder
// interface is kept for backwards compatibility.
//
// Panics if format is not one of the Fmt* constants handled below.
func NewEncoder(w io.Writer, format Format) Encoder {
	switch format {
	case FmtProtoDelim:
		// Length-delimited protobuf messages.
		return encoderCloser{
			encode: func(v *dto.MetricFamily) error {
				_, err := pbutil.WriteDelimited(w, v)
				return err
			},
			close: func() error { return nil },
		}
	case FmtProtoCompact:
		// Compact (single-line) protobuf text representation.
		return encoderCloser{
			encode: func(v *dto.MetricFamily) error {
				_, err := fmt.Fprintln(w, v.String())
				return err
			},
			close: func() error { return nil },
		}
	case FmtProtoText:
		// Multi-line protobuf text representation.
		return encoderCloser{
			encode: func(v *dto.MetricFamily) error {
				_, err := fmt.Fprintln(w, prototext.Format(v))
				return err
			},
			close: func() error { return nil },
		}
	case FmtText:
		// Prometheus text exposition format.
		return encoderCloser{
			encode: func(v *dto.MetricFamily) error {
				_, err := MetricFamilyToText(w, v)
				return err
			},
			close: func() error { return nil },
		}
	case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0:
		// OpenMetrics; Close emits the trailing `# EOF` line.
		return encoderCloser{
			encode: func(v *dto.MetricFamily) error {
				_, err := MetricFamilyToOpenMetrics(w, v)
				return err
			},
			close: func() error {
				_, err := FinalizeOpenMetrics(w)
				return err
			},
		}
	}
	panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
}

+ 43
- 0
vendor/github.com/prometheus/common/expfmt/expfmt.go View File

@@ -0,0 +1,43 @@
1
+// Copyright 2015 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
// Package expfmt contains tools for reading and writing Prometheus metrics.
package expfmt

// Format specifies the HTTP content type of the different wire protocols.
type Format string

// Constants to assemble the Content-Type values for the different wire protocols.
const (
	TextVersion              = "0.0.4"
	ProtoType                = `application/vnd.google.protobuf`
	ProtoProtocol            = `io.prometheus.client.MetricFamily`
	ProtoFmt                 = ProtoType + "; proto=" + ProtoProtocol + ";"
	OpenMetricsType          = `application/openmetrics-text`
	OpenMetricsVersion_0_0_1 = "0.0.1"
	OpenMetricsVersion_1_0_0 = "1.0.0"

	// The Content-Type values for the different wire protocols.
	FmtUnknown           Format = `<unknown>`
	FmtText              Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
	FmtProtoDelim        Format = ProtoFmt + ` encoding=delimited`
	FmtProtoText         Format = ProtoFmt + ` encoding=text`
	FmtProtoCompact      Format = ProtoFmt + ` encoding=compact-text`
	FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
	FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
)

// HTTP header names used for content-type negotiation.
const (
	hdrContentType = "Content-Type"
	hdrAccept      = "Accept"
)

+ 37
- 0
vendor/github.com/prometheus/common/expfmt/fuzz.go View File

@@ -0,0 +1,37 @@
1
+// Copyright 2014 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+// Build only when actually fuzzing
15
+//go:build gofuzz
16
+// +build gofuzz
17
+
18
+package expfmt
19
+
20
+import "bytes"
21
+
22
// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
//
//	go-fuzz-build github.com/prometheus/common/expfmt
//	go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
//
// Further input samples should go in the folder fuzz/corpus.
//
// The return value follows the go-fuzz convention: 1 marks the input as
// interesting (it parsed without error), 0 as uninteresting.
func Fuzz(in []byte) int {
	parser := TextParser{}
	_, err := parser.TextToMetricFamilies(bytes.NewReader(in))

	if err != nil {
		return 0
	}

	return 1
}

+ 527
- 0
vendor/github.com/prometheus/common/expfmt/openmetrics_create.go View File

@@ -0,0 +1,527 @@
1
+// Copyright 2020 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package expfmt
15
+
16
+import (
17
+	"bufio"
18
+	"bytes"
19
+	"fmt"
20
+	"io"
21
+	"math"
22
+	"strconv"
23
+	"strings"
24
+
25
+	"github.com/prometheus/common/model"
26
+
27
+	dto "github.com/prometheus/client_model/go"
28
+)
29
+
30
+// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
31
+// OpenMetrics text format and writes the resulting lines to 'out'. It returns
32
+// the number of bytes written and any error encountered. The output will have
33
+// the same order as the input, no further sorting is performed. Furthermore,
34
+// this function assumes the input is already sanitized and does not perform any
35
+// sanity checks. If the input contains duplicate metrics or invalid metric or
36
+// label names, the conversion will result in invalid text format output.
37
+//
38
+// This function fulfills the type 'expfmt.encoder'.
39
+//
40
+// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
41
+// on individual metric families, it is the responsibility of the caller to
42
+// append this line to 'out' once all metric families have been written.
43
+// Conveniently, this can be done by calling FinalizeOpenMetrics.
44
+//
45
+// The output should be fully OpenMetrics compliant. However, there are a few
46
+// missing features and peculiarities to avoid complications when switching from
47
+// Prometheus to OpenMetrics or vice versa:
48
+//
49
+//   - Counters are expected to have the `_total` suffix in their metric name. In
50
+//     the output, the suffix will be truncated from the `# TYPE` and `# HELP`
51
+//     line. A counter with a missing `_total` suffix is not an error. However,
52
+//     its type will be set to `unknown` in that case to avoid invalid OpenMetrics
53
+//     output.
54
+//
55
+//   - No support for the following (optional) features: `# UNIT` line, `_created`
56
+//     line, info type, stateset type, gaugehistogram type.
57
+//
58
+//   - The size of exemplar labels is not checked (i.e. it's possible to create
59
+//     exemplars that are larger than allowed by the OpenMetrics specification).
60
+//
61
+//   - The value of Counters is not checked. (OpenMetrics doesn't allow counters
62
+//     with a `NaN` value.)
63
func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
	name := in.GetName()
	if name == "" {
		return 0, fmt.Errorf("MetricFamily has no name: %s", in)
	}

	// Try the interface upgrade. If it doesn't work, we'll use a
	// bufio.Writer from the sync.Pool.
	w, ok := out.(enhancedWriter)
	if !ok {
		b := bufPool.Get().(*bufio.Writer)
		b.Reset(out)
		w = b
		defer func() {
			// Report a flush failure only if no earlier error occurred.
			bErr := b.Flush()
			if err == nil {
				err = bErr
			}
			bufPool.Put(b)
		}()
	}

	var (
		n          int
		metricType = in.GetType()
		shortName  = name
	)
	// For counters, the `_total` suffix is stripped in the `# HELP` and
	// `# TYPE` comment lines; the sample lines keep the full name.
	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
		shortName = name[:len(name)-6]
	}

	// Comments, first HELP, then TYPE.
	if in.Help != nil {
		n, err = w.WriteString("# HELP ")
		written += n
		if err != nil {
			return
		}
		n, err = w.WriteString(shortName)
		written += n
		if err != nil {
			return
		}
		err = w.WriteByte(' ')
		written++
		if err != nil {
			return
		}
		n, err = writeEscapedString(w, *in.Help, true)
		written += n
		if err != nil {
			return
		}
		err = w.WriteByte('\n')
		written++
		if err != nil {
			return
		}
	}
	n, err = w.WriteString("# TYPE ")
	written += n
	if err != nil {
		return
	}
	n, err = w.WriteString(shortName)
	written += n
	if err != nil {
		return
	}
	switch metricType {
	case dto.MetricType_COUNTER:
		// A counter whose name lacks the `_total` suffix is declared as
		// `unknown` to keep the OpenMetrics output valid.
		if strings.HasSuffix(name, "_total") {
			n, err = w.WriteString(" counter\n")
		} else {
			n, err = w.WriteString(" unknown\n")
		}
	case dto.MetricType_GAUGE:
		n, err = w.WriteString(" gauge\n")
	case dto.MetricType_SUMMARY:
		n, err = w.WriteString(" summary\n")
	case dto.MetricType_UNTYPED:
		n, err = w.WriteString(" unknown\n")
	case dto.MetricType_HISTOGRAM:
		n, err = w.WriteString(" histogram\n")
	default:
		return written, fmt.Errorf("unknown metric type %s", metricType.String())
	}
	written += n
	if err != nil {
		return
	}

	// Finally the samples, one line for each.
	for _, metric := range in.Metric {
		switch metricType {
		case dto.MetricType_COUNTER:
			if metric.Counter == nil {
				return written, fmt.Errorf(
					"expected counter in metric %s %s", name, metric,
				)
			}
			// Note that we have ensured above that either the name
			// ends on `_total` or that the rendered type is
			// `unknown`. Therefore, no `_total` must be added here.
			n, err = writeOpenMetricsSample(
				w, name, "", metric, "", 0,
				metric.Counter.GetValue(), 0, false,
				metric.Counter.Exemplar,
			)
		case dto.MetricType_GAUGE:
			if metric.Gauge == nil {
				return written, fmt.Errorf(
					"expected gauge in metric %s %s", name, metric,
				)
			}
			n, err = writeOpenMetricsSample(
				w, name, "", metric, "", 0,
				metric.Gauge.GetValue(), 0, false,
				nil,
			)
		case dto.MetricType_UNTYPED:
			if metric.Untyped == nil {
				return written, fmt.Errorf(
					"expected untyped in metric %s %s", name, metric,
				)
			}
			n, err = writeOpenMetricsSample(
				w, name, "", metric, "", 0,
				metric.Untyped.GetValue(), 0, false,
				nil,
			)
		case dto.MetricType_SUMMARY:
			if metric.Summary == nil {
				return written, fmt.Errorf(
					"expected summary in metric %s %s", name, metric,
				)
			}
			// One line per quantile, then `_sum` and `_count`.
			for _, q := range metric.Summary.Quantile {
				n, err = writeOpenMetricsSample(
					w, name, "", metric,
					model.QuantileLabel, q.GetQuantile(),
					q.GetValue(), 0, false,
					nil,
				)
				written += n
				if err != nil {
					return
				}
			}
			n, err = writeOpenMetricsSample(
				w, name, "_sum", metric, "", 0,
				metric.Summary.GetSampleSum(), 0, false,
				nil,
			)
			written += n
			if err != nil {
				return
			}
			n, err = writeOpenMetricsSample(
				w, name, "_count", metric, "", 0,
				0, metric.Summary.GetSampleCount(), true,
				nil,
			)
		case dto.MetricType_HISTOGRAM:
			if metric.Histogram == nil {
				return written, fmt.Errorf(
					"expected histogram in metric %s %s", name, metric,
				)
			}
			infSeen := false
			for _, b := range metric.Histogram.Bucket {
				n, err = writeOpenMetricsSample(
					w, name, "_bucket", metric,
					model.BucketLabel, b.GetUpperBound(),
					0, b.GetCumulativeCount(), true,
					b.Exemplar,
				)
				written += n
				if err != nil {
					return
				}
				if math.IsInf(b.GetUpperBound(), +1) {
					infSeen = true
				}
			}
			// Synthesize the `+Inf` bucket from the sample count if
			// the input did not contain one.
			if !infSeen {
				n, err = writeOpenMetricsSample(
					w, name, "_bucket", metric,
					model.BucketLabel, math.Inf(+1),
					0, metric.Histogram.GetSampleCount(), true,
					nil,
				)
				written += n
				if err != nil {
					return
				}
			}
			n, err = writeOpenMetricsSample(
				w, name, "_sum", metric, "", 0,
				metric.Histogram.GetSampleSum(), 0, false,
				nil,
			)
			written += n
			if err != nil {
				return
			}
			n, err = writeOpenMetricsSample(
				w, name, "_count", metric, "", 0,
				0, metric.Histogram.GetSampleCount(), true,
				nil,
			)
		default:
			return written, fmt.Errorf(
				"unexpected type in metric %s %s", name, metric,
			)
		}
		written += n
		if err != nil {
			return
		}
	}
	return
}
286
+
287
// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
	eof := []byte("# EOF\n")
	return w.Write(eof)
}
291
+
292
// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
// w, given the metric name, the metric proto message itself, optionally an
// additional label name with a float64 value (use empty string as label name if
// not required), the value (optionally as float64 or uint64, determined by
// useIntValue), and optionally an exemplar (use nil if not required). The
// function returns the number of bytes written and any error encountered.
func writeOpenMetricsSample(
	w enhancedWriter,
	name, suffix string,
	metric *dto.Metric,
	additionalLabelName string, additionalLabelValue float64,
	floatValue float64, intValue uint64, useIntValue bool,
	exemplar *dto.Exemplar,
) (int, error) {
	var written int
	n, err := w.WriteString(name)
	written += n
	if err != nil {
		return written, err
	}
	// suffix is e.g. "_sum", "_count", or "_bucket"; empty for plain samples.
	if suffix != "" {
		n, err = w.WriteString(suffix)
		written += n
		if err != nil {
			return written, err
		}
	}
	n, err = writeOpenMetricsLabelPairs(
		w, metric.Label, additionalLabelName, additionalLabelValue,
	)
	written += n
	if err != nil {
		return written, err
	}
	err = w.WriteByte(' ')
	written++
	if err != nil {
		return written, err
	}
	// Counts (e.g. _count, _bucket) are rendered as integers; everything
	// else as an OpenMetrics-style float.
	if useIntValue {
		n, err = writeUint(w, intValue)
	} else {
		n, err = writeOpenMetricsFloat(w, floatValue)
	}
	written += n
	if err != nil {
		return written, err
	}
	// Optional timestamp, converted from milliseconds to seconds.
	if metric.TimestampMs != nil {
		err = w.WriteByte(' ')
		written++
		if err != nil {
			return written, err
		}
		// TODO(beorn7): Format this directly without converting to a float first.
		n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
		written += n
		if err != nil {
			return written, err
		}
	}
	if exemplar != nil {
		n, err = writeExemplar(w, exemplar)
		written += n
		if err != nil {
			return written, err
		}
	}
	err = w.WriteByte('\n')
	written++
	if err != nil {
		return written, err
	}
	return written, nil
}
367
+
368
// writeOpenMetricsLabelPairs works like writeLabelPairs but formats the
// additional label value in OpenMetrics style (see writeOpenMetricsFloat).
func writeOpenMetricsLabelPairs(
	w enhancedWriter,
	in []*dto.LabelPair,
	additionalLabelName string, additionalLabelValue float64,
) (int, error) {
	// Nothing at all is written when there are no labels to render.
	if len(in) == 0 && additionalLabelName == "" {
		return 0, nil
	}
	var (
		written   int
		separator byte = '{'
	)
	// The first pair is preceded by '{', every subsequent one by ','.
	for _, lp := range in {
		err := w.WriteByte(separator)
		written++
		if err != nil {
			return written, err
		}
		n, err := w.WriteString(lp.GetName())
		written += n
		if err != nil {
			return written, err
		}
		n, err = w.WriteString(`="`)
		written += n
		if err != nil {
			return written, err
		}
		n, err = writeEscapedString(w, lp.GetValue(), true)
		written += n
		if err != nil {
			return written, err
		}
		err = w.WriteByte('"')
		written++
		if err != nil {
			return written, err
		}
		separator = ','
	}
	if additionalLabelName != "" {
		err := w.WriteByte(separator)
		written++
		if err != nil {
			return written, err
		}
		n, err := w.WriteString(additionalLabelName)
		written += n
		if err != nil {
			return written, err
		}
		n, err = w.WriteString(`="`)
		written += n
		if err != nil {
			return written, err
		}
		n, err = writeOpenMetricsFloat(w, additionalLabelValue)
		written += n
		if err != nil {
			return written, err
		}
		err = w.WriteByte('"')
		written++
		if err != nil {
			return written, err
		}
	}
	err := w.WriteByte('}')
	written++
	if err != nil {
		return written, err
	}
	return written, nil
}
444
+
445
+// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
446
+// function returns the number of bytes written and any error encountered.
447
+func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
448
+	written := 0
449
+	n, err := w.WriteString(" # ")
450
+	written += n
451
+	if err != nil {
452
+		return written, err
453
+	}
454
+	n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
455
+	written += n
456
+	if err != nil {
457
+		return written, err
458
+	}
459
+	err = w.WriteByte(' ')
460
+	written++
461
+	if err != nil {
462
+		return written, err
463
+	}
464
+	n, err = writeOpenMetricsFloat(w, e.GetValue())
465
+	written += n
466
+	if err != nil {
467
+		return written, err
468
+	}
469
+	if e.Timestamp != nil {
470
+		err = w.WriteByte(' ')
471
+		written++
472
+		if err != nil {
473
+			return written, err
474
+		}
475
+		err = (*e).Timestamp.CheckValid()
476
+		if err != nil {
477
+			return written, err
478
+		}
479
+		ts := (*e).Timestamp.AsTime()
480
+		// TODO(beorn7): Format this directly from components of ts to
481
+		// avoid overflow/underflow and precision issues of the float
482
+		// conversion.
483
+		n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
484
+		written += n
485
+		if err != nil {
486
+			return written, err
487
+		}
488
+	}
489
+	return written, nil
490
+}
491
+
492
+// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
493
+// number would otherwise contain neither a "." nor an "e".
494
+func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
495
+	switch {
496
+	case f == 1:
497
+		return w.WriteString("1.0")
498
+	case f == 0:
499
+		return w.WriteString("0.0")
500
+	case f == -1:
501
+		return w.WriteString("-1.0")
502
+	case math.IsNaN(f):
503
+		return w.WriteString("NaN")
504
+	case math.IsInf(f, +1):
505
+		return w.WriteString("+Inf")
506
+	case math.IsInf(f, -1):
507
+		return w.WriteString("-Inf")
508
+	default:
509
+		bp := numBufPool.Get().(*[]byte)
510
+		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
511
+		if !bytes.ContainsAny(*bp, "e.") {
512
+			*bp = append(*bp, '.', '0')
513
+		}
514
+		written, err := w.Write(*bp)
515
+		numBufPool.Put(bp)
516
+		return written, err
517
+	}
518
+}
519
+
520
+// writeUint is like writeInt just for uint64.
521
+func writeUint(w enhancedWriter, u uint64) (int, error) {
522
+	bp := numBufPool.Get().(*[]byte)
523
+	*bp = strconv.AppendUint((*bp)[:0], u, 10)
524
+	written, err := w.Write(*bp)
525
+	numBufPool.Put(bp)
526
+	return written, err
527
+}

+ 464
- 0
vendor/github.com/prometheus/common/expfmt/text_create.go View File

@@ -0,0 +1,464 @@
1
+// Copyright 2014 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package expfmt
15
+
16
+import (
17
+	"bufio"
18
+	"fmt"
19
+	"io"
20
+	"math"
21
+	"strconv"
22
+	"strings"
23
+	"sync"
24
+
25
+	"github.com/prometheus/common/model"
26
+
27
+	dto "github.com/prometheus/client_model/go"
28
+)
29
+
30
// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
// implements it.
type enhancedWriter interface {
	io.Writer
	WriteRune(r rune) (n int, err error)
	WriteString(s string) (n int, err error)
	WriteByte(c byte) error
}

const (
	// initialNumBufSize is the starting capacity of the pooled scratch
	// buffers used for number formatting.
	initialNumBufSize = 24
)

var (
	// bufPool supplies bufio.Writers for output writers that do not
	// implement enhancedWriter themselves.
	bufPool = sync.Pool{
		New: func() interface{} {
			return bufio.NewWriter(io.Discard)
		},
	}
	// numBufPool supplies scratch byte slices for strconv.Append*
	// formatting, avoiding an allocation per written number.
	numBufPool = sync.Pool{
		New: func() interface{} {
			b := make([]byte, 0, initialNumBufSize)
			return &b
		},
	}
)
56
+
57
+// MetricFamilyToText converts a MetricFamily proto message into text format and
58
+// writes the resulting lines to 'out'. It returns the number of bytes written
59
+// and any error encountered. The output will have the same order as the input,
60
+// no further sorting is performed. Furthermore, this function assumes the input
61
+// is already sanitized and does not perform any sanity checks. If the input
62
+// contains duplicate metrics or invalid metric or label names, the conversion
63
+// will result in invalid text format output.
64
+//
65
+// This method fulfills the type 'prometheus.encoder'.
66
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
	// Fail-fast checks.
	if len(in.Metric) == 0 {
		return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
	}
	name := in.GetName()
	if name == "" {
		return 0, fmt.Errorf("MetricFamily has no name: %s", in)
	}

	// Try the interface upgrade. If it doesn't work, we'll use a
	// bufio.Writer from the sync.Pool.
	w, ok := out.(enhancedWriter)
	if !ok {
		b := bufPool.Get().(*bufio.Writer)
		b.Reset(out)
		w = b
		defer func() {
			// Report a flush failure only if no earlier error occurred.
			bErr := b.Flush()
			if err == nil {
				err = bErr
			}
			bufPool.Put(b)
		}()
	}

	var n int

	// Comments, first HELP, then TYPE.
	if in.Help != nil {
		n, err = w.WriteString("# HELP ")
		written += n
		if err != nil {
			return
		}
		n, err = w.WriteString(name)
		written += n
		if err != nil {
			return
		}
		err = w.WriteByte(' ')
		written++
		if err != nil {
			return
		}
		n, err = writeEscapedString(w, *in.Help, false)
		written += n
		if err != nil {
			return
		}
		err = w.WriteByte('\n')
		written++
		if err != nil {
			return
		}
	}
	n, err = w.WriteString("# TYPE ")
	written += n
	if err != nil {
		return
	}
	n, err = w.WriteString(name)
	written += n
	if err != nil {
		return
	}
	metricType := in.GetType()
	switch metricType {
	case dto.MetricType_COUNTER:
		n, err = w.WriteString(" counter\n")
	case dto.MetricType_GAUGE:
		n, err = w.WriteString(" gauge\n")
	case dto.MetricType_SUMMARY:
		n, err = w.WriteString(" summary\n")
	case dto.MetricType_UNTYPED:
		n, err = w.WriteString(" untyped\n")
	case dto.MetricType_HISTOGRAM:
		n, err = w.WriteString(" histogram\n")
	default:
		return written, fmt.Errorf("unknown metric type %s", metricType.String())
	}
	written += n
	if err != nil {
		return
	}

	// Finally the samples, one line for each.
	for _, metric := range in.Metric {
		switch metricType {
		case dto.MetricType_COUNTER:
			if metric.Counter == nil {
				return written, fmt.Errorf(
					"expected counter in metric %s %s", name, metric,
				)
			}
			n, err = writeSample(
				w, name, "", metric, "", 0,
				metric.Counter.GetValue(),
			)
		case dto.MetricType_GAUGE:
			if metric.Gauge == nil {
				return written, fmt.Errorf(
					"expected gauge in metric %s %s", name, metric,
				)
			}
			n, err = writeSample(
				w, name, "", metric, "", 0,
				metric.Gauge.GetValue(),
			)
		case dto.MetricType_UNTYPED:
			if metric.Untyped == nil {
				return written, fmt.Errorf(
					"expected untyped in metric %s %s", name, metric,
				)
			}
			n, err = writeSample(
				w, name, "", metric, "", 0,
				metric.Untyped.GetValue(),
			)
		case dto.MetricType_SUMMARY:
			if metric.Summary == nil {
				return written, fmt.Errorf(
					"expected summary in metric %s %s", name, metric,
				)
			}
			// One line per quantile, then `_sum` and `_count`.
			for _, q := range metric.Summary.Quantile {
				n, err = writeSample(
					w, name, "", metric,
					model.QuantileLabel, q.GetQuantile(),
					q.GetValue(),
				)
				written += n
				if err != nil {
					return
				}
			}
			n, err = writeSample(
				w, name, "_sum", metric, "", 0,
				metric.Summary.GetSampleSum(),
			)
			written += n
			if err != nil {
				return
			}
			n, err = writeSample(
				w, name, "_count", metric, "", 0,
				float64(metric.Summary.GetSampleCount()),
			)
		case dto.MetricType_HISTOGRAM:
			if metric.Histogram == nil {
				return written, fmt.Errorf(
					"expected histogram in metric %s %s", name, metric,
				)
			}
			infSeen := false
			for _, b := range metric.Histogram.Bucket {
				n, err = writeSample(
					w, name, "_bucket", metric,
					model.BucketLabel, b.GetUpperBound(),
					float64(b.GetCumulativeCount()),
				)
				written += n
				if err != nil {
					return
				}
				if math.IsInf(b.GetUpperBound(), +1) {
					infSeen = true
				}
			}
			// Synthesize the `+Inf` bucket from the sample count if
			// the input did not contain one.
			if !infSeen {
				n, err = writeSample(
					w, name, "_bucket", metric,
					model.BucketLabel, math.Inf(+1),
					float64(metric.Histogram.GetSampleCount()),
				)
				written += n
				if err != nil {
					return
				}
			}
			n, err = writeSample(
				w, name, "_sum", metric, "", 0,
				metric.Histogram.GetSampleSum(),
			)
			written += n
			if err != nil {
				return
			}
			n, err = writeSample(
				w, name, "_count", metric, "", 0,
				float64(metric.Histogram.GetSampleCount()),
			)
		default:
			return written, fmt.Errorf(
				"unexpected type in metric %s %s", name, metric,
			)
		}
		written += n
		if err != nil {
			return
		}
	}
	return
}
270
+
271
+// writeSample writes a single sample in text format to w, given the metric
272
+// name, the metric proto message itself, optionally an additional label name
273
+// with a float64 value (use empty string as label name if not required), and
274
+// the value. The function returns the number of bytes written and any error
275
+// encountered.
276
func writeSample(
	w enhancedWriter,
	name, suffix string,
	metric *dto.Metric,
	additionalLabelName string, additionalLabelValue float64,
	value float64,
) (int, error) {
	var written int
	n, err := w.WriteString(name)
	written += n
	if err != nil {
		return written, err
	}
	// suffix is e.g. "_sum", "_count", or "_bucket"; empty for plain samples.
	if suffix != "" {
		n, err = w.WriteString(suffix)
		written += n
		if err != nil {
			return written, err
		}
	}
	n, err = writeLabelPairs(
		w, metric.Label, additionalLabelName, additionalLabelValue,
	)
	written += n
	if err != nil {
		return written, err
	}
	err = w.WriteByte(' ')
	written++
	if err != nil {
		return written, err
	}
	n, err = writeFloat(w, value)
	written += n
	if err != nil {
		return written, err
	}
	// Optional timestamp, written as integer milliseconds.
	if metric.TimestampMs != nil {
		err = w.WriteByte(' ')
		written++
		if err != nil {
			return written, err
		}
		n, err = writeInt(w, *metric.TimestampMs)
		written += n
		if err != nil {
			return written, err
		}
	}
	err = w.WriteByte('\n')
	written++
	if err != nil {
		return written, err
	}
	return written, nil
}
332
+
333
+// writeLabelPairs converts a slice of LabelPair proto messages plus the
334
+// explicitly given additional label pair into text formatted as required by the
335
+// text format and writes it to 'w'. An empty slice in combination with an empty
336
+// string 'additionalLabelName' results in nothing being written. Otherwise, the
337
+// label pairs are written, escaped as required by the text format, and enclosed
338
+// in '{...}'. The function returns the number of bytes written and any error
339
+// encountered.
340
func writeLabelPairs(
	w enhancedWriter,
	in []*dto.LabelPair,
	additionalLabelName string, additionalLabelValue float64,
) (int, error) {
	// Nothing at all is written when there are no labels to render.
	if len(in) == 0 && additionalLabelName == "" {
		return 0, nil
	}
	var (
		written   int
		separator byte = '{'
	)
	// The first pair is preceded by '{', every subsequent one by ','.
	for _, lp := range in {
		err := w.WriteByte(separator)
		written++
		if err != nil {
			return written, err
		}
		n, err := w.WriteString(lp.GetName())
		written += n
		if err != nil {
			return written, err
		}
		n, err = w.WriteString(`="`)
		written += n
		if err != nil {
			return written, err
		}
		n, err = writeEscapedString(w, lp.GetValue(), true)
		written += n
		if err != nil {
			return written, err
		}
		err = w.WriteByte('"')
		written++
		if err != nil {
			return written, err
		}
		separator = ','
	}
	if additionalLabelName != "" {
		err := w.WriteByte(separator)
		written++
		if err != nil {
			return written, err
		}
		n, err := w.WriteString(additionalLabelName)
		written += n
		if err != nil {
			return written, err
		}
		n, err = w.WriteString(`="`)
		written += n
		if err != nil {
			return written, err
		}
		n, err = writeFloat(w, additionalLabelValue)
		written += n
		if err != nil {
			return written, err
		}
		err = w.WriteByte('"')
		written++
		if err != nil {
			return written, err
		}
	}
	err := w.WriteByte('}')
	written++
	if err != nil {
		return written, err
	}
	return written, nil
}
414
+
415
// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
// includeDoubleQuote is true - '"' by '\"'.
// strings.Replacer is safe for concurrent use, so both replacers are shared
// package-wide.
var (
	escaper       = strings.NewReplacer("\\", `\\`, "\n", `\n`)
	quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
)
421
+
422
+func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
423
+	if includeDoubleQuote {
424
+		return quotedEscaper.WriteString(w, v)
425
+	}
426
+	return escaper.WriteString(w, v)
427
+}
428
+
429
+// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
430
+// a few common cases for increased efficiency. For non-hardcoded cases, it uses
431
+// strconv.AppendFloat to avoid allocations, similar to writeInt.
432
+func writeFloat(w enhancedWriter, f float64) (int, error) {
433
+	switch {
434
+	case f == 1:
435
+		return 1, w.WriteByte('1')
436
+	case f == 0:
437
+		return 1, w.WriteByte('0')
438
+	case f == -1:
439
+		return w.WriteString("-1")
440
+	case math.IsNaN(f):
441
+		return w.WriteString("NaN")
442
+	case math.IsInf(f, +1):
443
+		return w.WriteString("+Inf")
444
+	case math.IsInf(f, -1):
445
+		return w.WriteString("-Inf")
446
+	default:
447
+		bp := numBufPool.Get().(*[]byte)
448
+		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
449
+		written, err := w.Write(*bp)
450
+		numBufPool.Put(bp)
451
+		return written, err
452
+	}
453
+}
454
+
455
+// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
456
+// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
457
+// allocations.
458
+func writeInt(w enhancedWriter, i int64) (int, error) {
459
+	bp := numBufPool.Get().(*[]byte)
460
+	*bp = strconv.AppendInt((*bp)[:0], i, 10)
461
+	written, err := w.Write(*bp)
462
+	numBufPool.Put(bp)
463
+	return written, err
464
+}

+ 779
- 0
vendor/github.com/prometheus/common/expfmt/text_parse.go View File

@@ -0,0 +1,779 @@
1
+// Copyright 2014 The Prometheus Authors
2
+// Licensed under the Apache License, Version 2.0 (the "License");
3
+// you may not use this file except in compliance with the License.
4
+// You may obtain a copy of the License at
5
+//
6
+// http://www.apache.org/licenses/LICENSE-2.0
7
+//
8
+// Unless required by applicable law or agreed to in writing, software
9
+// distributed under the License is distributed on an "AS IS" BASIS,
10
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+// See the License for the specific language governing permissions and
12
+// limitations under the License.
13
+
14
+package expfmt
15
+
16
+import (
17
+	"bufio"
18
+	"bytes"
19
+	"fmt"
20
+	"io"
21
+	"math"
22
+	"strconv"
23
+	"strings"
24
+
25
+	dto "github.com/prometheus/client_model/go"
26
+
27
+	"github.com/prometheus/common/model"
28
+	"google.golang.org/protobuf/proto"
29
+)
30
+
31
// A stateFn is a function that represents a state in a state machine. By
// executing it, the state is progressed to the next state. The stateFn returns
// another stateFn, which represents the new state. The end state is represented
// by nil. (TextParser.TextToMetricFamilies contains the loop that drives the
// machine.)
type stateFn func() stateFn
36
+
37
// ParseError signals errors while parsing the simple and flat text-based
// exchange format.
type ParseError struct {
	// Line is the 1-based line number on which the error occurred.
	Line int
	// Msg describes the problem.
	Msg string
}

// Error implements the error interface.
func (e ParseError) Error() string {
	prefix := fmt.Sprintf("text format parsing error in line %d: ", e.Line)
	return prefix + e.Msg
}
48
+
49
// TextParser is used to parse the simple and flat text-based exchange format. Its
// zero value is ready to use.
type TextParser struct {
	metricFamiliesByName map[string]*dto.MetricFamily
	buf                  *bufio.Reader // Where the parsed input is read through.
	err                  error         // Most recent error.
	lineCount            int           // Tracks the line count for error messages.
	currentByte          byte          // The most recent byte read.
	currentToken         bytes.Buffer  // Re-used each time a token has to be gathered from multiple bytes.
	currentMF            *dto.MetricFamily // Metric family the current line belongs to.
	currentMetric        *dto.Metric       // Metric currently being populated.
	currentLabelPair     *dto.LabelPair    // Label pair currently being read.

	// The remaining member variables are only used for summaries/histograms.
	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
	// Summary specific.
	summaries       map[uint64]*dto.Metric // Key is created with LabelsToSignature.
	currentQuantile float64 // NaN if no 'quantile' label seen on the current line.
	// Histogram specific.
	histograms    map[uint64]*dto.Metric // Key is created with LabelsToSignature.
	currentBucket float64 // NaN if no 'le' label seen on the current line.
	// These tell us if the currently processed line ends on '_count' or
	// '_sum' respectively and belong to a summary/histogram, representing the sample
	// count and sum of that summary/histogram.
	currentIsSummaryCount, currentIsSummarySum     bool
	currentIsHistogramCount, currentIsHistogramSum bool
}
76
+
77
// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
// format and creates MetricFamily proto messages. It returns the MetricFamily
// proto messages in a map where the metric names are the keys, along with any
// error encountered.
//
// If the input contains duplicate metrics (i.e. lines with the same metric name
// and exactly the same label set), the resulting MetricFamily will contain
// duplicate Metric proto messages. Similar is true for duplicate label
// names. Checks for duplicates have to be performed separately, if required.
// Also note that neither the metrics within each MetricFamily are sorted nor
// the label pairs within each Metric. Sorting is not required for the most
// frequent use of this method, which is sample ingestion in the Prometheus
// server. However, for presentation purposes, you might want to sort the
// metrics, and in some cases, you must sort the labels, e.g. for consumption by
// the metric family injection hook of the Prometheus registry.
//
// Summaries and histograms are rather special beasts. You would probably not
// use them in the simple text format anyway. This method can deal with
// summaries and histograms if they are presented in exactly the way the
// text.Create function creates them.
//
// This method must not be called concurrently. If you want to parse different
// input concurrently, instantiate a separate Parser for each goroutine.
func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
	p.reset(in)
	// Drive the state machine: each state function returns the next state,
	// or nil when parsing has terminated (successfully or with p.err set).
	for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
		// Magic happens here...
	}
	// Get rid of empty metric families.
	for k, mf := range p.metricFamiliesByName {
		if len(mf.GetMetric()) == 0 {
			delete(p.metricFamiliesByName, k)
		}
	}
	// If p.err is io.EOF now, we have run into a premature end of the input
	// stream. Turn this error into something nicer and more
	// meaningful. (io.EOF is often used as a signal for the legitimate end
	// of an input stream.)
	if p.err == io.EOF {
		p.parseError("unexpected end of input stream")
	}
	return p.metricFamiliesByName, p.err
}
120
+
121
// reset prepares the parser for a fresh run over 'in', reusing the buffered
// reader and (empty) maps from a previous run where possible.
func (p *TextParser) reset(in io.Reader) {
	p.metricFamiliesByName = map[string]*dto.MetricFamily{}
	if p.buf == nil {
		p.buf = bufio.NewReader(in)
	} else {
		p.buf.Reset(in)
	}
	p.err = nil
	p.lineCount = 0
	// Allocate fresh maps only if needed: a map that is still empty from a
	// previous run can be reused as-is.
	if p.summaries == nil || len(p.summaries) > 0 {
		p.summaries = map[uint64]*dto.Metric{}
	}
	if p.histograms == nil || len(p.histograms) > 0 {
		p.histograms = map[uint64]*dto.Metric{}
	}
	// NaN marks "no quantile/le label seen yet on the current line".
	p.currentQuantile = math.NaN()
	p.currentBucket = math.NaN()
}
139
+
140
// startOfLine represents the state where the next byte read from p.buf is the
// start of a line (or whitespace leading up to it).
func (p *TextParser) startOfLine() stateFn {
	p.lineCount++
	if p.skipBlankTab(); p.err != nil {
		// This is the only place that we expect to see io.EOF,
		// which is not an error but the signal that we are done.
		// Any other error that happens to align with the start of
		// a line is still an error.
		if p.err == io.EOF {
			p.err = nil
		}
		return nil
	}
	switch p.currentByte {
	case '#':
		return p.startComment
	case '\n':
		return p.startOfLine // Empty line, start the next one.
	}
	// Anything else starts a metric name (validated in the next state).
	return p.readingMetricName
}
162
+
163
// startComment represents the state where the next byte read from p.buf is the
// start of a comment (or whitespace leading up to it). HELP and TYPE comments
// are parsed; all other comments are skipped.
func (p *TextParser) startComment() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '\n' {
		return p.startOfLine
	}
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
	// If we have hit the end of line already, there is nothing left
	// to do. This is not considered a syntax error.
	if p.currentByte == '\n' {
		return p.startOfLine
	}
	keyword := p.currentToken.String()
	if keyword != "HELP" && keyword != "TYPE" {
		// Generic comment, ignore by fast forwarding to end of line.
		for p.currentByte != '\n' {
			if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
				return nil // Unexpected end of input.
			}
		}
		return p.startOfLine
	}
	// There is something. Next has to be a metric name.
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.readTokenAsMetricName(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '\n' {
		// At the end of the line already.
		// Again, this is not considered a syntax error.
		return p.startOfLine
	}
	if !isBlankOrTab(p.currentByte) {
		p.parseError("invalid metric name in comment")
		return nil
	}
	p.setOrCreateCurrentMF()
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '\n' {
		// At the end of the line already.
		// Again, this is not considered a syntax error.
		return p.startOfLine
	}
	switch keyword {
	case "HELP":
		return p.readingHelp
	case "TYPE":
		return p.readingType
	}
	// Unreachable: keyword was restricted to HELP or TYPE above.
	panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
}
223
+
224
// readingMetricName represents the state where the last byte read (now in
// p.currentByte) is the first byte of a metric name.
func (p *TextParser) readingMetricName() stateFn {
	if p.readTokenAsMetricName(); p.err != nil {
		return nil
	}
	if p.currentToken.Len() == 0 {
		p.parseError("invalid metric name")
		return nil
	}
	p.setOrCreateCurrentMF()
	// Now is the time to fix the type if it hasn't happened yet.
	if p.currentMF.Type == nil {
		p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
	}
	p.currentMetric = &dto.Metric{}
	// Do not append the newly created currentMetric to
	// currentMF.Metric right now. First wait if this is a summary,
	// and the metric exists already, which we can only know after
	// having read all the labels.
	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	return p.readingLabels
}
249
+
250
// readingLabels represents the state where the last byte read (now in
// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
// first byte of the value (otherwise).
func (p *TextParser) readingLabels() stateFn {
	// Summaries/histograms are special. We have to reset the
	// currentLabels map, currentQuantile and currentBucket before starting to
	// read labels.
	if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
		p.currentLabels = map[string]string{}
		// '__name__' always participates in the signature calculation.
		p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
		p.currentQuantile = math.NaN()
		p.currentBucket = math.NaN()
	}
	if p.currentByte != '{' {
		return p.readingValue
	}
	return p.startLabelName
}
268
+
269
// startLabelName represents the state where the next byte read from p.buf is
// the start of a label name (or whitespace leading up to it).
func (p *TextParser) startLabelName() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '}' {
		// Empty label set (or trailing comma before '}').
		if p.skipBlankTab(); p.err != nil {
			return nil // Unexpected end of input.
		}
		return p.readingValue
	}
	if p.readTokenAsLabelName(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentToken.Len() == 0 {
		p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
		return nil
	}
	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
		return nil
	}
	// Special summary/histogram treatment. Don't add 'quantile' and 'le'
	// labels to 'real' labels.
	if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
		!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
	}
	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte != '=' {
		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
		return nil
	}
	// Check for duplicate label names. This re-scans all labels collected so
	// far for the current metric after each newly appended pair.
	labels := make(map[string]struct{})
	for _, l := range p.currentMetric.Label {
		lName := l.GetName()
		if _, exists := labels[lName]; !exists {
			labels[lName] = struct{}{}
		} else {
			p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
			return nil
		}
	}
	return p.startLabelValue
}
319
+
320
// startLabelValue represents the state where the next byte read from p.buf is
// the start of a (quoted) label value (or whitespace leading up to it).
func (p *TextParser) startLabelValue() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte != '"' {
		p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
		return nil
	}
	if p.readTokenAsLabelValue(); p.err != nil {
		return nil
	}
	if !model.LabelValue(p.currentToken.String()).IsValid() {
		p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
		return nil
	}
	p.currentLabelPair.Value = proto.String(p.currentToken.String())
	// Special treatment of summaries:
	// - Quantile labels are special, will result in dto.Quantile later.
	// - Other labels have to be added to currentLabels for signature calculation.
	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
		if p.currentLabelPair.GetName() == model.QuantileLabel {
			if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
				// Create a more helpful error message.
				p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
				return nil
			}
		} else {
			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
		}
	}
	// Similar special treatment of histograms.
	if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
		if p.currentLabelPair.GetName() == model.BucketLabel {
			if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
				// Create a more helpful error message.
				p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
				return nil
			}
		} else {
			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
		}
	}
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	switch p.currentByte {
	case ',':
		// More labels to come.
		return p.startLabelName

	case '}':
		if p.skipBlankTab(); p.err != nil {
			return nil // Unexpected end of input.
		}
		return p.readingValue
	default:
		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
		return nil
	}
}
381
+
382
// readingValue represents the state where the last byte read (now in
// p.currentByte) is the first byte of the sample value (i.e. a float).
func (p *TextParser) readingValue() stateFn {
	// When we are here, we have read all the labels, so for the
	// special case of a summary/histogram, we can finally find out
	// if the metric already exists.
	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
		signature := model.LabelsToSignature(p.currentLabels)
		if summary := p.summaries[signature]; summary != nil {
			p.currentMetric = summary
		} else {
			p.summaries[signature] = p.currentMetric
			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
		}
	} else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
		signature := model.LabelsToSignature(p.currentLabels)
		if histogram := p.histograms[signature]; histogram != nil {
			p.currentMetric = histogram
		} else {
			p.histograms[signature] = p.currentMetric
			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
		}
	} else {
		p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
	}
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
	value, err := parseFloat(p.currentToken.String())
	if err != nil {
		// Create a more helpful error message.
		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
		return nil
	}
	// Store the value in the slot matching the metric family's type.
	switch p.currentMF.GetType() {
	case dto.MetricType_COUNTER:
		p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
	case dto.MetricType_GAUGE:
		p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
	case dto.MetricType_UNTYPED:
		p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
	case dto.MetricType_SUMMARY:
		// *sigh*
		if p.currentMetric.Summary == nil {
			p.currentMetric.Summary = &dto.Summary{}
		}
		switch {
		case p.currentIsSummaryCount:
			p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
		case p.currentIsSummarySum:
			p.currentMetric.Summary.SampleSum = proto.Float64(value)
		case !math.IsNaN(p.currentQuantile):
			p.currentMetric.Summary.Quantile = append(
				p.currentMetric.Summary.Quantile,
				&dto.Quantile{
					Quantile: proto.Float64(p.currentQuantile),
					Value:    proto.Float64(value),
				},
			)
		}
	case dto.MetricType_HISTOGRAM:
		// *sigh*
		if p.currentMetric.Histogram == nil {
			p.currentMetric.Histogram = &dto.Histogram{}
		}
		switch {
		case p.currentIsHistogramCount:
			p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
		case p.currentIsHistogramSum:
			p.currentMetric.Histogram.SampleSum = proto.Float64(value)
		case !math.IsNaN(p.currentBucket):
			p.currentMetric.Histogram.Bucket = append(
				p.currentMetric.Histogram.Bucket,
				&dto.Bucket{
					UpperBound:      proto.Float64(p.currentBucket),
					CumulativeCount: proto.Uint64(uint64(value)),
				},
			)
		}
	default:
		p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
	}
	if p.currentByte == '\n' {
		return p.startOfLine
	}
	return p.startTimestamp
}
469
+
470
// startTimestamp represents the state where the next byte read from p.buf is
// the start of the timestamp (or whitespace leading up to it). The timestamp
// is an integer number of milliseconds, and nothing may follow it on the line.
func (p *TextParser) startTimestamp() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
	timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
	if err != nil {
		// Create a more helpful error message.
		p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
		return nil
	}
	p.currentMetric.TimestampMs = proto.Int64(timestamp)
	if p.readTokenUntilNewline(false); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentToken.Len() > 0 {
		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
		return nil
	}
	return p.startOfLine
}
495
+
496
// readingHelp represents the state where the last byte read (now in
// p.currentByte) is the first byte of the docstring after 'HELP'.
func (p *TextParser) readingHelp() stateFn {
	// Only one HELP line per metric family is allowed.
	if p.currentMF.Help != nil {
		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
		return nil
	}
	// Rest of line is the docstring (with escape sequences recognized).
	if p.readTokenUntilNewline(true); p.err != nil {
		return nil // Unexpected end of input.
	}
	p.currentMF.Help = proto.String(p.currentToken.String())
	return p.startOfLine
}
510
+
511
// readingType represents the state where the last byte read (now in
// p.currentByte) is the first byte of the type hint after 'TYPE'.
func (p *TextParser) readingType() stateFn {
	// Only one TYPE line per metric family is allowed, and it must precede
	// any samples of that family.
	if p.currentMF.Type != nil {
		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
		return nil
	}
	// Rest of line is the type.
	if p.readTokenUntilNewline(false); p.err != nil {
		return nil // Unexpected end of input.
	}
	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
	if !ok {
		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
		return nil
	}
	p.currentMF.Type = dto.MetricType(metricType).Enum()
	return p.startOfLine
}
530
+
531
+// parseError sets p.err to a ParseError at the current line with the given
532
+// message.
533
+func (p *TextParser) parseError(msg string) {
534
+	p.err = ParseError{
535
+		Line: p.lineCount,
536
+		Msg:  msg,
537
+	}
538
+}
539
+
540
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
541
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
542
+func (p *TextParser) skipBlankTab() {
543
+	for {
544
+		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
545
+			return
546
+		}
547
+	}
548
+}
549
+
550
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
551
+// anything if p.currentByte is neither ' ' nor '\t'.
552
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
553
+	if isBlankOrTab(p.currentByte) {
554
+		p.skipBlankTab()
555
+	}
556
+}
557
+
558
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken.  The
559
+// first byte considered is the byte already read (now in p.currentByte).  The
560
+// first whitespace byte encountered is still copied into p.currentByte, but not
561
+// into p.currentToken.
562
+func (p *TextParser) readTokenUntilWhitespace() {
563
+	p.currentToken.Reset()
564
+	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
565
+		p.currentToken.WriteByte(p.currentByte)
566
+		p.currentByte, p.err = p.buf.ReadByte()
567
+	}
568
+}
569
+
570
// readTokenUntilNewline copies bytes from p.buf into p.currentToken.  The first
// byte considered is the byte already read (now in p.currentByte).  The first
// newline byte encountered is still copied into p.currentByte, but not into
// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
// recognized: '\\' translates into '\', and '\n' into a line-feed character.
// All other escape sequences are invalid and cause an error.
func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
	p.currentToken.Reset()
	escaped := false
	for p.err == nil {
		if recognizeEscapeSequence && escaped {
			// The previous byte was a backslash; decode the escape.
			switch p.currentByte {
			case '\\':
				p.currentToken.WriteByte(p.currentByte)
			case 'n':
				p.currentToken.WriteByte('\n')
			default:
				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
				return
			}
			escaped = false
		} else {
			switch p.currentByte {
			case '\n':
				return
			case '\\':
				// Defer handling to the next iteration (only decoded
				// when recognizeEscapeSequence is true).
				escaped = true
			default:
				p.currentToken.WriteByte(p.currentByte)
			}
		}
		p.currentByte, p.err = p.buf.ReadByte()
	}
}
604
+
605
// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
// The first byte considered is the byte already read (now in p.currentByte).
// The first byte not part of a metric name is still copied into p.currentByte,
// but not into p.currentToken.
func (p *TextParser) readTokenAsMetricName() {
	p.currentToken.Reset()
	// An invalid first byte leaves the token empty; the caller reports the
	// syntax error.
	if !isValidMetricNameStart(p.currentByte) {
		return
	}
	for {
		p.currentToken.WriteByte(p.currentByte)
		p.currentByte, p.err = p.buf.ReadByte()
		if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
			return
		}
	}
}
622
+
623
// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
// The first byte considered is the byte already read (now in p.currentByte).
// The first byte not part of a label name is still copied into p.currentByte,
// but not into p.currentToken.
func (p *TextParser) readTokenAsLabelName() {
	p.currentToken.Reset()
	// An invalid first byte leaves the token empty; the caller reports the
	// syntax error.
	if !isValidLabelNameStart(p.currentByte) {
		return
	}
	for {
		p.currentToken.WriteByte(p.currentByte)
		p.currentByte, p.err = p.buf.ReadByte()
		if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
			return
		}
	}
}
640
+
641
// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
// In contrast to the other 'readTokenAs...' functions, which start with the
// last read byte in p.currentByte, this method ignores p.currentByte and starts
// with reading a new byte from p.buf. The first byte not part of a label value
// is still copied into p.currentByte, but not into p.currentToken.
func (p *TextParser) readTokenAsLabelValue() {
	p.currentToken.Reset()
	escaped := false
	for {
		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
			return
		}
		if escaped {
			// Decode the escape sequence started by the previous byte.
			switch p.currentByte {
			case '"', '\\':
				p.currentToken.WriteByte(p.currentByte)
			case 'n':
				p.currentToken.WriteByte('\n')
			default:
				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
				return
			}
			escaped = false
			continue
		}
		switch p.currentByte {
		case '"':
			// Closing quote terminates the value.
			return
		case '\n':
			p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
			return
		case '\\':
			escaped = true
		default:
			p.currentToken.WriteByte(p.currentByte)
		}
	}
}
679
+
680
// setOrCreateCurrentMF sets p.currentMF to the MetricFamily the name in
// p.currentToken belongs to, creating a new family if none matches. A name
// ending in '_count', '_sum', or '_bucket' may belong to an existing
// summary/histogram family; the currentIs* flags record which case applies.
func (p *TextParser) setOrCreateCurrentMF() {
	p.currentIsSummaryCount = false
	p.currentIsSummarySum = false
	p.currentIsHistogramCount = false
	p.currentIsHistogramSum = false
	name := p.currentToken.String()
	if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
		return
	}
	// Try out if this is a _sum or _count for a summary/histogram.
	summaryName := summaryMetricName(name)
	if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
		if p.currentMF.GetType() == dto.MetricType_SUMMARY {
			if isCount(name) {
				p.currentIsSummaryCount = true
			}
			if isSum(name) {
				p.currentIsSummarySum = true
			}
			return
		}
	}
	histogramName := histogramMetricName(name)
	if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
		if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
			if isCount(name) {
				p.currentIsHistogramCount = true
			}
			if isSum(name) {
				p.currentIsHistogramSum = true
			}
			return
		}
	}
	// No match: start a brand-new metric family under this name.
	p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
	p.metricFamiliesByName[name] = p.currentMF
}
717
+
718
// isValidLabelNameStart reports whether b may begin a label name
// (ASCII letter or underscore).
func isValidLabelNameStart(b byte) bool {
	switch {
	case b >= 'a' && b <= 'z', b >= 'A' && b <= 'Z', b == '_':
		return true
	}
	return false
}

// isValidLabelNameContinuation reports whether b may appear after the first
// byte of a label name (additionally allows ASCII digits).
func isValidLabelNameContinuation(b byte) bool {
	if '0' <= b && b <= '9' {
		return true
	}
	return isValidLabelNameStart(b)
}

// isValidMetricNameStart reports whether b may begin a metric name
// (label-name starters plus ':').
func isValidMetricNameStart(b byte) bool {
	return b == ':' || isValidLabelNameStart(b)
}

// isValidMetricNameContinuation reports whether b may appear after the first
// byte of a metric name.
func isValidMetricNameContinuation(b byte) bool {
	return b == ':' || isValidLabelNameContinuation(b)
}

// isBlankOrTab reports whether b is a space or a horizontal tab.
func isBlankOrTab(b byte) bool {
	switch b {
	case ' ', '\t':
		return true
	}
	return false
}
737
+
738
// isCount reports whether name has a non-empty stem followed by "_count".
func isCount(name string) bool {
	const suffix = "_count"
	return len(name) > len(suffix) && name[len(name)-len(suffix):] == suffix
}

// isSum reports whether name has a non-empty stem followed by "_sum".
func isSum(name string) bool {
	const suffix = "_sum"
	return len(name) > len(suffix) && name[len(name)-len(suffix):] == suffix
}

// isBucket reports whether name has a non-empty stem followed by "_bucket".
func isBucket(name string) bool {
	const suffix = "_bucket"
	return len(name) > len(suffix) && name[len(name)-len(suffix):] == suffix
}

// summaryMetricName strips a "_count" or "_sum" suffix, yielding the name of
// the summary family the sample may belong to. Other names pass unchanged.
func summaryMetricName(name string) string {
	if isCount(name) {
		return name[:len(name)-len("_count")]
	}
	if isSum(name) {
		return name[:len(name)-len("_sum")]
	}
	return name
}

// histogramMetricName strips a "_count", "_sum", or "_bucket" suffix, yielding
// the name of the histogram family the sample may belong to. Other names pass
// unchanged.
func histogramMetricName(name string) string {
	if isCount(name) {
		return name[:len(name)-len("_count")]
	}
	if isSum(name) {
		return name[:len(name)-len("_sum")]
	}
	if isBucket(name) {
		return name[:len(name)-len("_bucket")]
	}
	return name
}
773
+
774
+func parseFloat(s string) (float64, error) {
775
+	if strings.ContainsAny(s, "pP_") {
776
+		return 0, fmt.Errorf("unsupported character in float")
777
+	}
778
+	return strconv.ParseFloat(s, 64)
779
+}

+ 67
- 0
vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt View File

@@ -0,0 +1,67 @@
1
+PACKAGE
2
+
3
+package goautoneg
4
+import "bitbucket.org/ww/goautoneg"
5
+
6
+HTTP Content-Type Autonegotiation.
7
+
8
+The functions in this package implement the behaviour specified in
9
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
10
+
11
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
12
+All rights reserved.
13
+
14
+Redistribution and use in source and binary forms, with or without
15
+modification, are permitted provided that the following conditions are
16
+met:
17
+
18
+    Redistributions of source code must retain the above copyright
19
+    notice, this list of conditions and the following disclaimer.
20
+
21
+    Redistributions in binary form must reproduce the above copyright
22
+    notice, this list of conditions and the following disclaimer in
23
+    the documentation and/or other materials provided with the
24
+    distribution.
25
+
26
+    Neither the name of the Open Knowledge Foundation Ltd. nor the
27
+    names of its contributors may be used to endorse or promote
28
+    products derived from this software without specific prior written
29
+    permission.
30
+
31
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
34
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
37
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
38
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
39
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
40
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
41
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42
+
43
+
44
+FUNCTIONS
45
+
46
+func Negotiate(header string, alternatives []string) (content_type string)
47
+Negotiate the most appropriate content_type given the accept header
48
+and a list of alternatives.
49
+
50
+func ParseAccept(header string) (accept []Accept)
51
+Parse an Accept Header string returning a sorted list
52
+of clauses
53
+
54
+
55
+TYPES
56
+
57
+type Accept struct {
58
+    Type, SubType string
59
+    Q             float32
60
+    Params        map[string]string
61
+}
62
+Structure to represent a clause in an HTTP Accept Header
63
+
64
+
65
+SUBDIRECTORIES
66
+
67
+	.hg

+ 0
- 0
vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go View File


Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save