diff --git a/go.mod b/go.mod index 6b56b7c09..ea262d229 100644 --- a/go.mod +++ b/go.mod @@ -20,9 +20,9 @@ require ( go.opentelemetry.io/otel/sdk v1.21.0 golang.org/x/oauth2 v0.15.0 golang.org/x/sys v0.15.0 - google.golang.org/api v0.151.0 - google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b - google.golang.org/grpc v1.59.0 + google.golang.org/api v0.154.0 + google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f + google.golang.org/grpc v1.60.0 google.golang.org/protobuf v1.31.0 gopkg.in/gcfg.v1 v1.2.3 k8s.io/api v0.24.1 @@ -48,6 +48,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful v2.9.5+incompatible // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -87,6 +88,7 @@ require ( github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect @@ -100,8 +102,8 @@ require ( golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.16.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/go.sum b/go.sum index db1e547f4..3154ca611 100644 --- a/go.sum +++ b/go.sum @@ -47,7 +47,7 @@ cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRY cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= -cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= @@ -1009,6 +1009,8 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/fgprof v0.9.1/go.mod h1:7/HK6JFtFaARhIljgP2IV8rJLIoHDoOYoUphsnGvqxE= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= 
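Context for the dependency hunks above: besides bumping google.golang.org/api, genproto, and grpc, this change vendors go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp and github.com/felixge/httpsnoop as new indirect dependencies (otelhttp relies on httpsnoop to wrap http.ResponseWriter). The sketch below is illustrative rather than code from this diff; it assumes a plain net/http server, and the handler name and listen address are hypothetical.

```go
// Illustrative only; not part of this diff. A minimal sketch, assuming a
// plain net/http server, of how the newly vendored otelhttp wrapper (which
// pulls in github.com/felixge/httpsnoop as an indirect dependency) is
// typically used. The handler and listen address below are hypothetical.
package main

import (
	"log"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// hello stands in for the application's real handler or mux.
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})

	// NewHandler wraps the handler in a server span named after the operation
	// and records request metrics; internally it uses httpsnoop to wrap the
	// ResponseWriter without hiding its optional interfaces.
	log.Fatal(http.ListenAndServe(":8080", otelhttp.NewHandler(hello, "hello")))
}
```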
github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -2191,6 +2193,8 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.2 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= @@ -2885,8 +2889,8 @@ google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/ google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= -google.golang.org/api v0.151.0 h1:FhfXLO/NFdJIzQtCqjpysWwqKk8AzGWBUhMIx67cVDU= -google.golang.org/api v0.151.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg= +google.golang.org/api v0.154.0 h1:X7QkVKZBskztmpPKWQXgjJRPA2dJYrL6r+sYPRLj050= +google.golang.org/api v0.154.0/go.mod h1:qhSMkM85hgqiokIYsrRyKxrjfBeIhgl4Z2JmeRkYylc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -3060,16 +3064,16 @@ google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= google.golang.org/genproto v0.0.0-20230526161137-0005af68ea54/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk= -google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA= -google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f h1:Vn+VyHU5guc9KjB5KrjI2q0wCOWEOIh0OEsleqakHJg= +google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f/go.mod h1:nWSwAFPb+qfNJXsoeO3Io7zf4tMSfN8EA8RlDA04GhY= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/api 
v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= -google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f h1:2yNACc1O40tTnrsbk9Cv6oxiW8pxI/pXj0wRtdlYmgY= +google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f/go.mod h1:Uy9bTZJqmfrw2rIBxgGLnamc78euZULUBrLZ9XTITKI= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -3122,8 +3126,8 @@ google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCD google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.60.0 h1:6FQAR0kM31P6MRdeluor2w2gPaS4SVNrD/DNTxrQ15k= +google.golang.org/grpc v1.60.0/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore new file mode 100644 index 000000000..e69de29bb diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt new file mode 100644 index 000000000..e028b46a9 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above 
copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile new file mode 100644 index 000000000..4e12afdd9 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/Makefile @@ -0,0 +1,10 @@ +.PHONY: ci generate clean + +ci: clean generate + go test -race -v ./... + +generate: + go generate . + +clean: + rm -rf *_generated*.go diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md new file mode 100644 index 000000000..cf6b42f3d --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/README.md @@ -0,0 +1,95 @@ +# httpsnoop + +Package httpsnoop provides an easy way to capture http related metrics (i.e. +response time, bytes written, and http status code) from your application's +http.Handlers. + +Doing this requires non-trivial wrapping of the http.ResponseWriter interface, +which is also exposed for users interested in a more low-level API. + +[![Go Reference](https://pkg.go.dev/badge/github.com/felixge/httpsnoop.svg)](https://pkg.go.dev/github.com/felixge/httpsnoop) +[![Build Status](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml/badge.svg)](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml) + +## Usage Example + +```go +// myH is your app's http handler, perhaps a http.ServeMux or similar. +var myH http.Handler +// wrappedH wraps myH in order to log every request. +wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + m := httpsnoop.CaptureMetrics(myH, w, r) + log.Printf( + "%s %s (code=%d dt=%s written=%d)", + r.Method, + r.URL, + m.Code, + m.Duration, + m.Written, + ) +}) +http.ListenAndServe(":8080", wrappedH) +``` + +## Why this package exists + +Instrumenting an application's http.Handler is surprisingly difficult. + +However if you google for e.g. "capture ResponseWriter status code" you'll find +lots of advise and code examples that suggest it to be a fairly trivial +undertaking. Unfortunately everything I've seen so far has a high chance of +breaking your application. + +The main problem is that a `http.ResponseWriter` often implements additional +interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and +`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter` +in your own struct that also implements the `http.ResponseWriter` interface +will hide the additional interfaces mentioned above. This has a high change of +introducing subtle bugs into any non-trivial application. + +Another approach I've seen people take is to return a struct that implements +all of the interfaces above. However, that's also problematic, because it's +difficult to fake some of these interfaces behaviors when the underlying +`http.ResponseWriter` doesn't have an implementation. 
It's also dangerous, +because an application may choose to operate differently, merely because it +detects the presence of these additional interfaces. + +This package solves this problem by checking which additional interfaces a +`http.ResponseWriter` implements, returning a wrapped version implementing the +exact same set of interfaces. + +Additionally this package properly handles edge cases such as `WriteHeader` not +being called, or called more than once, as well as concurrent calls to +`http.ResponseWriter` methods, and even calls happening after the wrapped +`ServeHTTP` has already returned. + +Unfortunately this package is not perfect either. It's possible that it is +still missing some interfaces provided by the go core (let me know if you find +one), and it won't work for applications adding their own interfaces into the +mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying +`http.ResponseWriter` and type-assert the result to its other interfaces. + +However, hopefully the explanation above has sufficiently scared you of rolling +your own solution to this problem. httpsnoop may still break your application, +but at least it tries to avoid it as much as possible. + +Anyway, the real problem here is that smuggling additional interfaces inside +`http.ResponseWriter` is a problematic design choice, but it probably goes as +deep as the Go language specification itself. But that's okay, I still prefer +Go over the alternatives ;). + +## Performance + +``` +BenchmarkBaseline-8 20000 94912 ns/op +BenchmarkCaptureMetrics-8 20000 95461 ns/op +``` + +As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an +overhead of ~500 ns per http request on my machine. However, the margin of +error appears to be larger than that, therefor it should be reasonable to +assume that the overhead introduced by `CaptureMetrics` is absolutely +negligible. + +## License + +MIT diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go new file mode 100644 index 000000000..bec7b71b3 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go @@ -0,0 +1,86 @@ +package httpsnoop + +import ( + "io" + "net/http" + "time" +) + +// Metrics holds metrics captured from CaptureMetrics. +type Metrics struct { + // Code is the first http response code passed to the WriteHeader func of + // the ResponseWriter. If no such call is made, a default code of 200 is + // assumed instead. + Code int + // Duration is the time it took to execute the handler. + Duration time.Duration + // Written is the number of bytes successfully written by the Write or + // ReadFrom function of the ResponseWriter. ResponseWriters may also write + // data to their underlaying connection directly (e.g. headers), but those + // are not tracked. Therefor the number of Written bytes will usually match + // the size of the response body. + Written int64 +} + +// CaptureMetrics wraps the given hnd, executes it with the given w and r, and +// returns the metrics it captured from it. +func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics { + return CaptureMetricsFn(w, func(ww http.ResponseWriter) { + hnd.ServeHTTP(ww, r) + }) +} + +// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the +// resulting metrics. 
This is very similar to CaptureMetrics (which is just +// sugar on top of this func), but is a more usable interface if your +// application doesn't use the Go http.Handler interface. +func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics { + m := Metrics{Code: http.StatusOK} + m.CaptureMetrics(w, fn) + return m +} + +// CaptureMetrics wraps w and calls fn with the wrapped w and updates +// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn, +// but allows one to customize starting Metrics object. +func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) { + var ( + start = time.Now() + headerWritten bool + hooks = Hooks{ + WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { + return func(code int) { + next(code) + + if !(code >= 100 && code <= 199) && !headerWritten { + m.Code = code + headerWritten = true + } + } + }, + + Write: func(next WriteFunc) WriteFunc { + return func(p []byte) (int, error) { + n, err := next(p) + + m.Written += int64(n) + headerWritten = true + return n, err + } + }, + + ReadFrom: func(next ReadFromFunc) ReadFromFunc { + return func(src io.Reader) (int64, error) { + n, err := next(src) + + headerWritten = true + m.Written += n + return n, err + } + }, + } + ) + + fn(Wrap(w, hooks)) + m.Duration += time.Since(start) +} diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go new file mode 100644 index 000000000..203c35b3c --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/docs.go @@ -0,0 +1,10 @@ +// Package httpsnoop provides an easy way to capture http related metrics (i.e. +// response time, bytes written, and http status code) from your application's +// http.Handlers. +// +// Doing this requires non-trivial wrapping of the http.ResponseWriter +// interface, which is also exposed for users interested in a more low-level +// API. +package httpsnoop + +//go:generate go run codegen/main.go diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go new file mode 100644 index 000000000..101cedde6 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go @@ -0,0 +1,436 @@ +// +build go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// PushFunc is part of the http.Pusher interface. +type PushFunc func(target string, opts *http.PushOptions) error + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. 
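The CaptureMetrics implementation above is built entirely on the Hooks/Wrap machinery defined next in the generated files. As a hedged aside, the sketch below (not part of the vendored code) shows the same mechanism used directly: a hypothetical middleware that intercepts WriteHeader to log status codes while Wrap preserves whatever optional interfaces the original ResponseWriter implements.

```go
// Illustrative only; not part of the vendored files. A minimal sketch of
// using httpsnoop.Wrap with a custom WriteHeader hook. The middleware name
// is hypothetical.
package middleware

import (
	"log"
	"net/http"

	"github.com/felixge/httpsnoop"
)

// WithStatusLogging logs the response status of every request while keeping
// the wrapped ResponseWriter's optional interfaces (Flusher, Hijacker, ...).
func WithStatusLogging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		hooks := httpsnoop.Hooks{
			WriteHeader: func(nextFn httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
				return func(code int) {
					log.Printf("%s %s -> %d", r.Method, r.URL.Path, code)
					nextFn(code)
				}
			},
		}
		next.ServeHTTP(httpsnoop.Wrap(w, hooks), r)
	})
}
```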
+type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc + Push func(PushFunc) PushFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// - http.Pusher +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. +func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + _, i4 := w.(http.Pusher) + switch { + // combination 1/32 + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/32 + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Pusher + }{rw, rw, rw} + // combination 3/32 + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 4/32 + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw} + // combination 5/32 + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 6/32 + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + http.Pusher + }{rw, rw, rw, rw} + // combination 7/32 + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 8/32 + case !i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 9/32 + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 10/32 + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw} + // combination 11/32 + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 12/32 + case !i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 13/32 + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 14/32 + case !i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 15/32 + case 
!i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 16/32 + case !i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 17/32 + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 18/32 + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Pusher + }{rw, rw, rw, rw} + // combination 19/32 + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 20/32 + case i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 21/32 + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 22/32 + case i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 23/32 + case i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 24/32 + case i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 25/32 + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 26/32 + case i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 27/32 + case i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 28/32 + case i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 29/32 + case i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 30/32 + case i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 31/32 + case i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + // combination 32/32 + case i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if 
w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +func (w *rw) Push(target string, opts *http.PushOptions) error { + f := w.w.(http.Pusher).Push + if w.h.Push != nil { + f = w.h.Push(f) + } + return f(target, opts) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go new file mode 100644 index 000000000..e0951df15 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -0,0 +1,278 @@ +// +build !go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT. + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// +// The wrapped version will implement the exact same combination. 
If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. +func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + switch { + // combination 1/16 + case !i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/16 + case !i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 3/16 + case !i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 4/16 + case !i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 5/16 + case !i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 6/16 + case !i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 7/16 + case !i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 8/16 + case !i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 9/16 + case i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 10/16 + case i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 11/16 + case i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 12/16 + case i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 13/16 + case i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 14/16 + case i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 15/16 + case i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 16/16 + case i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = 
w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go new file mode 100644 index 000000000..92b8cf73c --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" +) + +// DefaultClient is the default Client and is used by Get, Head, Post and PostForm. +// Please be careful of intitialization order - for example, if you change +// the global propagator, the DefaultClient might still be using the old one. +var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)} + +// Get is a convenient replacement for http.Get that adds a span around the request. +func Get(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "GET", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Head is a convenient replacement for http.Head that adds a span around the request. +func Head(ctx context.Context, targetURL string) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "HEAD", targetURL, nil) + if err != nil { + return nil, err + } + return DefaultClient.Do(req) +} + +// Post is a convenient replacement for http.Post that adds a span around the request. +func Post(ctx context.Context, targetURL, contentType string, body io.Reader) (resp *http.Response, err error) { + req, err := http.NewRequestWithContext(ctx, "POST", targetURL, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", contentType) + return DefaultClient.Do(req) +} + +// PostForm is a convenient replacement for http.PostForm that adds a span around the request. +func PostForm(ctx context.Context, targetURL string, data url.Values) (resp *http.Response, err error) { + return Post(ctx, targetURL, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go new file mode 100644 index 000000000..9509014e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Attribute keys that can be added to a span. 
+const ( + ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read + ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded) + WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written + WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) +) + +// Server HTTP metrics. +const ( + RequestCount = "http.server.request_count" // Incoming request count total + RequestContentLength = "http.server.request_content_length" // Incoming request bytes total + ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total + ServerLatency = "http.server.duration" // Incoming end to end duration, milliseconds +) + +// Filter is a predicate used to determine whether a given http.request should +// be traced. A Filter must return true if the request should be traced. +type Filter func(*http.Request) bool + +func newTracer(tp trace.TracerProvider) trace.Tracer { + return tp.Tracer(ScopeName, trace.WithInstrumentationVersion(Version())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go new file mode 100644 index 000000000..a1b5b5e5a --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "net/http" + "net/http/httptrace" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +// ScopeName is the instrumentation scope name. +const ScopeName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +// config represents the configuration options available for the http.Handler +// and http.Transport types. +type config struct { + ServerName string + Tracer trace.Tracer + Meter metric.Meter + Propagators propagation.TextMapPropagator + SpanStartOptions []trace.SpanStartOption + PublicEndpoint bool + PublicEndpointFn func(*http.Request) bool + ReadEvent bool + WriteEvent bool + Filters []Filter + SpanNameFormatter func(string, *http.Request) string + ClientTrace func(context.Context) *httptrace.ClientTrace + + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider +} + +// Option interface used for setting optional config properties. +type Option interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +// newConfig creates a new config struct and applies opts to it. 
+func newConfig(opts ...Option) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), + MeterProvider: otel.GetMeterProvider(), + } + for _, opt := range opts { + opt.apply(c) + } + + // Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context. + if c.TracerProvider != nil { + c.Tracer = newTracer(c.TracerProvider) + } + + c.Meter = c.MeterProvider.Meter( + ScopeName, + metric.WithInstrumentationVersion(Version()), + ) + + return c +} + +// WithTracerProvider specifies a tracer provider to use for creating a tracer. +// If none is specified, the global provider is used. +func WithTracerProvider(provider trace.TracerProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.TracerProvider = provider + } + }) +} + +// WithMeterProvider specifies a meter provider to use for creating a meter. +// If none is specified, the global provider is used. +func WithMeterProvider(provider metric.MeterProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.MeterProvider = provider + } + }) +} + +// WithPublicEndpoint configures the Handler to link the span with an incoming +// span context. If this option is not provided, then the association is a child +// association instead of a link. +func WithPublicEndpoint() Option { + return optionFunc(func(c *config) { + c.PublicEndpoint = true + }) +} + +// WithPublicEndpointFn runs with every request, and allows conditionnally +// configuring the Handler to link the span with an incoming span context. If +// this option is not provided or returns false, then the association is a +// child association instead of a link. +// Note: WithPublicEndpoint takes precedence over WithPublicEndpointFn. +func WithPublicEndpointFn(fn func(*http.Request) bool) Option { + return optionFunc(func(c *config) { + c.PublicEndpointFn = fn + }) +} + +// WithPropagators configures specific propagators. If this +// option isn't specified, then the global TextMapPropagator is used. +func WithPropagators(ps propagation.TextMapPropagator) Option { + return optionFunc(func(c *config) { + if ps != nil { + c.Propagators = ps + } + }) +} + +// WithSpanOptions configures an additional set of +// trace.SpanOptions, which are applied to each new span. +func WithSpanOptions(opts ...trace.SpanStartOption) Option { + return optionFunc(func(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, opts...) + }) +} + +// WithFilter adds a filter to the list of filters used by the handler. +// If any filter indicates to exclude a request then the request will not be +// traced. All filters must allow a request to be traced for a Span to be created. +// If no filters are provided then all requests are traced. +// Filters will be invoked for each processed request, it is advised to make them +// simple and fast. +func WithFilter(f Filter) Option { + return optionFunc(func(c *config) { + c.Filters = append(c.Filters, f) + }) +} + +type event int + +// Different types of events that can be recorded, see WithMessageEvents. +const ( + ReadEvents event = iota + WriteEvents +) + +// WithMessageEvents configures the Handler to record the specified events +// (span.AddEvent) on spans. By default only summary attributes are added at the +// end of the request. 
+// +// Valid events are: +// - ReadEvents: Record the number of bytes read after every http.Request.Body.Read +// using the ReadBytesKey +// - WriteEvents: Record the number of bytes written after every http.ResponeWriter.Write +// using the WriteBytesKey +func WithMessageEvents(events ...event) Option { + return optionFunc(func(c *config) { + for _, e := range events { + switch e { + case ReadEvents: + c.ReadEvent = true + case WriteEvents: + c.WriteEvent = true + } + } + }) +} + +// WithSpanNameFormatter takes a function that will be called on every +// request and the returned string will become the Span Name. +func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option { + return optionFunc(func(c *config) { + c.SpanNameFormatter = f + }) +} + +// WithClientTrace takes a function that returns client trace instance that will be +// applied to the requests sent through the otelhttp Transport. +func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option { + return optionFunc(func(c *config) { + c.ClientTrace = f + }) +} + +// WithServerName returns an Option that sets the name of the (virtual) server +// handling requests. +func WithServerName(server string) Option { + return optionFunc(func(c *config) { + c.ServerName = server + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go new file mode 100644 index 000000000..38c7f01c7 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package otelhttp provides an http.Handler and functions that are intended +// to be used to add tracing by wrapping existing handlers (with Handler) and +// routes WithRouteTag. +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go new file mode 100644 index 000000000..9a8260059 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -0,0 +1,287 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "io" + "net/http" + "time" + + "github.com/felixge/httpsnoop" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + "go.opentelemetry.io/otel/trace" +) + +// middleware is an http middleware which wraps the next handler in a span. +type middleware struct { + operation string + server string + + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + counters map[string]metric.Int64Counter + valueRecorders map[string]metric.Float64Histogram + publicEndpoint bool + publicEndpointFn func(*http.Request) bool +} + +func defaultHandlerFormatter(operation string, _ *http.Request) string { + return operation +} + +// NewHandler wraps the passed handler in a span named after the operation and +// enriches it with metrics. +func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler { + return NewMiddleware(operation, opts...)(handler) +} + +// NewMiddleware returns a tracing and metrics instrumentation middleware. +// The handler returned by the middleware wraps a handler +// in a span named after the operation and enriches it with metrics. +func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { + h := middleware{ + operation: operation, + } + + defaultOpts := []Option{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)), + WithSpanNameFormatter(defaultHandlerFormatter), + } + + c := newConfig(append(defaultOpts, opts...)...) 
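+	// Caller-supplied opts are applied after defaultOpts, so an explicit
+	// WithSpanNameFormatter replaces the default formatter, while options
+	// added via WithSpanOptions accumulate on top of the default
+	// SpanKindServer start option.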
+ h.configure(c) + h.createMeasures() + + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + h.serveHTTP(w, r, next) + }) + } +} + +func (h *middleware) configure(c *config) { + h.tracer = c.Tracer + h.meter = c.Meter + h.propagators = c.Propagators + h.spanStartOptions = c.SpanStartOptions + h.readEvent = c.ReadEvent + h.writeEvent = c.WriteEvent + h.filters = c.Filters + h.spanNameFormatter = c.SpanNameFormatter + h.publicEndpoint = c.PublicEndpoint + h.publicEndpointFn = c.PublicEndpointFn + h.server = c.ServerName +} + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} + +func (h *middleware) createMeasures() { + h.counters = make(map[string]metric.Int64Counter) + h.valueRecorders = make(map[string]metric.Float64Histogram) + + requestBytesCounter, err := h.meter.Int64Counter( + RequestContentLength, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request content length (uncompressed)"), + ) + handleErr(err) + + responseBytesCounter, err := h.meter.Int64Counter( + ResponseContentLength, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response content length (uncompressed)"), + ) + handleErr(err) + + serverLatencyMeasure, err := h.meter.Float64Histogram( + ServerLatency, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of HTTP request handling"), + ) + handleErr(err) + + h.counters[RequestContentLength] = requestBytesCounter + h.counters[ResponseContentLength] = responseBytesCounter + h.valueRecorders[ServerLatency] = serverLatencyMeasure +} + +// serveHTTP sets up tracing and calls the given next http.Handler with the span +// context injected into the request context. +func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { + requestStartTime := time.Now() + for _, f := range h.filters { + if !f(r) { + // Simply pass through to the handler if a filter rejects the request + next.ServeHTTP(w, r) + return + } + } + + ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) + opts := []trace.SpanStartOption{ + trace.WithAttributes(semconvutil.HTTPServerRequest(h.server, r)...), + } + if h.server != "" { + hostAttr := semconv.NetHostName(h.server) + opts = append(opts, trace.WithAttributes(hostAttr)) + } + opts = append(opts, h.spanStartOptions...) + if h.publicEndpoint || (h.publicEndpointFn != nil && h.publicEndpointFn(r.WithContext(ctx))) { + opts = append(opts, trace.WithNewRoot()) + // Linking incoming span context if any for public endpoint. + if s := trace.SpanContextFromContext(ctx); s.IsValid() && s.IsRemote() { + opts = append(opts, trace.WithLinks(trace.Link{SpanContext: s})) + } + } + + tracer := h.tracer + + if tracer == nil { + if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...) + defer span.End() + + readRecordFunc := func(int64) {} + if h.readEvent { + readRecordFunc = func(n int64) { + span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n))) + } + } + + var bw bodyWrapper + // if request body is nil or NoBody, we don't want to mutate the body as it + // will affect the identity of it in an unforeseeable way because we assert + // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. 
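+	// In other words, only wrap the body when there is something to read:
+	// leaving nil and http.NoBody untouched preserves identity checks such as
+	// r.Body == http.NoBody further down the handler chain.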
+ if r.Body != nil && r.Body != http.NoBody { + bw.ReadCloser = r.Body + bw.record = readRecordFunc + r.Body = &bw + } + + writeRecordFunc := func(int64) {} + if h.writeEvent { + writeRecordFunc = func(n int64) { + span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n))) + } + } + + rww := &respWriterWrapper{ + ResponseWriter: w, + record: writeRecordFunc, + ctx: ctx, + props: h.propagators, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } + + // Wrap w to use our ResponseWriter methods while also exposing + // other interfaces that w may implement (http.CloseNotifier, + // http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom). + + w = httpsnoop.Wrap(w, httpsnoop.Hooks{ + Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc { + return rww.Header + }, + Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { + return rww.Write + }, + WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { + return rww.WriteHeader + }, + }) + + labeler := &Labeler{} + ctx = injectLabeler(ctx, labeler) + + next.ServeHTTP(w, r.WithContext(ctx)) + + setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err) + + // Add metrics + attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) + if rww.statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) + } + o := metric.WithAttributes(attributes...) + h.counters[RequestContentLength].Add(ctx, bw.read, o) + h.counters[ResponseContentLength].Add(ctx, rww.written, o) + + // Use floating point division here for higher precision (instead of Millisecond method). + elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + + h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, o) +} + +func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { + attributes := []attribute.KeyValue{} + + // TODO: Consider adding an event after each read and write, possibly as an + // option (defaulting to off), so as to not create needlessly verbose spans. + if read > 0 { + attributes = append(attributes, ReadBytesKey.Int64(read)) + } + if rerr != nil && rerr != io.EOF { + attributes = append(attributes, ReadErrorKey.String(rerr.Error())) + } + if wrote > 0 { + attributes = append(attributes, WroteBytesKey.Int64(wrote)) + } + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + span.SetStatus(semconvutil.HTTPServerStatus(statusCode)) + + if werr != nil && werr != io.EOF { + attributes = append(attributes, WriteErrorKey.String(werr.Error())) + } + span.SetAttributes(attributes...) +} + +// WithRouteTag annotates spans and metrics with the provided route name +// with HTTP route attribute. 
+func WithRouteTag(route string, h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + attr := semconv.HTTPRouteKey.String(route) + + span := trace.SpanFromContext(r.Context()) + span.SetAttributes(attr) + + labeler, _ := LabelerFromContext(r.Context()) + labeler.Add(attr) + + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go new file mode 100644 index 000000000..edf4ce3d3 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/gen.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +// Generate semconvutil package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv_test.go.tmpl "--data={}" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/httpconv.go.tmpl "--data={}" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv_test.go.tmpl "--data={}" --out=netconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconvutil/netconv.go.tmpl "--data={}" --out=netconv.go diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go new file mode 100644 index 000000000..d3dede9eb --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -0,0 +1,552 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconvutil/httpconv.go.tmpl + +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +import ( + "fmt" + "net/http" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +// HTTPClientResponse returns trace attributes for an HTTP response received by a +// client from a server. 
It will return the following attributes if the related +// values are defined in resp: "http.status.code", +// "http.response_content_length". +// +// This does not add all OpenTelemetry required attributes for an HTTP event, +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. If a complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(HTTPClientResponse(resp), ClientRequest(resp.Request)...) +func HTTPClientResponse(resp *http.Response) []attribute.KeyValue { + return hc.ClientResponse(resp) +} + +// HTTPClientRequest returns trace attributes for an HTTP request made by a client. +// The following attributes are always returned: "http.url", "http.flavor", +// "http.method", "net.peer.name". The following attributes are returned if the +// related values are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "enduser.id". +func HTTPClientRequest(req *http.Request) []attribute.KeyValue { + return hc.ClientRequest(req) +} + +// HTTPClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func HTTPClientStatus(code int) (codes.Code, string) { + return hc.ClientStatus(code) +} + +// HTTPServerRequest returns trace attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "http.target", "net.host.name". The following attributes are +// returned if they related values are defined in req: "net.host.port", +// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", +// "http.client_ip". +func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { + return hc.ServerRequest(server, req) +} + +// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. 
+// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "net.host.name". The following attributes are +// returned if they related values are defined in req: "net.host.port". +func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { + return hc.ServerRequestMetrics(server, req) +} + +// HTTPServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func HTTPServerStatus(code int) (codes.Code, string) { + return hc.ServerStatus(code) +} + +// HTTPRequestHeader returns the contents of h as attributes. +// +// Instrumentation should require an explicit configuration of which headers to +// captured and then prune what they pass here. Including all headers can be a +// security risk - explicit configuration helps avoid leaking sensitive +// information. +// +// The User-Agent header is already captured in the http.user_agent attribute +// from ClientRequest and ServerRequest. Instrumentation may provide an option +// to capture that header here even though it is not recommended. Otherwise, +// instrumentation should filter that out of what is passed. +func HTTPRequestHeader(h http.Header) []attribute.KeyValue { + return hc.RequestHeader(h) +} + +// HTTPResponseHeader returns the contents of h as attributes. +// +// Instrumentation should require an explicit configuration of which headers to +// captured and then prune what they pass here. Including all headers can be a +// security risk - explicit configuration helps avoid leaking sensitive +// information. +// +// The User-Agent header is already captured in the http.user_agent attribute +// from ClientRequest and ServerRequest. Instrumentation may provide an option +// to capture that header here even though it is not recommended. Otherwise, +// instrumentation should filter that out of what is passed. +func HTTPResponseHeader(h http.Header) []attribute.KeyValue { + return hc.ResponseHeader(h) +} + +// httpConv are the HTTP semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type httpConv struct { + NetConv *netConv + + EnduserIDKey attribute.Key + HTTPClientIPKey attribute.Key + HTTPFlavorKey attribute.Key + HTTPMethodKey attribute.Key + HTTPRequestContentLengthKey attribute.Key + HTTPResponseContentLengthKey attribute.Key + HTTPRouteKey attribute.Key + HTTPSchemeHTTP attribute.KeyValue + HTTPSchemeHTTPS attribute.KeyValue + HTTPStatusCodeKey attribute.Key + HTTPTargetKey attribute.Key + HTTPURLKey attribute.Key + HTTPUserAgentKey attribute.Key +} + +var hc = &httpConv{ + NetConv: nc, + + EnduserIDKey: semconv.EnduserIDKey, + HTTPClientIPKey: semconv.HTTPClientIPKey, + HTTPFlavorKey: semconv.HTTPFlavorKey, + HTTPMethodKey: semconv.HTTPMethodKey, + HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, + HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, + HTTPRouteKey: semconv.HTTPRouteKey, + HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, + HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, + HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, + HTTPTargetKey: semconv.HTTPTargetKey, + HTTPURLKey: semconv.HTTPURLKey, + HTTPUserAgentKey: semconv.HTTPUserAgentKey, +} + +// ClientResponse returns attributes for an HTTP response received by a client +// from a server. The following attributes are returned if the related values +// are defined in resp: "http.status.code", "http.response_content_length". 
+// +// This does not add all OpenTelemetry required attributes for an HTTP event, +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. If a complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(ClientResponse(resp), ClientRequest(resp.Request)...) +func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { + var n int + if resp.StatusCode > 0 { + n++ + } + if resp.ContentLength > 0 { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + if resp.StatusCode > 0 { + attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) + } + if resp.ContentLength > 0 { + attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) + } + return attrs +} + +// ClientRequest returns attributes for an HTTP request made by a client. The +// following attributes are always returned: "http.url", "http.flavor", +// "http.method", "net.peer.name". The following attributes are returned if the +// related values are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "enduser.id". +func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { + n := 3 // URL, peer name, proto, and method. + var h string + if req.URL != nil { + h = req.URL.Host + } + peer, p := firstHostPort(h, req.Header.Get("Host")) + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) + if port > 0 { + n++ + } + useragent := req.UserAgent() + if useragent != "" { + n++ + } + if req.ContentLength > 0 { + n++ + } + userID, _, hasUserID := req.BasicAuth() + if hasUserID { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.method(req.Method)) + attrs = append(attrs, c.flavor(req.Proto)) + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, c.HTTPURLKey.String(u)) + + attrs = append(attrs, c.NetConv.PeerName(peer)) + if port > 0 { + attrs = append(attrs, c.NetConv.PeerPort(port)) + } + + if useragent != "" { + attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + } + + if l := req.ContentLength; l > 0 { + attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) + } + + if hasUserID { + attrs = append(attrs, c.EnduserIDKey.String(userID)) + } + + return attrs +} + +// ServerRequest returns attributes for an HTTP request received by a server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "http.target", "net.host.name". 
The following attributes are +// returned if they related values are defined in req: "net.host.port", +// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", +// "http.client_ip". +func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { + // TODO: This currently does not add the specification required + // `http.target` attribute. It has too high of a cardinality to safely be + // added. An alternate should be added, or this comment removed, when it is + // addressed by the specification. If it is ultimately decided to continue + // not including the attribute, the HTTPTargetKey field of the httpConv + // should be removed as well. + + n := 4 // Method, scheme, proto, and host name. + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + peer, peerPort := splitHostPort(req.RemoteAddr) + if peer != "" { + n++ + if peerPort > 0 { + n++ + } + } + useragent := req.UserAgent() + if useragent != "" { + n++ + } + userID, _, hasUserID := req.BasicAuth() + if hasUserID { + n++ + } + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.method(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.flavor(req.Proto)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + + if peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) + if peerPort > 0 { + attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) + } + } + + if useragent != "" { + attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + } + + if hasUserID { + attrs = append(attrs, c.EnduserIDKey.String(userID)) + } + + if clientIP != "" { + attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) + } + + return attrs +} + +// ServerRequestMetrics returns metric attributes for an HTTP request received +// by a server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "net.host.name". The following attributes are +// returned if they related values are defined in req: "net.host.port". 
+func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { + // TODO: This currently does not add the specification required + // `http.target` attribute. It has too high of a cardinality to safely be + // added. An alternate should be added, or this comment removed, when it is + // addressed by the specification. If it is ultimately decided to continue + // not including the attribute, the HTTPTargetKey field of the httpConv + // should be removed as well. + + n := 4 // Method, scheme, proto, and host name. + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.methodMetric(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.flavor(req.Proto)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + + return attrs +} + +func (c *httpConv) method(method string) attribute.KeyValue { + if method == "" { + return c.HTTPMethodKey.String(http.MethodGet) + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return c.HTTPMethodKey.String(method) +} + +func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return c.HTTPSchemeHTTPS + } + return c.HTTPSchemeHTTP +} + +func (c *httpConv) flavor(proto string) attribute.KeyValue { + switch proto { + case "HTTP/1.0": + return c.HTTPFlavorKey.String("1.0") + case "HTTP/1.1": + return c.HTTPFlavorKey.String("1.1") + case "HTTP/2": + return c.HTTPFlavorKey.String("2.0") + case "HTTP/3": + return c.HTTPFlavorKey.String("3.0") + default: + return c.HTTPFlavorKey.String(proto) + } +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +// Return the request host and port from the first non-empty source. +func firstHostPort(source ...string) (host string, port int) { + for _, hostport := range source { + host, port = splitHostPort(hostport) + if host != "" || port > 0 { + break + } + } + return +} + +// RequestHeader returns the contents of h as OpenTelemetry attributes. +func (c *httpConv) RequestHeader(h http.Header) []attribute.KeyValue { + return c.header("http.request.header", h) +} + +// ResponseHeader returns the contents of h as OpenTelemetry attributes. 
+func (c *httpConv) ResponseHeader(h http.Header) []attribute.KeyValue { + return c.header("http.response.header", h) +} + +func (c *httpConv) header(prefix string, h http.Header) []attribute.KeyValue { + key := func(k string) attribute.Key { + k = strings.ToLower(k) + k = strings.ReplaceAll(k, "-", "_") + k = fmt.Sprintf("%s.%s", prefix, k) + return attribute.Key(k) + } + + attrs := make([]attribute.KeyValue, 0, len(h)) + for k, v := range h { + attrs = append(attrs, key(k).StringSlice(v)) + } + return attrs +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func (c *httpConv) ClientStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func (c *httpConv) ServerStatus(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 500 { + return codes.Error, "" + } + return codes.Unset, "" +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go new file mode 100644 index 000000000..bde889343 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -0,0 +1,368 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconvutil/netconv.go.tmpl + +// Copyright The OpenTelemetry Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconvutil // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + +import ( + "net" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +// NetTransport returns a trace attribute describing the transport protocol of the +// passed network. See the net.Dial for information about acceptable network +// values. +func NetTransport(network string) attribute.KeyValue { + return nc.Transport(network) +} + +// NetClient returns trace attributes for a client network connection to address. +// See net.Dial for information about acceptable address values, address should +// be the same as the one used to create conn. If conn is nil, only network +// peer attributes will be returned that describe address. Otherwise, the +// socket level information about conn will also be included. +func NetClient(address string, conn net.Conn) []attribute.KeyValue { + return nc.Client(address, conn) +} + +// NetServer returns trace attributes for a network listener listening at address. 
+// See net.Listen for information about acceptable address values, address +// should be the same as the one used to create ln. If ln is nil, only network +// host attributes will be returned that describe address. Otherwise, the +// socket level information about ln will also be included. +func NetServer(address string, ln net.Listener) []attribute.KeyValue { + return nc.Server(address, ln) +} + +// netConv are the network semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type netConv struct { + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetSockFamilyKey attribute.Key + NetSockPeerAddrKey attribute.Key + NetSockPeerPortKey attribute.Key + NetSockHostAddrKey attribute.Key + NetSockHostPortKey attribute.Key + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportInProc attribute.KeyValue +} + +var nc = &netConv{ + NetHostNameKey: semconv.NetHostNameKey, + NetHostPortKey: semconv.NetHostPortKey, + NetPeerNameKey: semconv.NetPeerNameKey, + NetPeerPortKey: semconv.NetPeerPortKey, + NetSockFamilyKey: semconv.NetSockFamilyKey, + NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, + NetSockPeerPortKey: semconv.NetSockPeerPortKey, + NetSockHostAddrKey: semconv.NetSockHostAddrKey, + NetSockHostPortKey: semconv.NetSockHostPortKey, + NetTransportOther: semconv.NetTransportOther, + NetTransportTCP: semconv.NetTransportTCP, + NetTransportUDP: semconv.NetTransportUDP, + NetTransportInProc: semconv.NetTransportInProc, +} + +func (c *netConv) Transport(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return c.NetTransportTCP + case "udp", "udp4", "udp6": + return c.NetTransportUDP + case "unix", "unixgram", "unixpacket": + return c.NetTransportInProc + default: + // "ip:*", "ip4:*", and "ip6:*" all are considered other. + return c.NetTransportOther + } +} + +// Host returns attributes for a network host address. +func (c *netConv) Host(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.HostName(h)) + if p > 0 { + attrs = append(attrs, c.HostPort(int(p))) + } + return attrs +} + +// Server returns attributes for a network listener listening at address. See +// net.Listen for information about acceptable address values, address should +// be the same as the one used to create ln. If ln is nil, only network host +// attributes will be returned that describe address. Otherwise, the socket +// level information about ln will also be included. +func (c *netConv) Server(address string, ln net.Listener) []attribute.KeyValue { + if ln == nil { + return c.Host(address) + } + + lAddr := ln.Addr() + if lAddr == nil { + return c.Host(address) + } + + hostName, hostPort := splitHostPort(address) + sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) + network := lAddr.Network() + sockFamily := family(network, sockHostAddr) + + n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) + n += positiveInt(hostPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if hostName != "" { + attr = append(attr, c.HostName(hostName)) + if hostPort > 0 { + // Only if net.host.name is set should net.host.port be. 
+ attr = append(attr, c.HostPort(hostPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. + attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func (c *netConv) HostName(name string) attribute.KeyValue { + return c.NetHostNameKey.String(name) +} + +func (c *netConv) HostPort(port int) attribute.KeyValue { + return c.NetHostPortKey.Int(port) +} + +// Client returns attributes for a client network connection to address. See +// net.Dial for information about acceptable address values, address should be +// the same as the one used to create conn. If conn is nil, only network peer +// attributes will be returned that describe address. Otherwise, the socket +// level information about conn will also be included. +func (c *netConv) Client(address string, conn net.Conn) []attribute.KeyValue { + if conn == nil { + return c.Peer(address) + } + + lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() + + var network string + switch { + case lAddr != nil: + network = lAddr.Network() + case rAddr != nil: + network = rAddr.Network() + default: + return c.Peer(address) + } + + peerName, peerPort := splitHostPort(address) + var ( + sockFamily string + sockPeerAddr string + sockPeerPort int + sockHostAddr string + sockHostPort int + ) + + if lAddr != nil { + sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) + } + + if rAddr != nil { + sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) + } + + switch { + case sockHostAddr != "": + sockFamily = family(network, sockHostAddr) + case sockPeerAddr != "": + sockFamily = family(network, sockPeerAddr) + } + + n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) + n += positiveInt(peerPort, sockPeerPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if peerName != "" { + attr = append(attr, c.PeerName(peerName)) + if peerPort > 0 { + // Only if net.peer.name is set should net.peer.port be. + attr = append(attr, c.PeerPort(peerPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockPeerAddr != "" { + attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) + if sockPeerPort > 0 { + // Only if net.sock.peer.addr is set should net.sock.peer.port be. + attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) + } + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. + attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func family(network, address string) string { + switch network { + case "unix", "unixgram", "unixpacket": + return "unix" + default: + if ip := net.ParseIP(address); ip != nil { + if ip.To4() == nil { + return "inet6" + } + return "inet" + } + } + return "" +} + +func nonZeroStr(strs ...string) int { + var n int + for _, str := range strs { + if str != "" { + n++ + } + } + return n +} + +func positiveInt(ints ...int) int { + var n int + for _, i := range ints { + if i > 0 { + n++ + } + } + return n +} + +// Peer returns attributes for a network peer address. 
+func (c *netConv) Peer(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.PeerName(h)) + if p > 0 { + attrs = append(attrs, c.PeerPort(int(p))) + } + return attrs +} + +func (c *netConv) PeerName(name string) attribute.KeyValue { + return c.NetPeerNameKey.String(name) +} + +func (c *netConv) PeerPort(port int) attribute.KeyValue { + return c.NetPeerPortKey.Int(port) +} + +func (c *netConv) SockPeerAddr(addr string) attribute.KeyValue { + return c.NetSockPeerAddrKey.String(addr) +} + +func (c *netConv) SockPeerPort(port int) attribute.KeyValue { + return c.NetSockPeerPortKey.Int(port) +} + +// splitHostPort splits a network address hostport of the form "host", +// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", +// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and +// port. +// +// An empty host is returned if it is not provided or unparsable. A negative +// port is returned if it is not provided or unparsable. +func splitHostPort(hostport string) (host string, port int) { + port = -1 + + if strings.HasPrefix(hostport, "[") { + addrEnd := strings.LastIndex(hostport, "]") + if addrEnd < 0 { + // Invalid hostport. + return + } + if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + host = hostport[1:addrEnd] + return + } + } else { + if i := strings.LastIndex(hostport, ":"); i < 0 { + host = hostport + return + } + } + + host, pStr, err := net.SplitHostPort(hostport) + if err != nil { + return + } + + p, err := strconv.ParseUint(pStr, 10, 16) + if err != nil { + return + } + return host, int(p) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go new file mode 100644 index 000000000..26a51a180 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go @@ -0,0 +1,65 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "sync" + + "go.opentelemetry.io/otel/attribute" +) + +// Labeler is used to allow instrumented HTTP handlers to add custom attributes to +// the metrics recorded by the net/http instrumentation. +type Labeler struct { + mu sync.Mutex + attributes []attribute.KeyValue +} + +// Add attributes to a Labeler. +func (l *Labeler) Add(ls ...attribute.KeyValue) { + l.mu.Lock() + defer l.mu.Unlock() + l.attributes = append(l.attributes, ls...) +} + +// Get returns a copy of the attributes added to the Labeler. 
+func (l *Labeler) Get() []attribute.KeyValue { + l.mu.Lock() + defer l.mu.Unlock() + ret := make([]attribute.KeyValue, len(l.attributes)) + copy(ret, l.attributes) + return ret +} + +type labelerContextKeyType int + +const lablelerContextKey labelerContextKeyType = 0 + +func injectLabeler(ctx context.Context, l *Labeler) context.Context { + return context.WithValue(ctx, lablelerContextKey, l) +} + +// LabelerFromContext retrieves a Labeler instance from the provided context if +// one is available. If no Labeler was found in the provided context a new, empty +// Labeler is returned and the second return value is false. In this case it is +// safe to use the Labeler but any attributes added to it will not be used. +func LabelerFromContext(ctx context.Context) (*Labeler, bool) { + l, ok := ctx.Value(lablelerContextKey).(*Labeler) + if !ok { + l = &Labeler{} + } + return l, ok +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go new file mode 100644 index 000000000..e835cac12 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -0,0 +1,193 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + "net/http/httptrace" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +// Transport implements the http.RoundTripper interface and wraps +// outbound HTTP(S) requests with a span. +type Transport struct { + rt http.RoundTripper + + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace +} + +var _ http.RoundTripper = &Transport{} + +// NewTransport wraps the provided http.RoundTripper with one that +// starts a span and injects the span context into the outbound request headers. +// +// If the provided http.RoundTripper is nil, http.DefaultTransport will be used +// as the base http.RoundTripper. +func NewTransport(base http.RoundTripper, opts ...Option) *Transport { + if base == nil { + base = http.DefaultTransport + } + + t := Transport{ + rt: base, + } + + defaultOpts := []Option{ + WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)), + WithSpanNameFormatter(defaultTransportFormatter), + } + + c := newConfig(append(defaultOpts, opts...)...) 
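+	// As in NewMiddleware, caller options are applied after the defaults.
+	// The Tracer remains nil unless WithTracerProvider was supplied; when it
+	// is nil, RoundTrip falls back to the provider of the span found in the
+	// request context, or to the global provider.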
+ t.applyConfig(c) + + return &t +} + +func (t *Transport) applyConfig(c *config) { + t.tracer = c.Tracer + t.propagators = c.Propagators + t.spanStartOptions = c.SpanStartOptions + t.filters = c.Filters + t.spanNameFormatter = c.SpanNameFormatter + t.clientTrace = c.ClientTrace +} + +func defaultTransportFormatter(_ string, r *http.Request) string { + return "HTTP " + r.Method +} + +// RoundTrip creates a Span and propagates its context via the provided request's headers +// before handing the request to the configured base RoundTripper. The created span will +// end when the response body is closed or when a read from the body returns io.EOF. +func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { + for _, f := range t.filters { + if !f(r) { + // Simply pass through to the base RoundTripper if a filter rejects the request + return t.rt.RoundTrip(r) + } + } + + tracer := t.tracer + + if tracer == nil { + if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() { + tracer = newTracer(span.TracerProvider()) + } else { + tracer = newTracer(otel.GetTracerProvider()) + } + } + + opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options + + ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...) + + if t.clientTrace != nil { + ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx)) + } + + r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. + span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) + + res, err := t.rt.RoundTrip(r) + if err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + span.End() + return res, err + } + + span.SetAttributes(semconvutil.HTTPClientResponse(res)...) + span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + res.Body = newWrappedBody(span, res.Body) + + return res, err +} + +// newWrappedBody returns a new and appropriately scoped *wrappedBody as an +// io.ReadCloser. If the passed body implements io.Writer, the returned value +// will implement io.ReadWriteCloser. +func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser { + // The successful protocol switch responses will have a body that + // implement an io.ReadWriteCloser. Ensure this interface type continues + // to be satisfied if that is the case. + if _, ok := body.(io.ReadWriteCloser); ok { + return &wrappedBody{span: span, body: body} + } + + // Remove the implementation of the io.ReadWriteCloser and only implement + // the io.ReadCloser. + return struct{ io.ReadCloser }{&wrappedBody{span: span, body: body}} +} + +// wrappedBody is the response body type returned by the transport +// instrumentation to complete a span. Errors encountered when using the +// response body are recorded in span tracking the response. +// +// The span tracking the response is ended when this body is closed. +// +// If the response body implements the io.Writer interface (i.e. for +// successful protocol switches), the wrapped body also will. +type wrappedBody struct { + span trace.Span + body io.ReadCloser +} + +var _ io.ReadWriteCloser = &wrappedBody{} + +func (wb *wrappedBody) Write(p []byte) (int, error) { + // This will not panic given the guard in newWrappedBody. 
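+	// newWrappedBody returns a bare *wrappedBody (whose Write is reachable)
+	// only when the original body implemented io.ReadWriteCloser; otherwise
+	// the wrapper it returns exposes just io.ReadCloser and this method can
+	// never be called.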
+ n, err := wb.body.(io.Writer).Write(p) + if err != nil { + wb.span.RecordError(err) + wb.span.SetStatus(codes.Error, err.Error()) + } + return n, err +} + +func (wb *wrappedBody) Read(b []byte) (int, error) { + n, err := wb.body.Read(b) + + switch err { + case nil: + // nothing to do here but fall through to the return + case io.EOF: + wb.span.End() + default: + wb.span.RecordError(err) + wb.span.SetStatus(codes.Error, err.Error()) + } + return n, err +} + +func (wb *wrappedBody) Close() error { + wb.span.End() + if wb.body != nil { + return wb.body.Close() + } + return nil +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go new file mode 100644 index 000000000..bd41c1804 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +// Version is the current release version of the otelhttp instrumentation. +func Version() string { + return "0.46.1" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +// +// Deprecated: Use [Version] instead. +func SemVersion() string { + return Version() +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go new file mode 100644 index 000000000..11a35ed16 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go @@ -0,0 +1,99 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + +import ( + "context" + "io" + "net/http" + + "go.opentelemetry.io/otel/propagation" +) + +var _ io.ReadCloser = &bodyWrapper{} + +// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. 
+type bodyWrapper struct { + io.ReadCloser + record func(n int64) // must not be nil + + read int64 + err error +} + +func (w *bodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + w.read += n1 + w.err = err + w.record(n1) + return n, err +} + +func (w *bodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +var _ http.ResponseWriter = &respWriterWrapper{} + +// respWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode. +// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) +// that may be useful when using it in real life situations. +type respWriterWrapper struct { + http.ResponseWriter + record func(n int64) // must not be nil + + // used to inject the header + ctx context.Context + + props propagation.TextMapPropagator + + written int64 + statusCode int + err error + wroteHeader bool +} + +func (w *respWriterWrapper) Header() http.Header { + return w.ResponseWriter.Header() +} + +func (w *respWriterWrapper) Write(p []byte) (int, error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.record(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Blocking consecutive calls to WriteHeader alters expected behavior and will +// remove warning logs from net/http where developers will notice incorrect handler implementations. +func (w *respWriterWrapper) WriteHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +} diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json index d10eb13e8..d5eff67a6 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-api.json @@ -9192,7 +9192,7 @@ ] }, "listManagedInstances": { - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. 
The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listManagedInstances", @@ -11098,6 +11098,55 @@ "https://www.googleapis.com/auth/compute" ] }, + "addNetworkInterface": { + "description": "Adds a network interface to an instance.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/addNetworkInterface", + "httpMethod": "POST", + "id": "compute.instances.addNetworkInterface", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "The instance name for this request stored as resource_id. Name should conform to RFC1035 or be an unsigned long integer.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/instances/{instance}/addNetworkInterface", + "request": { + "$ref": "InstancesAddNetworkInterfaceRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "addResourcePolicies": { "description": "Adds existing resource policies to an instance. You can only add one policy right now which will be applied to this instance for scheduling live migrations.", "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/addResourcePolicies", @@ -13379,7 +13428,7 @@ ], "parameters": { "discardLocalSsd": { - "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", "location": "query", "type": "boolean" }, @@ -13436,7 +13485,7 @@ ], "parameters": { "discardLocalSsd": { - "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "description": "This property is required if the instance has any attached Local SSD disks. 
If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", "location": "query", "type": "boolean" }, @@ -26215,7 +26264,7 @@ ] }, "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -40966,7 +41015,7 @@ ] }, "setSslPolicy": { - "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. They do not affect the connection between the load balancer and the backends.", "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", "httpMethod": "POST", "id": "compute.targetSslProxies.setSslPolicy", @@ -43552,7 +43601,7 @@ } } }, - "revision": "20231031", + "revision": "20231128", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -44807,6 +44856,10 @@ "description": "Whether to enable UEFI networking for instance creation.", "type": "boolean" }, + "enableWatchdogTimer": { + "description": "Whether to enable the watchdog timer.", + "type": "boolean" + }, "numaNodeCount": { "description": "The number of vNUMA nodes.", "format": "int32", @@ -46752,7 +46805,7 @@ "id": "BackendService", "properties": { "affinityCookieTtlSec": { - "description": "Lifetime of cookies in seconds. This setting is applicable to external and internal HTTP(S) load balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "Lifetime of cookies in seconds. This setting is applicable to Application Load Balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). 
Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -46787,7 +46840,7 @@ }, "connectionTrackingPolicy": { "$ref": "BackendServiceConnectionTrackingPolicy", - "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for Network Load Balancing and Internal TCP/UDP Load Balancing." + "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for external passthrough Network Load Balancers and internal passthrough Network Load Balancers." }, "consistentHash": { "$ref": "ConsistentHashLoadBalancerSettings", @@ -46820,18 +46873,22 @@ "type": "string" }, "enableCDN": { - "description": "If true, enables Cloud CDN for the backend service of an external HTTP(S) load balancer.", + "description": "If true, enables Cloud CDN for the backend service of a global external Application Load Balancer.", "type": "boolean" }, "failoverPolicy": { "$ref": "BackendServiceFailoverPolicy", - "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)." + "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)." }, "fingerprint": { "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a BackendService.", "format": "byte", "type": "string" }, + "haPolicy": { + "$ref": "BackendServiceHAPolicy", + "description": "Configuring haPolicy is not supported." + }, "healthChecks": { "description": "The list of URLs to the healthChecks, httpHealthChecks (legacy), or httpsHealthChecks (legacy) resource for health checking this backend service. Not all backend services support legacy health checks. See Load balancer guide. Currently, at most one health check can be specified for each backend service. Backend services with instance group or zonal NEG backends must have a health check. Backend services with internet or serverless NEG backends must not have a health check.", "items": { @@ -46841,7 +46898,7 @@ }, "iap": { "$ref": "BackendServiceIAP", - "description": "The configurations for Identity-Aware Proxy on this resource. Not available for Internal TCP/UDP Load Balancing and Network Load Balancing." + "description": "The configurations for Identity-Aware Proxy on this resource. Not available for internal passthrough Network Load Balancers and external passthrough Network Load Balancers." 
}, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -46849,7 +46906,7 @@ "type": "string" }, "ipAddressSelectionPolicy": { - "description": "Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). ", + "description": "Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced global external Application Load Balancer (load balancing scheme EXTERNAL_MANAGED), - Regional external Application Load Balancer, - Internal proxy Network Load Balancer (load balancing scheme INTERNAL_MANAGED), - Regional internal Application Load Balancer (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). 
", "enum": [ "IPV4_ONLY", "IPV6_ONLY", @@ -46880,10 +46937,10 @@ "INVALID_LOAD_BALANCING_SCHEME" ], "enumDescriptions": [ - "Signifies that this will be used for external HTTP(S), SSL Proxy, TCP Proxy, or Network Load Balancing", - "Signifies that this will be used for External Managed HTTP(S) Load Balancing.", - "Signifies that this will be used for Internal TCP/UDP Load Balancing.", - "Signifies that this will be used for Internal HTTP(S) Load Balancing.", + "Signifies that this will be used for classic Application Load Balancers, global external proxy Network Load Balancers, or external passthrough Network Load Balancers.", + "Signifies that this will be used for global external Application Load Balancers, regional external Application Load Balancers, or regional external proxy Network Load Balancers.", + "Signifies that this will be used for internal passthrough Network Load Balancers.", + "Signifies that this will be used for internal Application Load Balancers.", "Signifies that this will be used by Traffic Director.", "" ], @@ -46950,12 +47007,12 @@ }, "port": { "deprecated": true, - "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port.", + "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port.", "format": "int32", "type": "integer" }, "portName": { - "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port_name.", + "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port_name.", "type": "string" }, "protocol": { @@ -47356,11 +47413,11 @@ "type": "string" }, "enableStrongAffinity": { - "description": "Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly.", + "description": "Enable Strong Session Affinity for external passthrough Network Load Balancers. This option is not available publicly.", "type": "boolean" }, "idleTimeoutSec": { - "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly.", + "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). 
For internal passthrough Network Load Balancers: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For external passthrough Network Load Balancers the default is 60 seconds. This option is not available publicly.", "format": "int32", "type": "integer" }, @@ -47382,7 +47439,7 @@ "type": "object" }, "BackendServiceFailoverPolicy": { - "description": "For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", + "description": "For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", "id": "BackendServiceFailoverPolicy", "properties": { "disableConnectionDrainOnFailover": { @@ -47390,7 +47447,7 @@ "type": "boolean" }, "dropTrafficIfUnhealthy": { - "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.", + "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). 
The default is false.", "type": "boolean" }, "failoverRatio": { @@ -47426,6 +47483,52 @@ }, "type": "object" }, + "BackendServiceHAPolicy": { + "id": "BackendServiceHAPolicy", + "properties": { + "fastIPMove": { + "description": "Enabling fastIPMove is not supported.", + "enum": [ + "DISABLED", + "GARP_RA" + ], + "enumDescriptions": [ + "", + "" + ], + "type": "string" + }, + "leader": { + "$ref": "BackendServiceHAPolicyLeader", + "description": "Setting a leader is not supported." + } + }, + "type": "object" + }, + "BackendServiceHAPolicyLeader": { + "id": "BackendServiceHAPolicyLeader", + "properties": { + "backendGroup": { + "description": "Setting backendGroup is not supported.", + "type": "string" + }, + "networkEndpoint": { + "$ref": "BackendServiceHAPolicyLeaderNetworkEndpoint", + "description": "Setting a network endpoint as leader is not supported." + } + }, + "type": "object" + }, + "BackendServiceHAPolicyLeaderNetworkEndpoint": { + "id": "BackendServiceHAPolicyLeaderNetworkEndpoint", + "properties": { + "instance": { + "description": "Specifying the instance name of a leader is not supported.", + "type": "string" + } + }, + "type": "object" + }, "BackendServiceIAP": { "description": "Identity-Aware Proxy", "id": "BackendServiceIAP", @@ -49927,6 +50030,11 @@ "$ref": "DiskResourceStatus", "description": "[Output Only] Status information for the disk resource." }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -52682,7 +52790,7 @@ "type": "object" }, "ForwardingRule": { - "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/alpha/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/alpha/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud Platform load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", + "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/alpha/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/alpha/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", "id": "ForwardingRule", "properties": { "IPAddress": { @@ -52718,7 +52826,7 @@ "type": "boolean" }, "allowGlobalAccess": { - "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. 
Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.", + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the internal passthrough Network Load Balancers, the regional internal Application Load Balancer, and the regional internal proxy Network Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.", "type": "boolean" }, "allowPscGlobalAccess": { @@ -52730,11 +52838,11 @@ "type": "boolean" }, "backendService": { - "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for Internal TCP/UDP Load Balancing and Network Load Balancing; must be omitted for all other load balancer types.", + "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for internal and external passthrough Network Load Balancers; must be omitted for all other load balancer types.", "type": "string" }, "baseForwardingRule": { - "description": "[Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified.", + "description": "[Output Only] The URL for the corresponding base forwarding rule. By base forwarding rule, we mean the forwarding rule that has the same IP address, protocol, and port settings with the current forwarding rule, but without sourceIPRanges specified. Always empty if the current forwarding rule does not have sourceIPRanges specified.", "type": "string" }, "creationTimestamp": { @@ -52779,7 +52887,7 @@ }, "kind": { "default": "compute#forwardingRule", - "description": "[Output Only] Type of the resource. Always compute#forwardingRule for Forwarding Rule resources.", + "description": "[Output Only] Type of the resource. Always compute#forwardingRule for forwarding rule resources.", "type": "string" }, "labelFingerprint": { @@ -52827,7 +52935,7 @@ "type": "string" }, "network": { - "description": "This field is not used for global external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "description": "This field is not used for global external load balancing. For internal passthrough Network Load Balancers, this field identifies the network that the load balanced IP should belong to for this forwarding rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. 
For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", "type": "string" }, "networkTier": { @@ -52864,7 +52972,7 @@ "type": "array" }, "pscConnectionId": { - "description": "[Output Only] The PSC connection id of the PSC Forwarding Rule.", + "description": "[Output Only] The PSC connection id of the PSC forwarding rule.", "format": "uint64", "type": "string" }, @@ -52907,23 +53015,23 @@ "type": "array" }, "serviceLabel": { - "description": "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", + "description": "An optional prefix to the service name for this forwarding rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, "serviceName": { - "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.", + "description": "[Output Only] The internal fully qualified service name for this forwarding rule. This field is only used for internal load balancing.", "type": "string" }, "sourceIpRanges": { - "description": "If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", + "description": "If not empty, this forwarding rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a forwarding rule can only have up to 64 source IP ranges, and this field can only be used with a regional forwarding rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", "items": { "type": "string" }, "type": "array" }, "subnetwork": { - "description": "This field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule, used in internal load balancing and network load balancing with IPv6. If the network specified is in auto subnet mode, this field is optional. 
However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", + "description": "This field identifies the subnetwork that the load balanced IP should belong to for this forwarding rule, used with internal load balancers and external passthrough Network Load Balancers with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", "type": "string" }, "target": { @@ -53254,7 +53362,7 @@ "type": "object" }, "ForwardingRuleServiceDirectoryRegistration": { - "description": "Describes the auto-registration of the Forwarding Rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule.", + "description": "Describes the auto-registration of the forwarding rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this forwarding rule.", "id": "ForwardingRuleServiceDirectoryRegistration", "properties": { "namespace": { @@ -53266,7 +53374,7 @@ "type": "string" }, "serviceDirectoryRegion": { - "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region.", + "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs forwarding rules on the same network should use the same Service Directory region.", "type": "string" } }, @@ -53410,12 +53518,12 @@ "id": "FutureReservation", "properties": { "autoCreatedReservationsDeleteTime": { - "description": "Future timestamp when the FR auto-created reservations will be deleted by GCE. Format of this field must be a valid href=\"https://www.ietf.org/rfc/rfc3339.txt\"\u003eRFC3339 value.", + "description": "Future timestamp when the FR auto-created reservations will be deleted by Compute Engine. Format of this field must be a valid href=\"https://www.ietf.org/rfc/rfc3339.txt\"\u003eRFC3339 value.", "type": "string" }, "autoCreatedReservationsDuration": { "$ref": "Duration", - "description": "Specifies the duration of auto-created reservations. It represents relative time to future reservation start_time when auto-created reservations will be automatically deleted by GCE. Duration time unit is represented as a count of seconds and fractions of seconds at nanosecond resolution." + "description": "Specifies the duration of auto-created reservations. It represents relative time to future reservation start_time when auto-created reservations will be automatically deleted by Compute Engine. Duration time unit is represented as a count of seconds and fractions of seconds at nanosecond resolution." }, "autoDeleteAutoCreatedReservations": { "description": "Setting for enabling or disabling automatic deletion for auto-created reservation. If set to true, auto-created reservations will be deleted at Future Reservation's end time (default) or at user's defined timestamp if any of the [auto_created_reservations_delete_time, auto_created_reservations_duration] values is specified. 
For keeping auto-created reservation indefinitely, this value should be set to false.", @@ -54174,7 +54282,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -54445,7 +54553,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. 
USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -54512,7 +54620,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. 
For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -54579,7 +54687,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. 
For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -54630,7 +54738,7 @@ "type": "object" }, "HealthCheck": { - "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/alpha/regionHealthChecks) * [Global](/compute/docs/reference/rest/alpha/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** The following load balancer can use either regional or global health check: * Internal TCP/UDP load balancer The following load balancers require regional health check: * Internal HTTP(S) load balancer * Backend service-based network load balancer Traffic Director and the following load balancers require global health check: * External HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load balancer The following load balancer require [legacy HTTP health checks](/compute/docs/reference/rest/v1/httpHealthChecks): * Target pool-based network load balancer **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.", + "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/alpha/regionHealthChecks) * [Global](/compute/docs/reference/rest/alpha/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** Health check requirements vary depending on the type of load balancer. For details about the type of health check supported for each load balancer and corresponding backend type, see Health checks overview: Load balancer guide. **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -56913,6 +57021,11 @@ "$ref": "RolloutPolicy", "description": "A rollout policy to apply to this image. When specified, the rollout policy overrides per-zone references to the image via the associated image family. The rollout policy restricts the zones where this image is accessible when using a zonal image family reference. When the rollout policy does not include the user specified zone, or if the zone is rolled out, this image is accessible. 
The rollout policy for this image is read-only, except for allowlisted users. This field might not be configured. To view the latest non-deprecated image in a specific zone, use the imageFamilyViews.get method." }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -57408,6 +57521,10 @@ "$ref": "ResourceStatus", "description": "[Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field." }, + "satisfiesPzi": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -58213,6 +58330,10 @@ }, "type": "array" }, + "params": { + "$ref": "InstanceGroupManagerParams", + "description": "Input only. Additional params passed with the request, but not persisted as part of resource payload." + }, "region": { "description": "[Output Only] The URL of the region where the managed instance group resides (for regional resources).", "type": "string" @@ -58829,6 +58950,20 @@ }, "type": "object" }, + "InstanceGroupManagerParams": { + "description": "Input only additional params for instance group manager creation.", + "id": "InstanceGroupManagerParams", + "properties": { + "resourceManagerTags": { + "additionalProperties": { + "type": "string" + }, + "description": "Resource manager tags to be bound to the instance group manager. Tag keys and values have the same definition as resource manager tags. Keys must be in the format `tagKeys/123`, and values are in the format `tagValues/456`. The field is allowed for INSERT only.", + "type": "object" + } + }, + "type": "object" + }, "InstanceGroupManagerResizeRequest": { "description": "InstanceGroupManagerResizeRequest represents a request to create a number of VMs: either immediately or by queuing the request for the specified time. This resize request is nested under InstanceGroupManager and the VMs created by this request are added to the owning InstanceGroupManager.", "id": "InstanceGroupManagerResizeRequest", @@ -59138,14 +59273,14 @@ "type": "integer" }, "mode": { - "description": "Defines behaviour of using instances from standby pool to resize MIG.", + "description": "Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. The default mode is `MANUAL`.", "enum": [ "MANUAL", "SCALE_OUT_POOL" ], "enumDescriptions": [ - "MIG does not automatically stop/start or suspend/resume VMs.", - "MIG automatically resumes and starts VMs when it scales out, and replenishes the standby pool afterwards." + "MIG does not automatically resume or start VMs in the standby pool when the group scales out.", + "MIG automatically resumes or starts VMs in the standby pool when the group scales out, and replenishes the standby pool afterwards." ], "type": "string" } @@ -61496,6 +61631,16 @@ }, "type": "object" }, + "InstancesAddNetworkInterfaceRequest": { + "id": "InstancesAddNetworkInterfaceRequest", + "properties": { + "network_interface": { + "$ref": "NetworkInterface", + "description": "The new network interface to add." 
+ } + }, + "type": "object" + }, "InstancesAddResourcePoliciesRequest": { "id": "InstancesAddResourcePoliciesRequest", "properties": { @@ -61959,6 +62104,11 @@ "$ref": "InstantSnapshotResourceStatus", "description": "[Output Only] Status information for the instant snapshot resource." }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -65706,6 +65856,11 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -81536,6 +81691,10 @@ "bfdStatus": { "$ref": "BfdStatus" }, + "enableIpv4": { + "description": "Enable IPv4 traffic over BGP Peer. It is enabled by default if the peerIpAddress is version 4.", + "type": "boolean" + }, "enableIpv6": { "description": "Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default.", "type": "boolean" @@ -81544,6 +81703,10 @@ "description": "IP address of the local BGP interface.", "type": "string" }, + "ipv4NexthopAddress": { + "description": "IPv4 address of the local BGP interface.", + "type": "string" + }, "ipv6NexthopAddress": { "description": "IPv6 address of the local BGP interface.", "type": "string" @@ -81569,6 +81732,10 @@ "description": "IP address of the remote BGP interface.", "type": "string" }, + "peerIpv4NexthopAddress": { + "description": "IPv4 address of the remote BGP interface.", + "type": "string" + }, "peerIpv6NexthopAddress": { "description": "IPv6 address of the remote BGP interface.", "type": "string" @@ -81598,11 +81765,17 @@ "statusReason": { "description": "Indicates why particular status was returned.", "enum": [ + "IPV4_PEER_ON_IPV6_ONLY_CONNECTION", + "IPV6_PEER_ON_IPV4_ONLY_CONNECTION", "MD5_AUTH_INTERNAL_PROBLEM", + "MISSING_NETWORK_CONNECTIVITY_CENTER_SPOKE", "STATUS_REASON_UNSPECIFIED" ], "enumDescriptions": [ + "BGP peer disabled because it requires IPv4 but the underlying connection is IPv6-only.", + "BGP peer disabled because it requires IPv6 but the underlying connection is IPv4-only.", "Indicates internal problems with configuration of MD5 authentication. This particular reason can only be returned when md5AuthEnabled is true and status is DOWN.", + "BGP peer disabled because it is not labeled as an NCC spoke. Currently, BGP peers using SD-WAN connectivity will be disabled for this reason.", "" ], "type": "string" @@ -82289,7 +82462,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. 
Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -83245,10 +83418,58 @@ "format": "float", "type": "number" }, + "detectionAbsoluteQps": { + "format": "float", + "type": "number" + }, + "detectionLoadThreshold": { + "format": "float", + "type": "number" + }, + "detectionRelativeToBaselineQps": { + "format": "float", + "type": "number" + }, "name": { "description": "The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" + }, + "trafficGranularityConfigs": { + "description": "Configuration options for enabling Adaptive Protection to operate on specified granular traffic units.", + "items": { + "$ref": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig": { + "description": "Configurations to specifc granular traffic units processed by Adaptive Protection.", + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig", + "properties": { + "enableEachUniqueValue": { + "description": "If enabled, traffic matching each unique value for the specified type constitutes a separate traffic unit. 
It can only be set to true if `value` is empty.", + "type": "boolean" + }, + "type": { + "description": "Type of this configuration.", + "enum": [ + "HTTP_HEADER_HOST", + "HTTP_PATH", + "UNSPECIFIED_TYPE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "value": { + "description": "Requests that match this value constitute a granular traffic unit.", + "type": "string" } }, "type": "object" @@ -83987,7 +84208,7 @@ "type": "string" }, "enforceOnKey": { - "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. 
- USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. ", "enum": [ "ALL", "ALL_IPS", @@ -83997,6 +84218,8 @@ "IP", "REGION_CODE", "SNI", + "TLS_JA3_FINGERPRINT", + "USER_IP", "XFF_IP" ], "enumDeprecated": [ @@ -84008,6 +84231,8 @@ false, false, false, + false, + false, false ], "enumDescriptions": [ @@ -84019,6 +84244,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -84061,7 +84288,7 @@ "type": "string" }, "enforceOnKeyType": { - "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. 
- REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. ", "enum": [ "ALL", "ALL_IPS", @@ -84071,6 +84298,8 @@ "IP", "REGION_CODE", "SNI", + "TLS_JA3_FINGERPRINT", + "USER_IP", "XFF_IP" ], "enumDeprecated": [ @@ -84082,6 +84311,8 @@ false, false, false, + false, + false, false ], "enumDescriptions": [ @@ -84093,6 +84324,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -85411,6 +85644,11 @@ "description": "[Output Only] URL of the region where the snapshot resides. Only applicable for regional snapshots.", "type": "string" }, + "satisfiesPzi": { + "description": "Output only. Reserved for future use.", + "readOnly": true, + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -87217,7 +87455,7 @@ "type": "object" }, "SslPolicy": { - "description": "Represents an SSL Policy resource. Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.", + "description": "Represents an SSL Policy resource. Use SSL policies to control SSL features, such as versions and cipher suites, that are offered by Application Load Balancers and proxy Network Load Balancers. For more information, read SSL policies overview.", "id": "SslPolicy", "properties": { "creationTimestamp": { @@ -89769,7 +90007,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. 
The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -91656,7 +91894,7 @@ "type": "object" }, "TargetPool": { - "description": "Represents a Target Pool resource. Target pools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.", + "description": "Represents a Target Pool resource. Target pools are used with external passthrough Network Load Balancers. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.", "id": "TargetPool", "properties": { "backupPool": { @@ -92323,7 +92561,7 @@ "type": "object" }, "TargetSslProxy": { - "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a SSL Proxy load balancer. Global forwarding rules reference a target SSL proxy, and the target proxy then references an external backend service. For more information, read Using Target Proxies.", + "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target SSL proxy, and the target proxy then references a backend service. For more information, read Proxy Network Load Balancer overview.", "id": "TargetSslProxy", "properties": { "certificateMap": { @@ -92702,7 +92940,7 @@ "type": "object" }, "TargetTcpProxy": { - "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a TCP Proxy load balancer. Global forwarding rules reference target TCP proxy, and the target proxy then references an external backend service. For more information, read TCP Proxy Load Balancing overview.", + "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target TCP proxy, and the target proxy then references a backend service. 
For more information, read Proxy Network Load Balancer overview.", "id": "TargetTcpProxy", "properties": { "creationTimestamp": { diff --git a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go index 38a0a57be..90af9285d 100644 --- a/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.alpha/compute-gen.go @@ -3068,6 +3068,9 @@ type AdvancedMachineFeatures struct { // creation. EnableUefiNetworking bool `json:"enableUefiNetworking,omitempty"` + // EnableWatchdogTimer: Whether to enable the watchdog timer. + EnableWatchdogTimer bool `json:"enableWatchdogTimer,omitempty"` + // NumaNodeCount: The number of vNUMA nodes. NumaNodeCount int64 `json:"numaNodeCount,omitempty"` @@ -6100,13 +6103,13 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { // For more information, see Backend Services. type BackendService struct { // AffinityCookieTtlSec: Lifetime of cookies in seconds. This setting is - // applicable to external and internal HTTP(S) load balancers and - // Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session - // affinity. If set to 0, the cookie is non-persistent and lasts only - // until the end of the browser session (or equivalent). The maximum - // allowed value is two weeks (1,209,600). Not supported when the - // backend service is referenced by a URL map that is bound to target - // gRPC proxy that has validateForProxyless field set to true. + // applicable to Application Load Balancers and Traffic Director and + // requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to + // 0, the cookie is non-persistent and lasts only until the end of the + // browser session (or equivalent). The maximum allowed value is two + // weeks (1,209,600). Not supported when the backend service is + // referenced by a URL map that is bound to target gRPC proxy that has + // validateForProxyless field set to true. AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` // Backends: The list of backends that serve this BackendService. @@ -6132,8 +6135,8 @@ type BackendService struct { // ConnectionTrackingPolicy: Connection Tracking configuration for this // BackendService. Connection tracking policy settings are only - // available for Network Load Balancing and Internal TCP/UDP Load - // Balancing. + // available for external passthrough Network Load Balancers and + // internal passthrough Network Load Balancers. ConnectionTrackingPolicy *BackendServiceConnectionTrackingPolicy `json:"connectionTrackingPolicy,omitempty"` // ConsistentHash: Consistent Hash-based load balancing can be used to @@ -6172,15 +6175,15 @@ type BackendService struct { // security policy associated with this backend service. EdgeSecurityPolicy string `json:"edgeSecurityPolicy,omitempty"` - // EnableCDN: If true, enables Cloud CDN for the backend service of an - // external HTTP(S) load balancer. + // EnableCDN: If true, enables Cloud CDN for the backend service of a + // global external Application Load Balancer. EnableCDN bool `json:"enableCDN,omitempty"` // FailoverPolicy: Requires at least one backend instance group to be // defined as a backup (failover) backend. 
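The AdvancedMachineFeatures hunk above adds an EnableWatchdogTimer flag. A minimal, illustrative sketch using only the struct regenerated in this diff; how the value is attached to an instance or template is assumed and not shown:

package sketch

import (
	computealpha "google.golang.org/api/compute/v0.alpha"
)

// watchdogFeatures returns an AdvancedMachineFeatures value with the new
// watchdog timer flag enabled. Wiring it into an Instance or
// InstanceTemplate is outside this sketch.
func watchdogFeatures() *computealpha.AdvancedMachineFeatures {
	return &computealpha.AdvancedMachineFeatures{
		EnableWatchdogTimer: true,
	}
}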
For load balancers that have - // configurable failover: Internal TCP/UDP Load Balancing + // configurable failover: Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) - // and external TCP/UDP Load Balancing + // and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). FailoverPolicy *BackendServiceFailoverPolicy `json:"failoverPolicy,omitempty"` @@ -6193,6 +6196,9 @@ type BackendService struct { // BackendService. Fingerprint string `json:"fingerprint,omitempty"` + // HaPolicy: Configuring haPolicy is not supported. + HaPolicy *BackendServiceHAPolicy `json:"haPolicy,omitempty"` + // HealthChecks: The list of URLs to the healthChecks, httpHealthChecks // (legacy), or httpsHealthChecks (legacy) resource for health checking // this backend service. Not all backend services support legacy health @@ -6204,8 +6210,8 @@ type BackendService struct { HealthChecks []string `json:"healthChecks,omitempty"` // Iap: The configurations for Identity-Aware Proxy on this resource. - // Not available for Internal TCP/UDP Load Balancing and Network Load - // Balancing. + // Not available for internal passthrough Network Load Balancers and + // external passthrough Network Load Balancers. Iap *BackendServiceIAP `json:"iap,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -6225,13 +6231,13 @@ type BackendService struct { // backend service (Instance Group, Managed Instance Group, Network // Endpoint Group), regardless of traffic from the client to the proxy. // Only IPv6 health checks are used to check the health of the backends. - // This field is applicable to either: - Advanced Global External HTTPS - // Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional - // External HTTPS Load Balancing, - Internal TCP Proxy (load balancing - // scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing - // (load balancing scheme INTERNAL_MANAGED), - Traffic Director with - // Envoy proxies and proxyless gRPC (load balancing scheme - // INTERNAL_SELF_MANAGED). + // This field is applicable to either: - Advanced global external + // Application Load Balancer (load balancing scheme EXTERNAL_MANAGED), - + // Regional external Application Load Balancer, - Internal proxy Network + // Load Balancer (load balancing scheme INTERNAL_MANAGED), - Regional + // internal Application Load Balancer (load balancing scheme + // INTERNAL_MANAGED), - Traffic Director with Envoy proxies and + // proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). // // Possible values: // "IPV4_ONLY" - Only send IPv4 traffic to the backends of the Backend @@ -6259,14 +6265,16 @@ type BackendService struct { // another. For more information, refer to Choosing a load balancer. // // Possible values: - // "EXTERNAL" - Signifies that this will be used for external HTTP(S), - // SSL Proxy, TCP Proxy, or Network Load Balancing - // "EXTERNAL_MANAGED" - Signifies that this will be used for External - // Managed HTTP(S) Load Balancing. - // "INTERNAL" - Signifies that this will be used for Internal TCP/UDP - // Load Balancing. - // "INTERNAL_MANAGED" - Signifies that this will be used for Internal - // HTTP(S) Load Balancing. + // "EXTERNAL" - Signifies that this will be used for classic + // Application Load Balancers, global external proxy Network Load + // Balancers, or external passthrough Network Load Balancers. 
+ // "EXTERNAL_MANAGED" - Signifies that this will be used for global + // external Application Load Balancers, regional external Application + // Load Balancers, or regional external proxy Network Load Balancers. + // "INTERNAL" - Signifies that this will be used for internal + // passthrough Network Load Balancers. + // "INTERNAL_MANAGED" - Signifies that this will be used for internal + // Application Load Balancers. // "INTERNAL_SELF_MANAGED" - Signifies that this will be used by // Traffic Director. // "INVALID_LOAD_BALANCING_SCHEME" @@ -6408,16 +6416,18 @@ type BackendService struct { OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` // Port: Deprecated in favor of portName. The TCP port to connect on the - // backend. The default value is 80. For Internal TCP/UDP Load Balancing - // and Network Load Balancing, omit port. + // backend. The default value is 80. For internal passthrough Network + // Load Balancers and external passthrough Network Load Balancers, omit + // port. Port int64 `json:"port,omitempty"` // PortName: A named port on a backend instance group representing the // port for communication to the backend VMs in that group. The named // port must be defined on each backend instance group // (https://cloud.google.com/load-balancing/docs/backend-service#named_ports). - // This parameter has no meaning if the backends are NEGs. For Internal - // TCP/UDP Load Balancing and Network Load Balancing, omit port_name. + // This parameter has no meaning if the backends are NEGs. For internal + // passthrough Network Load Balancers and external passthrough Network + // Load Balancers, omit port_name. PortName string `json:"portName,omitempty"` // Protocol: The protocol this BackendService uses to communicate with @@ -7028,18 +7038,19 @@ type BackendServiceConnectionTrackingPolicy struct { // "NEVER_PERSIST" ConnectionPersistenceOnUnhealthyBackends string `json:"connectionPersistenceOnUnhealthyBackends,omitempty"` - // EnableStrongAffinity: Enable Strong Session Affinity for Network Load - // Balancing. This option is not available publicly. + // EnableStrongAffinity: Enable Strong Session Affinity for external + // passthrough Network Load Balancers. This option is not available + // publicly. EnableStrongAffinity bool `json:"enableStrongAffinity,omitempty"` // IdleTimeoutSec: Specifies how long to keep a Connection Tracking - // entry while there is no matching traffic (in seconds). For Internal - // TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the - // maximum is 16 hours. - It can be set only if Connection Tracking is - // less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, - // CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For - // Network Load Balancer the default is 60 seconds. This option is not - // available publicly. + // entry while there is no matching traffic (in seconds). For internal + // passthrough Network Load Balancers: - The minimum (default) is 10 + // minutes and the maximum is 16 hours. - It can be set only if + // Connection Tracking is less than 5-tuple (i.e. Session Affinity is + // CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking + // Mode is PER_SESSION). For external passthrough Network Load Balancers + // the default is 60 seconds. This option is not available publicly. IdleTimeoutSec int64 `json:"idleTimeoutSec,omitempty"` // TrackingMode: Specifies the key used for connection tracking. 
There @@ -7085,9 +7096,9 @@ func (s *BackendServiceConnectionTrackingPolicy) MarshalJSON() ([]byte, error) { } // BackendServiceFailoverPolicy: For load balancers that have -// configurable failover: Internal TCP/UDP Load Balancing +// configurable failover: Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) -// and external TCP/UDP Load Balancing +// and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). // On failover or failback, this field indicates whether connection // draining will be honored. Google Cloud has a fixed connection @@ -7106,9 +7117,9 @@ type BackendServiceFailoverPolicy struct { // unhealthy.If set to false, connections are distributed among all // primary VMs when all primary and all backup backend VMs are // unhealthy. For load balancers that have configurable failover: - // Internal TCP/UDP Load Balancing + // Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) - // and external TCP/UDP Load Balancing + // and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). // The default is false. DropTrafficIfUnhealthy bool `json:"dropTrafficIfUnhealthy,omitempty"` @@ -7204,6 +7215,98 @@ func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type BackendServiceHAPolicy struct { + // FastIPMove: Enabling fastIPMove is not supported. + // + // Possible values: + // "DISABLED" + // "GARP_RA" + FastIPMove string `json:"fastIPMove,omitempty"` + + // Leader: Setting a leader is not supported. + Leader *BackendServiceHAPolicyLeader `json:"leader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "FastIPMove") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "FastIPMove") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceHAPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceHAPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceHAPolicyLeader struct { + // BackendGroup: Setting backendGroup is not supported. + BackendGroup string `json:"backendGroup,omitempty"` + + // NetworkEndpoint: Setting a network endpoint as leader is not + // supported. + NetworkEndpoint *BackendServiceHAPolicyLeaderNetworkEndpoint `json:"networkEndpoint,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BackendGroup") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BackendGroup") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceHAPolicyLeader) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceHAPolicyLeader + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BackendServiceHAPolicyLeaderNetworkEndpoint struct { + // Instance: Specifying the instance name of a leader is not supported. + Instance string `json:"instance,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Instance") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Instance") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BackendServiceHAPolicyLeaderNetworkEndpoint) MarshalJSON() ([]byte, error) { + type NoMethod BackendServiceHAPolicyLeaderNetworkEndpoint + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BackendServiceIAP: Identity-Aware Proxy type BackendServiceIAP struct { // Enabled: Whether the serving infrastructure will authenticate and @@ -10707,6 +10810,9 @@ type Disk struct { // resource. ResourceStatus *DiskResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzi: Output only. Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -14774,10 +14880,10 @@ func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { // * Regional // (https://cloud.google.com/compute/docs/reference/rest/alpha/forwardingRules) // A forwarding rule and its corresponding IP address represent the -// frontend configuration of a Google Cloud Platform load balancer. -// Forwarding rules can also reference target instances and Cloud VPN -// Classic gateways (targetVpnGateway). For more information, read -// Forwarding rule concepts and Using protocol forwarding. +// frontend configuration of a Google Cloud load balancer. Forwarding +// rules can also reference target instances and Cloud VPN Classic +// gateways (targetVpnGateway). 
For more information, read Forwarding +// rule concepts and Using protocol forwarding. type ForwardingRule struct { // IPAddress: IP address for which this forwarding rule accepts traffic. // When a client sends traffic to this IP address, the forwarding rule @@ -14842,8 +14948,9 @@ type ForwardingRule struct { // AllowGlobalAccess: This field is used along with the backend_service // field for internal load balancing or with the target field for // internal TargetInstance. If set to true, clients can access the - // Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load - // Balancer from all regions. If false, only allows access from the + // internal passthrough Network Load Balancers, the regional internal + // Application Load Balancer, and the regional internal proxy Network + // Load Balancer from all regions. If false, only allows access from the // local region the load balancer is located at. Note that for // INTERNAL_MANAGED forwarding rules, this field cannot be changed after // the forwarding rule is created. @@ -14861,16 +14968,16 @@ type ForwardingRule struct { AllowPscPacketInjection bool `json:"allowPscPacketInjection,omitempty"` // BackendService: Identifies the backend service to which the - // forwarding rule sends traffic. Required for Internal TCP/UDP Load - // Balancing and Network Load Balancing; must be omitted for all other + // forwarding rule sends traffic. Required for internal and external + // passthrough Network Load Balancers; must be omitted for all other // load balancer types. BackendService string `json:"backendService,omitempty"` // BaseForwardingRule: [Output Only] The URL for the corresponding base - // Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule + // forwarding rule. By base forwarding rule, we mean the forwarding rule // that has the same IP address, protocol, and port settings with the - // current Forwarding Rule, but without sourceIPRanges specified. Always - // empty if the current Forwarding Rule does not have sourceIPRanges + // current forwarding rule, but without sourceIPRanges specified. Always + // empty if the current forwarding rule does not have sourceIPRanges // specified. BaseForwardingRule string `json:"baseForwardingRule,omitempty"` @@ -14923,7 +15030,7 @@ type ForwardingRule struct { IsMirroringCollector bool `json:"isMirroringCollector,omitempty"` // Kind: [Output Only] Type of the resource. Always - // compute#forwardingRule for Forwarding Rule resources. + // compute#forwardingRule for forwarding rule resources. Kind string `json:"kind,omitempty"` // LabelFingerprint: A fingerprint for the labels being applied to this @@ -14986,10 +15093,10 @@ type ForwardingRule struct { Name string `json:"name,omitempty"` // Network: This field is not used for global external load balancing. - // For Internal TCP/UDP Load Balancing, this field identifies the - // network that the load balanced IP should belong to for this - // Forwarding Rule. If the subnetwork is specified, the network of the - // subnetwork will be used. If neither subnetwork nor this field is + // For internal passthrough Network Load Balancers, this field + // identifies the network that the load balanced IP should belong to for + // this forwarding rule. If the subnetwork is specified, the network of + // the subnetwork will be used. If neither subnetwork nor this field is // specified, the default network will be used. 
For Private Service // Connect forwarding rules that forward traffic to Google APIs, a // network must be provided. @@ -15058,7 +15165,7 @@ type ForwardingRule struct { Ports []string `json:"ports,omitempty"` // PscConnectionId: [Output Only] The PSC connection id of the PSC - // Forwarding Rule. + // forwarding rule. PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` // Possible values: @@ -15092,7 +15199,7 @@ type ForwardingRule struct { ServiceDirectoryRegistrations []*ForwardingRuleServiceDirectoryRegistration `json:"serviceDirectoryRegistrations,omitempty"` // ServiceLabel: An optional prefix to the service name for this - // Forwarding Rule. If specified, the prefix is the first label of the + // forwarding rule. If specified, the prefix is the first label of the // fully qualified service name. The label must be 1-63 characters long, // and comply with RFC1035. Specifically, the label must be 1-63 // characters long and match the regular expression @@ -15103,25 +15210,26 @@ type ForwardingRule struct { ServiceLabel string `json:"serviceLabel,omitempty"` // ServiceName: [Output Only] The internal fully qualified service name - // for this Forwarding Rule. This field is only used for internal load + // for this forwarding rule. This field is only used for internal load // balancing. ServiceName string `json:"serviceName,omitempty"` - // SourceIpRanges: If not empty, this Forwarding Rule will only forward + // SourceIpRanges: If not empty, this forwarding rule will only forward // the traffic when the source IP address matches one of the IP - // addresses or CIDR ranges set here. Note that a Forwarding Rule can + // addresses or CIDR ranges set here. Note that a forwarding rule can // only have up to 64 source IP ranges, and this field can only be used - // with a regional Forwarding Rule whose scheme is EXTERNAL. Each + // with a regional forwarding rule whose scheme is EXTERNAL. Each // source_ip_range entry should be either an IP address (for example, // 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24). SourceIpRanges []string `json:"sourceIpRanges,omitempty"` // Subnetwork: This field identifies the subnetwork that the load - // balanced IP should belong to for this Forwarding Rule, used in - // internal load balancing and network load balancing with IPv6. If the - // network specified is in auto subnet mode, this field is optional. - // However, a subnetwork must be specified if the network is in custom - // subnet mode or when creating external forwarding rule with IPv6. + // balanced IP should belong to for this forwarding rule, used with + // internal load balancers and external passthrough Network Load + // Balancers with IPv6. If the network specified is in auto subnet mode, + // this field is optional. However, a subnetwork must be specified if + // the network is in custom subnet mode or when creating external + // forwarding rule with IPv6. Subnetwork string `json:"subnetwork,omitempty"` // Target: The URL of the target resource to receive the matched @@ -15582,9 +15690,9 @@ func (s *ForwardingRuleReference) MarshalJSON() ([]byte, error) { } // ForwardingRuleServiceDirectoryRegistration: Describes the -// auto-registration of the Forwarding Rule to Service Directory. The +// auto-registration of the forwarding rule to Service Directory. The // region and project of the Service Directory resource generated from -// this registration will be the same as this Forwarding Rule. +// this registration will be the same as this forwarding rule. 
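A minimal sketch of the sourceIpRanges behaviour documented above, built from the regenerated v0.alpha structs; the helper name, rule name, and CIDR values are illustrative assumptions, not part of this change:

package sketch

import (
	computealpha "google.golang.org/api/compute/v0.alpha"
)

// restrictedForwardingRule sketches a regional EXTERNAL forwarding rule that
// only forwards traffic whose source IP matches one of the listed addresses
// or CIDR ranges (the API accepts at most 64 entries).
func restrictedForwardingRule(backendServiceURL string) *computealpha.ForwardingRule {
	return &computealpha.ForwardingRule{
		Name:           "example-forwarding-rule", // placeholder name
		BackendService: backendServiceURL,
		SourceIpRanges: []string{"203.0.113.0/24", "198.51.100.7"},
	}
}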
type ForwardingRuleServiceDirectoryRegistration struct { // Namespace: Service Directory namespace to register the forwarding // rule under. @@ -15596,8 +15704,8 @@ type ForwardingRuleServiceDirectoryRegistration struct { // ServiceDirectoryRegion: [Optional] Service Directory region to // register this global forwarding rule under. Default to "us-central1". - // Only used for PSC for Google APIs. All PSC for Google APIs Forwarding - // Rules on the same network should use the same Service Directory + // Only used for PSC for Google APIs. All PSC for Google APIs forwarding + // rules on the same network should use the same Service Directory // region. ServiceDirectoryRegion string `json:"serviceDirectoryRegion,omitempty"` @@ -15797,16 +15905,17 @@ func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { type FutureReservation struct { // AutoCreatedReservationsDeleteTime: Future timestamp when the FR - // auto-created reservations will be deleted by GCE. Format of this - // field must be a valid + // auto-created reservations will be deleted by Compute Engine. Format + // of this field must be a valid // href="https://www.ietf.org/rfc/rfc3339.txt">RFC3339 value. AutoCreatedReservationsDeleteTime string `json:"autoCreatedReservationsDeleteTime,omitempty"` // AutoCreatedReservationsDuration: Specifies the duration of // auto-created reservations. It represents relative time to future // reservation start_time when auto-created reservations will be - // automatically deleted by GCE. Duration time unit is represented as a - // count of seconds and fractions of seconds at nanosecond resolution. + // automatically deleted by Compute Engine. Duration time unit is + // represented as a count of seconds and fractions of seconds at + // nanosecond resolution. AutoCreatedReservationsDuration *Duration `json:"autoCreatedReservationsDuration,omitempty"` // AutoDeleteAutoCreatedReservations: Setting for enabling or disabling @@ -16805,7 +16914,7 @@ type GRPCHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -16815,7 +16924,7 @@ type GRPCHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -17315,7 +17424,7 @@ type HTTP2HealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. 
Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -17325,7 +17434,7 @@ type HTTP2HealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -17425,7 +17534,7 @@ type HTTPHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Also supported in // legacy HTTP health checks for target pools. The health check supports // all backends supported by the backend service provided the backend @@ -17535,7 +17644,7 @@ type HTTPSHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -17545,7 +17654,7 @@ type HTTPSHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -17631,20 +17740,14 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { // (/compute/docs/reference/rest/alpha/regionHealthChecks) * Global // (/compute/docs/reference/rest/alpha/healthChecks) These health check // resources can be used for load balancing and for autohealing VMs in a -// managed instance group (MIG). 
**Load balancing** The following load -// balancer can use either regional or global health check: * Internal -// TCP/UDP load balancer The following load balancers require regional -// health check: * Internal HTTP(S) load balancer * Backend -// service-based network load balancer Traffic Director and the -// following load balancers require global health check: * External -// HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load -// balancer The following load balancer require legacy HTTP health -// checks (/compute/docs/reference/rest/v1/httpHealthChecks): * Target -// pool-based network load balancer **Autohealing in MIGs** The health -// checks that you use for autohealing VMs in a MIG can be either -// regional or global. For more information, see Set up an application -// health check and autohealing. For more information, see Health checks -// overview. +// managed instance group (MIG). **Load balancing** Health check +// requirements vary depending on the type of load balancer. For details +// about the type of health check supported for each load balancer and +// corresponding backend type, see Health checks overview: Load balancer +// guide. **Autohealing in MIGs** The health checks that you use for +// autohealing VMs in a MIG can be either regional or global. For more +// information, see Set up an application health check and autohealing. +// For more information, see Health checks overview. type HealthCheck struct { // CheckIntervalSec: How often (in seconds) to send a health check. The // default value is 5 seconds. @@ -20964,6 +21067,9 @@ type Image struct { // imageFamilyViews.get method. RolloutOverride *RolloutPolicy `json:"rolloutOverride,omitempty"` + // SatisfiesPzi: Output only. Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -21617,6 +21723,9 @@ type Instance struct { // corresponding input only field. ResourceStatus *ResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzi: [Output Only] Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -22604,6 +22713,10 @@ type InstanceGroupManager struct { // complementary to this Instance Group Manager. NamedPorts []*NamedPort `json:"namedPorts,omitempty"` + // Params: Input only. Additional params passed with the request, but + // not persisted as part of resource payload. + Params *InstanceGroupManagerParams `json:"params,omitempty"` + // Region: [Output Only] The URL of the region where the managed // instance group resides (for regional resources). Region string `json:"region,omitempty"` @@ -23505,6 +23618,40 @@ func (s *InstanceGroupManagerListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// InstanceGroupManagerParams: Input only additional params for instance +// group manager creation. +type InstanceGroupManagerParams struct { + // ResourceManagerTags: Resource manager tags to be bound to the + // instance group manager. Tag keys and values have the same definition + // as resource manager tags. Keys must be in the format `tagKeys/123`, + // and values are in the format `tagValues/456`. The field is allowed + // for INSERT only. 
+ ResourceManagerTags map[string]string `json:"resourceManagerTags,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ResourceManagerTags") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ResourceManagerTags") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstanceGroupManagerParams) MarshalJSON() ([]byte, error) { + type NoMethod InstanceGroupManagerParams + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // InstanceGroupManagerResizeRequest: InstanceGroupManagerResizeRequest // represents a request to create a number of VMs: either immediately or // by queuing the request for the specified time. This resize request is @@ -23939,14 +24086,15 @@ func (s *InstanceGroupManagerResizeRequestsListResponseWarningData) MarshalJSON( type InstanceGroupManagerStandbyPolicy struct { InitialDelaySec int64 `json:"initialDelaySec,omitempty"` - // Mode: Defines behaviour of using instances from standby pool to - // resize MIG. + // Mode: Defines how a MIG resumes or starts VMs from a standby pool + // when the group scales out. The default mode is `MANUAL`. // // Possible values: - // "MANUAL" - MIG does not automatically stop/start or suspend/resume - // VMs. - // "SCALE_OUT_POOL" - MIG automatically resumes and starts VMs when it - // scales out, and replenishes the standby pool afterwards. + // "MANUAL" - MIG does not automatically resume or start VMs in the + // standby pool when the group scales out. + // "SCALE_OUT_POOL" - MIG automatically resumes or starts VMs in the + // standby pool when the group scales out, and replenishes the standby + // pool afterwards. Mode string `json:"mode,omitempty"` // ForceSendFields is a list of field names (e.g. "InitialDelaySec") to @@ -27494,6 +27642,34 @@ func (s *InstanceWithNamedPorts) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstancesAddNetworkInterfaceRequest struct { + // NetworkInterface: The new network interface to add. + NetworkInterface *NetworkInterface `json:"network_interface,omitempty"` + + // ForceSendFields is a list of field names (e.g. "NetworkInterface") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NetworkInterface") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
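A short sketch of the new input-only Params field on InstanceGroupManager introduced above; the group name and tag IDs are placeholders:

package sketch

import (
	computealpha "google.golang.org/api/compute/v0.alpha"
)

// taggedInstanceGroupManager passes resource manager tags at creation time
// via InstanceGroupManagerParams. Keys use the tagKeys/... form and values
// the tagValues/... form; the field is accepted on insert only.
func taggedInstanceGroupManager() *computealpha.InstanceGroupManager {
	return &computealpha.InstanceGroupManager{
		Name: "example-mig", // placeholder
		Params: &computealpha.InstanceGroupManagerParams{
			ResourceManagerTags: map[string]string{
				"tagKeys/123": "tagValues/456",
			},
		},
	}
}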
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstancesAddNetworkInterfaceRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesAddNetworkInterfaceRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstancesAddResourcePoliciesRequest struct { // ResourcePolicies: Resource policies to be added to this instance. ResourcePolicies []string `json:"resourcePolicies,omitempty"` @@ -28232,6 +28408,9 @@ type InstantSnapshot struct { // snapshot resource. ResourceStatus *InstantSnapshotResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzi: Output only. Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -33465,6 +33644,9 @@ type MachineImage struct { // last character, which cannot be a dash. Name string `json:"name,omitempty"` + // SatisfiesPzi: Output only. Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -54993,6 +55175,10 @@ type RouterStatusBgpPeerStatus struct { BfdStatus *BfdStatus `json:"bfdStatus,omitempty"` + // EnableIpv4: Enable IPv4 traffic over BGP Peer. It is enabled by + // default if the peerIpAddress is version 4. + EnableIpv4 bool `json:"enableIpv4,omitempty"` + // EnableIpv6: Enable IPv6 traffic over BGP Peer. If not specified, it // is disabled by default. EnableIpv6 bool `json:"enableIpv6,omitempty"` @@ -55000,6 +55186,9 @@ type RouterStatusBgpPeerStatus struct { // IpAddress: IP address of the local BGP interface. IpAddress string `json:"ipAddress,omitempty"` + // Ipv4NexthopAddress: IPv4 address of the local BGP interface. + Ipv4NexthopAddress string `json:"ipv4NexthopAddress,omitempty"` + // Ipv6NexthopAddress: IPv6 address of the local BGP interface. Ipv6NexthopAddress string `json:"ipv6NexthopAddress,omitempty"` @@ -55019,6 +55208,9 @@ type RouterStatusBgpPeerStatus struct { // PeerIpAddress: IP address of the remote BGP interface. PeerIpAddress string `json:"peerIpAddress,omitempty"` + // PeerIpv4NexthopAddress: IPv4 address of the remote BGP interface. + PeerIpv4NexthopAddress string `json:"peerIpv4NexthopAddress,omitempty"` + // PeerIpv6NexthopAddress: IPv6 address of the remote BGP interface. PeerIpv6NexthopAddress string `json:"peerIpv6NexthopAddress,omitempty"` @@ -55043,9 +55235,16 @@ type RouterStatusBgpPeerStatus struct { // StatusReason: Indicates why particular status was returned. // // Possible values: + // "IPV4_PEER_ON_IPV6_ONLY_CONNECTION" - BGP peer disabled because it + // requires IPv4 but the underlying connection is IPv6-only. + // "IPV6_PEER_ON_IPV4_ONLY_CONNECTION" - BGP peer disabled because it + // requires IPv6 but the underlying connection is IPv4-only. // "MD5_AUTH_INTERNAL_PROBLEM" - Indicates internal problems with // configuration of MD5 authentication. This particular reason can only // be returned when md5AuthEnabled is true and status is DOWN. + // "MISSING_NETWORK_CONNECTIVITY_CENTER_SPOKE" - BGP peer disabled + // because it is not labeled as an NCC spoke. Currently, BGP peers using + // SD-WAN connectivity will be disabled for this reason. 
// "STATUS_REASON_UNSPECIFIED" StatusReason string `json:"statusReason,omitempty"` @@ -55915,7 +56114,7 @@ type SSLHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -55925,7 +56124,7 @@ type SSLHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -57245,10 +57444,20 @@ type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfi AutoDeployLoadThreshold float64 `json:"autoDeployLoadThreshold,omitempty"` + DetectionAbsoluteQps float64 `json:"detectionAbsoluteQps,omitempty"` + + DetectionLoadThreshold float64 `json:"detectionLoadThreshold,omitempty"` + + DetectionRelativeToBaselineQps float64 `json:"detectionRelativeToBaselineQps,omitempty"` + // Name: The name must be 1-63 characters long, and comply with RFC1035. // The name must be unique within the security policy. Name string `json:"name,omitempty"` + // TrafficGranularityConfigs: Configuration options for enabling + // Adaptive Protection to operate on specified granular traffic units. + TrafficGranularityConfigs []*SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig `json:"trafficGranularityConfigs,omitempty"` + // ForceSendFields is a list of field names (e.g. // "AutoDeployConfidenceThreshold") to unconditionally include in API // requests. 
By default, fields with empty or default values are omitted @@ -57280,6 +57489,9 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdC AutoDeployConfidenceThreshold gensupport.JSONFloat64 `json:"autoDeployConfidenceThreshold"` AutoDeployImpactedBaselineThreshold gensupport.JSONFloat64 `json:"autoDeployImpactedBaselineThreshold"` AutoDeployLoadThreshold gensupport.JSONFloat64 `json:"autoDeployLoadThreshold"` + DetectionAbsoluteQps gensupport.JSONFloat64 `json:"detectionAbsoluteQps"` + DetectionLoadThreshold gensupport.JSONFloat64 `json:"detectionLoadThreshold"` + DetectionRelativeToBaselineQps gensupport.JSONFloat64 `json:"detectionRelativeToBaselineQps"` *NoMethod } s1.NoMethod = (*NoMethod)(s) @@ -57289,9 +57501,58 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdC s.AutoDeployConfidenceThreshold = float64(s1.AutoDeployConfidenceThreshold) s.AutoDeployImpactedBaselineThreshold = float64(s1.AutoDeployImpactedBaselineThreshold) s.AutoDeployLoadThreshold = float64(s1.AutoDeployLoadThreshold) + s.DetectionAbsoluteQps = float64(s1.DetectionAbsoluteQps) + s.DetectionLoadThreshold = float64(s1.DetectionLoadThreshold) + s.DetectionRelativeToBaselineQps = float64(s1.DetectionRelativeToBaselineQps) return nil } +// SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThreshold +// ConfigTrafficGranularityConfig: Configurations to specifc granular +// traffic units processed by Adaptive Protection. +type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig struct { + // EnableEachUniqueValue: If enabled, traffic matching each unique value + // for the specified type constitutes a separate traffic unit. It can + // only be set to true if `value` is empty. + EnableEachUniqueValue bool `json:"enableEachUniqueValue,omitempty"` + + // Type: Type of this configuration. + // + // Possible values: + // "HTTP_HEADER_HOST" + // "HTTP_PATH" + // "UNSPECIFIED_TYPE" + Type string `json:"type,omitempty"` + + // Value: Requests that match this value constitute a granular traffic + // unit. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EnableEachUniqueValue") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EnableEachUniqueValue") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type SecurityPolicyAdvancedOptionsConfig struct { // JsonCustomConfig: Custom configuration to apply the JSON parsing. // Only applicable when json_parsing is set to STANDARD. @@ -58446,7 +58707,13 @@ type SecurityPolicyRuleRateLimitOptions struct { // Server name indication in the TLS session of the HTTPS request. The // key value is truncated to the first 128 bytes. The key type defaults // to ALL on a HTTP session. - REGION_CODE: The country/region from - // which the request originates. + // which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL + // fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If + // not available, the key type defaults to ALL. - USER_IP: The IP + // address of the originating client, which is resolved based on + // "userIpRequestHeaders" configured with the security policy. If there + // is no "userIpRequestHeaders" configuration or an IP address cannot be + // resolved from it, the key type defaults to IP. // // Possible values: // "ALL" @@ -58457,6 +58724,8 @@ type SecurityPolicyRuleRateLimitOptions struct { // "IP" // "REGION_CODE" // "SNI" + // "TLS_JA3_FINGERPRINT" + // "USER_IP" // "XFF_IP" EnforceOnKey string `json:"enforceOnKey,omitempty"` @@ -58547,7 +58816,14 @@ type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { // bytes. - SNI: Server name indication in the TLS session of the HTTPS // request. The key value is truncated to the first 128 bytes. The key // type defaults to ALL on a HTTP session. - REGION_CODE: The - // country/region from which the request originates. + // country/region from which the request originates. - + // TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects + // using HTTPS, HTTP/2 or HTTP/3. If not available, the key type + // defaults to ALL. - USER_IP: The IP address of the originating client, + // which is resolved based on "userIpRequestHeaders" configured with the + // security policy. If there is no "userIpRequestHeaders" configuration + // or an IP address cannot be resolved from it, the key type defaults to + // IP. // // Possible values: // "ALL" @@ -58558,6 +58834,8 @@ type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { // "IP" // "REGION_CODE" // "SNI" + // "TLS_JA3_FINGERPRINT" + // "USER_IP" // "XFF_IP" EnforceOnKeyType string `json:"enforceOnKeyType,omitempty"` @@ -60526,6 +60804,9 @@ type Snapshot struct { // Only applicable for regional snapshots. Region string `json:"region,omitempty"` + // SatisfiesPzi: Output only. Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -62875,9 +63156,9 @@ func (s *SslPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { } // SslPolicy: Represents an SSL Policy resource. Use SSL policies to -// control the SSL features, such as versions and cipher suites, offered -// by an HTTPS or SSL Proxy load balancer. For more information, read -// SSL Policy Concepts. 
+// control SSL features, such as versions and cipher suites, that are +// offered by Application Load Balancers and proxy Network Load +// Balancers. For more information, read SSL policies overview. type SslPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -66192,7 +66473,7 @@ type TCPHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -66202,7 +66483,7 @@ type TCPHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -68787,10 +69068,10 @@ func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { } // TargetPool: Represents a Target Pool resource. Target pools are used -// for network TCP/UDP load balancing. A target pool references member -// instances, an associated legacy HttpHealthCheck resource, and, -// optionally, a backup target pool. For more information, read Using -// target pools. +// with external passthrough Network Load Balancers. A target pool +// references member instances, an associated legacy HttpHealthCheck +// resource, and, optionally, a backup target pool. For more +// information, read Using target pools. type TargetPool struct { // BackupPool: The server-defined URL for the resource. This field is // applicable only when the containing target pool is serving a @@ -69810,10 +70091,10 @@ func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error } // TargetSslProxy: Represents a Target SSL Proxy resource. A target SSL -// proxy is a component of a SSL Proxy load balancer. Global forwarding -// rules reference a target SSL proxy, and the target proxy then -// references an external backend service. For more information, read -// Using Target Proxies. +// proxy is a component of a Proxy Network Load Balancer. The forwarding +// rule references the target SSL proxy, and the target proxy then +// references a backend service. For more information, read Proxy +// Network Load Balancer overview. type TargetSslProxy struct { // CertificateMap: URL of a certificate map that identifies a // certificate map associated with the given target proxy. This field @@ -70327,10 +70608,10 @@ func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { } // TargetTcpProxy: Represents a Target TCP Proxy resource. A target TCP -// proxy is a component of a TCP Proxy load balancer. Global forwarding -// rules reference target TCP proxy, and the target proxy then -// references an external backend service. 
For more information, read -// TCP Proxy Load Balancing overview. +// proxy is a component of a Proxy Network Load Balancer. The forwarding +// rule references the target TCP proxy, and the target proxy then +// references a backend service. For more information, read Proxy +// Network Load Balancer overview. type TargetTcpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -115423,8 +115704,8 @@ type InstanceGroupManagersListManagedInstancesCall struct { // instance, the currentAction is CREATING. If a previous action failed, // the list displays the errors for that failed action. The orderBy // query parameter is not supported. The `pageToken` query parameter is -// supported only in the alpha and beta API and only if the group's -// `listManagedInstancesResults` field is set to `PAGINATED`. +// supported only if the group's `listManagedInstancesResults` field is +// set to `PAGINATED`. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -115610,7 +115891,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.listManagedInstances", @@ -123276,6 +123557,194 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat } +// method id "compute.instances.addNetworkInterface": + +type InstancesAddNetworkInterfaceCall struct { + s *Service + project string + zone string + instance string + instancesaddnetworkinterfacerequest *InstancesAddNetworkInterfaceRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// AddNetworkInterface: Adds a network interface to an instance. +// +// - instance: The instance name for this request stored as resource_id. +// Name should conform to RFC1035 or be an unsigned long integer. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
+func (r *InstancesService) AddNetworkInterface(project string, zone string, instance string, instancesaddnetworkinterfacerequest *InstancesAddNetworkInterfaceRequest) *InstancesAddNetworkInterfaceCall { + c := &InstancesAddNetworkInterfaceCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.instancesaddnetworkinterfacerequest = instancesaddnetworkinterfacerequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesAddNetworkInterfaceCall) RequestId(requestId string) *InstancesAddNetworkInterfaceCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesAddNetworkInterfaceCall) Fields(s ...googleapi.Field) *InstancesAddNetworkInterfaceCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesAddNetworkInterfaceCall) Context(ctx context.Context) *InstancesAddNetworkInterfaceCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesAddNetworkInterfaceCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesAddNetworkInterfaceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesaddnetworkinterfacerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/addNetworkInterface") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.addNetworkInterface" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesAddNetworkInterfaceCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Adds a network interface to an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/addNetworkInterface", + // "httpMethod": "POST", + // "id": "compute.instances.addNetworkInterface", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "The instance name for this request stored as resource_id. Name should conform to RFC1035 or be an unsigned long integer.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/addNetworkInterface", + // "request": { + // "$ref": "InstancesAddNetworkInterfaceRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.addResourcePolicies": type InstancesAddResourcePoliciesCall struct { @@ -131939,9 +132408,11 @@ func (r *InstancesService) Stop(project string, zone string, instance string) *I return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false. +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This +// property is required if the instance has any attached Local SSD +// disks. If false, Local SSD data will be preserved when the instance +// is suspended. If true, the contents of any attached Local SSD disks +// will be discarded. func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c @@ -132069,7 +132540,7 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // ], // "parameters": { // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", // "location": "query", // "type": "boolean" // }, @@ -132149,9 +132620,11 @@ func (r *InstancesService) Suspend(project string, zone string, instance string) return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false. +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This +// property is required if the instance has any attached Local SSD +// disks. If false, Local SSD data will be preserved when the instance +// is suspended. If true, the contents of any attached Local SSD disks +// will be discarded. func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c @@ -132272,7 +132745,7 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err // ], // "parameters": { // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. 
If true, the contents of any attached Local SSD disks will be discarded.", // "location": "query", // "type": "boolean" // }, @@ -185690,9 +186163,8 @@ type RegionInstanceGroupManagersListManagedInstancesCall struct { // group and instances that are scheduled to be created. The list // includes any current actions that the group has scheduled for its // instances. The orderBy query parameter is not supported. The -// `pageToken` query parameter is supported only in the alpha and beta -// API and only if the group's `listManagedInstancesResults` field is -// set to `PAGINATED`. +// `pageToken` query parameter is supported only if the group's +// `listManagedInstancesResults` field is set to `PAGINATED`. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -185877,7 +186349,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -246294,8 +246766,8 @@ type TargetSslProxiesSetSslPolicyCall struct { // SetSslPolicy: Sets the SSL policy for TargetSslProxy. The SSL policy // specifies the server-side support for SSL features. This affects -// connections between clients and the SSL proxy load balancer. They do -// not affect the connection between the load balancer and the backends. +// connections between clients and the load balancer. They do not affect +// the connection between the load balancer and the backends. // // - project: Project ID for this request. // - targetSslProxy: Name of the TargetSslProxy resource whose SSL @@ -246417,7 +246889,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. 
They do not affect the connection between the load balancer and the backends.", // "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setSslPolicy", diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json index fae8744d0..25c230b57 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-api.json +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-api.json @@ -9067,7 +9067,7 @@ ] }, "listManagedInstances": { - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listManagedInstances", @@ -12986,7 +12986,7 @@ ], "parameters": { "discardLocalSsd": { - "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", "location": "query", "type": "boolean" }, @@ -13038,7 +13038,7 @@ ], "parameters": { "discardLocalSsd": { - "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", "location": "query", "type": "boolean" }, @@ -24945,7 +24945,7 @@ ] }, "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. 
The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -37938,7 +37938,7 @@ ] }, "setSslPolicy": { - "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. They do not affect the connection between the load balancer and the backends.", "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", "httpMethod": "POST", "id": "compute.targetSslProxies.setSslPolicy", @@ -40215,7 +40215,7 @@ } } }, - "revision": "20231110", + "revision": "20231128", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -43278,7 +43278,7 @@ "id": "BackendService", "properties": { "affinityCookieTtlSec": { - "description": "Lifetime of cookies in seconds. This setting is applicable to external and internal HTTP(S) load balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "Lifetime of cookies in seconds. This setting is applicable to Application Load Balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -43313,7 +43313,7 @@ }, "connectionTrackingPolicy": { "$ref": "BackendServiceConnectionTrackingPolicy", - "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for Network Load Balancing and Internal TCP/UDP Load Balancing." + "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for external passthrough Network Load Balancers and internal passthrough Network Load Balancers." }, "consistentHash": { "$ref": "ConsistentHashLoadBalancerSettings", @@ -43346,12 +43346,12 @@ "type": "string" }, "enableCDN": { - "description": "If true, enables Cloud CDN for the backend service of an external HTTP(S) load balancer.", + "description": "If true, enables Cloud CDN for the backend service of a global external Application Load Balancer.", "type": "boolean" }, "failoverPolicy": { "$ref": "BackendServiceFailoverPolicy", - "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. 
For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)." + "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)." }, "fingerprint": { "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a BackendService.", @@ -43367,7 +43367,7 @@ }, "iap": { "$ref": "BackendServiceIAP", - "description": "The configurations for Identity-Aware Proxy on this resource. Not available for Internal TCP/UDP Load Balancing and Network Load Balancing." + "description": "The configurations for Identity-Aware Proxy on this resource. Not available for internal passthrough Network Load Balancers and external passthrough Network Load Balancers." }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -43375,7 +43375,7 @@ "type": "string" }, "ipAddressSelectionPolicy": { - "description": "Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. - PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced Global External HTTPS Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional External HTTPS Load Balancing, - Internal TCP Proxy (load balancing scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). ", + "description": "Specifies a preference for traffic sent from the proxy to the backend (or from the client to the backend for proxyless gRPC). The possible values are: - IPV4_ONLY: Only send IPv4 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv4 health checks are used to check the health of the backends. This is the default setting. 
- PREFER_IPV6: Prioritize the connection to the endpoint's IPv6 address over its IPv4 address (provided there is a healthy IPv6 address). - IPV6_ONLY: Only send IPv6 traffic to the backends of the backend service (Instance Group, Managed Instance Group, Network Endpoint Group), regardless of traffic from the client to the proxy. Only IPv6 health checks are used to check the health of the backends. This field is applicable to either: - Advanced global external Application Load Balancer (load balancing scheme EXTERNAL_MANAGED), - Regional external Application Load Balancer, - Internal proxy Network Load Balancer (load balancing scheme INTERNAL_MANAGED), - Regional internal Application Load Balancer (load balancing scheme INTERNAL_MANAGED), - Traffic Director with Envoy proxies and proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). ", "enum": [ "IPV4_ONLY", "IPV6_ONLY", @@ -43406,10 +43406,10 @@ "INVALID_LOAD_BALANCING_SCHEME" ], "enumDescriptions": [ - "Signifies that this will be used for external HTTP(S), SSL Proxy, TCP Proxy, or Network Load Balancing", - "Signifies that this will be used for External Managed HTTP(S) Load Balancing.", - "Signifies that this will be used for Internal TCP/UDP Load Balancing.", - "Signifies that this will be used for Internal HTTP(S) Load Balancing.", + "Signifies that this will be used for classic Application Load Balancers, global external proxy Network Load Balancers, or external passthrough Network Load Balancers.", + "Signifies that this will be used for global external Application Load Balancers, regional external Application Load Balancers, or regional external proxy Network Load Balancers.", + "Signifies that this will be used for internal passthrough Network Load Balancers.", + "Signifies that this will be used for internal Application Load Balancers.", "Signifies that this will be used by Traffic Director.", "" ], @@ -43476,12 +43476,12 @@ }, "port": { "deprecated": true, - "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port.", + "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port.", "format": "int32", "type": "integer" }, "portName": { - "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port_name.", + "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port_name.", "type": "string" }, "protocol": { @@ -43864,11 +43864,11 @@ "type": "string" }, "enableStrongAffinity": { - "description": "Enable Strong Session Affinity for Network Load Balancing. 
This option is not available publicly.", + "description": "Enable Strong Session Affinity for external passthrough Network Load Balancers. This option is not available publicly.", "type": "boolean" }, "idleTimeoutSec": { - "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly.", + "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For internal passthrough Network Load Balancers: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For external passthrough Network Load Balancers the default is 60 seconds. This option is not available publicly.", "format": "int32", "type": "integer" }, @@ -43890,7 +43890,7 @@ "type": "object" }, "BackendServiceFailoverPolicy": { - "description": "For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", + "description": "For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", "id": "BackendServiceFailoverPolicy", "properties": { "disableConnectionDrainOnFailover": { @@ -43898,7 +43898,7 @@ "type": "boolean" }, "dropTrafficIfUnhealthy": { - "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. 
For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.", + "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.", "type": "boolean" }, "failoverRatio": { @@ -48815,7 +48815,7 @@ "type": "object" }, "ForwardingRule": { - "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/beta/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/beta/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud Platform load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", + "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/beta/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/beta/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", "id": "ForwardingRule", "properties": { "IPAddress": { @@ -48849,7 +48849,7 @@ "type": "boolean" }, "allowGlobalAccess": { - "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.", + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the internal passthrough Network Load Balancers, the regional internal Application Load Balancer, and the regional internal proxy Network Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. 
Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.", "type": "boolean" }, "allowPscGlobalAccess": { @@ -48861,11 +48861,11 @@ "type": "boolean" }, "backendService": { - "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for Internal TCP/UDP Load Balancing and Network Load Balancing; must be omitted for all other load balancer types.", + "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for internal and external passthrough Network Load Balancers; must be omitted for all other load balancer types.", "type": "string" }, "baseForwardingRule": { - "description": "[Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified.", + "description": "[Output Only] The URL for the corresponding base forwarding rule. By base forwarding rule, we mean the forwarding rule that has the same IP address, protocol, and port settings with the current forwarding rule, but without sourceIPRanges specified. Always empty if the current forwarding rule does not have sourceIPRanges specified.", "type": "string" }, "creationTimestamp": { @@ -48906,7 +48906,7 @@ }, "kind": { "default": "compute#forwardingRule", - "description": "[Output Only] Type of the resource. Always compute#forwardingRule for Forwarding Rule resources.", + "description": "[Output Only] Type of the resource. Always compute#forwardingRule for forwarding rule resources.", "type": "string" }, "labelFingerprint": { @@ -48954,7 +48954,7 @@ "type": "string" }, "network": { - "description": "This field is not used for global external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "description": "This field is not used for global external load balancing. For internal passthrough Network Load Balancers, this field identifies the network that the load balanced IP should belong to for this forwarding rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", "type": "string" }, "networkTier": { @@ -48989,7 +48989,7 @@ "type": "array" }, "pscConnectionId": { - "description": "[Output Only] The PSC connection id of the PSC Forwarding Rule.", + "description": "[Output Only] The PSC connection id of the PSC forwarding rule.", "format": "uint64", "type": "string" }, @@ -49028,23 +49028,23 @@ "type": "array" }, "serviceLabel": { - "description": "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. 
Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", + "description": "An optional prefix to the service name for this forwarding rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, "serviceName": { - "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.", + "description": "[Output Only] The internal fully qualified service name for this forwarding rule. This field is only used for internal load balancing.", "type": "string" }, "sourceIpRanges": { - "description": "If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", + "description": "If not empty, this forwarding rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a forwarding rule can only have up to 64 source IP ranges, and this field can only be used with a regional forwarding rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", "items": { "type": "string" }, "type": "array" }, "subnetwork": { - "description": "This field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule, used in internal load balancing and network load balancing with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", + "description": "This field identifies the subnetwork that the load balanced IP should belong to for this forwarding rule, used with internal load balancers and external passthrough Network Load Balancers with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", "type": "string" }, "target": { @@ -49375,7 +49375,7 @@ "type": "object" }, "ForwardingRuleServiceDirectoryRegistration": { - "description": "Describes the auto-registration of the Forwarding Rule to Service Directory. 
The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule.", + "description": "Describes the auto-registration of the forwarding rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this forwarding rule.", "id": "ForwardingRuleServiceDirectoryRegistration", "properties": { "namespace": { @@ -49387,7 +49387,7 @@ "type": "string" }, "serviceDirectoryRegion": { - "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region.", + "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs forwarding rules on the same network should use the same Service Directory region.", "type": "string" } }, @@ -49531,12 +49531,12 @@ "id": "FutureReservation", "properties": { "autoCreatedReservationsDeleteTime": { - "description": "Future timestamp when the FR auto-created reservations will be deleted by GCE. Format of this field must be a valid href=\"https://www.ietf.org/rfc/rfc3339.txt\"\u003eRFC3339 value.", + "description": "Future timestamp when the FR auto-created reservations will be deleted by Compute Engine. Format of this field must be a valid href=\"https://www.ietf.org/rfc/rfc3339.txt\"\u003eRFC3339 value.", "type": "string" }, "autoCreatedReservationsDuration": { "$ref": "Duration", - "description": "Specifies the duration of auto-created reservations. It represents relative time to future reservation start_time when auto-created reservations will be automatically deleted by GCE. Duration time unit is represented as a count of seconds and fractions of seconds at nanosecond resolution." + "description": "Specifies the duration of auto-created reservations. It represents relative time to future reservation start_time when auto-created reservations will be automatically deleted by Compute Engine. Duration time unit is represented as a count of seconds and fractions of seconds at nanosecond resolution." }, "autoDeleteAutoCreatedReservations": { "description": "Setting for enabling or disabling automatic deletion for auto-created reservation. If set to true, auto-created reservations will be deleted at Future Reservation's end time (default) or at user's defined timestamp if any of the [auto_created_reservations_delete_time, auto_created_reservations_duration] values is specified. For keeping auto-created reservation indefinitely, this value should be set to false.", @@ -50295,7 +50295,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. 
USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -50531,7 +50531,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. 
For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -50584,7 +50584,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. 
For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -50637,7 +50637,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. 
For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -50674,7 +50674,7 @@ "type": "object" }, "HealthCheck": { - "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/beta/regionHealthChecks) * [Global](/compute/docs/reference/rest/beta/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** The following load balancer can use either regional or global health check: * Internal TCP/UDP load balancer The following load balancers require regional health check: * Internal HTTP(S) load balancer * Backend service-based network load balancer Traffic Director and the following load balancers require global health check: * External HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load balancer The following load balancer require [legacy HTTP health checks](/compute/docs/reference/rest/v1/httpHealthChecks): * Target pool-based network load balancer **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.", + "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/beta/regionHealthChecks) * [Global](/compute/docs/reference/rest/beta/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** Health check requirements vary depending on the type of load balancer. For details about the type of health check supported for each load balancer and corresponding backend type, see Health checks overview: Load balancer guide. **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -53114,6 +53114,10 @@ "$ref": "ResourceStatus", "description": "[Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field." }, + "satisfiesPzi": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -54227,6 +54231,13 @@ }, "description": "Named instance selections configuring properties that the group will use when creating new VMs.", "type": "object" + }, + "instanceSelections": { + "additionalProperties": { + "$ref": "InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection" + }, + "description": "Named instance selections configuring properties that the group will use when creating new VMs.", + "type": "object" } }, "type": "object" @@ -54732,14 +54743,14 @@ "type": "integer" }, "mode": { - "description": "Defines behaviour of using instances from standby pool to resize MIG.", + "description": "Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. 
The default mode is `MANUAL`.", "enum": [ "MANUAL", "SCALE_OUT_POOL" ], "enumDescriptions": [ - "MIG does not automatically stop/start or suspend/resume VMs.", - "MIG automatically resumes and starts VMs when it scales out, and replenishes the standby pool afterwards." + "MIG does not automatically resume or start VMs in the standby pool when the group scales out.", + "MIG automatically resumes or starts VMs in the standby pool when the group scales out, and replenishes the standby pool afterwards." ], "type": "string" } @@ -69607,6 +69618,9 @@ "PREEMPTIBLE_NVIDIA_T4_GPUS", "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS", "PREEMPTIBLE_NVIDIA_V100_GPUS", + "PREEMPTIBLE_TPU_LITE_DEVICE_V5", + "PREEMPTIBLE_TPU_LITE_PODSLICE_V5", + "PREEMPTIBLE_TPU_PODSLICE_V4", "PRIVATE_V6_ACCESS_SUBNETWORKS", "PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK", "PSC_INTERNAL_LB_FORWARDING_RULES", @@ -69645,6 +69659,9 @@ "TARGET_SSL_PROXIES", "TARGET_TCP_PROXIES", "TARGET_VPN_GATEWAYS", + "TPU_LITE_DEVICE_V5", + "TPU_LITE_PODSLICE_V5", + "TPU_PODSLICE_V4", "URL_MAPS", "VPN_GATEWAYS", "VPN_TUNNELS", @@ -69781,6 +69798,9 @@ "", "", "", + "", + "", + "", "The total number of snapshots allowed for a single project.", "", "", @@ -69800,6 +69820,9 @@ "", "", "", + "", + "", + "", "" ], "type": "string" @@ -75031,7 +75054,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. 
Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -75932,10 +75955,58 @@ "format": "float", "type": "number" }, + "detectionAbsoluteQps": { + "format": "float", + "type": "number" + }, + "detectionLoadThreshold": { + "format": "float", + "type": "number" + }, + "detectionRelativeToBaselineQps": { + "format": "float", + "type": "number" + }, "name": { "description": "The name must be 1-63 characters long, and comply with RFC1035. The name must be unique within the security policy.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" + }, + "trafficGranularityConfigs": { + "description": "Configuration options for enabling Adaptive Protection to operate on specified granular traffic units.", + "items": { + "$ref": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig" + }, + "type": "array" + } + }, + "type": "object" + }, + "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig": { + "description": "Configurations to specifc granular traffic units processed by Adaptive Protection.", + "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig", + "properties": { + "enableEachUniqueValue": { + "description": "If enabled, traffic matching each unique value for the specified type constitutes a separate traffic unit. It can only be set to true if `value` is empty.", + "type": "boolean" + }, + "type": { + "description": "Type of this configuration.", + "enum": [ + "HTTP_HEADER_HOST", + "HTTP_PATH", + "UNSPECIFIED_TYPE" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "value": { + "description": "Requests that match this value constitute a granular traffic unit.", + "type": "string" } }, "type": "object" @@ -76610,7 +76681,7 @@ "type": "string" }, "enforceOnKey": { - "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. 
The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. ", "enum": [ "ALL", "ALL_IPS", @@ -76620,6 +76691,8 @@ "IP", "REGION_CODE", "SNI", + "TLS_JA3_FINGERPRINT", + "USER_IP", "XFF_IP" ], "enumDeprecated": [ @@ -76631,6 +76704,8 @@ false, false, false, + false, + false, false ], "enumDescriptions": [ @@ -76642,6 +76717,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -76680,7 +76757,7 @@ "type": "string" }, "enforceOnKeyType": { - "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". 
The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. ", "enum": [ "ALL", "ALL_IPS", @@ -76690,6 +76767,8 @@ "IP", "REGION_CODE", "SNI", + "TLS_JA3_FINGERPRINT", + "USER_IP", "XFF_IP" ], "enumDeprecated": [ @@ -76701,6 +76780,8 @@ false, false, false, + false, + false, false ], "enumDescriptions": [ @@ -76712,6 +76793,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -79409,7 +79492,7 @@ "type": "object" }, "SslPolicy": { - "description": "Represents an SSL Policy resource. Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.", + "description": "Represents an SSL Policy resource. Use SSL policies to control SSL features, such as versions and cipher suites, that are offered by Application Load Balancers and proxy Network Load Balancers. For more information, read SSL policies overview.", "id": "SslPolicy", "properties": { "creationTimestamp": { @@ -80487,7 +80570,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. 
Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -82362,7 +82445,7 @@ "type": "object" }, "TargetPool": { - "description": "Represents a Target Pool resource. Target pools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.", + "description": "Represents a Target Pool resource. Target pools are used with external passthrough Network Load Balancers. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.", "id": "TargetPool", "properties": { "backupPool": { @@ -83025,7 +83108,7 @@ "type": "object" }, "TargetSslProxy": { - "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a SSL Proxy load balancer. 
Global forwarding rules reference a target SSL proxy, and the target proxy then references an external backend service. For more information, read Using Target Proxies.", + "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target SSL proxy, and the target proxy then references a backend service. For more information, read Proxy Network Load Balancer overview.", "id": "TargetSslProxy", "properties": { "certificateMap": { @@ -83404,7 +83487,7 @@ "type": "object" }, "TargetTcpProxy": { - "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a TCP Proxy load balancer. Global forwarding rules reference target TCP proxy, and the target proxy then references an external backend service. For more information, read TCP Proxy Load Balancing overview.", + "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target TCP proxy, and the target proxy then references a backend service. For more information, read Proxy Network Load Balancer overview.", "id": "TargetTcpProxy", "properties": { "creationTimestamp": { diff --git a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go index 58754f570..e5749a780 100644 --- a/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v0.beta/compute-gen.go @@ -5863,13 +5863,13 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { // For more information, see Backend Services. type BackendService struct { // AffinityCookieTtlSec: Lifetime of cookies in seconds. This setting is - // applicable to external and internal HTTP(S) load balancers and - // Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session - // affinity. If set to 0, the cookie is non-persistent and lasts only - // until the end of the browser session (or equivalent). The maximum - // allowed value is two weeks (1,209,600). Not supported when the - // backend service is referenced by a URL map that is bound to target - // gRPC proxy that has validateForProxyless field set to true. + // applicable to Application Load Balancers and Traffic Director and + // requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to + // 0, the cookie is non-persistent and lasts only until the end of the + // browser session (or equivalent). The maximum allowed value is two + // weeks (1,209,600). Not supported when the backend service is + // referenced by a URL map that is bound to target gRPC proxy that has + // validateForProxyless field set to true. AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` // Backends: The list of backends that serve this BackendService. @@ -5895,8 +5895,8 @@ type BackendService struct { // ConnectionTrackingPolicy: Connection Tracking configuration for this // BackendService. Connection tracking policy settings are only - // available for Network Load Balancing and Internal TCP/UDP Load - // Balancing. + // available for external passthrough Network Load Balancers and + // internal passthrough Network Load Balancers. 
ConnectionTrackingPolicy *BackendServiceConnectionTrackingPolicy `json:"connectionTrackingPolicy,omitempty"` // ConsistentHash: Consistent Hash-based load balancing can be used to @@ -5935,15 +5935,15 @@ type BackendService struct { // security policy associated with this backend service. EdgeSecurityPolicy string `json:"edgeSecurityPolicy,omitempty"` - // EnableCDN: If true, enables Cloud CDN for the backend service of an - // external HTTP(S) load balancer. + // EnableCDN: If true, enables Cloud CDN for the backend service of a + // global external Application Load Balancer. EnableCDN bool `json:"enableCDN,omitempty"` // FailoverPolicy: Requires at least one backend instance group to be // defined as a backup (failover) backend. For load balancers that have - // configurable failover: Internal TCP/UDP Load Balancing + // configurable failover: Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) - // and external TCP/UDP Load Balancing + // and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). FailoverPolicy *BackendServiceFailoverPolicy `json:"failoverPolicy,omitempty"` @@ -5967,8 +5967,8 @@ type BackendService struct { HealthChecks []string `json:"healthChecks,omitempty"` // Iap: The configurations for Identity-Aware Proxy on this resource. - // Not available for Internal TCP/UDP Load Balancing and Network Load - // Balancing. + // Not available for internal passthrough Network Load Balancers and + // external passthrough Network Load Balancers. Iap *BackendServiceIAP `json:"iap,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -5988,13 +5988,13 @@ type BackendService struct { // backend service (Instance Group, Managed Instance Group, Network // Endpoint Group), regardless of traffic from the client to the proxy. // Only IPv6 health checks are used to check the health of the backends. - // This field is applicable to either: - Advanced Global External HTTPS - // Load Balancing (load balancing scheme EXTERNAL_MANAGED), - Regional - // External HTTPS Load Balancing, - Internal TCP Proxy (load balancing - // scheme INTERNAL_MANAGED), - Regional Internal HTTPS Load Balancing - // (load balancing scheme INTERNAL_MANAGED), - Traffic Director with - // Envoy proxies and proxyless gRPC (load balancing scheme - // INTERNAL_SELF_MANAGED). + // This field is applicable to either: - Advanced global external + // Application Load Balancer (load balancing scheme EXTERNAL_MANAGED), - + // Regional external Application Load Balancer, - Internal proxy Network + // Load Balancer (load balancing scheme INTERNAL_MANAGED), - Regional + // internal Application Load Balancer (load balancing scheme + // INTERNAL_MANAGED), - Traffic Director with Envoy proxies and + // proxyless gRPC (load balancing scheme INTERNAL_SELF_MANAGED). // // Possible values: // "IPV4_ONLY" - Only send IPv4 traffic to the backends of the Backend @@ -6022,14 +6022,16 @@ type BackendService struct { // another. For more information, refer to Choosing a load balancer. // // Possible values: - // "EXTERNAL" - Signifies that this will be used for external HTTP(S), - // SSL Proxy, TCP Proxy, or Network Load Balancing - // "EXTERNAL_MANAGED" - Signifies that this will be used for External - // Managed HTTP(S) Load Balancing. - // "INTERNAL" - Signifies that this will be used for Internal TCP/UDP - // Load Balancing. 
- // "INTERNAL_MANAGED" - Signifies that this will be used for Internal - // HTTP(S) Load Balancing. + // "EXTERNAL" - Signifies that this will be used for classic + // Application Load Balancers, global external proxy Network Load + // Balancers, or external passthrough Network Load Balancers. + // "EXTERNAL_MANAGED" - Signifies that this will be used for global + // external Application Load Balancers, regional external Application + // Load Balancers, or regional external proxy Network Load Balancers. + // "INTERNAL" - Signifies that this will be used for internal + // passthrough Network Load Balancers. + // "INTERNAL_MANAGED" - Signifies that this will be used for internal + // Application Load Balancers. // "INTERNAL_SELF_MANAGED" - Signifies that this will be used by // Traffic Director. // "INVALID_LOAD_BALANCING_SCHEME" @@ -6171,16 +6173,18 @@ type BackendService struct { OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` // Port: Deprecated in favor of portName. The TCP port to connect on the - // backend. The default value is 80. For Internal TCP/UDP Load Balancing - // and Network Load Balancing, omit port. + // backend. The default value is 80. For internal passthrough Network + // Load Balancers and external passthrough Network Load Balancers, omit + // port. Port int64 `json:"port,omitempty"` // PortName: A named port on a backend instance group representing the // port for communication to the backend VMs in that group. The named // port must be defined on each backend instance group // (https://cloud.google.com/load-balancing/docs/backend-service#named_ports). - // This parameter has no meaning if the backends are NEGs. For Internal - // TCP/UDP Load Balancing and Network Load Balancing, omit port_name. + // This parameter has no meaning if the backends are NEGs. For internal + // passthrough Network Load Balancers and external passthrough Network + // Load Balancers, omit port_name. PortName string `json:"portName,omitempty"` // Protocol: The protocol this BackendService uses to communicate with @@ -6769,18 +6773,19 @@ type BackendServiceConnectionTrackingPolicy struct { // "NEVER_PERSIST" ConnectionPersistenceOnUnhealthyBackends string `json:"connectionPersistenceOnUnhealthyBackends,omitempty"` - // EnableStrongAffinity: Enable Strong Session Affinity for Network Load - // Balancing. This option is not available publicly. + // EnableStrongAffinity: Enable Strong Session Affinity for external + // passthrough Network Load Balancers. This option is not available + // publicly. EnableStrongAffinity bool `json:"enableStrongAffinity,omitempty"` // IdleTimeoutSec: Specifies how long to keep a Connection Tracking - // entry while there is no matching traffic (in seconds). For Internal - // TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the - // maximum is 16 hours. - It can be set only if Connection Tracking is - // less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, - // CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For - // Network Load Balancer the default is 60 seconds. This option is not - // available publicly. + // entry while there is no matching traffic (in seconds). For internal + // passthrough Network Load Balancers: - The minimum (default) is 10 + // minutes and the maximum is 16 hours. - It can be set only if + // Connection Tracking is less than 5-tuple (i.e. Session Affinity is + // CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking + // Mode is PER_SESSION). 
For external passthrough Network Load Balancers + // the default is 60 seconds. This option is not available publicly. IdleTimeoutSec int64 `json:"idleTimeoutSec,omitempty"` // TrackingMode: Specifies the key used for connection tracking. There @@ -6826,9 +6831,9 @@ func (s *BackendServiceConnectionTrackingPolicy) MarshalJSON() ([]byte, error) { } // BackendServiceFailoverPolicy: For load balancers that have -// configurable failover: Internal TCP/UDP Load Balancing +// configurable failover: Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) -// and external TCP/UDP Load Balancing +// and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). // On failover or failback, this field indicates whether connection // draining will be honored. Google Cloud has a fixed connection @@ -6847,9 +6852,9 @@ type BackendServiceFailoverPolicy struct { // unhealthy.If set to false, connections are distributed among all // primary VMs when all primary and all backup backend VMs are // unhealthy. For load balancers that have configurable failover: - // Internal TCP/UDP Load Balancing + // Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) - // and external TCP/UDP Load Balancing + // and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). // The default is false. DropTrafficIfUnhealthy bool `json:"dropTrafficIfUnhealthy,omitempty"` @@ -13944,10 +13949,10 @@ func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { // * Regional // (https://cloud.google.com/compute/docs/reference/rest/beta/forwardingRules) // A forwarding rule and its corresponding IP address represent the -// frontend configuration of a Google Cloud Platform load balancer. -// Forwarding rules can also reference target instances and Cloud VPN -// Classic gateways (targetVpnGateway). For more information, read -// Forwarding rule concepts and Using protocol forwarding. +// frontend configuration of a Google Cloud load balancer. Forwarding +// rules can also reference target instances and Cloud VPN Classic +// gateways (targetVpnGateway). For more information, read Forwarding +// rule concepts and Using protocol forwarding. type ForwardingRule struct { // IPAddress: IP address for which this forwarding rule accepts traffic. // When a client sends traffic to this IP address, the forwarding rule @@ -14011,8 +14016,9 @@ type ForwardingRule struct { // AllowGlobalAccess: This field is used along with the backend_service // field for internal load balancing or with the target field for // internal TargetInstance. If set to true, clients can access the - // Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load - // Balancer from all regions. If false, only allows access from the + // internal passthrough Network Load Balancers, the regional internal + // Application Load Balancer, and the regional internal proxy Network + // Load Balancer from all regions. If false, only allows access from the // local region the load balancer is located at. Note that for // INTERNAL_MANAGED forwarding rules, this field cannot be changed after // the forwarding rule is created. 
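As a quick orientation for reviewers, here is a minimal Go sketch of how the surface touched in the hunks above (the INTERNAL load-balancing scheme, BackendServiceConnectionTrackingPolicy, and ForwardingRule.AllowGlobalAccess) is consumed through the vendored beta client. It is not exercised by this change; the project, region, resource names, and numeric values are assumptions, and a real internal passthrough Network Load Balancer setup needs additional fields (network, subnetwork, health checks) omitted here.

// Hypothetical usage sketch, not part of the vendored diff.
package main

import (
	"context"
	"log"

	computebeta "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()

	// NewService picks up Application Default Credentials.
	svc, err := computebeta.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Backend service for an internal passthrough Network Load Balancer,
	// using the connection tracking settings documented above.
	// Values are illustrative only.
	bs := &computebeta.BackendService{
		Name:                "example-ilb-backend", // assumed name
		LoadBalancingScheme: "INTERNAL",
		Protocol:            "TCP",
		ConnectionTrackingPolicy: &computebeta.BackendServiceConnectionTrackingPolicy{
			TrackingMode:   "PER_SESSION",
			IdleTimeoutSec: 600,
		},
	}

	// Forwarding rule that allows clients in all regions to reach the
	// load balancer (AllowGlobalAccess), per the description above.
	fr := &computebeta.ForwardingRule{
		Name:                "example-ilb-rule", // assumed name
		LoadBalancingScheme: "INTERNAL",
		AllowGlobalAccess:   true,
		Ports:               []string{"80"},
		BackendService:      "regions/us-central1/backendServices/example-ilb-backend", // assumed
	}

	if _, err := svc.RegionBackendServices.Insert("my-project", "us-central1", bs).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
	if _, err := svc.ForwardingRules.Insert("my-project", "us-central1", fr).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}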
@@ -14030,16 +14036,16 @@ type ForwardingRule struct { AllowPscPacketInjection bool `json:"allowPscPacketInjection,omitempty"` // BackendService: Identifies the backend service to which the - // forwarding rule sends traffic. Required for Internal TCP/UDP Load - // Balancing and Network Load Balancing; must be omitted for all other + // forwarding rule sends traffic. Required for internal and external + // passthrough Network Load Balancers; must be omitted for all other // load balancer types. BackendService string `json:"backendService,omitempty"` // BaseForwardingRule: [Output Only] The URL for the corresponding base - // Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule + // forwarding rule. By base forwarding rule, we mean the forwarding rule // that has the same IP address, protocol, and port settings with the - // current Forwarding Rule, but without sourceIPRanges specified. Always - // empty if the current Forwarding Rule does not have sourceIPRanges + // current forwarding rule, but without sourceIPRanges specified. Always + // empty if the current forwarding rule does not have sourceIPRanges // specified. BaseForwardingRule string `json:"baseForwardingRule,omitempty"` @@ -14082,7 +14088,7 @@ type ForwardingRule struct { IsMirroringCollector bool `json:"isMirroringCollector,omitempty"` // Kind: [Output Only] Type of the resource. Always - // compute#forwardingRule for Forwarding Rule resources. + // compute#forwardingRule for forwarding rule resources. Kind string `json:"kind,omitempty"` // LabelFingerprint: A fingerprint for the labels being applied to this @@ -14145,10 +14151,10 @@ type ForwardingRule struct { Name string `json:"name,omitempty"` // Network: This field is not used for global external load balancing. - // For Internal TCP/UDP Load Balancing, this field identifies the - // network that the load balanced IP should belong to for this - // Forwarding Rule. If the subnetwork is specified, the network of the - // subnetwork will be used. If neither subnetwork nor this field is + // For internal passthrough Network Load Balancers, this field + // identifies the network that the load balanced IP should belong to for + // this forwarding rule. If the subnetwork is specified, the network of + // the subnetwork will be used. If neither subnetwork nor this field is // specified, the default network will be used. For Private Service // Connect forwarding rules that forward traffic to Google APIs, a // network must be provided. @@ -14215,7 +14221,7 @@ type ForwardingRule struct { Ports []string `json:"ports,omitempty"` // PscConnectionId: [Output Only] The PSC connection id of the PSC - // Forwarding Rule. + // forwarding rule. PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` // Possible values: @@ -14245,7 +14251,7 @@ type ForwardingRule struct { ServiceDirectoryRegistrations []*ForwardingRuleServiceDirectoryRegistration `json:"serviceDirectoryRegistrations,omitempty"` // ServiceLabel: An optional prefix to the service name for this - // Forwarding Rule. If specified, the prefix is the first label of the + // forwarding rule. If specified, the prefix is the first label of the // fully qualified service name. The label must be 1-63 characters long, // and comply with RFC1035. 
Specifically, the label must be 1-63 // characters long and match the regular expression @@ -14256,25 +14262,26 @@ type ForwardingRule struct { ServiceLabel string `json:"serviceLabel,omitempty"` // ServiceName: [Output Only] The internal fully qualified service name - // for this Forwarding Rule. This field is only used for internal load + // for this forwarding rule. This field is only used for internal load // balancing. ServiceName string `json:"serviceName,omitempty"` - // SourceIpRanges: If not empty, this Forwarding Rule will only forward + // SourceIpRanges: If not empty, this forwarding rule will only forward // the traffic when the source IP address matches one of the IP - // addresses or CIDR ranges set here. Note that a Forwarding Rule can + // addresses or CIDR ranges set here. Note that a forwarding rule can // only have up to 64 source IP ranges, and this field can only be used - // with a regional Forwarding Rule whose scheme is EXTERNAL. Each + // with a regional forwarding rule whose scheme is EXTERNAL. Each // source_ip_range entry should be either an IP address (for example, // 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24). SourceIpRanges []string `json:"sourceIpRanges,omitempty"` // Subnetwork: This field identifies the subnetwork that the load - // balanced IP should belong to for this Forwarding Rule, used in - // internal load balancing and network load balancing with IPv6. If the - // network specified is in auto subnet mode, this field is optional. - // However, a subnetwork must be specified if the network is in custom - // subnet mode or when creating external forwarding rule with IPv6. + // balanced IP should belong to for this forwarding rule, used with + // internal load balancers and external passthrough Network Load + // Balancers with IPv6. If the network specified is in auto subnet mode, + // this field is optional. However, a subnetwork must be specified if + // the network is in custom subnet mode or when creating external + // forwarding rule with IPv6. Subnetwork string `json:"subnetwork,omitempty"` // Target: The URL of the target resource to receive the matched @@ -14735,9 +14742,9 @@ func (s *ForwardingRuleReference) MarshalJSON() ([]byte, error) { } // ForwardingRuleServiceDirectoryRegistration: Describes the -// auto-registration of the Forwarding Rule to Service Directory. The +// auto-registration of the forwarding rule to Service Directory. The // region and project of the Service Directory resource generated from -// this registration will be the same as this Forwarding Rule. +// this registration will be the same as this forwarding rule. type ForwardingRuleServiceDirectoryRegistration struct { // Namespace: Service Directory namespace to register the forwarding // rule under. @@ -14749,8 +14756,8 @@ type ForwardingRuleServiceDirectoryRegistration struct { // ServiceDirectoryRegion: [Optional] Service Directory region to // register this global forwarding rule under. Default to "us-central1". - // Only used for PSC for Google APIs. All PSC for Google APIs Forwarding - // Rules on the same network should use the same Service Directory + // Only used for PSC for Google APIs. All PSC for Google APIs forwarding + // rules on the same network should use the same Service Directory // region. 
ServiceDirectoryRegion string `json:"serviceDirectoryRegion,omitempty"` @@ -14950,16 +14957,17 @@ func (s *ForwardingRulesScopedListWarningData) MarshalJSON() ([]byte, error) { type FutureReservation struct { // AutoCreatedReservationsDeleteTime: Future timestamp when the FR - // auto-created reservations will be deleted by GCE. Format of this - // field must be a valid + // auto-created reservations will be deleted by Compute Engine. Format + // of this field must be a valid // href="https://www.ietf.org/rfc/rfc3339.txt">RFC3339 value. AutoCreatedReservationsDeleteTime string `json:"autoCreatedReservationsDeleteTime,omitempty"` // AutoCreatedReservationsDuration: Specifies the duration of // auto-created reservations. It represents relative time to future // reservation start_time when auto-created reservations will be - // automatically deleted by GCE. Duration time unit is represented as a - // count of seconds and fractions of seconds at nanosecond resolution. + // automatically deleted by Compute Engine. Duration time unit is + // represented as a count of seconds and fractions of seconds at + // nanosecond resolution. AutoCreatedReservationsDuration *Duration `json:"autoCreatedReservationsDuration,omitempty"` // AutoDeleteAutoCreatedReservations: Setting for enabling or disabling @@ -15958,7 +15966,7 @@ type GRPCHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -15968,7 +15976,7 @@ type GRPCHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -16398,7 +16406,7 @@ type HTTP2HealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -16408,7 +16416,7 @@ type HTTP2HealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. 
Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -16492,7 +16500,7 @@ type HTTPHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Also supported in // legacy HTTP health checks for target pools. The health check supports // all backends supported by the backend service provided the backend @@ -16586,7 +16594,7 @@ type HTTPSHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -16596,7 +16604,7 @@ type HTTPSHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -16666,20 +16674,14 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { // (/compute/docs/reference/rest/beta/regionHealthChecks) * Global // (/compute/docs/reference/rest/beta/healthChecks) These health check // resources can be used for load balancing and for autohealing VMs in a -// managed instance group (MIG). **Load balancing** The following load -// balancer can use either regional or global health check: * Internal -// TCP/UDP load balancer The following load balancers require regional -// health check: * Internal HTTP(S) load balancer * Backend -// service-based network load balancer Traffic Director and the -// following load balancers require global health check: * External -// HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load -// balancer The following load balancer require legacy HTTP health -// checks (/compute/docs/reference/rest/v1/httpHealthChecks): * Target -// pool-based network load balancer **Autohealing in MIGs** The health -// checks that you use for autohealing VMs in a MIG can be either -// regional or global. For more information, see Set up an application -// health check and autohealing. For more information, see Health checks -// overview. +// managed instance group (MIG). **Load balancing** Health check +// requirements vary depending on the type of load balancer. 
For details +// about the type of health check supported for each load balancer and +// corresponding backend type, see Health checks overview: Load balancer +// guide. **Autohealing in MIGs** The health checks that you use for +// autohealing VMs in a MIG can be either regional or global. For more +// information, see Set up an application health check and autohealing. +// For more information, see Health checks overview. type HealthCheck struct { // CheckIntervalSec: How often (in seconds) to send a health check. The // default value is 5 seconds. @@ -20242,6 +20244,9 @@ type Instance struct { // corresponding input only field. ResourceStatus *ResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzi: [Output Only] Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -21671,6 +21676,10 @@ type InstanceGroupManagerInstanceFlexibilityPolicy struct { // properties that the group will use when creating new VMs. InstanceSelectionLists map[string]InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection `json:"instanceSelectionLists,omitempty"` + // InstanceSelections: Named instance selections configuring properties + // that the group will use when creating new VMs. + InstanceSelections map[string]InstanceGroupManagerInstanceFlexibilityPolicyInstanceSelection `json:"instanceSelections,omitempty"` + // ForceSendFields is a list of field names (e.g. // "InstanceSelectionLists") to unconditionally include in API requests. // By default, fields with empty or default values are omitted from API @@ -22403,14 +22412,15 @@ func (s *InstanceGroupManagerResizeRequestsListResponseWarningData) MarshalJSON( type InstanceGroupManagerStandbyPolicy struct { InitialDelaySec int64 `json:"initialDelaySec,omitempty"` - // Mode: Defines behaviour of using instances from standby pool to - // resize MIG. + // Mode: Defines how a MIG resumes or starts VMs from a standby pool + // when the group scales out. The default mode is `MANUAL`. // // Possible values: - // "MANUAL" - MIG does not automatically stop/start or suspend/resume - // VMs. - // "SCALE_OUT_POOL" - MIG automatically resumes and starts VMs when it - // scales out, and replenishes the standby pool afterwards. + // "MANUAL" - MIG does not automatically resume or start VMs in the + // standby pool when the group scales out. + // "SCALE_OUT_POOL" - MIG automatically resumes or starts VMs in the + // standby pool when the group scales out, and replenishes the standby + // pool afterwards. Mode string `json:"mode,omitempty"` // ForceSendFields is a list of field names (e.g. "InitialDelaySec") to @@ -43364,6 +43374,9 @@ type Quota struct { // "PREEMPTIBLE_NVIDIA_T4_GPUS" // "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS" // "PREEMPTIBLE_NVIDIA_V100_GPUS" + // "PREEMPTIBLE_TPU_LITE_DEVICE_V5" + // "PREEMPTIBLE_TPU_LITE_PODSLICE_V5" + // "PREEMPTIBLE_TPU_PODSLICE_V4" // "PRIVATE_V6_ACCESS_SUBNETWORKS" // "PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK" // "PSC_INTERNAL_LB_FORWARDING_RULES" @@ -43403,6 +43416,9 @@ type Quota struct { // "TARGET_SSL_PROXIES" // "TARGET_TCP_PROXIES" // "TARGET_VPN_GATEWAYS" + // "TPU_LITE_DEVICE_V5" + // "TPU_LITE_PODSLICE_V5" + // "TPU_PODSLICE_V4" // "URL_MAPS" // "VPN_GATEWAYS" // "VPN_TUNNELS" @@ -50695,7 +50711,7 @@ type SSLHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. 
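// Illustrative sketch (Go): setting the standby-pool mode documented above. The
// InstanceGroupManagerStandbyPolicy type and its MANUAL / SCALE_OUT_POOL values come from the
// generated file changed in this diff; the import path (assumed to be the beta surface,
// google.golang.org/api/compute/v0.beta) and the standbyPolicy field on the group are assumptions.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	project, zone, igm := "my-project", "us-central1-a", "my-mig" // placeholders

	patch := &compute.InstanceGroupManager{
		// StandbyPolicy is an assumed field name. SCALE_OUT_POOL makes the MIG resume or start
		// VMs from the standby pool on scale out and replenish the pool afterwards; the
		// documented default mode is MANUAL.
		StandbyPolicy: &compute.InstanceGroupManagerStandbyPolicy{
			Mode:            "SCALE_OUT_POOL",
			InitialDelaySec: 120,
		},
	}
	if _, err := svc.InstanceGroupManagers.Patch(project, zone, igm, patch).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}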
Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -50705,7 +50721,7 @@ type SSLHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -51932,10 +51948,20 @@ type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfi AutoDeployLoadThreshold float64 `json:"autoDeployLoadThreshold,omitempty"` + DetectionAbsoluteQps float64 `json:"detectionAbsoluteQps,omitempty"` + + DetectionLoadThreshold float64 `json:"detectionLoadThreshold,omitempty"` + + DetectionRelativeToBaselineQps float64 `json:"detectionRelativeToBaselineQps,omitempty"` + // Name: The name must be 1-63 characters long, and comply with RFC1035. // The name must be unique within the security policy. Name string `json:"name,omitempty"` + // TrafficGranularityConfigs: Configuration options for enabling + // Adaptive Protection to operate on specified granular traffic units. + TrafficGranularityConfigs []*SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig `json:"trafficGranularityConfigs,omitempty"` + // ForceSendFields is a list of field names (e.g. // "AutoDeployConfidenceThreshold") to unconditionally include in API // requests. 
By default, fields with empty or default values are omitted @@ -51967,6 +51993,9 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdC AutoDeployConfidenceThreshold gensupport.JSONFloat64 `json:"autoDeployConfidenceThreshold"` AutoDeployImpactedBaselineThreshold gensupport.JSONFloat64 `json:"autoDeployImpactedBaselineThreshold"` AutoDeployLoadThreshold gensupport.JSONFloat64 `json:"autoDeployLoadThreshold"` + DetectionAbsoluteQps gensupport.JSONFloat64 `json:"detectionAbsoluteQps"` + DetectionLoadThreshold gensupport.JSONFloat64 `json:"detectionLoadThreshold"` + DetectionRelativeToBaselineQps gensupport.JSONFloat64 `json:"detectionRelativeToBaselineQps"` *NoMethod } s1.NoMethod = (*NoMethod)(s) @@ -51976,9 +52005,58 @@ func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdC s.AutoDeployConfidenceThreshold = float64(s1.AutoDeployConfidenceThreshold) s.AutoDeployImpactedBaselineThreshold = float64(s1.AutoDeployImpactedBaselineThreshold) s.AutoDeployLoadThreshold = float64(s1.AutoDeployLoadThreshold) + s.DetectionAbsoluteQps = float64(s1.DetectionAbsoluteQps) + s.DetectionLoadThreshold = float64(s1.DetectionLoadThreshold) + s.DetectionRelativeToBaselineQps = float64(s1.DetectionRelativeToBaselineQps) return nil } +// SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThreshold +// ConfigTrafficGranularityConfig: Configurations to specifc granular +// traffic units processed by Adaptive Protection. +type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig struct { + // EnableEachUniqueValue: If enabled, traffic matching each unique value + // for the specified type constitutes a separate traffic unit. It can + // only be set to true if `value` is empty. + EnableEachUniqueValue bool `json:"enableEachUniqueValue,omitempty"` + + // Type: Type of this configuration. + // + // Possible values: + // "HTTP_HEADER_HOST" + // "HTTP_PATH" + // "UNSPECIFIED_TYPE" + Type string `json:"type,omitempty"` + + // Value: Requests that match this value constitute a granular traffic + // unit. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "EnableEachUniqueValue") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EnableEachUniqueValue") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type SecurityPolicyAdvancedOptionsConfig struct { // JsonCustomConfig: Custom configuration to apply the JSON parsing. // Only applicable when json_parsing is set to STANDARD. @@ -53042,7 +53120,13 @@ type SecurityPolicyRuleRateLimitOptions struct { // Server name indication in the TLS session of the HTTPS request. The // key value is truncated to the first 128 bytes. The key type defaults // to ALL on a HTTP session. - REGION_CODE: The country/region from - // which the request originates. + // which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL + // fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If + // not available, the key type defaults to ALL. - USER_IP: The IP + // address of the originating client, which is resolved based on + // "userIpRequestHeaders" configured with the security policy. If there + // is no "userIpRequestHeaders" configuration or an IP address cannot be + // resolved from it, the key type defaults to IP. // // Possible values: // "ALL" @@ -53053,6 +53137,8 @@ type SecurityPolicyRuleRateLimitOptions struct { // "IP" // "REGION_CODE" // "SNI" + // "TLS_JA3_FINGERPRINT" + // "USER_IP" // "XFF_IP" EnforceOnKey string `json:"enforceOnKey,omitempty"` @@ -53139,7 +53225,14 @@ type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { // bytes. - SNI: Server name indication in the TLS session of the HTTPS // request. The key value is truncated to the first 128 bytes. The key // type defaults to ALL on a HTTP session. - REGION_CODE: The - // country/region from which the request originates. + // country/region from which the request originates. - + // TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects + // using HTTPS, HTTP/2 or HTTP/3. If not available, the key type + // defaults to ALL. - USER_IP: The IP address of the originating client, + // which is resolved based on "userIpRequestHeaders" configured with the + // security policy. If there is no "userIpRequestHeaders" configuration + // or an IP address cannot be resolved from it, the key type defaults to + // IP. // // Possible values: // "ALL" @@ -53150,6 +53243,8 @@ type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { // "IP" // "REGION_CODE" // "SNI" + // "TLS_JA3_FINGERPRINT" + // "USER_IP" // "XFF_IP" EnforceOnKeyType string `json:"enforceOnKeyType,omitempty"` @@ -56873,9 +56968,9 @@ func (s *SslPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { } // SslPolicy: Represents an SSL Policy resource. Use SSL policies to -// control the SSL features, such as versions and cipher suites, offered -// by an HTTPS or SSL Proxy load balancer. For more information, read -// SSL Policy Concepts. +// control SSL features, such as versions and cipher suites, that are +// offered by Application Load Balancers and proxy Network Load +// Balancers. For more information, read SSL policies overview. type SslPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -58401,7 +58496,7 @@ type TCPHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. 
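// Illustrative sketch (Go): the new Adaptive Protection detection thresholds and the
// TrafficGranularityConfig type added above. Struct and field names come from the generated
// file in this diff; the import path (assumed beta, google.golang.org/api/compute/v0.beta) and
// the thresholdConfigs attachment point under layer7DdosDefenseConfig are assumptions.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	cfg := &compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfig{
		Name:                           "per-host-threshold",
		DetectionAbsoluteQps:           2000, // new detection threshold fields added in this diff
		DetectionRelativeToBaselineQps: 4,
		TrafficGranularityConfigs: []*compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfigThresholdConfigTrafficGranularityConfig{
			{
				// Each unique Host header value becomes its own traffic unit;
				// Value must be left empty when EnableEachUniqueValue is true.
				Type:                  "HTTP_HEADER_HOST",
				EnableEachUniqueValue: true,
			},
		},
	}
	// This config would normally be appended to the security policy's
	// adaptiveProtectionConfig.layer7DdosDefenseConfig.thresholdConfigs list (assumed path).
	b, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}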
Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -58411,7 +58506,7 @@ type TCPHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -60984,10 +61079,10 @@ func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { } // TargetPool: Represents a Target Pool resource. Target pools are used -// for network TCP/UDP load balancing. A target pool references member -// instances, an associated legacy HttpHealthCheck resource, and, -// optionally, a backup target pool. For more information, read Using -// target pools. +// with external passthrough Network Load Balancers. A target pool +// references member instances, an associated legacy HttpHealthCheck +// resource, and, optionally, a backup target pool. For more +// information, read Using target pools. type TargetPool struct { // BackupPool: The server-defined URL for the resource. This field is // applicable only when the containing target pool is serving a @@ -62003,10 +62098,10 @@ func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error } // TargetSslProxy: Represents a Target SSL Proxy resource. A target SSL -// proxy is a component of a SSL Proxy load balancer. Global forwarding -// rules reference a target SSL proxy, and the target proxy then -// references an external backend service. For more information, read -// Using Target Proxies. +// proxy is a component of a Proxy Network Load Balancer. The forwarding +// rule references the target SSL proxy, and the target proxy then +// references a backend service. For more information, read Proxy +// Network Load Balancer overview. type TargetSslProxy struct { // CertificateMap: URL of a certificate map that identifies a // certificate map associated with the given target proxy. This field @@ -62520,10 +62615,10 @@ func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { } // TargetTcpProxy: Represents a Target TCP Proxy resource. A target TCP -// proxy is a component of a TCP Proxy load balancer. Global forwarding -// rules reference target TCP proxy, and the target proxy then -// references an external backend service. For more information, read -// TCP Proxy Load Balancing overview. +// proxy is a component of a Proxy Network Load Balancer. The forwarding +// rule references the target TCP proxy, and the target proxy then +// references a backend service. For more information, read Proxy +// Network Load Balancer overview. type TargetTcpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. 
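// Illustrative sketch (Go): a Cloud Armor rate-limit rule keyed on the USER_IP enforce-on-key
// value added in the rate-limit hunks above (it resolves the client IP from the policy's
// userIpRequestHeaders configuration and falls back to IP otherwise). Import path is assumed to
// be the beta surface of this package; all names, thresholds, and ranges are placeholders.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v0.beta"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	project, policy := "my-project", "my-cloud-armor-policy" // placeholders

	rule := &compute.SecurityPolicyRule{
		Priority: 1000,
		Action:   "throttle",
		Match: &compute.SecurityPolicyRuleMatcher{
			VersionedExpr: "SRC_IPS_V1",
			Config:        &compute.SecurityPolicyRuleMatcherConfig{SrcIpRanges: []string{"*"}},
		},
		RateLimitOptions: &compute.SecurityPolicyRuleRateLimitOptions{
			EnforceOnKey:       "USER_IP", // new key type documented in this diff
			ConformAction:      "allow",
			ExceedAction:       "deny(429)",
			RateLimitThreshold: &compute.SecurityPolicyRuleRateLimitOptionsThreshold{Count: 100, IntervalSec: 60},
		},
	}
	if _, err := svc.SecurityPolicies.AddRule(project, policy, rule).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}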
@@ -106823,8 +106918,8 @@ type InstanceGroupManagersListManagedInstancesCall struct { // instance, the currentAction is CREATING. If a previous action failed, // the list displays the errors for that failed action. The orderBy // query parameter is not supported. The `pageToken` query parameter is -// supported only in the alpha and beta API and only if the group's -// `listManagedInstancesResults` field is set to `PAGINATED`. +// supported only if the group's `listManagedInstancesResults` field is +// set to `PAGINATED`. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -107010,7 +107105,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.listManagedInstances", @@ -122451,9 +122546,11 @@ func (r *InstancesService) Stop(project string, zone string, instance string) *I return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false. +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This +// property is required if the instance has any attached Local SSD +// disks. If false, Local SSD data will be preserved when the instance +// is suspended. If true, the contents of any attached Local SSD disks +// will be discarded. func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c @@ -122574,7 +122671,7 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // ], // "parameters": { // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. 
If true, the contents of any attached Local SSD disks will be discarded.", // "location": "query", // "type": "boolean" // }, @@ -122649,9 +122746,11 @@ func (r *InstancesService) Suspend(project string, zone string, instance string) return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false. +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This +// property is required if the instance has any attached Local SSD +// disks. If false, Local SSD data will be preserved when the instance +// is suspended. If true, the contents of any attached Local SSD disks +// will be discarded. func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c @@ -122772,7 +122871,7 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err // ], // "parameters": { // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", // "location": "query", // "type": "boolean" // }, @@ -172611,9 +172710,8 @@ type RegionInstanceGroupManagersListManagedInstancesCall struct { // group and instances that are scheduled to be created. The list // includes any current actions that the group has scheduled for its // instances. The orderBy query parameter is not supported. The -// `pageToken` query parameter is supported only in the alpha and beta -// API and only if the group's `listManagedInstancesResults` field is -// set to `PAGINATED`. +// `pageToken` query parameter is supported only if the group's +// `listManagedInstancesResults` field is set to `PAGINATED`. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -172798,7 +172896,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -226187,8 +226285,8 @@ type TargetSslProxiesSetSslPolicyCall struct { // SetSslPolicy: Sets the SSL policy for TargetSslProxy. The SSL policy // specifies the server-side support for SSL features. 
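// Illustrative sketch (Go): the clarified discardLocalSsd semantics above. The parameter is
// required when the instance has Local SSD disks attached: false preserves Local SSD data
// across suspend, true discards it. Project, zone, and instance names are placeholders; the
// two calls are independent examples, not a sequence you would normally run together.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	project, zone, instance := "my-project", "us-central1-a", "my-vm" // placeholders

	// Suspend while preserving Local SSD contents.
	if _, err := svc.Instances.Suspend(project, zone, instance).DiscardLocalSsd(false).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
	// Stop and discard Local SSD contents.
	if _, err := svc.Instances.Stop(project, zone, instance).DiscardLocalSsd(true).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}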
This affects -// connections between clients and the SSL proxy load balancer. They do -// not affect the connection between the load balancer and the backends. +// connections between clients and the load balancer. They do not affect +// the connection between the load balancer and the backends. // // - project: Project ID for this request. // - targetSslProxy: Name of the TargetSslProxy resource whose SSL @@ -226310,7 +226408,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. They do not affect the connection between the load balancer and the backends.", // "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setSslPolicy", diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 2411236d4..0ba58f564 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -8066,7 +8066,7 @@ ] }, "listManagedInstances": { - "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.instanceGroupManagers.listManagedInstances", @@ -11374,7 +11374,7 @@ ], "parameters": { "discardLocalSsd": { - "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", "location": "query", "type": "boolean" }, @@ -11426,7 +11426,7 @@ ], "parameters": { "discardLocalSsd": { - "description": "If true, discard the contents of any attached localSSD partitions. 
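// Illustrative sketch (Go): paging through listManagedInstances. Per the updated description
// above, the pageToken parameter is honored only when the group's listManagedInstancesResults
// field is set to PAGINATED. Project, zone, and group names are placeholders; the PageToken and
// MaxResults setters follow the usual generated-call pattern.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	project, zone, igm := "my-project", "us-central1-a", "my-mig" // placeholders

	pageToken := ""
	for {
		call := svc.InstanceGroupManagers.ListManagedInstances(project, zone, igm).MaxResults(100).Context(ctx)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		resp, err := call.Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, mi := range resp.ManagedInstances {
			fmt.Println(mi.Instance, mi.CurrentAction)
		}
		if resp.NextPageToken == "" {
			break
		}
		pageToken = resp.NextPageToken
	}
}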
Default value is false.", + "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", "location": "query", "type": "boolean" }, @@ -21869,7 +21869,7 @@ ] }, "listManagedInstances": { - "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", "httpMethod": "POST", "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -26547,6 +26547,73 @@ } } }, + "regionZones": { + "methods": { + "list": { + "description": "Retrieves the list of Zone resources under the specific region available to the specified project.", + "flatPath": "projects/{project}/regions/{region}/zones", + "httpMethod": "GET", + "id": "compute.regionZones.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/zones", + "response": { + "$ref": "ZoneList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "regions": { "methods": { "get": { @@ -33196,7 +33263,7 @@ ] }, "setSslPolicy": { - "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. 
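// Illustrative sketch (Go): the compute.regionZones.list method added to the v1 discovery
// document above, which lists the Zone resources under a region. Assuming the regenerated
// client exposes it as RegionZones.List with the usual Filter/MaxResults setters; project and
// region values are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	project, region := "my-project", "us-central1" // placeholders

	zones, err := svc.RegionZones.List(project, region).
		Filter(`status = "UP"`).
		MaxResults(50).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, z := range zones.Items {
		fmt.Println(z.Name, z.Status)
	}
}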
They do not affect the connection between the load balancer and the backends.", "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", "httpMethod": "POST", "id": "compute.targetSslProxies.setSslPolicy", @@ -35267,7 +35334,7 @@ } } }, - "revision": "20231031", + "revision": "20231128", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -36528,6 +36595,80 @@ }, "type": "object" }, + "AllocationAggregateReservation": { + "description": "This reservation type is specified by total resource amounts (e.g. total count of CPUs) and can account for multiple instance SKUs. In other words, one can create instances of varying shapes against this reservation.", + "id": "AllocationAggregateReservation", + "properties": { + "inUseResources": { + "description": "[Output only] List of resources currently in use.", + "items": { + "$ref": "AllocationAggregateReservationReservedResourceInfo" + }, + "type": "array" + }, + "reservedResources": { + "description": "List of reserved resources (CPUs, memory, accelerators).", + "items": { + "$ref": "AllocationAggregateReservationReservedResourceInfo" + }, + "type": "array" + }, + "vmFamily": { + "description": "The VM family that all instances scheduled against this reservation must belong to.", + "enum": [ + "VM_FAMILY_CLOUD_TPU_LITE_DEVICE_CT5L", + "VM_FAMILY_CLOUD_TPU_LITE_POD_SLICE_CT5LP", + "VM_FAMILY_CLOUD_TPU_POD_SLICE_CT4P" + ], + "enumDescriptions": [ + "", + "", + "" + ], + "type": "string" + }, + "workloadType": { + "description": "The workload type of the instances that will target this reservation.", + "enum": [ + "BATCH", + "SERVING", + "UNSPECIFIED" + ], + "enumDescriptions": [ + "Reserved resources will be optimized for BATCH workloads, such as ML training.", + "Reserved resources will be optimized for SERVING workloads, such as ML inference.", + "" + ], + "type": "string" + } + }, + "type": "object" + }, + "AllocationAggregateReservationReservedResourceInfo": { + "id": "AllocationAggregateReservationReservedResourceInfo", + "properties": { + "accelerator": { + "$ref": "AllocationAggregateReservationReservedResourceInfoAccelerator", + "description": "Properties of accelerator resources in this reservation." + } + }, + "type": "object" + }, + "AllocationAggregateReservationReservedResourceInfoAccelerator": { + "id": "AllocationAggregateReservationReservedResourceInfoAccelerator", + "properties": { + "acceleratorCount": { + "description": "Number of accelerators of specified type.", + "format": "int32", + "type": "integer" + }, + "acceleratorType": { + "description": "Full or partial URL to accelerator type. e.g. \"projects/{PROJECT}/zones/{ZONE}/acceleratorTypes/ct4l\"", + "type": "string" + } + }, + "type": "object" + }, "AllocationResourceStatus": { "description": "[Output Only] Contains output only fields.", "id": "AllocationResourceStatus", @@ -36801,6 +36942,10 @@ "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example: https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/pd-standard For a full list of acceptable values, see Persistent disk types. If you specify this field when creating a VM, you can provide either the full or partial URL. 
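// Illustrative sketch (Go): the AllocationAggregateReservation schema added to the v1 discovery
// document above, which expresses a reservation as total resource amounts (here, TPU
// accelerators) rather than per-instance shapes. All values are placeholders; how the struct is
// referenced from a Reservation resource is not shown in this hunk and is left out.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	agg := &compute.AllocationAggregateReservation{
		VmFamily:     "VM_FAMILY_CLOUD_TPU_LITE_DEVICE_CT5L",
		WorkloadType: "BATCH", // reserved resources optimized for batch workloads such as ML training
		ReservedResources: []*compute.AllocationAggregateReservationReservedResourceInfo{
			{
				Accelerator: &compute.AllocationAggregateReservationReservedResourceInfoAccelerator{
					AcceleratorCount: 8,
					// Full or partial URL to the accelerator type (placeholder, mirroring the doc example).
					AcceleratorType: "projects/my-project/zones/us-central1-a/acceleratorTypes/ct4l",
				},
			},
		},
	}
	b, err := json.MarshalIndent(agg, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}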
For example, the following values are valid: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /diskTypes/diskType - projects/project/zones/zone/diskTypes/diskType - zones/zone/diskTypes/diskType If you specify this field when creating or updating an instance template or all-instances configuration, specify the type of the disk, not the URL. For example: pd-standard.", "type": "string" }, + "enableConfidentialCompute": { + "description": "Whether this disk is using confidential compute mode.", + "type": "boolean" + }, "labels": { "additionalProperties": { "type": "string" @@ -38172,7 +38317,7 @@ "id": "BackendService", "properties": { "affinityCookieTtlSec": { - "description": "Lifetime of cookies in seconds. This setting is applicable to external and internal HTTP(S) load balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", + "description": "Lifetime of cookies in seconds. This setting is applicable to Application Load Balancers and Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value is two weeks (1,209,600). Not supported when the backend service is referenced by a URL map that is bound to target gRPC proxy that has validateForProxyless field set to true.", "format": "int32", "type": "integer" }, @@ -38207,7 +38352,7 @@ }, "connectionTrackingPolicy": { "$ref": "BackendServiceConnectionTrackingPolicy", - "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for Network Load Balancing and Internal TCP/UDP Load Balancing." + "description": "Connection Tracking configuration for this BackendService. Connection tracking policy settings are only available for external passthrough Network Load Balancers and internal passthrough Network Load Balancers." }, "consistentHash": { "$ref": "ConsistentHashLoadBalancerSettings", @@ -38240,12 +38385,12 @@ "type": "string" }, "enableCDN": { - "description": "If true, enables Cloud CDN for the backend service of an external HTTP(S) load balancer.", + "description": "If true, enables Cloud CDN for the backend service of a global external Application Load Balancer.", "type": "boolean" }, "failoverPolicy": { "$ref": "BackendServiceFailoverPolicy", - "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)." + "description": "Requires at least one backend instance group to be defined as a backup (failover) backend. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview)." 
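// Illustrative sketch (Go): the enableConfidentialCompute flag added to
// AttachedDiskInitializeParams (and Disk) in the hunk above, shown on a boot-disk spec. The
// image is a placeholder, and any additional constraints on confidential-mode disks are out of
// scope for this sketch.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	bootDisk := &compute.AttachedDisk{
		Boot:       true,
		AutoDelete: true,
		InitializeParams: &compute.AttachedDiskInitializeParams{
			SourceImage:               "projects/debian-cloud/global/images/family/debian-12", // placeholder
			EnableConfidentialCompute: true,                                                   // new field in this diff
		},
	}
	b, err := json.MarshalIndent(bootDisk, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}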
}, "fingerprint": { "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a BackendService.", @@ -38261,7 +38406,7 @@ }, "iap": { "$ref": "BackendServiceIAP", - "description": "The configurations for Identity-Aware Proxy on this resource. Not available for Internal TCP/UDP Load Balancing and Network Load Balancing." + "description": "The configurations for Identity-Aware Proxy on this resource. Not available for internal passthrough Network Load Balancers and external passthrough Network Load Balancers." }, "id": { "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -38284,10 +38429,10 @@ "INVALID_LOAD_BALANCING_SCHEME" ], "enumDescriptions": [ - "Signifies that this will be used for external HTTP(S), SSL Proxy, TCP Proxy, or Network Load Balancing", - "Signifies that this will be used for External Managed HTTP(S) Load Balancing.", - "Signifies that this will be used for Internal TCP/UDP Load Balancing.", - "Signifies that this will be used for Internal HTTP(S) Load Balancing.", + "Signifies that this will be used for classic Application Load Balancers, global external proxy Network Load Balancers, or external passthrough Network Load Balancers.", + "Signifies that this will be used for global external Application Load Balancers, regional external Application Load Balancers, or regional external proxy Network Load Balancers.", + "Signifies that this will be used for internal passthrough Network Load Balancers.", + "Signifies that this will be used for internal Application Load Balancers.", "Signifies that this will be used by Traffic Director.", "" ], @@ -38354,12 +38499,12 @@ }, "port": { "deprecated": true, - "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port.", + "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80. For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port.", "format": "int32", "type": "integer" }, "portName": { - "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. For Internal TCP/UDP Load Balancing and Network Load Balancing, omit port_name.", + "description": "A named port on a backend instance group representing the port for communication to the backend VMs in that group. The named port must be [defined on each backend instance group](https://cloud.google.com/load-balancing/docs/backend-service#named_ports). This parameter has no meaning if the backends are NEGs. 
For internal passthrough Network Load Balancers and external passthrough Network Load Balancers, omit port_name.", "type": "string" }, "protocol": { @@ -38738,11 +38883,11 @@ "type": "string" }, "enableStrongAffinity": { - "description": "Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly.", + "description": "Enable Strong Session Affinity for external passthrough Network Load Balancers. This option is not available publicly.", "type": "boolean" }, "idleTimeoutSec": { - "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For Internal TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For Network Load Balancer the default is 60 seconds. This option is not available publicly.", + "description": "Specifies how long to keep a Connection Tracking entry while there is no matching traffic (in seconds). For internal passthrough Network Load Balancers: - The minimum (default) is 10 minutes and the maximum is 16 hours. - It can be set only if Connection Tracking is less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For external passthrough Network Load Balancers the default is 60 seconds. This option is not available publicly.", "format": "int32", "type": "integer" }, @@ -38764,7 +38909,7 @@ "type": "object" }, "BackendServiceFailoverPolicy": { - "description": "For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", + "description": "For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). On failover or failback, this field indicates whether connection draining will be honored. Google Cloud has a fixed connection draining timeout of 10 minutes. A setting of true terminates existing TCP connections to the active pool during failover and failback, immediately draining traffic. 
A setting of false allows existing TCP connections to persist, even on VMs no longer in the active pool, for up to the duration of the connection draining timeout (10 minutes).", "id": "BackendServiceFailoverPolicy", "properties": { "disableConnectionDrainOnFailover": { @@ -38772,7 +38917,7 @@ "type": "boolean" }, "dropTrafficIfUnhealthy": { - "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external TCP/UDP Load Balancing](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.", + "description": "If set to true, connections to the load balancer are dropped when all primary and all backup backend VMs are unhealthy.If set to false, connections are distributed among all primary VMs when all primary and all backup backend VMs are unhealthy. For load balancers that have configurable failover: [Internal passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/internal/failover-overview) and [external passthrough Network Load Balancers](https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). The default is false.", "type": "boolean" }, "failoverRatio": { @@ -40783,6 +40928,10 @@ "$ref": "CustomerEncryptionKey", "description": "Encrypts the disk using a customer-supplied encryption key or a customer-managed encryption key. Encryption keys do not protect access to metadata of the disk. After you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later. For example, to create a disk snapshot, to create a disk image, to create a machine image, or to attach the disk to a virtual machine. After you encrypt a disk with a customer-managed key, the diskEncryptionKey.kmsKeyName is set to a key *version* name once the disk is created. The disk is encrypted with this version of the key. In the response, diskEncryptionKey.kmsKeyName appears in the following format: \"diskEncryptionKey.kmsKeyName\": \"projects/kms_project_id/locations/region/keyRings/ key_region/cryptoKeys/key /cryptoKeysVersions/version If you do not provide an encryption key when creating the disk, then the disk is encrypted using an automatically generated key and you don't need to provide a key to use the disk later." }, + "enableConfidentialCompute": { + "description": "Whether this disk is using confidential compute mode.", + "type": "boolean" + }, "guestOsFeatures": { "description": "A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", "items": { @@ -43535,7 +43684,7 @@ "type": "object" }, "ForwardingRule": { - "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud Platform load balancer. 
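// Illustrative sketch (Go): a regional backend service for an internal passthrough Network Load
// Balancer, combining the connection-tracking and failover settings whose descriptions are
// updated above. The health-check URL, names, and timeouts are placeholders.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	project, region := "my-project", "us-central1" // placeholders

	bs := &compute.BackendService{
		Name:                "ilb-backend",
		LoadBalancingScheme: "INTERNAL", // internal passthrough Network Load Balancer
		Protocol:            "TCP",
		HealthChecks:        []string{"projects/my-project/regions/us-central1/healthChecks/hc-tcp"}, // placeholder
		ConnectionTrackingPolicy: &compute.BackendServiceConnectionTrackingPolicy{
			TrackingMode:   "PER_SESSION",
			IdleTimeoutSec: 600, // 10 minutes is the documented minimum/default for internal passthrough NLBs
		},
		FailoverPolicy: &compute.BackendServiceFailoverPolicy{
			DropTrafficIfUnhealthy: true,
			FailoverRatio:          0.5,
		},
	}
	if _, err := svc.RegionBackendServices.Insert(project, region, bs).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}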
Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", + "description": "Represents a Forwarding Rule resource. Forwarding rule resources in Google Cloud can be either regional or global in scope: * [Global](https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules) * [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules) A forwarding rule and its corresponding IP address represent the frontend configuration of a Google Cloud load balancer. Forwarding rules can also reference target instances and Cloud VPN Classic gateways (targetVpnGateway). For more information, read Forwarding rule concepts and Using protocol forwarding.", "id": "ForwardingRule", "properties": { "IPAddress": { @@ -43569,7 +43718,7 @@ "type": "boolean" }, "allowGlobalAccess": { - "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.", + "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If set to true, clients can access the internal passthrough Network Load Balancers, the regional internal Application Load Balancer, and the regional internal proxy Network Load Balancer from all regions. If false, only allows access from the local region the load balancer is located at. Note that for INTERNAL_MANAGED forwarding rules, this field cannot be changed after the forwarding rule is created.", "type": "boolean" }, "allowPscGlobalAccess": { @@ -43577,11 +43726,11 @@ "type": "boolean" }, "backendService": { - "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for Internal TCP/UDP Load Balancing and Network Load Balancing; must be omitted for all other load balancer types.", + "description": "Identifies the backend service to which the forwarding rule sends traffic. Required for internal and external passthrough Network Load Balancers; must be omitted for all other load balancer types.", "type": "string" }, "baseForwardingRule": { - "description": "[Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified.", + "description": "[Output Only] The URL for the corresponding base forwarding rule. By base forwarding rule, we mean the forwarding rule that has the same IP address, protocol, and port settings with the current forwarding rule, but without sourceIPRanges specified. Always empty if the current forwarding rule does not have sourceIPRanges specified.", "type": "string" }, "creationTimestamp": { @@ -43622,7 +43771,7 @@ }, "kind": { "default": "compute#forwardingRule", - "description": "[Output Only] Type of the resource. 
Always compute#forwardingRule for Forwarding Rule resources.", + "description": "[Output Only] Type of the resource. Always compute#forwardingRule for forwarding rule resources.", "type": "string" }, "labelFingerprint": { @@ -43670,7 +43819,7 @@ "type": "string" }, "network": { - "description": "This field is not used for global external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "description": "This field is not used for global external load balancing. For internal passthrough Network Load Balancers, this field identifies the network that the load balanced IP should belong to for this forwarding rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", "type": "string" }, "networkTier": { @@ -43705,7 +43854,7 @@ "type": "array" }, "pscConnectionId": { - "description": "[Output Only] The PSC connection id of the PSC Forwarding Rule.", + "description": "[Output Only] The PSC connection id of the PSC forwarding rule.", "format": "uint64", "type": "string" }, @@ -43744,23 +43893,23 @@ "type": "array" }, "serviceLabel": { - "description": "An optional prefix to the service name for this Forwarding Rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", + "description": "An optional prefix to the service name for this forwarding rule. If specified, the prefix is the first label of the fully qualified service name. The label must be 1-63 characters long, and comply with RFC1035. Specifically, the label must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. This field is only used for internal load balancing.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, "serviceName": { - "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.", + "description": "[Output Only] The internal fully qualified service name for this forwarding rule. This field is only used for internal load balancing.", "type": "string" }, "sourceIpRanges": { - "description": "If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. 
Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", + "description": "If not empty, this forwarding rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a forwarding rule can only have up to 64 source IP ranges, and this field can only be used with a regional forwarding rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", "items": { "type": "string" }, "type": "array" }, "subnetwork": { - "description": "This field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule, used in internal load balancing and network load balancing with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", + "description": "This field identifies the subnetwork that the load balanced IP should belong to for this forwarding rule, used with internal load balancers and external passthrough Network Load Balancers with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", "type": "string" }, "target": { @@ -44091,7 +44240,7 @@ "type": "object" }, "ForwardingRuleServiceDirectoryRegistration": { - "description": "Describes the auto-registration of the Forwarding Rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this Forwarding Rule.", + "description": "Describes the auto-registration of the forwarding rule to Service Directory. The region and project of the Service Directory resource generated from this registration will be the same as this forwarding rule.", "id": "ForwardingRuleServiceDirectoryRegistration", "properties": { "namespace": { @@ -44103,7 +44252,7 @@ "type": "string" }, "serviceDirectoryRegion": { - "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs Forwarding Rules on the same network should use the same Service Directory region.", + "description": "[Optional] Service Directory region to register this global forwarding rule under. Default to \"us-central1\". Only used for PSC for Google APIs. All PSC for Google APIs forwarding rules on the same network should use the same Service Directory region.", "type": "string" } }, @@ -44260,7 +44409,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. 
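// Illustrative sketch (Go): a forwarding rule for an internal passthrough Network Load Balancer
// using the fields whose descriptions are updated above (backendService, network/subnetwork,
// allowGlobalAccess). All resource URLs and names are placeholders.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	project, region := "my-project", "us-central1" // placeholders

	rule := &compute.ForwardingRule{
		Name:                "ilb-fr",
		LoadBalancingScheme: "INTERNAL",
		IPProtocol:          "TCP",
		Ports:               []string{"80"},
		// Required for internal and external passthrough Network Load Balancers.
		BackendService: "projects/my-project/regions/us-central1/backendServices/ilb-backend", // placeholder
		Network:        "projects/my-project/global/networks/default",                         // placeholder
		Subnetwork:     "projects/my-project/regions/us-central1/subnetworks/default",         // placeholder
		// Allow clients in any region to reach this internal load balancer.
		AllowGlobalAccess: true,
	}
	if _, err := svc.ForwardingRules.Insert(project, region, rule).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}
}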
For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -44496,7 +44645,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. 
For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -44549,7 +44698,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Also supported in legacy HTTP health checks for target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. 
For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -44602,7 +44751,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. 
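The portSpecification rewordings above only swap "pass-through" for "passthrough"; as a reference point, this is a minimal sketch of a TCP health check that selects its port indirectly via USE_SERVING_PORT. The health check name is hypothetical.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Sketch only: a TCP health check that resolves its port via the backend
	// service's serving port (USE_SERVING_PORT), which the description above
	// notes is only supported by backend services for proxy load balancers.
	hc := &compute.HealthCheck{
		Name:             "example-tcp-health-check", // hypothetical name
		Type:             "TCP",
		CheckIntervalSec: 5,
		TimeoutSec:       5,
		TcpHealthCheck: &compute.TCPHealthCheck{
			PortSpecification: "USE_SERVING_PORT",
		},
	}
	b, _ := json.MarshalIndent(hc, "", "  ")
	fmt.Println(string(b))
}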
For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -44639,7 +44788,7 @@ "type": "object" }, "HealthCheck": { - "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) * [Global](/compute/docs/reference/rest/v1/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** The following load balancer can use either regional or global health check: * Internal TCP/UDP load balancer The following load balancers require regional health check: * Internal HTTP(S) load balancer * Backend service-based network load balancer Traffic Director and the following load balancers require global health check: * External HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load balancer The following load balancer require [legacy HTTP health checks](/compute/docs/reference/rest/v1/httpHealthChecks): * Target pool-based network load balancer **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.", + "description": "Represents a health check resource. Google Compute Engine has two health check resources: * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) * [Global](/compute/docs/reference/rest/v1/healthChecks) These health check resources can be used for load balancing and for autohealing VMs in a managed instance group (MIG). **Load balancing** Health check requirements vary depending on the type of load balancer. For details about the type of health check supported for each load balancer and corresponding backend type, see Health checks overview: Load balancer guide. **Autohealing in MIGs** The health checks that you use for autohealing VMs in a MIG can be either regional or global. For more information, see Set up an application health check and autohealing. For more information, see Health checks overview.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -46434,6 +46583,10 @@ "format": "int64", "type": "string" }, + "enableConfidentialCompute": { + "description": "Whether this image is created from a confidential compute mode disk. [Output Only]: This field is not set by user, but from source disk.", + "type": "boolean" + }, "family": { "description": "The name of the image family to which this image belongs. The image family name can be from a publicly managed image family provided by Compute Engine, or from a custom image family you create. For example, centos-stream-9 is a publicly available image family. For more information, see Image family best practices. When creating disks, you can specify an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035.", "type": "string" @@ -46976,6 +47129,10 @@ "$ref": "ResourceStatus", "description": "[Output Only] Specifies values set for instance attributes as compared to the values requested by user in the corresponding input only field." 
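The image schema above gains an output-only enableConfidentialCompute flag propagated from the source disk; the settable counterpart on Disk and AttachedDiskInitializeParams appears further down in this diff. A rough sketch of setting it at disk-creation time follows; the project, zone, and disk name are hypothetical, and confidential mode may impose additional constraints not shown here.

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Sketch only: project, zone, and names are hypothetical, and real
	// confidential-mode disks may require extra settings not shown here.
	disk := &compute.Disk{
		Name:                      "example-confidential-disk",
		SizeGb:                    50,
		SourceImage:               "projects/debian-cloud/global/images/family/debian-12",
		EnableConfidentialCompute: true, // new field added in this diff
	}

	op, err := svc.Disks.Insert("my-project", "us-central1-a", disk).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation: %s", op.Name)
}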
}, + "satisfiesPzi": { + "description": "[Output Only] Reserved for future use.", + "type": "boolean" + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -62017,6 +62174,9 @@ "PREEMPTIBLE_NVIDIA_T4_GPUS", "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS", "PREEMPTIBLE_NVIDIA_V100_GPUS", + "PREEMPTIBLE_TPU_LITE_DEVICE_V5", + "PREEMPTIBLE_TPU_LITE_PODSLICE_V5", + "PREEMPTIBLE_TPU_PODSLICE_V4", "PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK", "PSC_INTERNAL_LB_FORWARDING_RULES", "PUBLIC_ADVERTISED_PREFIXES", @@ -62054,6 +62214,9 @@ "TARGET_SSL_PROXIES", "TARGET_TCP_PROXIES", "TARGET_VPN_GATEWAYS", + "TPU_LITE_DEVICE_V5", + "TPU_LITE_PODSLICE_V5", + "TPU_PODSLICE_V4", "URL_MAPS", "VPN_GATEWAYS", "VPN_TUNNELS", @@ -62189,6 +62352,9 @@ "", "", "", + "", + "", + "", "The total number of snapshots allowed for a single project.", "", "", @@ -62208,6 +62374,9 @@ "", "", "", + "", + "", + "", "" ], "type": "string" @@ -63881,6 +64050,10 @@ "description": "Represents a reservation resource. A reservation ensures that capacity is held in a specific zone even if the reserved VMs are not running. For more information, read Reserving zonal resources.", "id": "Reservation", "properties": { + "aggregateReservation": { + "$ref": "AllocationAggregateReservation", + "description": "Reservation for aggregated resources, providing shape flexibility." + }, "commitment": { "description": "[Output Only] Full or partial URL to a parent commitment. This field displays for reservations that are tied to a commitment.", "type": "string" @@ -67246,7 +67419,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. 
USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -68399,6 +68572,10 @@ "$ref": "Expr", "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing `evaluateThreatIntelligence` require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies." }, + "exprOptions": { + "$ref": "SecurityPolicyRuleMatcherExprOptions", + "description": "The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr')." + }, "versionedExpr": { "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", "enum": [ @@ -68425,6 +68602,36 @@ }, "type": "object" }, + "SecurityPolicyRuleMatcherExprOptions": { + "id": "SecurityPolicyRuleMatcherExprOptions", + "properties": { + "recaptchaOptions": { + "$ref": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions", + "description": "reCAPTCHA configuration options to be applied for the rule. If the rule does not evaluate reCAPTCHA tokens, this field will have no effect." + } + }, + "type": "object" + }, + "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions": { + "id": "SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions", + "properties": { + "actionTokenSiteKeys": { + "description": "A list of site keys to be used during the validation of reCAPTCHA action-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.", + "items": { + "type": "string" + }, + "type": "array" + }, + "sessionTokenSiteKeys": { + "description": "A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "SecurityPolicyRuleNetworkMatcher": { "description": "Represents a match condition that incoming network traffic is evaluated against.", "id": "SecurityPolicyRuleNetworkMatcher", @@ -68609,7 +68816,7 @@ "type": "string" }, "enforceOnKey": { - "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. 
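The new SecurityPolicyRuleMatcherExprOptions and ...RecaptchaOptions schemas above carry reCAPTCHA site keys for CEVAL expressions. A sketch of how a caller might populate them, assuming the Go types generated from these schemas; the site key names and CEL expression are hypothetical.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Sketch only: site key resource names and the expression are hypothetical;
	// the struct shapes mirror the schemas added above.
	rule := &compute.SecurityPolicyRule{
		Priority: 1000,
		Action:   "allow",
		Match: &compute.SecurityPolicyRuleMatcher{
			Expr: &compute.Expr{
				Expression: "token.recaptcha_action.score > 0.5", // hypothetical CEVAL expression
			},
			ExprOptions: &compute.SecurityPolicyRuleMatcherExprOptions{
				RecaptchaOptions: &compute.SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions{
					ActionTokenSiteKeys:  []string{"projects/my-project/keys/action-site-key"},
					SessionTokenSiteKeys: []string{"projects/my-project/keys/session-site-key"},
				},
			},
		},
	}
	b, _ := json.MarshalIndent(rule, "", "  ")
	fmt.Println(string(b))
}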
This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKey\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. ", "enum": [ "ALL", "HTTP_COOKIE", @@ -68618,6 +68825,8 @@ "IP", "REGION_CODE", "SNI", + "TLS_JA3_FINGERPRINT", + "USER_IP", "XFF_IP" ], "enumDescriptions": [ @@ -68628,6 +68837,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -68666,7 +68877,7 @@ "type": "string" }, "enforceOnKeyType": { - "description": "Determines the key to enforce the rate_limit_threshold on. 
Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. - HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. - USER_IP: The IP address of the originating client, which is resolved based on \"userIpRequestHeaders\" configured with the security policy. If there is no \"userIpRequestHeaders\" configuration or an IP address cannot be resolved from it, the key type defaults to IP. 
", "enum": [ "ALL", "HTTP_COOKIE", @@ -68675,6 +68886,8 @@ "IP", "REGION_CODE", "SNI", + "TLS_JA3_FINGERPRINT", + "USER_IP", "XFF_IP" ], "enumDescriptions": [ @@ -68685,6 +68898,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -69699,6 +69914,10 @@ "format": "int64", "type": "string" }, + "enableConfidentialCompute": { + "description": "Whether this snapshot is created from a confidential compute mode disk. [Output Only]: This field is not set by user, but from source disk.", + "type": "boolean" + }, "guestOsFeatures": { "description": "[Output Only] A list of features to enable on the guest operating system. Applicable only for bootable images. Read Enabling guest operating system features to see a list of available options.", "items": { @@ -71227,7 +71446,7 @@ "type": "object" }, "SslPolicy": { - "description": "Represents an SSL Policy resource. Use SSL policies to control the SSL features, such as versions and cipher suites, offered by an HTTPS or SSL Proxy load balancer. For more information, read SSL Policy Concepts.", + "description": "Represents an SSL Policy resource. Use SSL policies to control SSL features, such as versions and cipher suites, that are offered by Application Load Balancers and proxy Network Load Balancers. For more information, read SSL policies overview.", "id": "SslPolicy", "properties": { "creationTimestamp": { @@ -72288,7 +72507,7 @@ "type": "string" }, "portSpecification": { - "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for pass-through load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. Not supported by target pools. Not supported by backend services for pass-through load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", + "description": "Specifies how a port is selected for health checking. Can be one of the following values: USE_FIXED_PORT: Specifies a port number explicitly using the port field in the health check. Supported by backend services for passthrough load balancers and backend services for proxy load balancers. Not supported by target pools. The health check supports all backends supported by the backend service provided the backend can be health checked. For example, GCE_VM_IP network endpoint groups, GCE_VM_IP_PORT network endpoint groups, and instance group backends. USE_NAMED_PORT: Not supported. USE_SERVING_PORT: Provides an indirect method of specifying the health check port by referring to the backend service. Only supported by backend services for proxy load balancers. 
Not supported by target pools. Not supported by backend services for passthrough load balancers. Supports all backends that can be health checked; for example, GCE_VM_IP_PORT network endpoint groups and instance group backends. For GCE_VM_IP_PORT network endpoint group backends, the health check uses the port number specified for each endpoint in the network endpoint group. For instance group backends, the health check uses the port number determined by looking up the backend service's named port in the instance group's list of named ports.", "enum": [ "USE_FIXED_PORT", "USE_NAMED_PORT", @@ -74018,7 +74237,7 @@ "type": "object" }, "TargetPool": { - "description": "Represents a Target Pool resource. Target pools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.", + "description": "Represents a Target Pool resource. Target pools are used with external passthrough Network Load Balancers. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool. For more information, read Using target pools.", "id": "TargetPool", "properties": { "backupPool": { @@ -74681,7 +74900,7 @@ "type": "object" }, "TargetSslProxy": { - "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a SSL Proxy load balancer. Global forwarding rules reference a target SSL proxy, and the target proxy then references an external backend service. For more information, read Using Target Proxies.", + "description": "Represents a Target SSL Proxy resource. A target SSL proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target SSL proxy, and the target proxy then references a backend service. For more information, read Proxy Network Load Balancer overview.", "id": "TargetSslProxy", "properties": { "certificateMap": { @@ -75060,7 +75279,7 @@ "type": "object" }, "TargetTcpProxy": { - "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a TCP Proxy load balancer. Global forwarding rules reference target TCP proxy, and the target proxy then references an external backend service. For more information, read TCP Proxy Load Balancing overview.", + "description": "Represents a Target TCP Proxy resource. A target TCP proxy is a component of a Proxy Network Load Balancer. The forwarding rule references the target TCP proxy, and the target proxy then references a backend service. 
For more information, read Proxy Network Load Balancer overview.", "id": "TargetTcpProxy", "properties": { "creationTimestamp": { diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 2f9af3b44..407df4630 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -227,6 +227,7 @@ func New(client *http.Client) (*Service, error) { s.RegionTargetHttpsProxies = NewRegionTargetHttpsProxiesService(s) s.RegionTargetTcpProxies = NewRegionTargetTcpProxiesService(s) s.RegionUrlMaps = NewRegionUrlMapsService(s) + s.RegionZones = NewRegionZonesService(s) s.Regions = NewRegionsService(s) s.Reservations = NewReservationsService(s) s.ResourcePolicies = NewResourcePoliciesService(s) @@ -396,6 +397,8 @@ type Service struct { RegionUrlMaps *RegionUrlMapsService + RegionZones *RegionZonesService + Regions *RegionsService Reservations *ReservationsService @@ -1066,6 +1069,15 @@ type RegionUrlMapsService struct { s *Service } +func NewRegionZonesService(s *Service) *RegionZonesService { + rs := &RegionZonesService{s: s} + return rs +} + +type RegionZonesService struct { + s *Service +} + func NewRegionsService(s *Service) *RegionsService { rs := &RegionsService{s: s} return rs @@ -2968,6 +2980,121 @@ func (s *AliasIpRange) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AllocationAggregateReservation: This reservation type is specified by +// total resource amounts (e.g. total count of CPUs) and can account for +// multiple instance SKUs. In other words, one can create instances of +// varying shapes against this reservation. +type AllocationAggregateReservation struct { + // InUseResources: [Output only] List of resources currently in use. + InUseResources []*AllocationAggregateReservationReservedResourceInfo `json:"inUseResources,omitempty"` + + // ReservedResources: List of reserved resources (CPUs, memory, + // accelerators). + ReservedResources []*AllocationAggregateReservationReservedResourceInfo `json:"reservedResources,omitempty"` + + // VmFamily: The VM family that all instances scheduled against this + // reservation must belong to. + // + // Possible values: + // "VM_FAMILY_CLOUD_TPU_LITE_DEVICE_CT5L" + // "VM_FAMILY_CLOUD_TPU_LITE_POD_SLICE_CT5LP" + // "VM_FAMILY_CLOUD_TPU_POD_SLICE_CT4P" + VmFamily string `json:"vmFamily,omitempty"` + + // WorkloadType: The workload type of the instances that will target + // this reservation. + // + // Possible values: + // "BATCH" - Reserved resources will be optimized for BATCH workloads, + // such as ML training. + // "SERVING" - Reserved resources will be optimized for SERVING + // workloads, such as ML inference. + // "UNSPECIFIED" + WorkloadType string `json:"workloadType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InUseResources") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InUseResources") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AllocationAggregateReservation) MarshalJSON() ([]byte, error) { + type NoMethod AllocationAggregateReservation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AllocationAggregateReservationReservedResourceInfo struct { + // Accelerator: Properties of accelerator resources in this reservation. + Accelerator *AllocationAggregateReservationReservedResourceInfoAccelerator `json:"accelerator,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Accelerator") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Accelerator") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *AllocationAggregateReservationReservedResourceInfo) MarshalJSON() ([]byte, error) { + type NoMethod AllocationAggregateReservationReservedResourceInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type AllocationAggregateReservationReservedResourceInfoAccelerator struct { + // AcceleratorCount: Number of accelerators of specified type. + AcceleratorCount int64 `json:"acceleratorCount,omitempty"` + + // AcceleratorType: Full or partial URL to accelerator type. e.g. + // "projects/{PROJECT}/zones/{ZONE}/acceleratorTypes/ct4l" + AcceleratorType string `json:"acceleratorType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AcceleratorCount") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AcceleratorCount") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *AllocationAggregateReservationReservedResourceInfoAccelerator) MarshalJSON() ([]byte, error) { + type NoMethod AllocationAggregateReservationReservedResourceInfoAccelerator + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // AllocationResourceStatus: [Output Only] Contains output only fields. type AllocationResourceStatus struct { // SpecificSkuAllocation: Allocation Properties of this reservation. @@ -3375,6 +3502,10 @@ type AttachedDiskInitializeParams struct { // example: pd-standard. DiskType string `json:"diskType,omitempty"` + // EnableConfidentialCompute: Whether this disk is using confidential + // compute mode. + EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` + // Labels: Labels to apply to this disk. These can be later modified by // the disks.setLabels method. This field is only applicable for // persistent disks. @@ -5537,13 +5668,13 @@ func (s *BackendBucketListWarningData) MarshalJSON() ([]byte, error) { // For more information, see Backend Services. type BackendService struct { // AffinityCookieTtlSec: Lifetime of cookies in seconds. This setting is - // applicable to external and internal HTTP(S) load balancers and - // Traffic Director and requires GENERATED_COOKIE or HTTP_COOKIE session - // affinity. If set to 0, the cookie is non-persistent and lasts only - // until the end of the browser session (or equivalent). The maximum - // allowed value is two weeks (1,209,600). Not supported when the - // backend service is referenced by a URL map that is bound to target - // gRPC proxy that has validateForProxyless field set to true. + // applicable to Application Load Balancers and Traffic Director and + // requires GENERATED_COOKIE or HTTP_COOKIE session affinity. If set to + // 0, the cookie is non-persistent and lasts only until the end of the + // browser session (or equivalent). The maximum allowed value is two + // weeks (1,209,600). Not supported when the backend service is + // referenced by a URL map that is bound to target gRPC proxy that has + // validateForProxyless field set to true. AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` // Backends: The list of backends that serve this BackendService. @@ -5569,8 +5700,8 @@ type BackendService struct { // ConnectionTrackingPolicy: Connection Tracking configuration for this // BackendService. Connection tracking policy settings are only - // available for Network Load Balancing and Internal TCP/UDP Load - // Balancing. + // available for external passthrough Network Load Balancers and + // internal passthrough Network Load Balancers. ConnectionTrackingPolicy *BackendServiceConnectionTrackingPolicy `json:"connectionTrackingPolicy,omitempty"` // ConsistentHash: Consistent Hash-based load balancing can be used to @@ -5609,15 +5740,15 @@ type BackendService struct { // security policy associated with this backend service. EdgeSecurityPolicy string `json:"edgeSecurityPolicy,omitempty"` - // EnableCDN: If true, enables Cloud CDN for the backend service of an - // external HTTP(S) load balancer. + // EnableCDN: If true, enables Cloud CDN for the backend service of a + // global external Application Load Balancer. EnableCDN bool `json:"enableCDN,omitempty"` // FailoverPolicy: Requires at least one backend instance group to be // defined as a backup (failover) backend. 
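To show how the AllocationAggregateReservation types added above fit together, here is a minimal sketch that populates them on a Reservation, assuming the AggregateReservation field generated from the schema change earlier in this diff; the reservation name, project, and accelerator type URL are hypothetical.

package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Sketch only: names and the accelerator type URL are hypothetical; the
	// struct shapes mirror the AllocationAggregateReservation types above.
	r := &compute.Reservation{
		Name: "example-tpu-aggregate-reservation",
		AggregateReservation: &compute.AllocationAggregateReservation{
			VmFamily:     "VM_FAMILY_CLOUD_TPU_LITE_DEVICE_CT5L",
			WorkloadType: "BATCH",
			ReservedResources: []*compute.AllocationAggregateReservationReservedResourceInfo{{
				Accelerator: &compute.AllocationAggregateReservationReservedResourceInfoAccelerator{
					AcceleratorCount: 4,
					AcceleratorType:  "projects/my-project/zones/us-central2-b/acceleratorTypes/ct5l", // hypothetical
				},
			}},
		},
	}
	b, _ := json.MarshalIndent(r, "", "  ")
	fmt.Println(string(b))
}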
For load balancers that have - // configurable failover: Internal TCP/UDP Load Balancing + // configurable failover: Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) - // and external TCP/UDP Load Balancing + // and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). FailoverPolicy *BackendServiceFailoverPolicy `json:"failoverPolicy,omitempty"` @@ -5641,8 +5772,8 @@ type BackendService struct { HealthChecks []string `json:"healthChecks,omitempty"` // Iap: The configurations for Identity-Aware Proxy on this resource. - // Not available for Internal TCP/UDP Load Balancing and Network Load - // Balancing. + // Not available for internal passthrough Network Load Balancers and + // external passthrough Network Load Balancers. Iap *BackendServiceIAP `json:"iap,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -5658,14 +5789,16 @@ type BackendService struct { // another. For more information, refer to Choosing a load balancer. // // Possible values: - // "EXTERNAL" - Signifies that this will be used for external HTTP(S), - // SSL Proxy, TCP Proxy, or Network Load Balancing - // "EXTERNAL_MANAGED" - Signifies that this will be used for External - // Managed HTTP(S) Load Balancing. - // "INTERNAL" - Signifies that this will be used for Internal TCP/UDP - // Load Balancing. - // "INTERNAL_MANAGED" - Signifies that this will be used for Internal - // HTTP(S) Load Balancing. + // "EXTERNAL" - Signifies that this will be used for classic + // Application Load Balancers, global external proxy Network Load + // Balancers, or external passthrough Network Load Balancers. + // "EXTERNAL_MANAGED" - Signifies that this will be used for global + // external Application Load Balancers, regional external Application + // Load Balancers, or regional external proxy Network Load Balancers. + // "INTERNAL" - Signifies that this will be used for internal + // passthrough Network Load Balancers. + // "INTERNAL_MANAGED" - Signifies that this will be used for internal + // Application Load Balancers. // "INTERNAL_SELF_MANAGED" - Signifies that this will be used by // Traffic Director. // "INVALID_LOAD_BALANCING_SCHEME" @@ -5807,16 +5940,18 @@ type BackendService struct { OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"` // Port: Deprecated in favor of portName. The TCP port to connect on the - // backend. The default value is 80. For Internal TCP/UDP Load Balancing - // and Network Load Balancing, omit port. + // backend. The default value is 80. For internal passthrough Network + // Load Balancers and external passthrough Network Load Balancers, omit + // port. Port int64 `json:"port,omitempty"` // PortName: A named port on a backend instance group representing the // port for communication to the backend VMs in that group. The named // port must be defined on each backend instance group // (https://cloud.google.com/load-balancing/docs/backend-service#named_ports). - // This parameter has no meaning if the backends are NEGs. For Internal - // TCP/UDP Load Balancing and Network Load Balancing, omit port_name. + // This parameter has no meaning if the backends are NEGs. For internal + // passthrough Network Load Balancers and external passthrough Network + // Load Balancers, omit port_name. 
PortName string `json:"portName,omitempty"` // Protocol: The protocol this BackendService uses to communicate with @@ -6400,18 +6535,19 @@ type BackendServiceConnectionTrackingPolicy struct { // "NEVER_PERSIST" ConnectionPersistenceOnUnhealthyBackends string `json:"connectionPersistenceOnUnhealthyBackends,omitempty"` - // EnableStrongAffinity: Enable Strong Session Affinity for Network Load - // Balancing. This option is not available publicly. + // EnableStrongAffinity: Enable Strong Session Affinity for external + // passthrough Network Load Balancers. This option is not available + // publicly. EnableStrongAffinity bool `json:"enableStrongAffinity,omitempty"` // IdleTimeoutSec: Specifies how long to keep a Connection Tracking - // entry while there is no matching traffic (in seconds). For Internal - // TCP/UDP Load Balancing: - The minimum (default) is 10 minutes and the - // maximum is 16 hours. - It can be set only if Connection Tracking is - // less than 5-tuple (i.e. Session Affinity is CLIENT_IP_NO_DESTINATION, - // CLIENT_IP or CLIENT_IP_PROTO, and Tracking Mode is PER_SESSION). For - // Network Load Balancer the default is 60 seconds. This option is not - // available publicly. + // entry while there is no matching traffic (in seconds). For internal + // passthrough Network Load Balancers: - The minimum (default) is 10 + // minutes and the maximum is 16 hours. - It can be set only if + // Connection Tracking is less than 5-tuple (i.e. Session Affinity is + // CLIENT_IP_NO_DESTINATION, CLIENT_IP or CLIENT_IP_PROTO, and Tracking + // Mode is PER_SESSION). For external passthrough Network Load Balancers + // the default is 60 seconds. This option is not available publicly. IdleTimeoutSec int64 `json:"idleTimeoutSec,omitempty"` // TrackingMode: Specifies the key used for connection tracking. There @@ -6457,9 +6593,9 @@ func (s *BackendServiceConnectionTrackingPolicy) MarshalJSON() ([]byte, error) { } // BackendServiceFailoverPolicy: For load balancers that have -// configurable failover: Internal TCP/UDP Load Balancing +// configurable failover: Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) -// and external TCP/UDP Load Balancing +// and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). // On failover or failback, this field indicates whether connection // draining will be honored. Google Cloud has a fixed connection @@ -6478,9 +6614,9 @@ type BackendServiceFailoverPolicy struct { // unhealthy.If set to false, connections are distributed among all // primary VMs when all primary and all backup backend VMs are // unhealthy. For load balancers that have configurable failover: - // Internal TCP/UDP Load Balancing + // Internal passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/internal/failover-overview) - // and external TCP/UDP Load Balancing + // and external passthrough Network Load Balancers // (https://cloud.google.com/load-balancing/docs/network/networklb-failover-overview). // The default is false. DropTrafficIfUnhealthy bool `json:"dropTrafficIfUnhealthy,omitempty"` @@ -9340,6 +9476,10 @@ type Disk struct { // provide a key to use the disk later. DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` + // EnableConfidentialCompute: Whether this disk is using confidential + // compute mode. 
+ EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` + // GuestOsFeatures: A list of features to enable on the guest operating // system. Applicable only for bootable images. Read Enabling guest // operating system features to see a list of available options. @@ -13317,10 +13457,10 @@ func (s *FixedOrPercent) MarshalJSON() ([]byte, error) { // * Regional // (https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules) // A forwarding rule and its corresponding IP address represent the -// frontend configuration of a Google Cloud Platform load balancer. -// Forwarding rules can also reference target instances and Cloud VPN -// Classic gateways (targetVpnGateway). For more information, read -// Forwarding rule concepts and Using protocol forwarding. +// frontend configuration of a Google Cloud load balancer. Forwarding +// rules can also reference target instances and Cloud VPN Classic +// gateways (targetVpnGateway). For more information, read Forwarding +// rule concepts and Using protocol forwarding. type ForwardingRule struct { // IPAddress: IP address for which this forwarding rule accepts traffic. // When a client sends traffic to this IP address, the forwarding rule @@ -13384,8 +13524,9 @@ type ForwardingRule struct { // AllowGlobalAccess: This field is used along with the backend_service // field for internal load balancing or with the target field for // internal TargetInstance. If set to true, clients can access the - // Internal TCP/UDP Load Balancer, Internal HTTP(S) and TCP Proxy Load - // Balancer from all regions. If false, only allows access from the + // internal passthrough Network Load Balancers, the regional internal + // Application Load Balancer, and the regional internal proxy Network + // Load Balancer from all regions. If false, only allows access from the // local region the load balancer is located at. Note that for // INTERNAL_MANAGED forwarding rules, this field cannot be changed after // the forwarding rule is created. @@ -13396,16 +13537,16 @@ type ForwardingRule struct { AllowPscGlobalAccess bool `json:"allowPscGlobalAccess,omitempty"` // BackendService: Identifies the backend service to which the - // forwarding rule sends traffic. Required for Internal TCP/UDP Load - // Balancing and Network Load Balancing; must be omitted for all other + // forwarding rule sends traffic. Required for internal and external + // passthrough Network Load Balancers; must be omitted for all other // load balancer types. BackendService string `json:"backendService,omitempty"` // BaseForwardingRule: [Output Only] The URL for the corresponding base - // Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule + // forwarding rule. By base forwarding rule, we mean the forwarding rule // that has the same IP address, protocol, and port settings with the - // current Forwarding Rule, but without sourceIPRanges specified. Always - // empty if the current Forwarding Rule does not have sourceIPRanges + // current forwarding rule, but without sourceIPRanges specified. Always + // empty if the current forwarding rule does not have sourceIPRanges // specified. BaseForwardingRule string `json:"baseForwardingRule,omitempty"` @@ -13448,7 +13589,7 @@ type ForwardingRule struct { IsMirroringCollector bool `json:"isMirroringCollector,omitempty"` // Kind: [Output Only] Type of the resource. Always - // compute#forwardingRule for Forwarding Rule resources. + // compute#forwardingRule for forwarding rule resources. 
Kind string `json:"kind,omitempty"` // LabelFingerprint: A fingerprint for the labels being applied to this @@ -13511,10 +13652,10 @@ type ForwardingRule struct { Name string `json:"name,omitempty"` // Network: This field is not used for global external load balancing. - // For Internal TCP/UDP Load Balancing, this field identifies the - // network that the load balanced IP should belong to for this - // Forwarding Rule. If the subnetwork is specified, the network of the - // subnetwork will be used. If neither subnetwork nor this field is + // For internal passthrough Network Load Balancers, this field + // identifies the network that the load balanced IP should belong to for + // this forwarding rule. If the subnetwork is specified, the network of + // the subnetwork will be used. If neither subnetwork nor this field is // specified, the default network will be used. For Private Service // Connect forwarding rules that forward traffic to Google APIs, a // network must be provided. @@ -13581,7 +13722,7 @@ type ForwardingRule struct { Ports []string `json:"ports,omitempty"` // PscConnectionId: [Output Only] The PSC connection id of the PSC - // Forwarding Rule. + // forwarding rule. PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` // Possible values: @@ -13611,7 +13752,7 @@ type ForwardingRule struct { ServiceDirectoryRegistrations []*ForwardingRuleServiceDirectoryRegistration `json:"serviceDirectoryRegistrations,omitempty"` // ServiceLabel: An optional prefix to the service name for this - // Forwarding Rule. If specified, the prefix is the first label of the + // forwarding rule. If specified, the prefix is the first label of the // fully qualified service name. The label must be 1-63 characters long, // and comply with RFC1035. Specifically, the label must be 1-63 // characters long and match the regular expression @@ -13622,25 +13763,26 @@ type ForwardingRule struct { ServiceLabel string `json:"serviceLabel,omitempty"` // ServiceName: [Output Only] The internal fully qualified service name - // for this Forwarding Rule. This field is only used for internal load + // for this forwarding rule. This field is only used for internal load // balancing. ServiceName string `json:"serviceName,omitempty"` - // SourceIpRanges: If not empty, this Forwarding Rule will only forward + // SourceIpRanges: If not empty, this forwarding rule will only forward // the traffic when the source IP address matches one of the IP - // addresses or CIDR ranges set here. Note that a Forwarding Rule can + // addresses or CIDR ranges set here. Note that a forwarding rule can // only have up to 64 source IP ranges, and this field can only be used - // with a regional Forwarding Rule whose scheme is EXTERNAL. Each + // with a regional forwarding rule whose scheme is EXTERNAL. Each // source_ip_range entry should be either an IP address (for example, // 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24). SourceIpRanges []string `json:"sourceIpRanges,omitempty"` // Subnetwork: This field identifies the subnetwork that the load - // balanced IP should belong to for this Forwarding Rule, used in - // internal load balancing and network load balancing with IPv6. If the - // network specified is in auto subnet mode, this field is optional. - // However, a subnetwork must be specified if the network is in custom - // subnet mode or when creating external forwarding rule with IPv6. 
+ // balanced IP should belong to for this forwarding rule, used with + // internal load balancers and external passthrough Network Load + // Balancers with IPv6. If the network specified is in auto subnet mode, + // this field is optional. However, a subnetwork must be specified if + // the network is in custom subnet mode or when creating external + // forwarding rule with IPv6. Subnetwork string `json:"subnetwork,omitempty"` // Target: The URL of the target resource to receive the matched @@ -14101,9 +14243,9 @@ func (s *ForwardingRuleReference) MarshalJSON() ([]byte, error) { } // ForwardingRuleServiceDirectoryRegistration: Describes the -// auto-registration of the Forwarding Rule to Service Directory. The +// auto-registration of the forwarding rule to Service Directory. The // region and project of the Service Directory resource generated from -// this registration will be the same as this Forwarding Rule. +// this registration will be the same as this forwarding rule. type ForwardingRuleServiceDirectoryRegistration struct { // Namespace: Service Directory namespace to register the forwarding // rule under. @@ -14115,8 +14257,8 @@ type ForwardingRuleServiceDirectoryRegistration struct { // ServiceDirectoryRegion: [Optional] Service Directory region to // register this global forwarding rule under. Default to "us-central1". - // Only used for PSC for Google APIs. All PSC for Google APIs Forwarding - // Rules on the same network should use the same Service Directory + // Only used for PSC for Google APIs. All PSC for Google APIs forwarding + // rules on the same network should use the same Service Directory // region. ServiceDirectoryRegion string `json:"serviceDirectoryRegion,omitempty"` @@ -14333,7 +14475,7 @@ type GRPCHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -14343,7 +14485,7 @@ type GRPCHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -14773,7 +14915,7 @@ type HTTP2HealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. 
The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -14783,7 +14925,7 @@ type HTTP2HealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -14867,7 +15009,7 @@ type HTTPHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Also supported in // legacy HTTP health checks for target pools. The health check supports // all backends supported by the backend service provided the backend @@ -14961,7 +15103,7 @@ type HTTPSHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -14971,7 +15113,7 @@ type HTTPSHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -15041,20 +15183,14 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { // (/compute/docs/reference/rest/v1/regionHealthChecks) * Global // (/compute/docs/reference/rest/v1/healthChecks) These health check // resources can be used for load balancing and for autohealing VMs in a -// managed instance group (MIG). 
**Load balancing** The following load -// balancer can use either regional or global health check: * Internal -// TCP/UDP load balancer The following load balancers require regional -// health check: * Internal HTTP(S) load balancer * Backend -// service-based network load balancer Traffic Director and the -// following load balancers require global health check: * External -// HTTP(S) load balancer * TCP proxy load balancer * SSL proxy load -// balancer The following load balancer require legacy HTTP health -// checks (/compute/docs/reference/rest/v1/httpHealthChecks): * Target -// pool-based network load balancer **Autohealing in MIGs** The health -// checks that you use for autohealing VMs in a MIG can be either -// regional or global. For more information, see Set up an application -// health check and autohealing. For more information, see Health checks -// overview. +// managed instance group (MIG). **Load balancing** Health check +// requirements vary depending on the type of load balancer. For details +// about the type of health check supported for each load balancer and +// corresponding backend type, see Health checks overview: Load balancer +// guide. **Autohealing in MIGs** The health checks that you use for +// autohealing VMs in a MIG can be either regional or global. For more +// information, see Set up an application health check and autohealing. +// For more information, see Health checks overview. type HealthCheck struct { // CheckIntervalSec: How often (in seconds) to send a health check. The // default value is 5 seconds. @@ -17753,6 +17889,11 @@ type Image struct { // (in GB). DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + // EnableConfidentialCompute: Whether this image is created from a + // confidential compute mode disk. [Output Only]: This field is not set + // by user, but from source disk. + EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` + // Family: The name of the image family to which this image belongs. The // image family name can be from a publicly managed image family // provided by Compute Engine, or from a custom image family you create. @@ -18437,6 +18578,9 @@ type Instance struct { // corresponding input only field. ResourceStatus *ResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzi: [Output Only] Reserved for future use. + SatisfiesPzi bool `json:"satisfiesPzi,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -39302,6 +39446,9 @@ type Quota struct { // "PREEMPTIBLE_NVIDIA_T4_GPUS" // "PREEMPTIBLE_NVIDIA_T4_VWS_GPUS" // "PREEMPTIBLE_NVIDIA_V100_GPUS" + // "PREEMPTIBLE_TPU_LITE_DEVICE_V5" + // "PREEMPTIBLE_TPU_LITE_PODSLICE_V5" + // "PREEMPTIBLE_TPU_PODSLICE_V4" // "PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK" // "PSC_INTERNAL_LB_FORWARDING_RULES" // "PUBLIC_ADVERTISED_PREFIXES" @@ -39340,6 +39487,9 @@ type Quota struct { // "TARGET_SSL_PROXIES" // "TARGET_TCP_PROXIES" // "TARGET_VPN_GATEWAYS" + // "TPU_LITE_DEVICE_V5" + // "TPU_LITE_PODSLICE_V5" + // "TPU_PODSLICE_V4" // "URL_MAPS" // "VPN_GATEWAYS" // "VPN_TUNNELS" @@ -41876,6 +42026,10 @@ func (s *RequestMirrorPolicy) MarshalJSON() ([]byte, error) { // that capacity is held in a specific zone even if the reserved VMs are // not running. For more information, read Reserving zonal resources. type Reservation struct { + // AggregateReservation: Reservation for aggregated resources, providing + // shape flexibility. 
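// Illustrative sketch (not part of the patch): the health check hunks above
// standardize on "passthrough" wording and defer to the load balancer guide
// for which check type each balancer accepts. A TCP health check that lets a
// proxy load balancer's backend service choose the port is sketched below;
// the name and thresholds are placeholders.
package main

import compute "google.golang.org/api/compute/v1"

func exampleTCPHealthCheck() *compute.HealthCheck {
	return &compute.HealthCheck{
		Name: "example-tcp-hc",
		Type: "TCP",
		TcpHealthCheck: &compute.TCPHealthCheck{
			// USE_SERVING_PORT is only supported by backend services for
			// proxy load balancers, per the PortSpecification comments above;
			// passthrough load balancers would use USE_FIXED_PORT instead.
			PortSpecification: "USE_SERVING_PORT",
		},
		CheckIntervalSec:   5,
		TimeoutSec:         5,
		HealthyThreshold:   2,
		UnhealthyThreshold: 2,
	}
}

func main() { _ = exampleTCPHealthCheck() }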
+ AggregateReservation *AllocationAggregateReservation `json:"aggregateReservation,omitempty"` + // Commitment: [Output Only] Full or partial URL to a parent commitment. // This field displays for reservations that are tied to a commitment. Commitment string `json:"commitment,omitempty"` @@ -41956,20 +42110,22 @@ type Reservation struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Commitment") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AggregateReservation") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Commitment") to include in - // API requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "AggregateReservation") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. NullFields []string `json:"-"` } @@ -46270,7 +46426,7 @@ type SSLHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -46280,7 +46436,7 @@ type SSLHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -47980,6 +48136,10 @@ type SecurityPolicyRuleMatcher struct { // Security Policies. 
Expr *Expr `json:"expr,omitempty"` + // ExprOptions: The configuration options available when specifying a + // user defined CEVAL expression (i.e., 'expr'). + ExprOptions *SecurityPolicyRuleMatcherExprOptions `json:"exprOptions,omitempty"` + // VersionedExpr: Preconfigured versioned expression. If this field is // specified, config must also be specified. Available preconfigured // expressions along with their requirements are: SRC_IPS_V1 - must @@ -48041,6 +48201,73 @@ func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SecurityPolicyRuleMatcherExprOptions struct { + // RecaptchaOptions: reCAPTCHA configuration options to be applied for + // the rule. If the rule does not evaluate reCAPTCHA tokens, this field + // will have no effect. + RecaptchaOptions *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions `json:"recaptchaOptions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RecaptchaOptions") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RecaptchaOptions") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRuleMatcherExprOptions) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleMatcherExprOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions struct { + // ActionTokenSiteKeys: A list of site keys to be used during the + // validation of reCAPTCHA action-tokens. The provided site keys need to + // be created from reCAPTCHA API under the same project where the + // security policy is created. + ActionTokenSiteKeys []string `json:"actionTokenSiteKeys,omitempty"` + + // SessionTokenSiteKeys: A list of site keys to be used during the + // validation of reCAPTCHA session-tokens. The provided site keys need + // to be created from reCAPTCHA API under the same project where the + // security policy is created. + SessionTokenSiteKeys []string `json:"sessionTokenSiteKeys,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ActionTokenSiteKeys") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ActionTokenSiteKeys") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // SecurityPolicyRuleNetworkMatcher: Represents a match condition that // incoming network traffic is evaluated against. type SecurityPolicyRuleNetworkMatcher struct { @@ -48296,7 +48523,13 @@ type SecurityPolicyRuleRateLimitOptions struct { // Server name indication in the TLS session of the HTTPS request. The // key value is truncated to the first 128 bytes. The key type defaults // to ALL on a HTTP session. - REGION_CODE: The country/region from - // which the request originates. + // which the request originates. - TLS_JA3_FINGERPRINT: JA3 TLS/SSL + // fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If + // not available, the key type defaults to ALL. - USER_IP: The IP + // address of the originating client, which is resolved based on + // "userIpRequestHeaders" configured with the security policy. If there + // is no "userIpRequestHeaders" configuration or an IP address cannot be + // resolved from it, the key type defaults to IP. // // Possible values: // "ALL" @@ -48306,6 +48539,8 @@ type SecurityPolicyRuleRateLimitOptions struct { // "IP" // "REGION_CODE" // "SNI" + // "TLS_JA3_FINGERPRINT" + // "USER_IP" // "XFF_IP" EnforceOnKey string `json:"enforceOnKey,omitempty"` @@ -48392,7 +48627,14 @@ type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { // bytes. - SNI: Server name indication in the TLS session of the HTTPS // request. The key value is truncated to the first 128 bytes. The key // type defaults to ALL on a HTTP session. - REGION_CODE: The - // country/region from which the request originates. + // country/region from which the request originates. - + // TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects + // using HTTPS, HTTP/2 or HTTP/3. If not available, the key type + // defaults to ALL. - USER_IP: The IP address of the originating client, + // which is resolved based on "userIpRequestHeaders" configured with the + // security policy. If there is no "userIpRequestHeaders" configuration + // or an IP address cannot be resolved from it, the key type defaults to + // IP. // // Possible values: // "ALL" @@ -48402,6 +48644,8 @@ type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { // "IP" // "REGION_CODE" // "SNI" + // "TLS_JA3_FINGERPRINT" + // "USER_IP" // "XFF_IP" EnforceOnKeyType string `json:"enforceOnKeyType,omitempty"` @@ -49884,6 +50128,11 @@ type Snapshot struct { // snapshot to a disk. DownloadBytes int64 `json:"downloadBytes,omitempty,string"` + // EnableConfidentialCompute: Whether this snapshot is created from a + // confidential compute mode disk. [Output Only]: This field is not set + // by user, but from source disk. + EnableConfidentialCompute bool `json:"enableConfidentialCompute,omitempty"` + // GuestOsFeatures: [Output Only] A list of features to enable on the // guest operating system. Applicable only for bootable images. 
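// Illustrative sketch (not part of the patch): the SecurityPolicyRuleMatcher
// hunks above add ExprOptions with reCAPTCHA site keys, and the rate-limit
// hunks add the TLS_JA3_FINGERPRINT and USER_IP key types. One plausible use
// of the new matcher options is shown below; the CEL expression and the site
// key path are placeholders, not values taken from this change.
package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func exampleRecaptchaRule() *compute.SecurityPolicyRule {
	return &compute.SecurityPolicyRule{
		Priority: 1000,
		Action:   "allow",
		Match: &compute.SecurityPolicyRuleMatcher{
			// A user-defined CEVAL expression that evaluates reCAPTCHA tokens.
			Expr: &compute.Expr{Expression: `token.recaptcha_action.score > 0.5`},
			// The new options only take effect when the expression above
			// actually evaluates reCAPTCHA tokens.
			ExprOptions: &compute.SecurityPolicyRuleMatcherExprOptions{
				RecaptchaOptions: &compute.SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions{
					ActionTokenSiteKeys: []string{
						"projects/example-project/keys/example-action-site-key",
					},
				},
			},
		},
	}
}

func main() { fmt.Println(exampleRecaptchaRule().Priority) }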
Read // Enabling guest operating system features to see a list of available @@ -51872,9 +52121,9 @@ func (s *SslPoliciesScopedListWarningData) MarshalJSON() ([]byte, error) { } // SslPolicy: Represents an SSL Policy resource. Use SSL policies to -// control the SSL features, such as versions and cipher suites, offered -// by an HTTPS or SSL Proxy load balancer. For more information, read -// SSL Policy Concepts. +// control SSL features, such as versions and cipher suites, that are +// offered by Application Load Balancers and proxy Network Load +// Balancers. For more information, read SSL policies overview. type SslPolicy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -53362,7 +53611,7 @@ type TCPHealthCheck struct { // PortSpecification: Specifies how a port is selected for health // checking. Can be one of the following values: USE_FIXED_PORT: // Specifies a port number explicitly using the port field in the health - // check. Supported by backend services for pass-through load balancers + // check. Supported by backend services for passthrough load balancers // and backend services for proxy load balancers. Not supported by // target pools. The health check supports all backends supported by the // backend service provided the backend can be health checked. For @@ -53372,7 +53621,7 @@ type TCPHealthCheck struct { // specifying the health check port by referring to the backend service. // Only supported by backend services for proxy load balancers. Not // supported by target pools. Not supported by backend services for - // pass-through load balancers. Supports all backends that can be health + // passthrough load balancers. Supports all backends that can be health // checked; for example, GCE_VM_IP_PORT network endpoint groups and // instance group backends. For GCE_VM_IP_PORT network endpoint group // backends, the health check uses the port number specified for each @@ -55767,10 +56016,10 @@ func (s *TargetInstancesScopedListWarningData) MarshalJSON() ([]byte, error) { } // TargetPool: Represents a Target Pool resource. Target pools are used -// for network TCP/UDP load balancing. A target pool references member -// instances, an associated legacy HttpHealthCheck resource, and, -// optionally, a backup target pool. For more information, read Using -// target pools. +// with external passthrough Network Load Balancers. A target pool +// references member instances, an associated legacy HttpHealthCheck +// resource, and, optionally, a backup target pool. For more +// information, read Using target pools. type TargetPool struct { // BackupPool: The server-defined URL for the resource. This field is // applicable only when the containing target pool is serving a @@ -56786,10 +57035,10 @@ func (s *TargetSslProxiesSetSslCertificatesRequest) MarshalJSON() ([]byte, error } // TargetSslProxy: Represents a Target SSL Proxy resource. A target SSL -// proxy is a component of a SSL Proxy load balancer. Global forwarding -// rules reference a target SSL proxy, and the target proxy then -// references an external backend service. For more information, read -// Using Target Proxies. +// proxy is a component of a Proxy Network Load Balancer. The forwarding +// rule references the target SSL proxy, and the target proxy then +// references a backend service. For more information, read Proxy +// Network Load Balancer overview. 
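// Illustrative sketch (not part of the patch): the doc updates above describe
// a target SSL proxy as one link in a proxy Network Load Balancer, where a
// forwarding rule references the target proxy and the target proxy references
// a backend service. A minimal construction of that chain could look like
// this; all names and URLs are placeholders.
package main

import compute "google.golang.org/api/compute/v1"

func exampleProxyChain() (*compute.TargetSslProxy, *compute.ForwardingRule) {
	proxy := &compute.TargetSslProxy{
		Name: "example-ssl-proxy",
		// The target proxy references a backend service...
		Service: "projects/example-project/global/backendServices/example-bes",
		SslCertificates: []string{
			"projects/example-project/global/sslCertificates/example-cert",
		},
	}
	// ...and a forwarding rule references the target proxy.
	rule := &compute.ForwardingRule{
		Name:                "example-ssl-proxy-rule",
		LoadBalancingScheme: "EXTERNAL",
		IPProtocol:          "TCP",
		PortRange:           "443",
		Target:              "projects/example-project/global/targetSslProxies/example-ssl-proxy",
	}
	return proxy, rule
}

func main() { _, _ = exampleProxyChain() }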
type TargetSslProxy struct { // CertificateMap: URL of a certificate map that identifies a // certificate map associated with the given target proxy. This field @@ -57303,10 +57552,10 @@ func (s *TargetTcpProxiesSetProxyHeaderRequest) MarshalJSON() ([]byte, error) { } // TargetTcpProxy: Represents a Target TCP Proxy resource. A target TCP -// proxy is a component of a TCP Proxy load balancer. Global forwarding -// rules reference target TCP proxy, and the target proxy then -// references an external backend service. For more information, read -// TCP Proxy Load Balancing overview. +// proxy is a component of a Proxy Network Load Balancer. The forwarding +// rule references the target TCP proxy, and the target proxy then +// references a backend service. For more information, read Proxy +// Network Load Balancer overview. type TargetTcpProxy struct { // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. @@ -97528,8 +97777,8 @@ type InstanceGroupManagersListManagedInstancesCall struct { // instance, the currentAction is CREATING. If a previous action failed, // the list displays the errors for that failed action. The orderBy // query parameter is not supported. The `pageToken` query parameter is -// supported only in the alpha and beta API and only if the group's -// `listManagedInstancesResults` field is set to `PAGINATED`. +// supported only if the group's `listManagedInstancesResults` field is +// set to `PAGINATED`. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -97715,7 +97964,7 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal } return ret, nil // { - // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "description": "Lists all of the instances in the managed instance group. Each instance in the list has a currentAction, which indicates the action that the managed instance group is performing on the instance. For example, if the group is still creating an instance, the currentAction is CREATING. If a previous action failed, the list displays the errors for that failed action. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.instanceGroupManagers.listManagedInstances", @@ -110699,9 +110948,11 @@ func (r *InstancesService) Stop(project string, zone string, instance string) *I return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false. +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This +// property is required if the instance has any attached Local SSD +// disks. 
If false, Local SSD data will be preserved when the instance +// is suspended. If true, the contents of any attached Local SSD disks +// will be discarded. func (c *InstancesStopCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesStopCall { c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c @@ -110822,7 +111073,7 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) // ], // "parameters": { // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", // "location": "query", // "type": "boolean" // }, @@ -110897,9 +111148,11 @@ func (r *InstancesService) Suspend(project string, zone string, instance string) return c } -// DiscardLocalSsd sets the optional parameter "discardLocalSsd": If -// true, discard the contents of any attached localSSD partitions. -// Default value is false. +// DiscardLocalSsd sets the optional parameter "discardLocalSsd": This +// property is required if the instance has any attached Local SSD +// disks. If false, Local SSD data will be preserved when the instance +// is suspended. If true, the contents of any attached Local SSD disks +// will be discarded. func (c *InstancesSuspendCall) DiscardLocalSsd(discardLocalSsd bool) *InstancesSuspendCall { c.urlParams_.Set("discardLocalSsd", fmt.Sprint(discardLocalSsd)) return c @@ -111020,7 +111273,7 @@ func (c *InstancesSuspendCall) Do(opts ...googleapi.CallOption) (*Operation, err // ], // "parameters": { // "discardLocalSsd": { - // "description": "If true, discard the contents of any attached localSSD partitions. Default value is false.", + // "description": "This property is required if the instance has any attached Local SSD disks. If false, Local SSD data will be preserved when the instance is suspended. If true, the contents of any attached Local SSD disks will be discarded.", // "location": "query", // "type": "boolean" // }, @@ -154556,9 +154809,8 @@ type RegionInstanceGroupManagersListManagedInstancesCall struct { // group and instances that are scheduled to be created. The list // includes any current actions that the group has scheduled for its // instances. The orderBy query parameter is not supported. The -// `pageToken` query parameter is supported only in the alpha and beta -// API and only if the group's `listManagedInstancesResults` field is -// set to `PAGINATED`. +// `pageToken` query parameter is supported only if the group's +// `listManagedInstancesResults` field is set to `PAGINATED`. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -154743,7 +154995,7 @@ func (c *RegionInstanceGroupManagersListManagedInstancesCall) Do(opts ...googlea } return ret, nil // { - // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. 
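// Illustrative sketch (not part of the patch): per the updated descriptions of
// the discardLocalSsd parameter in the Instances.Stop/Suspend hunks above, a
// caller now states explicitly whether Local SSD contents are kept. Project,
// zone, and instance names are placeholders.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Keep Local SSD contents while the instance is suspended.
	op, err := svc.Instances.Suspend("example-project", "us-central1-a", "example-vm").
		DiscardLocalSsd(false).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("suspend operation: %s", op.Name)
}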
The `pageToken` query parameter is supported only in the alpha and beta API and only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", + // "description": "Lists the instances in the managed instance group and instances that are scheduled to be created. The list includes any current actions that the group has scheduled for its instances. The orderBy query parameter is not supported. The `pageToken` query parameter is supported only if the group's `listManagedInstancesResults` field is set to `PAGINATED`.", // "flatPath": "projects/{project}/regions/{region}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances", // "httpMethod": "POST", // "id": "compute.regionInstanceGroupManagers.listManagedInstances", @@ -173203,6 +173455,298 @@ func (c *RegionUrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsVa } +// method id "compute.regionZones.list": + +type RegionZonesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of Zone resources under the specific region +// available to the specified project. +// +// - project: Project ID for this request. +// - region: Region for this request. +func (r *RegionZonesService) List(project string, region string) *RegionZonesListCall { + c := &RegionZonesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. These two types of filter expressions cannot be +// mixed in one request. If you want to use AIP-160, your expression +// must specify the field name, an operator, and the value that you want +// to use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:*` comparison can be used to test +// whether a key has been defined. For example, to find all objects with +// `owner` label use: ``` labels.owner:* ``` You can also filter nested +// fields. For example, you could specify `scheduling.automaticRestart = +// false` to include instances only if they are not scheduled for +// automatic restarts. You can use filtering on nested fields to filter +// based on resource labels. To filter on multiple expressions, provide +// each separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. 
Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. You cannot combine constraints on multiple fields using +// regular expressions. +func (c *RegionZonesListCall) Filter(filter string) *RegionZonesListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionZonesListCall) MaxResults(maxResults int64) *RegionZonesListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionZonesListCall) OrderBy(orderBy string) *RegionZonesListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionZonesListCall) PageToken(pageToken string) *RegionZonesListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionZonesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionZonesListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionZonesListCall) Fields(s ...googleapi.Field) *RegionZonesListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RegionZonesListCall) IfNoneMatch(entityTag string) *RegionZonesListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionZonesListCall) Context(ctx context.Context) *RegionZonesListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionZonesListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionZonesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/zones") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionZones.list" call. +// Exactly one of *ZoneList or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *ZoneList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ZoneList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of Zone resources under the specific region available to the specified project.", + // "flatPath": "projects/{project}/regions/{region}/zones", + // "httpMethod": "GET", + // "id": "compute.regionZones.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. These two types of filter expressions cannot be mixed in one request. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. 
For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`. You cannot combine constraints on multiple fields using regular expressions.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/zones", + // "response": { + // "$ref": "ZoneList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionZonesListCall) Pages(ctx context.Context, f func(*ZoneList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.regions.get": type RegionsGetCall struct { @@ -201589,8 +202133,8 @@ type TargetSslProxiesSetSslPolicyCall struct { // SetSslPolicy: Sets the SSL policy for TargetSslProxy. The SSL policy // specifies the server-side support for SSL features. This affects -// connections between clients and the SSL proxy load balancer. They do -// not affect the connection between the load balancer and the backends. +// connections between clients and the load balancer. They do not affect +// the connection between the load balancer and the backends. // // - project: Project ID for this request. // - targetSslProxy: Name of the TargetSslProxy resource whose SSL @@ -201712,7 +202256,7 @@ func (c *TargetSslProxiesSetSslPolicyCall) Do(opts ...googleapi.CallOption) (*Op } return ret, nil // { - // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the SSL proxy load balancer. They do not affect the connection between the load balancer and the backends.", + // "description": "Sets the SSL policy for TargetSslProxy. The SSL policy specifies the server-side support for SSL features. This affects connections between clients and the load balancer. 
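// Illustrative sketch (not part of the patch): the new compute.regionZones.list
// call added above can be paged with its generated Pages helper. The project,
// region, and filter expression are placeholders.
package main

import (
	"context"
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	call := svc.RegionZones.List("example-project", "us-central1").
		Filter(`name != "us-central1-a"`). // AIP-160 style filter, as documented above
		MaxResults(100)
	err = call.Pages(ctx, func(page *compute.ZoneList) error {
		for _, z := range page.Items {
			fmt.Println(z.Name, z.Status)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}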
They do not affect the connection between the load balancer and the backends.", // "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}/setSslPolicy", // "httpMethod": "POST", // "id": "compute.targetSslProxies.setSslPolicy", diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 84f9302dc..3356fa97b 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -55,6 +55,8 @@ type DialSettings struct { EnableDirectPathXds bool EnableNewAuthLibrary bool AllowNonDefaultServiceAccount bool + UniverseDomain string + DefaultUniverseDomain string // Google API system parameters. For more information please read: // https://cloud.google.com/apis/docs/system-parameters diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 028e96ede..104a91132 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.151.0" +const Version = "0.154.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index b2b249eec..3fdee095c 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -126,6 +126,22 @@ func (w withDefaultScopes) Apply(o *internal.DialSettings) { copy(o.DefaultScopes, w) } +// WithDefaultUniverseDomain returns a ClientOption that sets the default universe domain. +// +// It should only be used internally by generated clients. +// +// This is similar to the public WithUniverse, but allows us to determine whether the user has +// overridden the default universe. +func WithDefaultUniverseDomain(ud string) option.ClientOption { + return withDefaultUniverseDomain(ud) +} + +type withDefaultUniverseDomain string + +func (w withDefaultUniverseDomain) Apply(o *internal.DialSettings) { + o.DefaultUniverseDomain = string(w) +} + // EnableJwtWithScope returns a ClientOption that specifies if scope can be used // with self-signed JWT. func EnableJwtWithScope() option.ClientOption { diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index b2085a194..c882c1eb4 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -343,3 +343,16 @@ func (w *withCreds) Apply(o *internal.DialSettings) { func WithCredentials(creds *google.Credentials) ClientOption { return (*withCreds)(creds) } + +// WithUniverseDomain returns a ClientOption that sets the universe domain. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. 
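// Illustrative sketch (not part of the patch): the universe-domain options
// introduced above are plumbed into DialSettings. A client could pass the
// public option like this; the domain value is a placeholder, and
// WithTelemetryDisabled is shown only because the transport hunks below check
// settings.TelemetryDisabled before installing the OpenTelemetry handlers.
package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx,
		option.WithUniverseDomain("example-universe.example.com"), // experimental, per the doc comment above
		option.WithTelemetryDisabled(),                            // skips both OpenCensus and OpenTelemetry instrumentation
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = svc
}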
+func WithUniverseDomain(ud string) ClientOption { + return withUniverseDomain(ud) +} + +type withUniverseDomain string + +func (w withUniverseDomain) Apply(o *internal.DialSettings) { + o.UniverseDomain = string(w) +} diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 87a22f758..34f9ba8bd 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -18,6 +18,7 @@ import ( "cloud.google.com/go/compute/metadata" "go.opencensus.io/plugin/ocgrpc" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "golang.org/x/oauth2" "golang.org/x/time/rate" "google.golang.org/api/internal" @@ -43,6 +44,9 @@ var timeoutDialerOption grpc.DialOption // Log rate limiter var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second} +// Assign to var for unit test replacement +var dialContext = grpc.DialContext + // Dial returns a GRPC connection for use communicating with a Google cloud // service, configured with the given ClientOptions. func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) { @@ -195,12 +199,13 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. grpcOpts = addOCStatsHandler(grpcOpts, o) + grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, o) grpcOpts = append(grpcOpts, o.GRPCDialOpts...) if o.UserAgent != "" { grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent)) } - return grpc.DialContext(ctx, endpoint, grpcOpts...) + return dialContext(ctx, endpoint, grpcOpts...) } func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { @@ -210,6 +215,13 @@ func addOCStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) return append(opts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) } +func addOpenTelemetryStatsHandler(opts []grpc.DialOption, settings *internal.DialSettings) []grpc.DialOption { + if settings.TelemetryDisabled { + return opts + } + return append(opts, grpc.WithStatsHandler(otelgrpc.NewClientHandler())) +} + // grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource. type grpcTokenSource struct { oauth.TokenSource diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index a07362ffd..7e322a17c 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -16,6 +16,7 @@ import ( "time" "go.opencensus.io/plugin/ochttp" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "golang.org/x/net/http2" "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" @@ -69,6 +70,9 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna requestReason: settings.RequestReason, } var trans http.RoundTripper = paramTransport + // Give OpenTelemetry precedence over OpenCensus in case user configuration + // causes both to write the same header (`X-Cloud-Trace-Context`). 
+ trans = addOpenTelemetryTransport(trans, settings) trans = addOCTransport(trans, settings) switch { case settings.NoAuth: @@ -203,6 +207,13 @@ func fallbackBaseTransport() *http.Transport { } } +func addOpenTelemetryTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper { + if settings.TelemetryDisabled { + return trans + } + return otelhttp.NewTransport(trans) +} + func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper { if settings.TelemetryDisabled { return trans diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_wrapper.go similarity index 57% rename from vendor/google.golang.org/grpc/balancer_conn_wrappers.go rename to vendor/google.golang.org/grpc/balancer_wrapper.go index a4411c22b..b5e30cff0 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -32,21 +32,13 @@ import ( "google.golang.org/grpc/resolver" ) -type ccbMode int - -const ( - ccbModeActive = iota - ccbModeIdle - ccbModeClosed - ccbModeExitingIdle -) - // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the // balancer.Balancer interface. The ClientConn is free to call these methods // concurrently and the ccBalancerWrapper ensures that calls from the ClientConn -// to the Balancer happen synchronously and in order. +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. // // ccBalancerWrapper also implements the balancer.ClientConn interface and is // passed to the Balancer implementations. It invokes unexported methods on the @@ -57,87 +49,75 @@ const ( type ccBalancerWrapper struct { // The following fields are initialized when the wrapper is created and are // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - opts balancer.BuildOptions + cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc - // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled in the serializer. Fields - // accessed *only* in these serializer callbacks, can therefore be accessed - // without a mutex. - balancer *gracefulswitch.Balancer + // The following fields are only accessed within the serializer or during + // initialization. curBalancerName string + balancer *gracefulswitch.Balancer - // mu guards access to the below fields. Access to the serializer and its - // cancel function needs to be mutex protected because they are overwritten - // when the wrapper exits idle mode. - mu sync.Mutex - serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. - serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. - mode ccbMode // Tracks the current mode of the wrapper. + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool } -// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer -// is not created until the switchTo() method is invoked. -func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { - ctx, cancel := context.WithCancel(context.Background()) +// newCCBalancerWrapper creates a new balancer wrapper in idle state. 
The +// underlying balancer is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) ccb := &ccBalancerWrapper{ - cc: cc, - opts: bopts, + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, } - ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) return ccb } // updateClientConnState is invoked by grpc to push a ClientConnState update to -// the underlying balancer. +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.mu.Lock() - errCh := make(chan error, 1) - // Here and everywhere else where Schedule() is called, it is done with the - // lock held. But the lock guards only the scheduling part. The actual - // callback is called asynchronously without the lock being held. - ok := ccb.serializer.Schedule(func(_ context.Context) { - errCh <- ccb.balancer.UpdateClientConnState(*ccs) + errCh := make(chan error) + ok := ccb.serializer.Schedule(func(ctx context.Context) { + defer close(errCh) + if ctx.Err() != nil || ccb.balancer == nil { + return + } + err := ccb.balancer.UpdateClientConnState(*ccs) + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + errCh <- err }) if !ok { - // If we are unable to schedule a function with the serializer, it - // indicates that it has been closed. A serializer is only closed when - // the wrapper is closed or is in idle. - ccb.mu.Unlock() - return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") - } - ccb.mu.Unlock() - - // We get here only if the above call to Schedule succeeds, in which case it - // is guaranteed that the scheduled function will run. Therefore it is safe - // to block on this channel. - err := <-errCh - if logger.V(2) && err != nil { - logger.Infof("error from balancer.UpdateClientConnState: %v", err) + return nil } - return err -} - -// updateSubConnState is invoked by grpc to push a subConn state update to the -// underlying balancer. -func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - // Even though it is optional for balancers, gracefulswitch ensures - // opts.StateListener is set, so this cannot ever be nil. - sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) - }) - ccb.mu.Unlock() + return <-errCh } +// resolverError is invoked by grpc to push a resolver error to the underlying +// balancer. The call to the balancer is executed from the serializer. 
func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } ccb.balancer.ResolverError(err) }) - ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -151,8 +131,10 @@ func (ccb *ccBalancerWrapper) resolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } // TODO: Other languages use case-sensitive balancer registries. We should // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. if strings.EqualFold(ccb.curBalancerName, name) { @@ -160,7 +142,6 @@ func (ccb *ccBalancerWrapper) switchTo(name string) { } ccb.buildLoadBalancingPolicy(name) }) - ccb.mu.Unlock() } // buildLoadBalancingPolicy performs the following: @@ -187,115 +168,49 @@ func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { ccb.curBalancerName = builder.Name() } +// close initiates async shutdown of the wrapper. cc.mu must be held when +// calling this function. To determine the wrapper has finished shutting down, +// the channel should block on ccb.serializer.Done() without cc.mu held. func (ccb *ccBalancerWrapper) close() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") - ccb.closeBalancer(ccbModeClosed) -} - -// enterIdleMode is invoked by grpc when the channel enters idle mode upon -// expiry of idle_timeout. This call blocks until the balancer is closed. -func (ccb *ccBalancerWrapper) enterIdleMode() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") - ccb.closeBalancer(ccbModeIdle) -} - -// closeBalancer is invoked when the channel is being closed or when it enters -// idle mode upon expiry of idle_timeout. -func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { ccb.mu.Lock() - if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { - ccb.mu.Unlock() - return - } - - ccb.mode = m - done := ccb.serializer.Done() - b := ccb.balancer - ok := ccb.serializer.Schedule(func(_ context.Context) { - // Close the serializer to ensure that no more calls from gRPC are sent - // to the balancer. - ccb.serializerCancel() - // Empty the current balancer name because we don't have a balancer - // anymore and also so that we act on the next call to switchTo by - // creating a new balancer specified by the new resolver. - ccb.curBalancerName = "" - }) - if !ok { - ccb.mu.Unlock() - return - } + ccb.closed = true ccb.mu.Unlock() - - // Give enqueued callbacks a chance to finish before closing the balancer. - <-done - b.Close() -} - -// exitIdleMode is invoked by grpc when the channel exits idle mode either -// because of an RPC or because of an invocation of the Connect() API. This -// recreates the balancer that was closed previously when entering idle mode. -// -// If the channel is not in idle mode, we know for a fact that we are here as a -// result of the user calling the Connect() method on the ClientConn. In this -// case, we can simply forward the call to the underlying balancer, instructing -// it to reconnect to the backends. 
-func (ccb *ccBalancerWrapper) exitIdleMode() { - ccb.mu.Lock() - if ccb.mode == ccbModeClosed { - // Request to exit idle is a no-op when wrapper is already closed. - ccb.mu.Unlock() - return - } - - if ccb.mode == ccbModeIdle { - // Recreate the serializer which was closed when we entered idle. - ctx, cancel := context.WithCancel(context.Background()) - ccb.serializer = grpcsync.NewCallbackSerializer(ctx) - ccb.serializerCancel = cancel - } - - // The ClientConn guarantees that mutual exclusion between close() and - // exitIdleMode(), and since we just created a new serializer, we can be - // sure that the below function will be scheduled. - done := make(chan struct{}) - ccb.serializer.Schedule(func(_ context.Context) { - defer close(done) - - ccb.mu.Lock() - defer ccb.mu.Unlock() - - if ccb.mode != ccbModeIdle { - ccb.balancer.ExitIdle() + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.serializer.Schedule(func(context.Context) { + if ccb.balancer == nil { return } - - // Gracefulswitch balancer does not support a switchTo operation after - // being closed. Hence we need to create a new one here. - ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) - ccb.mode = ccbModeActive - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") - + ccb.balancer.Close() + ccb.balancer = nil }) - ccb.mu.Unlock() - - <-done + ccb.serializerCancel() } -func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { - ccb.mu.Lock() - defer ccb.mu.Unlock() - return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed +// exitIdle invokes the balancer's exitIdle method in the serializer. +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ExitIdle() + }) } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if ccb.isIdleOrClosed() { - return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed") } + ccb.mu.Unlock() if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ac, err := ccb.cc.newAddrConn(addrs, opts) + ac, err := ccb.cc.newAddrConnLocked(addrs, opts) if err != nil { channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err @@ -316,10 +231,6 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - if ccb.isIdleOrClosed() { - return - } - acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -328,25 +239,39 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - + ccb.mu.Unlock() // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. 
If the picker is // updated later, we could call the "connecting" picker when the state is // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(s.Picker) + + // Note that there is no need to check if the balancer wrapper was closed, + // as we know the graceful switch LB policy will not call cc if it has been + // closed. + ccb.cc.pickerWrapper.updatePicker(s.Picker) ccb.cc.csMgr.updateState(s.ConnectivityState) } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.RLock() + defer ccb.cc.mu.RUnlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - - ccb.cc.resolveNow(o) + ccb.mu.Unlock() + ccb.cc.resolveNowLocked(o) } func (ccb *ccBalancerWrapper) Target() string { @@ -364,6 +289,20 @@ type acBalancerWrapper struct { producers map[balancer.ProducerBuilder]*refCountedProducer } +// updateState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { + acbw.ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + // TODO: delete this comment when UpdateSubConnState is removed. + acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) +} + func (acbw *acBalancerWrapper) String() string { return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) } @@ -377,20 +316,7 @@ func (acbw *acBalancerWrapper) Connect() { } func (acbw *acBalancerWrapper) Shutdown() { - ccb := acbw.ccb - if ccb.isIdleOrClosed() { - // It it safe to ignore this call when the balancer is closed or in idle - // because the ClientConn takes care of closing the connections. - // - // Not returning early from here when the balancer is closed or in idle - // leads to a deadlock though, because of the following sequence of - // calls when holding cc.mu: - // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> - // ccb.RemoveAddrConn --> cc.removeAddrConn - return - } - - ccb.cc.removeAddrConn(acbw.ac, errConnDrain) + acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } // NewStream begins a streaming RPC on the addrConn. If the addrConn is not diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 429c389e4..e6f2625b6 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -33,9 +33,7 @@ import ( "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" @@ -48,9 +46,9 @@ import ( "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. 
) const ( @@ -119,23 +117,8 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. -// -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. -// -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. -// -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. -func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { +// newClient returns a new client in idle mode. +func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, conns: make(map[*addrConn]struct{}), @@ -143,23 +126,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * czData: new(channelzData), } - // We start the channel off in idle mode, but kick it out of idle at the end - // of this method, instead of waiting for the first RPC. Other gRPC - // implementations do wait for the first RPC to kick the channel out of - // idle. But doing so would be a major behavior change for our users who are - // used to seeing the channel active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, if at all we ever get to do that. - cc.idlenessState = ccIdlenessStateIdle - cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - cc.exitIdleCond = sync.NewCond(&cc.mu) + // Apply dial options. disableGlobalOpts := false for _, opt := range opts { if _, ok := opt.(*disableGlobalDialOptions); ok { @@ -177,21 +148,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * for _, opt := range opts { opt.apply(&cc.dopts) } - chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) - defer func() { - if err != nil { - cc.Close() - } - }() - - // Register ClientConn with channelz. - cc.channelzRegistration(target) - - cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) - if err := cc.validateTransportCredentials(); err != nil { return nil, err } @@ -205,10 +164,80 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.UserAgent != "" { - cc.dopts.copts.UserAgent += " " + grpcUA - } else { - cc.dopts.copts.UserAgent = grpcUA + // Register ClientConn with channelz. + cc.channelzRegistration(target) + + // TODO: Ideally it should be impossible to error from this function after + // channelz registration. This will require removing some channelz logs + // from the following functions that can error. 
Errors can be returned to + // the user, and successful logs can be emitted here, after the checks have + // passed and channelz is subsequently registered. + + // Determine the resolver to use. + if err := cc.parseTargetAndFindResolver(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + if err = cc.determineAuthority(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. + cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc, err := newClient(target, opts...) + if err != nil { + return nil, err + } + + // We start the channel off in idle mode, but kick it out of idle now, + // instead of waiting for the first RPC. Other gRPC implementations do wait + // for the first RPC to kick the channel out of idle. But doing so would be + // a major behavior change for our users who are used to seeing the channel + // active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, i.e. by making newClient exported. + + defer func() { + if err != nil { + cc.Close() + } + }() + + // This creates the name resolver, load balancer, etc. + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + return nil, err + } + + // Return now for non-blocking dials. + if !cc.dopts.block { + return cc, nil } if cc.dopts.timeout > 0 { @@ -231,49 +260,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if cc.dopts.bs == nil { - cc.dopts.bs = backoff.DefaultExponential - } - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - return nil, err - } - if err = cc.determineAuthority(); err != nil { - return nil, err - } - - if cc.dopts.scChan != nil { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } - if cc.dopts.scChan != nil { - go cc.scWatcher() - } - - // This creates the name resolver, load balancer, blocking picker etc. 
- if err := cc.exitIdleMode(); err != nil { - return nil, err - } - - // Configure idleness support with configured idle timeout or default idle - // timeout duration. Idleness can be explicitly disabled by the user, by - // setting the dial option to 0. - cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) - - // Return early for non-blocking dials. - if !cc.dopts.block { - return cc, nil - } - // A blocking dial blocks until the clientConn is ready. for { s := cc.GetState() @@ -320,8 +306,8 @@ func (cc *ClientConn) addTraceEvent(msg string) { type idler ClientConn -func (i *idler) EnterIdleMode() error { - return (*ClientConn)(i).enterIdleMode() +func (i *idler) EnterIdleMode() { + (*ClientConn)(i).enterIdleMode() } func (i *idler) ExitIdleMode() error { @@ -329,117 +315,71 @@ func (i *idler) ExitIdleMode() error { } // exitIdleMode moves the channel out of idle mode by recreating the name -// resolver and load balancer. -func (cc *ClientConn) exitIdleMode() error { +// resolver and load balancer. This should never be called directly; use +// cc.idlenessMgr.ExitIdleMode instead. +func (cc *ClientConn) exitIdleMode() (err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return errConnClosing } - if cc.idlenessState != ccIdlenessStateIdle { - channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) - cc.mu.Unlock() - return nil - } - - defer func() { - // When Close() and exitIdleMode() race against each other, one of the - // following two can happen: - // - Close() wins the race and runs first. exitIdleMode() runs after, and - // sees that the ClientConn is already closed and hence returns early. - // - exitIdleMode() wins the race and runs first and recreates the balancer - // and releases the lock before recreating the resolver. If Close() runs - // in this window, it will wait for exitIdleMode to complete. - // - // We achieve this synchronization using the below condition variable. - cc.mu.Lock() - cc.idlenessState = ccIdlenessStateActive - cc.exitIdleCond.Signal() - cc.mu.Unlock() - }() - - cc.idlenessState = ccIdlenessStateExitingIdle - exitedIdle := false - if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) - } else { - cc.blockingpicker.exitIdleMode() - exitedIdle = true - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - if cc.balancerWrapper == nil { - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) - } else { - cc.balancerWrapper.exitIdleMode() - } - cc.firstResolveEvent = grpcsync.NewEvent() cc.mu.Unlock() // This needs to be called without cc.mu because this builds a new resolver - // which might update state or report error inline which needs to be handled - // by cc.updateResolverState() which also grabs cc.mu. - if err := cc.initResolverWrapper(credsClone); err != nil { + // which might update state or report error inline, which would then need to + // acquire cc.mu. 
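Since the channel now starts in idle mode and DialContext merely asks the idleness manager to exit idle before optionally blocking, a caller who wants the old blocking behaviour can reproduce it with the public API. A hedged sketch using grpc.Dial, Connect and WaitForStateChange; the target address and insecure credentials are placeholders:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Non-blocking dial: connection work happens in the background.
	conn, err := grpc.Dial("dns:///localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Kick the channel out of idle and wait until it is READY, mirroring the
	// loop a blocking dial runs internally.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	conn.Connect()
	for state := conn.GetState(); state != connectivity.Ready; state = conn.GetState() {
		if !conn.WaitForStateChange(ctx, state) {
			log.Fatalf("channel did not become READY: %v", ctx.Err())
		}
	}
	log.Println("channel is READY")
}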
+ if err := cc.resolverWrapper.start(); err != nil { return err } - if exitedIdle { - cc.addTraceEvent("exiting idle mode") - } + cc.addTraceEvent("exiting idle mode") return nil } +// initIdleStateLocked initializes common state to how it should be while idle. +func (cc *ClientConn) initIdleStateLocked() { + cc.resolverWrapper = newCCResolverWrapper(cc) + cc.balancerWrapper = newCCBalancerWrapper(cc) + cc.firstResolveEvent = grpcsync.NewEvent() + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + cc.conns = make(map[*addrConn]struct{}) +} + // enterIdleMode puts the channel in idle mode, and as part of it shuts down the -// name resolver, load balancer and any subchannels. -func (cc *ClientConn) enterIdleMode() error { +// name resolver, load balancer, and any subchannels. This should never be +// called directly; use cc.idlenessMgr.EnterIdleMode instead. +func (cc *ClientConn) enterIdleMode() { cc.mu.Lock() - defer cc.mu.Unlock() if cc.conns == nil { - return ErrClientConnClosing - } - if cc.idlenessState != ccIdlenessStateActive { - channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) - return nil + cc.mu.Unlock() + return } - // cc.conns == nil is a proxy for the ClientConn being closed. So, instead - // of setting it to nil here, we recreate the map. This also means that we - // don't have to do this when exiting idle mode. conns := cc.conns - cc.conns = make(map[*addrConn]struct{}) - // TODO: Currently, we close the resolver wrapper upon entering idle mode - // and create a new one upon exiting idle mode. This means that the - // `cc.resolverWrapper` field would be overwritten everytime we exit idle - // mode. While this means that we need to hold `cc.mu` when accessing - // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should - // try to do the same for the balancer and picker wrappers too. - cc.resolverWrapper.close() - cc.blockingpicker.enterIdleMode() - cc.balancerWrapper.enterIdleMode() + rWrapper := cc.resolverWrapper + rWrapper.close() + cc.pickerWrapper.reset() + bWrapper := cc.balancerWrapper + bWrapper.close() cc.csMgr.updateState(connectivity.Idle) - cc.idlenessState = ccIdlenessStateIdle cc.addTraceEvent("entering idle mode") - go func() { - for ac := range conns { - ac.tearDown(errConnIdling) - } - }() + cc.initIdleStateLocked() - return nil + cc.mu.Unlock() + + // Block until the name resolver and LB policy are closed. + <-rWrapper.serializer.Done() + <-bWrapper.serializer.Done() + + // Close all subchannels after the LB policy is closed. + for ac := range conns { + ac.tearDown(errConnIdling) + } } // validateTransportCredentials performs a series of checks on the configured @@ -649,66 +589,35 @@ type ClientConn struct { dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idle.Manager + idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. 
csMgr *connectivityStateManager - blockingpicker *pickerWrapper + pickerWrapper *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector czData *channelzData retryThrottler atomic.Value // Updated from service config. - // firstResolveEvent is used to track whether the name resolver sent us at - // least one update. RPCs block on this event. - firstResolveEvent *grpcsync.Event - // mu protects the following fields. // TODO: split mu so the same mutex isn't used for everything. mu sync.RWMutex - resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - idlenessState ccIdlenessState // Tracks idleness state of the channel. - exitIdleCond *sync.Cond // Signalled when channel exits idle. + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. May be accessed without mu + // if we know we cannot be asked to enter idle mode while accessing it (e.g. + // when the idle manager has already been closed, or if we are already + // entering idle mode). + firstResolveEvent *grpcsync.Event lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } -// ccIdlenessState tracks the idleness state of the channel. -// -// Channels start off in `active` and move to `idle` after a period of -// inactivity. When moving back to `active` upon an incoming RPC, they -// transition through `exiting_idle`. This state is useful for synchronization -// with Close(). -// -// This state tracking is mostly for self-protection. The idlenessManager is -// expected to keep track of the state as well, and is expected not to call into -// the ClientConn unnecessarily. -type ccIdlenessState int8 - -const ( - ccIdlenessStateActive ccIdlenessState = iota - ccIdlenessStateIdle - ccIdlenessStateExitingIdle -) - -func (s ccIdlenessState) String() string { - switch s { - case ccIdlenessStateActive: - return "active" - case ccIdlenessStateIdle: - return "idle" - case ccIdlenessStateExitingIdle: - return "exitingIdle" - default: - return "unknown" - } -} - // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -748,29 +657,15 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.exitIdleMode() + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + cc.addTraceEvent(err.Error()) + return + } // If the ClientConn was not in idle mode, we need to call ExitIdle on the // LB policy so that connections can be created. - cc.balancerWrapper.exitIdleMode() -} - -func (cc *ClientConn) scWatcher() { - for { - select { - case sc, ok := <-cc.dopts.scChan: - if !ok { - return - } - cc.mu.Lock() - // TODO: load balance policy runtime change is ignored. - // We may revisit this decision in the future. 
- cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - cc.mu.Unlock() - case <-cc.ctx.Done(): - return - } - } + cc.mu.Lock() + cc.balancerWrapper.exitIdle() + cc.mu.Unlock() } // waitForResolvedAddrs blocks until the resolver has provided addresses or the @@ -804,11 +699,11 @@ func init() { internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { return cc.csMgr.pubSub.Subscribe(s) } - internal.EnterIdleModeForTesting = func(cc *ClientConn) error { - return cc.enterIdleMode() + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() } internal.ExitIdleModeForTesting = func(cc *ClientConn) error { - return cc.exitIdleMode() + return cc.idlenessMgr.ExitIdleMode() } } @@ -824,9 +719,8 @@ func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { } } -func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { defer cc.firstResolveEvent.Fire() - cc.mu.Lock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. @@ -872,7 +766,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { if cc.sc == nil { // Apply the failing LB only if we haven't received valid service config // from the name resolver in the past. - cc.applyFailingLB(s.ServiceConfig) + cc.applyFailingLBLocked(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -894,15 +788,13 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// applyFailingLB is akin to configuring an LB policy on the channel which +// applyFailingLBLocked is akin to configuring an LB policy on the channel which // always fails RPCs. Here, an actual LB policy is not configured, but an always // erroring picker is configured, which returns errors with information about // what was invalid in the received service config. A config selector with no // service config is configured, and the connectivity state of the channel is // set to TransientFailure. -// -// Caller must hold cc.mu. -func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { +func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { var err error if sc.Err != nil { err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) @@ -910,14 +802,10 @@ func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) } -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.balancerWrapper.updateSubConnState(sc, s, err) -} - // Makes a copy of the input addresses slice and clears out the balancer // attributes field. Addresses are passed during subconn creation and address // update operations. 
In both cases, we will clear the balancer attributes by @@ -932,10 +820,14 @@ func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Ad return out } -// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { +func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + if cc.conns == nil { + return nil, ErrClientConnClosing + } + ac := &addrConn{ state: connectivity.Idle, cc: cc, @@ -947,12 +839,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.conns == nil { - return nil, ErrClientConnClosing - } var err error ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") @@ -968,6 +854,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub }, }) + // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.conns[ac] = struct{}{} return ac, nil } @@ -1174,7 +1061,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) @@ -1216,12 +1103,12 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() - r := cc.resolverWrapper + cc.resolverWrapper.resolveNow(o) cc.mu.RUnlock() - if r == nil { - return - } - go r.resolveNow(o) +} + +func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { + cc.resolverWrapper.resolveNow(o) } // ResetConnectBackoff wakes up all subchannels in transient failure and causes @@ -1253,40 +1140,32 @@ func (cc *ClientConn) Close() error { <-cc.csMgr.pubSub.Done() }() + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. + cc.idlenessMgr.Close() + cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return ErrClientConnClosing } - for cc.idlenessState == ccIdlenessStateExitingIdle { - cc.exitIdleCond.Wait() - } - conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) - pWrapper := cc.blockingpicker - rWrapper := cc.resolverWrapper - bWrapper := cc.balancerWrapper - idlenessMgr := cc.idlenessMgr + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. cc.mu.Unlock() + cc.resolverWrapper.close() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. 
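Close() now snapshots what it needs under cc.mu, releases the lock, initiates shutdown of each wrapper in order, and only afterwards blocks on the serializers' Done channels. The same initiate-then-wait shape reduced to a toy example; component, close and done are illustrative names only:

package main

import (
	"fmt"
	"time"
)

// component stands in for the resolver/balancer wrappers: close only starts an
// asynchronous shutdown, and done is closed once pending callbacks finish.
type component struct {
	name string
	done chan struct{}
}

func newComponent(name string) *component {
	return &component{name: name, done: make(chan struct{})}
}

func (c *component) close() {
	go func() {
		time.Sleep(10 * time.Millisecond) // pretend to drain queued callbacks
		fmt.Println(c.name, "drained")
		close(c.done)
	}()
}

func main() {
	resolver := newComponent("resolver")
	balancer := newComponent("balancer")

	// Initiate shutdown in order, then block until both are fully drained.
	resolver.close()
	balancer.close()
	<-resolver.done
	<-balancer.done
	fmt.Println("all wrappers stopped")
}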
- if pWrapper != nil { - pWrapper.close() - } - if bWrapper != nil { - bWrapper.close() - } - if rWrapper != nil { - rWrapper.close() - } - if idlenessMgr != nil { - idlenessMgr.Close() - } + cc.pickerWrapper.close() + cc.balancerWrapper.close() + + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -1307,7 +1186,7 @@ type addrConn struct { cc *ClientConn dopts dialOptions - acbw balancer.SubConn + acbw *acBalancerWrapper scopts balancer.NewSubConnOptions // transport is set when there's a viable transport (note: ac state may not be READY as LB channel @@ -1345,7 +1224,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) + ac.acbw.updateState(s, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1849,7 +1728,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -2007,32 +1886,3 @@ func (cc *ClientConn) determineAuthority() error { channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil } - -// initResolverWrapper creates a ccResolverWrapper, which builds the name -// resolver. This method grabs the lock to assign the newly built resolver -// wrapper to the cc.resolverWrapper field. -func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { - rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ - target: cc.parsedTarget, - builder: cc.resolverBuilder, - bOpts: resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: creds, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - }, - channelzID: cc.channelzID, - }) - if err != nil { - return fmt.Errorf("failed to build resolver: %v", err) - } - // Resolver implementations may report state update or error inline when - // built (or right after), and this is handled in cc.updateResolverState. - // Also, an error from the resolver might lead to a re-resolution request - // from the balancer, which is handled in resolveNow() where - // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. - cc.mu.Lock() - cc.resolverWrapper = rw - cc.mu.Unlock() - return nil -} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go index 11b106182..08476ad1f 100644 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -25,7 +25,13 @@ import ( "strconv" ) -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. 
+// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md type Code uint32 const ( diff --git a/vendor/google.golang.org/grpc/credentials/alts/alts.go b/vendor/google.golang.org/grpc/credentials/alts/alts.go index 579adf210..afcdb8a0d 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/alts.go +++ b/vendor/google.golang.org/grpc/credentials/alts/alts.go @@ -43,7 +43,7 @@ import ( const ( // hypervisorHandshakerServiceAddress represents the default ALTS gRPC // handshaker service address in the hypervisor. - hypervisorHandshakerServiceAddress = "metadata.google.internal.:8080" + hypervisorHandshakerServiceAddress = "dns:///metadata.google.internal.:8080" // defaultTimeout specifies the server handshake timeout. defaultTimeout = 30.0 * time.Second // The following constants specify the minimum and maximum acceptable diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 877b7cd21..5dafd34ed 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -44,10 +44,25 @@ func (t TLSInfo) AuthType() string { return "tls" } +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + // GetSecurityValue returns security info requested by channelz. func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], + StandardName: cipherSuiteLookup(t.State.CipherSuite), } // Currently there's no way to get LocalCertificate info from tls package. if len(t.State.PeerCertificates) > 0 { @@ -138,10 +153,39 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + // NewTLS uses c to construct a TransportCredentials based on TLS. 
func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{credinternal.CloneTLSConfig(c)} tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } return tc } @@ -205,32 +249,3 @@ type TLSChannelzSecurityValue struct { LocalCertificate []byte RemoteCertificate []byte } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", - tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", - tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", -} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index cfc9fd85e..ba2426180 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -46,6 +46,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + 
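NewTLS now defaults MinVersion to TLS 1.2 (unless the caller pinned an older MaxVersion) and, when no CipherSuites are configured, takes the standard library's secure list minus the suites RFC 7540 Appendix A forbids for HTTP/2. The same adjustment written directly against crypto/tls, as a sketch; hardenForHTTP2 is an illustrative helper, not part of the vendored code:

package main

import (
	"crypto/tls"
	"fmt"
)

// forbidden lists TLS 1.2 suites that RFC 7540 Appendix A disallows for HTTP/2.
var forbidden = map[uint16]bool{
	tls.TLS_RSA_WITH_AES_128_CBC_SHA:         true,
	tls.TLS_RSA_WITH_AES_256_CBC_SHA:         true,
	tls.TLS_RSA_WITH_AES_128_GCM_SHA256:      true,
	tls.TLS_RSA_WITH_AES_256_GCM_SHA384:      true,
	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: true,
	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: true,
	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:   true,
	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:   true,
}

// hardenForHTTP2 applies the same defaults the updated NewTLS applies: a
// TLS 1.2 floor and a cipher-suite list filtered for HTTP/2 compatibility.
func hardenForHTTP2(cfg *tls.Config) {
	if cfg.MinVersion == 0 && (cfg.MaxVersion == 0 || cfg.MaxVersion >= tls.VersionTLS12) {
		cfg.MinVersion = tls.VersionTLS12
	}
	if cfg.CipherSuites == nil {
		for _, cs := range tls.CipherSuites() {
			if !forbidden[cs.ID] {
				cfg.CipherSuites = append(cfg.CipherSuites, cs.ID)
			}
		}
	}
}

func main() {
	cfg := &tls.Config{}
	hardenForHTTP2(cfg)
	fmt.Printf("min version: %x, %d cipher suites allowed\n", cfg.MinVersion, len(cfg.CipherSuites))
}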
internal.WithRecvBufferPool = withRecvBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -63,7 +64,6 @@ type dialOptions struct { block bool returnLastError bool timeout time.Duration - scChan <-chan ServiceConfig authority string binaryLogger binarylog.Logger copts transport.ConnectOptions @@ -250,19 +250,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithServiceConfig returns a DialOption which has a channel to read the -// service configuration. -// -// Deprecated: service config should be received through name resolver or via -// WithDefaultServiceConfig, as specified at -// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be -// removed in a future 1.x release. -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.scChan = c - }) -} - // WithConnectParams configures the ClientConn to use the provided ConnectParams // for creating and maintaining connections to servers. // @@ -413,6 +400,17 @@ func WithTimeout(d time.Duration) DialOption { // connections. If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. +// +// [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f @@ -487,7 +485,7 @@ func FailOnNonTempDialError(f bool) DialOption { // the RPCs. func WithUserAgent(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s + o.copts.UserAgent = s + " " + grpcUA }) } @@ -637,14 +635,16 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ - WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, UseProxy: true, + UserAgent: grpcUA, }, - recvBufferPool: nopBufferPool{}, - idleTimeout: 30 * time.Minute, + bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, + idleTimeout: 30 * time.Minute, + recvBufferPool: nopBufferPool{}, } } @@ -705,11 +705,13 @@ func WithIdleTimeout(d time.Duration) DialOption { // options are used: WithStatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. 
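The expanded WithContextDialer documentation notes that current Go releases override the OS defaults for TCP keepalive time and interval. A hedged example of plugging in a custom dialer in its simplest form, without the SO_KEEPALIVE socket-option tuning the comment describes; the target and credentials are placeholders:

package main

import (
	"context"
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// A custom dialer; KeepAlive here still uses Go's 15s default. Disabling
	// that (KeepAlive < 0) and re-enabling SO_KEEPALIVE via the Control hook
	// is what the updated WithContextDialer comment describes.
	dialer := &net.Dialer{Timeout: 5 * time.Second}

	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return dialer.DialContext(ctx, "tcp", addr)
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}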
func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return withRecvBufferPool(bufferPool) +} + +func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { o.recvBufferPool = bufferPool }) diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 4399c3df4..11f91668a 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -18,7 +18,10 @@ // Package buffer provides an implementation of an unbounded buffer. package buffer -import "sync" +import ( + "errors" + "sync" +) // Unbounded is an implementation of an unbounded buffer which does not use // extra goroutines. This is typically used for passing updates from one entity @@ -36,6 +39,7 @@ import "sync" type Unbounded struct { c chan any closed bool + closing bool mu sync.Mutex backlog []any } @@ -45,32 +49,32 @@ func NewUnbounded() *Unbounded { return &Unbounded{c: make(chan any, 1)} } +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t any) { +func (b *Unbounded) Put(t any) error { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return + if b.closing { + return errBufferClosed } if len(b.backlog) == 0 { select { case b.c <- t: - return + return nil default: } } b.backlog = append(b.backlog, t) + return nil } -// Load sends the earliest buffered data, if any, onto the read channel -// returned by Get(). Users are expected to call this every time they read a +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return - } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -78,6 +82,8 @@ func (b *Unbounded) Load() { b.backlog = b.backlog[1:] default: } + } else if b.closing && !b.closed { + close(b.c) } } @@ -88,18 +94,23 @@ func (b *Unbounded) Load() { // send the next buffered value onto the channel if there is any. // // If the unbounded buffer is closed, the read channel returned by this method -// is closed. +// is closed after all data is drained. func (b *Unbounded) Get() <-chan any { return b.c } -// Close closes the unbounded buffer. +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. func (b *Unbounded) Close() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { + if b.closing { return } - b.closed = true - close(b.c) + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 5395e7752..fc094f344 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -31,6 +31,7 @@ import ( "time" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" ) const ( @@ -58,6 +59,12 @@ func TurnOn() { } } +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) + } +} + // IsOn returns whether channelz data collection is on. 
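The Unbounded changes tighten the shutdown contract: Put reports an error once Close has been called, and the channel returned by Get is closed only after the backlog drains and Load runs one last time. A much-reduced, self-contained sketch of that contract and the consumer loop it implies; miniBuffer is an illustrative reimplementation, not the vendored type:

package main

import (
	"fmt"
	"sync"
)

// miniBuffer mirrors the close-after-drain idea: Put is rejected once Close
// has been called, and the reader's channel closes only when the backlog is
// fully consumed.
type miniBuffer struct {
	mu      sync.Mutex
	backlog []any
	c       chan any
	closing bool
	closed  bool
}

func newMiniBuffer() *miniBuffer { return &miniBuffer{c: make(chan any, 1)} }

func (b *miniBuffer) Put(v any) error {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.closing {
		return fmt.Errorf("Put called on closed buffer")
	}
	if len(b.backlog) == 0 {
		select {
		case b.c <- v:
			return nil
		default:
		}
	}
	b.backlog = append(b.backlog, v)
	return nil
}

// Load must be called after every successful read from Get().
func (b *miniBuffer) Load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog = b.backlog[1:]
		default:
		}
	} else if b.closing && !b.closed {
		b.closed = true
		close(b.c)
	}
}

func (b *miniBuffer) Get() <-chan any { return b.c }

func (b *miniBuffer) Close() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.closing {
		return
	}
	b.closing = true
	if len(b.backlog) == 0 {
		b.closed = true
		close(b.c)
	}
}

func main() {
	b := newMiniBuffer()
	for i := 0; i < 3; i++ {
		_ = b.Put(i)
	}
	b.Close()
	// Drain: the channel closes only after the backlog empties and Load runs.
	for v := range b.Get() {
		fmt.Println("got", v)
		b.Load()
	}
}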
func IsOn() bool { return atomic.LoadInt32(&curState) == 1 diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 3cf10ddfb..685a3cb41 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,9 +36,6 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy. - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) // LeastRequestLB is set if we should support the least_request_experimental // LB policy, which can be enabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 02b4b6a1c..29f234acb 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -50,46 +50,7 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) - // XDSRingHash indicates whether ring hash support is enabled, which can be - // disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) - // XDSClientSideSecurity is used to control processing of security - // configuration on the client-side. - // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster and - // DNS cluster is enabled, which can be disabled by setting the environment - // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - // to "false". - XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) - - // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, - // which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". - XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) - // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) - // XDSFederation indicates whether federation support is enabled, which can - // be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". - XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) - - // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "false". 
- XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") - // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which - // can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". - XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 000000000..7f7044e17 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithRecvBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // RecvBufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. + RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 900917dbe..f7f40a16a 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -20,7 +20,6 @@ package grpcsync import ( "context" - "sync" "google.golang.org/grpc/internal/buffer" ) @@ -38,8 +37,6 @@ type CallbackSerializer struct { done chan struct{} callbacks *buffer.Unbounded - closedMu sync.Mutex - closed bool } // NewCallbackSerializer returns a new CallbackSerializer instance. The provided @@ -65,56 +62,34 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // callbacks to be executed by the serializer. It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - cs.closedMu.Lock() - defer cs.closedMu.Unlock() - - if cs.closed { - return false - } - cs.callbacks.Put(f) - return true + return cs.callbacks.Put(f) == nil } func (cs *CallbackSerializer) run(ctx context.Context) { - var backlog []func(context.Context) - defer close(cs.done) + + // TODO: when Go 1.21 is the oldest supported version, this loop and Close + // can be replaced with: + // + // context.AfterFunc(ctx, cs.callbacks.Close) for ctx.Err() == nil { select { case <-ctx.Done(): // Do nothing here. Next iteration of the for loop will not happen, // since ctx.Err() would be non-nil. 
- case callback, ok := <-cs.callbacks.Get(): - if !ok { - return - } + case cb := <-cs.callbacks.Get(): cs.callbacks.Load() - callback.(func(ctx context.Context))(ctx) + cb.(func(context.Context))(ctx) } } - // Fetch pending callbacks if any, and execute them before returning from - // this method and closing cs.done. - cs.closedMu.Lock() - cs.closed = true - backlog = cs.fetchPendingCallbacks() + // Close the buffer to prevent new callbacks from being added. cs.callbacks.Close() - cs.closedMu.Unlock() - for _, b := range backlog { - b(ctx) - } -} -func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { - var backlog []func(context.Context) - for { - select { - case b := <-cs.callbacks.Get(): - backlog = append(backlog, b.(func(context.Context))) - cs.callbacks.Load() - default: - return backlog - } + // Run all pending callbacks. + for cb := range cs.callbacks.Get() { + cs.callbacks.Load() + cb.(func(context.Context))(ctx) } } diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go index 6c272476e..fe49cb74c 100644 --- a/vendor/google.golang.org/grpc/internal/idle/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -26,8 +26,6 @@ import ( "sync" "sync/atomic" "time" - - "google.golang.org/grpc/grpclog" ) // For overriding in unit tests. @@ -39,27 +37,12 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { // and exit from idle mode. type Enforcer interface { ExitIdleMode() error - EnterIdleMode() error -} - -// Manager defines the functionality required to track RPC activity on a -// channel. -type Manager interface { - OnCallBegin() error - OnCallEnd() - Close() + EnterIdleMode() } -type noopManager struct{} - -func (noopManager) OnCallBegin() error { return nil } -func (noopManager) OnCallEnd() {} -func (noopManager) Close() {} - -// manager implements the Manager interface. It uses atomic operations to -// synchronize access to shared state and a mutex to guarantee mutual exclusion -// in a critical section. -type manager struct { +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { // State accessed atomically. lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. @@ -69,8 +52,7 @@ type manager struct { // Can be accessed without atomics or mutex since these are set at creation // time and read-only after that. enforcer Enforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. - logger grpclog.LoggerV2 + timeout time.Duration // idleMu is used to guarantee mutual exclusion in two scenarios: // - Opposing intentions: @@ -88,57 +70,48 @@ type manager struct { timer *time.Timer } -// ManagerOptions is a collection of options used by -// NewManager. -type ManagerOptions struct { - Enforcer Enforcer - Timeout time.Duration - Logger grpclog.LoggerV2 +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. 
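The CallbackSerializer rewrite above drops the separate closed flag and mutex: Schedule now simply reports whether the unbounded buffer accepted the callback (Put returns an error once the buffer is closed), and run drains the remaining backlog after the context is cancelled. A rough, self-contained sketch of that shape, with a bounded channel standing in for the unbounded buffer (illustrative only, not the vendored type):

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

type serializer struct {
	mu     sync.Mutex
	closed bool
	cbs    chan func(context.Context) // stand-in for buffer.Unbounded
	done   chan struct{}
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{cbs: make(chan func(context.Context), 16), done: make(chan struct{})}
	go s.run(ctx)
	return s
}

// Schedule reports whether the callback was accepted, mirroring the new
// "Put(f) == nil" behavior.
func (s *serializer) Schedule(f func(context.Context)) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		return false
	}
	s.cbs <- f // a real unbounded buffer never blocks here
	return true
}

func (s *serializer) run(ctx context.Context) {
	defer close(s.done)
	for {
		select {
		case <-ctx.Done():
			// Stop accepting new callbacks, then run whatever is pending.
			s.mu.Lock()
			s.closed = true
			close(s.cbs)
			s.mu.Unlock()
			for cb := range s.cbs {
				cb(ctx)
			}
			return
		case cb := <-s.cbs:
			cb(ctx)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	s := newSerializer(ctx)
	s.Schedule(func(context.Context) { fmt.Println("first") })
	s.Schedule(func(context.Context) { fmt.Println("second") })
	cancel()
	<-s.done
	fmt.Println("accepted after close:", s.Schedule(func(context.Context) {}))
}
```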
+func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } } -// NewManager creates a new idleness manager implementation for the -// given idle timeout. -func NewManager(opts ManagerOptions) Manager { - if opts.Timeout == 0 { - return noopManager{} +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return } - m := &manager{ - enforcer: opts.Enforcer, - timeout: int64(opts.Timeout), - logger: opts.Logger, + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() } - m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) - return m + m.timer = timeAfterFunc(d, m.handleIdleTimeout) } -// resetIdleTimer resets the idle timer to the given duration. This method -// should only be called from the timer callback. -func (m *manager) resetIdleTimer(d time.Duration) { +func (m *Manager) resetIdleTimer(d time.Duration) { m.idleMu.Lock() defer m.idleMu.Unlock() - - if m.timer == nil { - // Only close sets timer to nil. We are done. - return - } - - // It is safe to ignore the return value from Reset() because this method is - // only ever called from the timer callback, which means the timer has - // already fired. - m.timer.Reset(d) + m.resetIdleTimerLocked(d) } // handleIdleTimeout is the timer callback that is invoked upon expiry of the // configured idle timeout. The channel is considered inactive if there are no // ongoing calls and no RPC activity since the last time the timer fired. -func (m *manager) handleIdleTimeout() { +func (m *Manager) handleIdleTimeout() { if m.isClosed() { return } if atomic.LoadInt32(&m.activeCallsCount) > 0 { - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) return } @@ -148,24 +121,12 @@ func (m *manager) handleIdleTimeout() { // Set the timer to fire after a duration of idle timeout, calculated // from the time the most recent RPC completed. atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) - m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) return } - // This CAS operation is extremely likely to succeed given that there has - // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the - // channel is either in idle mode or is trying to get there. - if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { - // This CAS operation can fail if an RPC started after we checked for - // activity at the top of this method, or one was ongoing from before - // the last time we were here. In both case, reset the timer and return. - m.resetIdleTimer(time.Duration(m.timeout)) - return - } - - // Now that we've set the active calls count to -math.MaxInt32, it's time to - // actually move to idle mode. + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. if m.tryEnterIdleMode() { // Successfully entered idle mode. 
No timer needed until we exit idle. return @@ -174,8 +135,7 @@ func (m *manager) handleIdleTimeout() { // Failed to enter idle mode due to a concurrent RPC that kept the channel // active, or because of an error from the channel. Undo the attempt to // enter idle, and reset the timer to try again later. - atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) - m.resetIdleTimer(time.Duration(m.timeout)) + m.resetIdleTimer(m.timeout) } // tryEnterIdleMode instructs the channel to enter idle mode. But before @@ -185,36 +145,49 @@ func (m *manager) handleIdleTimeout() { // Return value indicates whether or not the channel moved to idle mode. // // Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (m *manager) tryEnterIdleMode() bool { +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + m.idleMu.Lock() defer m.idleMu.Unlock() if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { - // An very short RPC could have come in (and also finished) after we + // A very short RPC could have come in (and also finished) after we // checked for calls count and activity in handleIdleTimeout(), but // before the CAS operation. So, we need to check for activity again. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) return false } - // No new RPCs have come in since we last set the active calls count value - // -math.MaxInt32 in the timer callback. And since we have the lock, it is - // safe to enter idle mode now. - if err := m.enforcer.EnterIdleMode(); err != nil { - m.logger.Errorf("Failed to enter idle mode: %v", err) - return false - } - - // Successfully entered idle mode. + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() m.actuallyIdle = true return true } +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + // OnCallBegin is invoked at the start of every RPC. -func (m *manager) OnCallBegin() error { +func (m *Manager) OnCallBegin() error { if m.isClosed() { return nil } @@ -227,7 +200,7 @@ func (m *manager) OnCallBegin() error { // Channel is either in idle mode or is in the process of moving to idle // mode. Attempt to exit idle mode to allow this RPC. - if err := m.exitIdleMode(); err != nil { + if err := m.ExitIdleMode(); err != nil { // Undo the increment to calls count, and return an error causing the // RPC to fail. atomic.AddInt32(&m.activeCallsCount, -1) @@ -238,28 +211,30 @@ func (m *manager) OnCallBegin() error { return nil } -// exitIdleMode instructs the channel to exit idle mode. -// -// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. 
-func (m *manager) exitIdleMode() error { +// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// internal state. +func (m *Manager) ExitIdleMode() error { + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. m.idleMu.Lock() defer m.idleMu.Unlock() - if !m.actuallyIdle { - // This can happen in two scenarios: + if m.isClosed() || !m.actuallyIdle { + // This can happen in three scenarios: // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called // tryEnterIdleMode(). But before the latter could grab the lock, an RPC // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same // time, all of them notice a negative calls count in OnCallBegin and get // here. The first one to get the lock would got the channel to exit idle. + // - Channel is not in idle mode, and the user calls Connect which calls + // m.ExitIdleMode. // - // Either way, nothing to do here. + // In any case, there is nothing to do here. return nil } if err := m.enforcer.ExitIdleMode(); err != nil { - return fmt.Errorf("channel failed to exit idle mode: %v", err) + return fmt.Errorf("failed to exit idle mode: %w", err) } // Undo the idle entry process. This also respects any new RPC attempts. @@ -267,12 +242,12 @@ func (m *manager) exitIdleMode() error { m.actuallyIdle = false // Start a new timer to fire after the configured idle timeout. - m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout) + m.resetIdleTimerLocked(m.timeout) return nil } // OnCallEnd is invoked at the end of every RPC. -func (m *manager) OnCallEnd() { +func (m *Manager) OnCallEnd() { if m.isClosed() { return } @@ -287,15 +262,17 @@ func (m *manager) OnCallEnd() { atomic.AddInt32(&m.activeCallsCount, -1) } -func (m *manager) isClosed() bool { +func (m *Manager) isClosed() bool { return atomic.LoadInt32(&m.closed) == 1 } -func (m *manager) Close() { +func (m *Manager) Close() { atomic.StoreInt32(&m.closed, 1) m.idleMu.Lock() - m.timer.Stop() - m.timer = nil + if m.timer != nil { + m.timer.Stop() + m.timer = nil + } m.idleMu.Unlock() } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 0d94c63e0..2549fe8e3 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -73,6 +73,11 @@ var ( // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. DrainServerTransports any // func(*grpc.Server, string) + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. @@ -177,10 +182,12 @@ var ( GRPCResolverSchemeExtraMetadata string = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. - EnterIdleModeForTesting any // func(*grpc.ClientConn) error + EnterIdleModeForTesting any // func(*grpc.ClientConn) // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. 
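The refactored idle.Manager above keeps the single-counter trick but now begins life idle (activeCallsCount preset to -math.MaxInt32) and performs the 0 → -math.MaxInt32 CAS inside tryEnterIdleMode itself. Stripped of the timer, the mutex and the Enforcer, the counter behaves roughly like this (illustrative sketch, not the vendored code):

```go
package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

// idleTracker shows how one int32 doubles as an active-RPC counter and an
// "idle (or entering idle)" marker by being pushed down to -math.MaxInt32.
type idleTracker struct {
	activeCalls int32
}

// onCallBegin returns true if the channel was already active; false means the
// counter was negative, i.e. the call had to wake the channel up first (the
// real Manager calls ExitIdleMode on the Enforcer at that point).
func (t *idleTracker) onCallBegin() bool {
	if atomic.AddInt32(&t.activeCalls, 1) > 0 {
		return true
	}
	// Undo the idle marker; the +1 above keeps counting this call.
	atomic.AddInt32(&t.activeCalls, math.MaxInt32)
	return false
}

func (t *idleTracker) onCallEnd() {
	atomic.AddInt32(&t.activeCalls, -1)
}

// tryEnterIdle succeeds only when there are exactly zero active calls.
func (t *idleTracker) tryEnterIdle() bool {
	return atomic.CompareAndSwapInt32(&t.activeCalls, 0, -math.MaxInt32)
}

func main() {
	t := &idleTracker{}
	fmt.Println("enter idle with no calls:", t.tryEnterIdle())                 // true
	fmt.Println("first call found channel already active:", t.onCallBegin())  // false
	fmt.Println("second call found channel already active:", t.onCallBegin()) // true
	t.onCallEnd()
	fmt.Println("enter idle with a call in flight:", t.tryEnterIdle()) // false
}
```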
ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 99e1e5b36..b66dcb213 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -23,7 +23,6 @@ package dns import ( "context" "encoding/json" - "errors" "fmt" "net" "os" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -47,15 +47,11 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") -// Globals to stub out in tests. TODO: Perhaps these two can be combined into a -// single variable for testing the resolver? -var ( - newTimer = time.NewTimer - newTimerDNSResRate = time.NewTimer -) - func init() { resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer } const ( @@ -70,23 +66,6 @@ const ( txtAttribute = "grpc_config=" ) -var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. "::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -var ( - defaultResolver netResolver = net.DefaultResolver - // To prevent excessive re-resolution, we enforce a rate limit on DNS - // resolution requests. 
- minDNSResRate = 30 * time.Second -) - var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer @@ -94,7 +73,11 @@ var addressDialer = func(address string) func(context.Context, string, string) ( } } -var newNetResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -104,7 +87,7 @@ var newNetResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: addressDialer(authorityWithPort), + Dial: internal.AddressDialer(authorityWithPort), }, nil } @@ -142,13 +125,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.URL.Host == "" { - d.resolver = defaultResolver - } else { - d.resolver, err = newNetResolver(target.URL.Host) - if err != nil { - return nil, err - } + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err } d.wg.Add(1) @@ -161,12 +140,6 @@ func (b *dnsBuilder) Scheme() string { return "dns" } -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - // deadResolver is a resolver that does nothing. type deadResolver struct{} @@ -178,7 +151,7 @@ func (deadResolver) Close() {} type dnsResolver struct { host string port string - resolver netResolver + resolver internal.NetResolver ctx context.Context cancel context.CancelFunc cc resolver.ClientConn @@ -223,29 +196,27 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var timer *time.Timer + var waitTime time.Duration if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - timer = newTimerDNSResRate(minDNSResRate) + waitTime = internal.MinResolutionRate select { case <-d.ctx.Done(): - timer.Stop() return case <-d.rn: } } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. - timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + waitTime = backoff.DefaultExponential.Backoff(backoffIndex) backoffIndex++ } select { case <-d.ctx.Done(): - timer.Stop() return - case <-timer.C: + case <-internal.TimeAfterFunc(waitTime): } } } @@ -387,7 +358,7 @@ func formatIP(addr string) (addrIP string, ok bool) { // target: ":80" defaultPort: "443" returns host: "localhost", port: "80" func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { - return "", "", errMissingAddr + return "", "", internal.ErrMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address @@ -397,7 +368,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if port == "" { // If the port field is empty (target ends with colon), e.g. "[::1]:", // this is an error. 
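With the newTimer/newTimerDNSResRate globals gone, the watcher above just computes a waitTime and blocks on internal.TimeAfterFunc, which (together with MinResolutionRate, NewNetResolver and AddressDialer) now lives in the new dns/internal sub-package so tests can swap it out. That package is module-internal and not importable from outside gRPC, so here is the pattern reduced to a self-contained sketch (names are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// Production code waits through a package variable that defaults to the real
// clock; tests replace it to avoid sleeping for the full 30 seconds.
var timeAfterFunc = time.After

const minResolutionRate = 30 * time.Second

// waitBeforeReResolve blocks until either the resolver is shutting down or the
// rate-limit interval has elapsed, mirroring the watcher's select above.
func waitBeforeReResolve(done <-chan struct{}) {
	select {
	case <-done:
	case <-timeAfterFunc(minResolutionRate):
	}
}

func main() {
	// What a test would do: capture the requested duration and fire instantly.
	var requested time.Duration
	timeAfterFunc = func(d time.Duration) <-chan time.Time {
		requested = d
		ch := make(chan time.Time, 1)
		ch <- time.Now()
		return ch
	}
	waitBeforeReResolve(make(chan struct{}))
	fmt.Println("resolver was asked to wait for", requested)
}
```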
- return "", "", errEndsWithColon + return "", "", internal.ErrEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 000000000..c7fc557d0 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the dns resolver package. +package internal + +import ( + "context" + "errors" + "net" + "time" +) + +// NetResolver groups the methods on net.Resolver that are used by the DNS +// resolver implementation. This allows the default net.Resolver instance to be +// overidden from tests. +type NetResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +var ( + // ErrMissingAddr is the error returned when building a DNS resolver when + // the provided target name is empty. + ErrMissingAddr = errors.New("dns resolver: missing address") + + // ErrEndsWithColon is the error returned when building a DNS resolver when + // the provided target name ends with a colon that is supposed to be the + // separator between host and port. E.g. "::" is a valid address as it is + // an IPv6 address (host only) and "[::]:" is invalid as it ends with a + // colon as the host and port separator + ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +// The following vars are overridden from tests. +var ( + // MinResolutionRate is the minimum rate at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionRate = 30 * time.Second + + // TimeAfterFunc is used by the DNS resolver to wait for the given duration + // to elapse. In non-test code, this is implemented by time.After. In test + // code, this can be used to control the amount of time the resolver is + // blocked waiting for the duration to elapse. + TimeAfterFunc func(time.Duration) <-chan time.Time + + // NewNetResolver returns the net.Resolver instance for the given target. + NewNetResolver func(string) (NetResolver, error) + + // AddressDialer is the dialer used to dial the DNS server. It accepts the + // Host portion of the URL corresponding to the user's dial target and + // returns a dial function. 
+ AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) +) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go new file mode 100644 index 000000000..aeffd3e1c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go @@ -0,0 +1,29 @@ +//go:build !unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{} +} diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go new file mode 100644 index 000000000..078137b7f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -0,0 +1,54 @@ +//go:build unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keealive interval and time parameters. 
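A quick way to convince yourself what the unix dialer above actually does: dial a throwaway local listener with the same KeepAlive: -1 plus SO_KEEPALIVE Control combination, then read the option back off the connected socket. A unix-only companion sketch (not part of gRPC) using the golang.org/x/sys/unix dependency the vendored code already has:

```go
//go:build unix

package main

import (
	"context"
	"fmt"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	d := &net.Dialer{
		KeepAlive: -1, // stop the stdlib from enabling/tuning keepalives itself
		Control: func(_, _ string, c syscall.RawConn) error {
			return c.Control(func(fd uintptr) {
				unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
			})
		},
	}
	conn, err := d.DialContext(context.Background(), "tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Read SO_KEEPALIVE back to confirm the Control hook took effect.
	raw, err := conn.(*net.TCPConn).SyscallConn()
	if err != nil {
		panic(err)
	}
	var enabled int
	if err := raw.Control(func(fd uintptr) {
		enabled, _ = unix.GetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE)
	}); err != nil {
		panic(err)
	}
	fmt.Println("SO_KEEPALIVE enabled:", enabled == 1)
}
```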
+ Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 17f7a21b5..a9d70e2a1 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -75,11 +75,25 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s return nil, errors.New(msg) } + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), + peer: p, contentType: contentType, contentSubtype: contentSubtype, stats: stats, @@ -134,6 +148,8 @@ type serverHandlerTransport struct { headerMD metadata.MD + peer peer.Peer + closeOnce sync.Once closedCh chan struct{} // closed on Close @@ -165,7 +181,13 @@ func (ht *serverHandlerTransport) Close(err error) { }) } -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown. @@ -347,10 +369,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. 
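NewServerHandlerTransport now assembles the peer once up front: the local address comes from http.LocalAddrContextKey in the request context and the auth info from r.TLS. Both inputs are plain net/http features; a tiny standalone example of reading them from any handler:

```go
package main

import (
	"fmt"
	"io"
	"net"
	"net/http"
	"net/http/httptest"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The http.Server stores the listener-side address under this key;
		// r.TLS is nil here because httptest serves plain HTTP.
		localAddr, _ := r.Context().Value(http.LocalAddrContextKey).(net.Addr)
		fmt.Fprintf(w, "remote=%s local=%v tls=%v", r.RemoteAddr, localAddr, r.TLS != nil)
	})

	srv := httptest.NewServer(h)
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```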
- - ctx := ht.req.Context() var cancel context.CancelFunc if ht.timeoutSet { ctx, cancel = context.WithTimeout(ctx, ht.timeout) @@ -370,34 +390,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { ht.Close(errors.New("request is done processing")) }() + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - requestRead: func(int) {}, - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, - } - pr := &peer.Peer{ - Addr: ht.RemoteAddr(), - } - if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} - } - ctx = metadata.NewIncomingContext(ctx, ht.headerMD) - s.ctx = peer.NewContext(ctx, pr) - for _, sh := range ht.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: ht.RemoteAddr(), - Compression: s.recvCompress, - } - sh.HandleRPC(s.ctx, inHeader) + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + headerWireLength: 0, // won't have access to header wire length until golang/go#18997. } s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index d6f5c4935..59f67655a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -36,6 +36,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/internal/grpclog" @@ -43,7 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" istatus "google.golang.org/grpc/internal/status" - "google.golang.org/grpc/internal/syscall" + isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -176,7 +177,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if networkType == "tcp" && useProxy { return proxyDial(ctx, address, grpcUA) } - return (&net.Dialer{}).DialContext(ctx, networkType, address) + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -262,7 +263,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } keepaliveEnabled := false if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } keepaliveEnabled = true @@ -493,8 +494,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer 
{ return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 6fa1eb419..680c9eba0 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -68,18 +68,15 @@ var serverConnectionCounter uint64 // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - remoteAddr net.Addr - localAddr net.Addr - authInfo credentials.AuthInfo // auth info about the connection - inTapHandle tap.ServerInHandle - framer *framer + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer // The max number of concurrent streams. maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window @@ -243,16 +240,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, } done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } t := &http2Server{ - ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, + peer: peer, framer: framer, readerDone: make(chan struct{}), - writerDone: make(chan struct{}), + loopyWriterDone: make(chan struct{}), maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, @@ -267,8 +266,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, bufferPool: newBufferPool(), } t.logger = prefixLoggerForServerTransport(t) - // Add peer information to the http2server context. 
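Both the client and server HTTP/2 transports now carry the connection's local address on the peer they expose (peer.Peer itself gains a LocalAddr field further down in this diff). Application code reads it through the public peer package; a sketch of a server interceptor that logs it, assuming the grpc-go version vendored here (v1.60.0) or newer:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/peer"
)

// peerLogger is a unary server interceptor that records transport addresses
// for every RPC. LocalAddr is the field introduced in the peer package below.
func peerLogger(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	if p, ok := peer.FromContext(ctx); ok {
		log.Printf("%s: remote=%v local=%v auth=%T", info.FullMethod, p.Addr, p.LocalAddr, p.AuthInfo)
	}
	return handler(ctx, req)
}

func main() {
	// Installed like any other interceptor; service registration and Serve()
	// are omitted from this sketch.
	s := grpc.NewServer(grpc.ChainUnaryInterceptor(peerLogger))
	_ = s
}
```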
- t.ctx = peer.NewContext(t.ctx, t.getPeer()) t.controlBuf = newControlBuffer(t.done) if dynamicWindow { @@ -277,15 +274,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.stats { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{} - sh.HandleConn(t.ctx, connBegin) - } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) if err != nil { return nil, err } @@ -334,7 +323,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler t.loopy.run() - close(t.writerDone) + close(t.loopyWriterDone) }() go t.keepalive() return t, nil @@ -342,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -369,10 +358,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), } var ( // if false, content-type was missing or invalid @@ -511,9 +501,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.state = streamReadDone } if timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) } else { - s.ctx, s.cancel = context.WithCancel(t.ctx) + s.ctx, s.cancel = context.WithCancel(ctx) } // Attach the received metadata to the context. @@ -592,18 +582,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - for _, sh := range t.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: s.recvCompress, - WireLength: int(frame.Header().Length), - Header: mdata.Copy(), - } - sh.HandleRPC(s.ctx, inHeader) - } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ @@ -629,8 +607,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. 
-func (t *http2Server) HandleStreams(handle func(*Stream)) { - defer close(t.readerDone) +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + <-t.loopyWriterDone + close(t.readerDone) + }() for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() @@ -664,7 +645,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) { } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle); err != nil { + if err := t.operateHeaders(ctx, frame, handle); err != nil { t.Close(err) break } @@ -1242,10 +1223,6 @@ func (t *http2Server) Close(err error) { for _, s := range streams { s.cancel() } - for _, sh := range t.stats { - connEnd := &stats.ConnEnd{} - sh.HandleConn(t.ctx, connEnd) - } } // deleteStream deletes the stream s from transport's active streams. @@ -1311,10 +1288,6 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo }) } -func (t *http2Server) RemoteAddr() net.Addr { - return t.remoteAddr -} - func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() @@ -1397,11 +1370,11 @@ func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, // RemoteName : } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { s.Security = au.GetSecurityValue() } s.RemoteFlowControlWindow = t.getOutFlowWindow() @@ -1433,10 +1406,12 @@ func (t *http2Server) getOutFlowWindow() int64 { } } -func (t *http2Server) getPeer() *peer.Peer { +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil } } @@ -1461,6 +1436,6 @@ func GetConnection(ctx context.Context) net.Conn { // SetConnection adds the connection to the context to be able to get // information about the destination ip and port for an incoming RPC. This also // allows any unary or streaming interceptors to see the connection. -func setConnection(ctx context.Context, conn net.Conn) context.Context { +func SetConnection(ctx context.Context, conn net.Conn) context.Context { return context.WithValue(ctx, connectionKey{}, conn) } diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 415961987..24fa10325 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -28,6 +28,8 @@ import ( "net/http" "net/http/httputil" "net/url" + + "google.golang.org/grpc/internal" ) const proxyAuthHeaderKey = "Proxy-Authorization" @@ -112,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy // is necessary, dials, does the HTTP CONNECT handshake, and returns the // connection. 
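The stats callouts that used to live in the HTTP/2 server transport (TagConn/HandleConn on the connection, TagRPC and the InHeader event per stream) are gone from this file; per the Stream.SetContext comment later in the diff, they are being issued from the gRPC server layer instead, with the header size exposed via the new HeaderWireLength accessor. The public stats.Handler contract is unchanged; for reference, a minimal handler that watches incoming headers:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// headerWatcher implements stats.Handler and logs header sizes for incoming
// RPCs; the WireLength it observes is the value the transport now records on
// the stream.
type headerWatcher struct{}

func (headerWatcher) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	return ctx
}

func (headerWatcher) HandleRPC(ctx context.Context, s stats.RPCStats) {
	if in, ok := s.(*stats.InHeader); ok {
		log.Printf("%s: header wire length=%d from %v", in.FullMethod, in.WireLength, in.RemoteAddr)
	}
}

func (headerWatcher) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
	return ctx
}

func (headerWatcher) HandleConn(ctx context.Context, s stats.ConnStats) {}

func main() {
	// Service registration and Serve() are omitted; this only shows wiring.
	_ = grpc.NewServer(grpc.StatsHandler(headerWatcher{}))
}
```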
-func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { newAddr := addr proxyURL, err := mapAddress(addr) if err != nil { @@ -122,15 +124,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, newAddr = proxyURL.Host } - conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) if err != nil { - return + return nil, err } - if proxyURL != nil { + if proxyURL == nil { // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return conn, err } - return + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index aac056e72..b7b8fec18 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -37,6 +37,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" @@ -265,7 +266,8 @@ type Stream struct { // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). Not valid on server side. - headerValid bool + headerValid bool + headerWireLength int // Only set on server side. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -425,6 +427,12 @@ func (s *Stream) Context() context.Context { return s.ctx } +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method @@ -437,6 +445,12 @@ func (s *Stream) Status() *status.Status { return s.status } +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + // SetHeader sets the header metadata. This can be called multiple times. // Server side only. // This should not be called in parallel to other data writes. @@ -698,7 +712,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream)) + HandleStreams(context.Context, func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -717,8 +731,8 @@ type ServerTransport interface { // handlers will be terminated asynchronously. Close(err error) - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr + // Peer returns the peer of the server transport. + Peer() *peer.Peer // Drain notifies the client this ServerTransport stops accepting new RPCs. 
Drain(debugData string) diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index a2cdcaf12..494468257 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -153,14 +153,16 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewIncomingContext creates a new context with incoming md attached. +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } @@ -203,7 +205,8 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { } // ValueFromIncomingContext returns the metadata value corresponding to the metadata -// key from the incoming metadata if it exists. Key must be lower-case. +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. // // # Experimental // @@ -219,17 +222,16 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // We need to manually convert all keys to lower case, because MD is a - // map, and there's no guarantee that the MD attached to the context is - // created using our helper functions. - if strings.ToLower(k) == key { + // Case insenitive comparison: MD is a map, and there's no guarantee + // that the MD attached to the context is created using our helper + // functions. + if strings.EqualFold(k, key) { return copyOf(v) } } return nil } -// the returned slice must not be modified in place func copyOf(v []string) []string { vals := make([]string, len(v)) copy(vals, v) diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go index e01d219ff..a821ff9b2 100644 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -32,6 +32,8 @@ import ( type Peer struct { // Addr is the peer address. Addr net.Addr + // LocalAddr is the local address. + LocalAddr net.Addr // AuthInfo is the authentication information of the transport. // It is nil if there is no transport security being used. AuthInfo credentials.AuthInfo diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 236837f41..bf56faa76 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -37,7 +37,6 @@ import ( type pickerWrapper struct { mu sync.Mutex done bool - idle bool blockingCh chan struct{} picker balancer.Picker statsHandlers []stats.Handler // to record blocking picker calls @@ -53,11 +52,7 @@ func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. 
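Two small but user-visible metadata changes above: NewIncomingContext/NewOutgoingContext now document that the MD must not be modified after the call, and ValueFromIncomingContext matches keys case-insensitively via strings.EqualFold instead of lower-casing every key on the slow path. A quick sketch of the (still experimental) lookup helper:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Metadata attached with a mixed-case key, as a hand-built MD map might be.
	md := metadata.MD{"X-Request-Id": []string{"abc-123"}}
	ctx := metadata.NewIncomingContext(context.Background(), md)

	// Lookup is case-insensitive, so the canonical lower-case key still hits.
	fmt.Println(metadata.ValueFromIncomingContext(ctx, "x-request-id"))

	// The conventional helpers lower-case keys up front; both styles coexist.
	ctx2 := metadata.NewIncomingContext(context.Background(), metadata.Pairs("x-request-id", "def-456"))
	fmt.Println(metadata.ValueFromIncomingContext(ctx2, "x-request-id"))
}
```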
func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done || pw.idle { - // There is a small window where a picker update from the LB policy can - // race with the channel going to idle mode. If the picker is idle here, - // it is because the channel asked it to do so, and therefore it is sage - // to ignore the update from the LB policy. + if pw.done { pw.mu.Unlock() return } @@ -210,23 +205,15 @@ func (pw *pickerWrapper) close() { close(pw.blockingCh) } -func (pw *pickerWrapper) enterIdleMode() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.idle = true -} - -func (pw *pickerWrapper) exitIdleMode() { +// reset clears the pickerWrapper and prepares it for being used again when idle +// mode is exited. +func (pw *pickerWrapper) reset() { pw.mu.Lock() defer pw.mu.Unlock() if pw.done { return } pw.blockingCh = make(chan struct{}) - pw.idle = false } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 2e9cf66b4..5128f9364 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -25,7 +25,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/envconfig" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/pretty" @@ -65,19 +64,6 @@ type pfConfig struct { } func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - if !envconfig.PickFirstLBConfig { - // Prior to supporting loadbalancing configuration, the pick_first LB - // policy did not implement the balancer.ConfigParser interface. This - // meant that if a non-empty configuration was passed to it, the service - // config unmarshaling code would throw a warning log, but would - // continue using the pick_first LB policy. The code below ensures the - // same behavior is retained if the env var is not set. - if string(js) != "{}" { - logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) - } - return nil, nil - } - var cfg pfConfig if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 000000000..14aa6f20a --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. 
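With GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG removed, pick_first always parses a load-balancing config object. A hedged example of selecting it through a default service config; shuffleAddressList is the knob defined for pick_first in gRFC A62, and the field name is assumed from upstream's pfConfig struct, which this hunk only partially shows:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// pick_first now always accepts a config object; shuffleAddressList asks it
	// to randomize the address list before picking a connection.
	const sc = `{"loadBalancingConfig": [{"pick_first": {"shuffleAddressList": true}}]}`

	conn, err := grpc.Dial("dns:///example.com:443", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(sc),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```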
+package dns + +import ( + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. +func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index 0a4262342..f2efa2a2c 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -78,12 +78,12 @@ func (r *Resolver) InitialState(s resolver.State) { func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { r.BuildCallback(target, cc, opts) r.mu.Lock() + defer r.mu.Unlock() r.CC = cc if r.lastSeenState != nil { err := r.CC.UpdateState(*r.lastSeenState) go r.UpdateStateCallback(err) } - r.mu.Unlock() return r, nil } @@ -105,15 +105,22 @@ func (r *Resolver) Close() { // UpdateState calls CC.UpdateState. func (r *Resolver) UpdateState(s resolver.State) { r.mu.Lock() - err := r.CC.UpdateState(s) + defer r.mu.Unlock() + var err error + if r.CC == nil { + panic("cannot update state as grpc.Dial with resolver has not been called") + } + err = r.CC.UpdateState(s) r.lastSeenState = &s - r.mu.Unlock() r.UpdateStateCallback(err) } // ReportError calls CC.ReportError. func (r *Resolver) ReportError(err error) { r.mu.Lock() + defer r.mu.Unlock() + if r.CC == nil { + panic("cannot report error as grpc.Dial with resolver has not been called") + } r.CC.ReportError(err) - r.mu.Unlock() } diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index 804be887d..ada5b9bb7 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -136,3 +136,116 @@ func (a *AddressMap) Values() []any { } return ret } + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered set of addrs are the same between the +// endpoint nodes. +func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe, thus it is unsafe to access concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the address in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. 
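The manual resolver now holds its lock with defer and fails fast, panicking with an explicit message, if UpdateState or ReportError is called before grpc.Dial has built the resolver (previously this was a bare nil dereference). The safe ordering from user code, sketched against the public API:

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	r := manual.NewBuilderWithScheme("example")
	// Safe before Dial: InitialState only records state for Build to replay.
	r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}}})

	// Dial builds the resolver for this target, so r.CC is set afterwards.
	conn, err := grpc.Dial(r.Scheme()+":///test",
		grpc.WithResolvers(r),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Safe after Dial: push a new address list through the built resolver.
	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:50052"}}})
	fmt.Println("resolver updated after the ClientConn was built")
}
```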
+func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. +func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. +func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. +func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 11384e228..bd1c7d01b 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -240,11 +240,6 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. - NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult @@ -286,6 +281,11 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } +// String returns a string representation of Target. +func (t Target) String() string { + return t.URL.String() +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index d68330560..000000000 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,247 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
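The new EndpointMap keys entries on the unordered set of address strings inside an endpoint, so two Endpoint values that list the same addresses in a different order resolve to the same entry. Since the type is exported from the resolver package, a short usage sketch:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	em := resolver.NewEndpointMap()

	a := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}}
	em.Set(a, "primary")

	// Same address set, different order: it is treated as the same key.
	b := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "10.0.0.2:443"}, {Addr: "10.0.0.1:443"}}}
	if v, ok := em.Get(b); ok {
		fmt.Println("found:", v)
	}
	fmt.Println("entries:", em.Len())
}
```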
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// resolverStateUpdater wraps the single method used by ccResolverWrapper to -// report a state update from the actual resolver implementation. -type resolverStateUpdater interface { - updateResolverState(s resolver.State, err error) error -} - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConn interface. -type ccResolverWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc resolverStateUpdater - channelzID *channelz.Identifier - ignoreServiceConfig bool - opts ccResolverWrapperOpts - serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. - serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). - - // All incoming (resolver --> gRPC) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled on the serializer. - // Fields accessed *only* in these serializer callbacks, can therefore be - // accessed without a mutex. - curState resolver.State - - // mu guards access to the below fields. - mu sync.Mutex - closed bool - resolver resolver.Resolver // Accessed only from outgoing calls. -} - -// ccResolverWrapperOpts wraps the arguments to be passed when creating a new -// ccResolverWrapper. -type ccResolverWrapperOpts struct { - target resolver.Target // User specified dial target to resolve. - builder resolver.Builder // Resolver builder to use. - bOpts resolver.BuildOptions // Resolver build options to use. - channelzID *channelz.Identifier // Channelz identifier for the channel. -} - -// newCCResolverWrapper uses the resolver.Builder to build a Resolver and -// returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { - ctx, cancel := context.WithCancel(context.Background()) - ccr := &ccResolverWrapper{ - cc: cc, - channelzID: opts.channelzID, - ignoreServiceConfig: opts.bOpts.DisableServiceConfig, - opts: opts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - - // Cannot hold the lock at build time because the resolver can send an - // update or error inline and these incoming calls grab the lock to schedule - // a callback in the serializer. - r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) - if err != nil { - cancel() - return nil, err - } - - // Any error reported by the resolver at build time that leads to a - // re-resolution request from the balancer is dropped by grpc until we - // return from this function. So, we don't have to handle pending resolveNow - // requests here. 
- ccr.mu.Lock() - ccr.resolver = r - ccr.mu.Unlock() - - return ccr, nil -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.mu.Lock() - defer ccr.mu.Unlock() - - // ccr.resolver field is set only after the call to Build() returns. But in - // the process of building, the resolver may send an error update which when - // propagated to the balancer may result in a re-resolution request. - if ccr.closed || ccr.resolver == nil { - return - } - ccr.resolver.ResolveNow(o) -} - -func (ccr *ccResolverWrapper) close() { - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - return - } - - channelz.Info(logger, ccr.channelzID, "Closing the name resolver") - - // Close the serializer to ensure that no more calls from the resolver are - // handled, before actually closing the resolver. - ccr.serializerCancel() - ccr.closed = true - r := ccr.resolver - ccr.mu.Unlock() - - // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done() - - // Spawn a goroutine to close the resolver (since it may block trying to - // cleanup all allocated resources) and return early. - go r.Close() -} - -// serializerScheduleLocked is a convenience method to schedule a function to be -// run on the serializer while holding ccr.mu. -func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { - ccr.mu.Lock() - ccr.serializer.Schedule(f) - ccr.mu.Unlock() -} - -// UpdateState is called by resolver implementations to report new state to gRPC -// which includes addresses and service config. -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - errCh := make(chan error, 1) - if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } - } - ok := ccr.serializer.Schedule(func(context.Context) { - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - errCh <- balancer.ErrBadResolverState - return - } - errCh <- nil - }) - if !ok { - // The only time when Schedule() fail to add the callback to the - // serializer is when the serializer is closed, and this happens only - // when the resolver wrapper is closed. - return nil - } - return <-errCh -} - -// ReportError is called by resolver implementations to report errors -// encountered during name resolution to gRPC. -func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) - }) -} - -// NewAddress is called by the resolver implementation to send addresses to -// gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.serializerScheduleLocked(func(_ context.Context) { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// NewServiceConfig is called by the resolver implementation to send service -// configs to gRPC. 
-func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.ignoreServiceConfig { - channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// ParseServiceConfig is called by resolver implementations to parse a JSON -// representation of the service config. -func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) -} - -// addChannelzTraceEvent adds a channelz trace event containing the new -// state received from resolver implementations. -func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - var oldSC, newSC *ServiceConfig - var oldOK, newOK bool - if ccr.curState.ServiceConfig != nil { - oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) - } - if s.ServiceConfig != nil { - newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) - } - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) -} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 000000000..c79bab121 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. 
+ cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. +func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine the wrapper has +// finished shutting down, the channel should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.Schedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. 
+func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 8f60d4214..2fa694d55 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -70,6 +70,10 @@ func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) + } + internal.ServerFromContext = serverFromContext internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } @@ -81,6 +85,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption + internal.RecvBufferPool = recvBufferPool } var statusOK = status.New(codes.OK, "") @@ -578,11 +583,13 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { // options are used: StatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. 
func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return recvBufferPool(bufferPool) +} + +func recvBufferPool(bufferPool SharedBufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.recvBufferPool = bufferPool }) @@ -806,6 +813,18 @@ func (l *listenSocket) Close() error { // Serve returns when lis.Accept fails with fatal errors. lis will be closed when // this method returns. // Serve will return a non-nil error unless Stop or GracefulStop is called. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") @@ -917,7 +936,7 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } go func() { - s.serveStreams(st) + s.serveStreams(context.Background(), st, rawConn) s.removeConn(lisAddr, st) }() } @@ -971,18 +990,29 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { return st } -func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close(errors.New("finished serving streams for the server transport")) - var wg sync.WaitGroup +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } - streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, func(stream *transport.Stream) { streamQuota.acquire() f := func() { defer streamQuota.release() - defer wg.Done() s.handleStream(st, stream) } @@ -996,7 +1026,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } go f() }) - wg.Wait() } var _ http.Handler = (*Server)(nil) @@ -1040,7 +1069,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } defer s.removeConn(listenerAddressForServeHTTP, st) - s.serveStreams(st) + s.serveStreams(r.Context(), st, nil) } func (s *Server) addConn(addr string, st transport.ServerTransport) bool { @@ -1689,6 +1718,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { ctx := stream.Context() + ctx = contextWithServer(ctx, s) var ti *traceInfo if EnableTracing { tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), 
stream.Method()) @@ -1697,7 +1727,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str tr: tr, firstLine: firstLine{ client: false, - remoteAddr: t.RemoteAddr(), + remoteAddr: t.Peer().Addr, }, } if dl, ok := ctx.Deadline(); ok { @@ -1731,6 +1761,22 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str service := sm[:pos] method := sm[pos+1:] + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. + stream.SetContext(ctx) + srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { @@ -1820,62 +1866,64 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { - s.quit.Fire() + s.stop(false) +} - defer func() { - s.serveWG.Wait() - s.done.Fire() - }() +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.stop(true) +} + +func (s *Server) stop(graceful bool) { + s.quit.Fire() + defer s.done.Fire() s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() - listeners := s.lis - s.lis = nil - conns := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Broadcast() + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. s.mu.Unlock() + s.serveWG.Wait() - for lis := range listeners { - lis.Close() - } - for _, cs := range conns { - for st := range cs { - st.Close(errors.New("Server.Stop called")) - } + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() } + if s.opts.numServerWorkers > 0 { s.stopServerWorkers() } - s.mu.Lock() + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { s.events.Finish() s.events = nil } - s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. -func (s *Server) GracefulStop() { - s.quit.Fire() - defer s.done.Fire() - - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - return +// s.mu must be held by the caller. +func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } } +} - for lis := range s.lis { - lis.Close() - } - s.lis = nil +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { if !s.drain { for _, conns := range s.conns { for st := range conns { @@ -1884,22 +1932,14 @@ func (s *Server) GracefulStop() { } s.drain = true } +} - // Wait for serving threads to be ready to exit. 
Only then can we be sure no - // new conns will be created. - s.mu.Unlock() - s.serveWG.Wait() - s.mu.Lock() - - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil +// s.mu must be held by the caller. +func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() } - s.mu.Unlock() + s.lis = nil } // contentSubtype must be lowercase @@ -1913,11 +1953,50 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { } codec := encoding.GetCodec(contentSubtype) if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) return encoding.GetCodec(proto.Name) } return codec } +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. +func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. + return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + // SetHeader sets the header metadata to be sent from the server to the client. // The context provided must be the context passed to the server's handler. // diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 6d2cadd79..a04793aeb 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.59.0" +const Version = "1.60.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bb480f1f9..896dc38f5 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -35,7 +35,6 @@ if [[ "$1" = "-install" ]]; then # Install the pinned versions as defined in module tools. pushd ./test/tools go install \ - golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell @@ -77,12 +76,16 @@ fi not grep 'func Test[^(]' *_test.go not grep 'func Test[^(]' test/*.go +# - Check for typos in test function names +git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' +git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' + # - Do not import x/net/context. not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. 
-git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' # - Do not use "interface{}"; use "any" instead. git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' @@ -94,15 +97,14 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" # - Ensure all usages of grpc_testing package are renamed when importing. -not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' misspell -error . -# - gofmt, goimports, golint (with exceptions for generated code), go vet, -# go mod tidy. +# - gofmt, goimports, go vet, go mod tidy. # Perform these checks on each module inside gRPC. for MOD_FILE in $(find . -name 'go.mod'); do MOD_DIR=$(dirname ${MOD_FILE}) @@ -110,7 +112,6 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ @@ -119,94 +120,73 @@ for MOD_FILE in $(find . -name 'go.mod'); do done # - Collection of static analysis checks -# -# TODO(dfawley): don't use deprecated functions in examples or first-party -# plugins. -# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true -# Error if anything other than deprecation warnings are printed. -not grep -v "is deprecated:.*SA1019" "${SC_OUT}" -# Only ignore the following deprecated types/fields/functions. -not grep -Fv '.CredsBundle -.HeaderMap -.Metadata is deprecated: use Attributes -.NewAddress -.NewServiceConfig -.Type is deprecated: use Attributes -BuildVersion is deprecated -balancer.ErrTransientFailure -balancer.Picker -extDesc.Filename is deprecated -github.com/golang/protobuf/jsonpb is deprecated -grpc.CallCustomCodec -grpc.Code -grpc.Compressor -grpc.CustomCodec -grpc.Decompressor -grpc.MaxMsgSize -grpc.MethodConfig -grpc.NewGZIPCompressor -grpc.NewGZIPDecompressor -grpc.RPCCompressor -grpc.RPCDecompressor -grpc.ServiceConfig -grpc.WithCompressor -grpc.WithDecompressor -grpc.WithDialer -grpc.WithMaxMsgSize -grpc.WithServiceConfig -grpc.WithTimeout -http.CloseNotifier -info.SecurityVersion -proto is deprecated -proto.InternalMessageInfo is deprecated -proto.EnumName is deprecated -proto.ErrInternalBadWireType is deprecated -proto.FileDescriptor is deprecated -proto.Marshaler is deprecated -proto.MessageType is deprecated -proto.RegisterEnum is deprecated -proto.RegisterFile is deprecated -proto.RegisterType is deprecated -proto.RegisterExtension is deprecated -proto.RegisteredExtension is deprecated -proto.RegisteredExtensions is deprecated -proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated +staticcheck -go 1.19 -checks 'all' ./... 
> "${SC_OUT}" || true + +# Error for anything other than checks that need exclusions. +grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" + +# Exclude underscore checks for generated code. +grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)' + +# Error for duplicate imports not including grpc protos. +grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +channelz/grpc_channelz_v1" +go-control-plane/envoy +grpclb/grpc_lb_v1" +health/grpc_health_v1" +interop/grpc_testing" +orca/v3" +proto/grpc_gcp" +proto/grpc_lookup_v1" +reflection/grpc_reflection_v1" +reflection/grpc_reflection_v1alpha" +XXXXX PleaseIgnoreUnused' + +# Error for any package comments not in generated code. +grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" + +# Only ignore the following deprecated types/fields/functions and exclude +# generated code. +grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +XXXXX Protobuf related deprecation errors: +"github.com/golang/protobuf +.pb.go: +: ptypes. +proto.RegisterType +XXXXX gRPC internal usage deprecation errors: +"google.golang.org/grpc +: grpc. +: v1alpha. +: v1alphareflectionpb. +BalancerAttributes is deprecated: +CredsBundle is deprecated: +Metadata is deprecated: use Attributes instead. +NewSubConn is deprecated: +OverrideServerName is deprecated: +RemoveSubConn is deprecated: +SecurityVersion is deprecated: Target is deprecated: Use the Target field in the BuildOptions instead. -xxx_messageInfo_ -' "${SC_OUT}" - -# - special golint on package comments. -lint_package_comment_per_package() { - # Number of files in this go package. - fileCount=$(go list -f '{{len .GoFiles}}' $1) - if [ ${fileCount} -eq 0 ]; then - return 0 - fi - # Number of package errors generated by golint. - lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") - # golint complains about every file that's missing the package comment. If the - # number of files for this package is greater than the number of errors, there's - # at least one file with package comment, good. Otherwise, fail. 
- if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then - echo "Package $1 (with ${fileCount} files) is missing package comment" - return 1 - fi -} -lint_package_comment() { - set +ex - - count=0 - for i in $(go list ./...); do - lint_package_comment_per_package "$i" - ((count += $?)) - done - - set -ex - return $count -} -lint_package_comment +UpdateAddresses is deprecated: +UpdateSubConnState is deprecated: +balancer.ErrTransientFailure is deprecated: +grpc/reflection/v1alpha/reflection.proto +XXXXX xDS deprecated fields we support +.ExactMatch +.PrefixMatch +.SafeRegexMatch +.SuffixMatch +GetContainsMatch +GetExactMatch +GetMatchSubjectAltNames +GetPrefixMatch +GetSafeRegexMatch +GetSuffixMatch +GetTlsCertificateCertificateProviderInstance +GetValidationContextCertificateProviderInstance +XXXXX TODO: Remove the below deprecation usages: +CloseNotifier +Roots.Subjects +XXXXX PleaseIgnoreUnused' echo SUCCESS diff --git a/vendor/modules.txt b/vendor/modules.txt index 2f3809c6f..98da1871f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -51,6 +51,9 @@ github.com/davecgh/go-spew/spew ## explicit github.com/emicklei/go-restful github.com/emicklei/go-restful/log +# github.com/felixge/httpsnoop v1.0.4 +## explicit; go 1.13 +github.com/felixge/httpsnoop # github.com/fsnotify/fsnotify v1.5.4 ## explicit; go 1.16 github.com/fsnotify/fsnotify @@ -327,6 +330,10 @@ go.opencensus.io/trace/tracestate ## explicit; go 1.20 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 +## explicit; go 1.20 +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil # go.opentelemetry.io/otel v1.21.0 ## explicit; go 1.20 go.opentelemetry.io/otel @@ -472,7 +479,7 @@ golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/api v0.151.0 +# google.golang.org/api v0.154.0 ## explicit; go 1.19 google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/compute/v0.alpha @@ -503,24 +510,24 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b +# google.golang.org/genproto v0.0.0-20231120223509-83a465c0220f ## explicit; go 1.19 google.golang.org/genproto/googleapis/cloud/kms/v1 google.golang.org/genproto/googleapis/cloud/location google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/internal google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b +# google.golang.org/genproto/googleapis/api v0.0.0-20231120223509-83a465c0220f ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails 
google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.59.0 +# google.golang.org/grpc v1.60.0 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes @@ -570,6 +577,7 @@ google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig @@ -581,6 +589,7 @@ google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns google.golang.org/grpc/resolver/manual google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats